From ff1dba398dc6be76486fd94dfb887237c82e33f8 Mon Sep 17 00:00:00 2001
From: Victor Gomes
Date: Thu, 2 Feb 2023 09:35:13 +0100
Subject: [PATCH] [codegen] Merge Turbo and Macro assemblers

There is no real difference between MacroAssembler and TurboAssembler
anymore. Initially the idea was to differentiate thread-safe
operations, but that distinction got out of hand. With LocalHeaps,
thread safety can instead be ensured by passing a local_isolate.

In this CL:
- TurboAssemblerBase was renamed to MacroAssemblerBase.
- The file containing it was likewise renamed from turbo-assembler to
  macro-assembler-base.
- TurboAssembler and MacroAssembler were merged into MacroAssembler in
  each of the architectures.
- The turbo-assembler unittests (per arch) were included in the
  macro-assembler unittests.
- tasm was renamed to masm.

Bug: v8:13707
Change-Id: I716bbfc51b33ac890c72e8541e01af0af41b6770
Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/4212396
Commit-Queue: Victor Gomes
Reviewed-by: Andreas Haas
Reviewed-by: Jakob Linke
Cr-Commit-Position: refs/heads/main@{#85610}
---
 BUILD.bazel | 4 +-
 BUILD.gn | 4 +-
 src/baseline/arm/baseline-assembler-arm-inl.h | 4 +-
 .../arm64/baseline-assembler-arm64-inl.h | 4 +-
 .../ia32/baseline-assembler-ia32-inl.h | 4 +-
 .../loong64/baseline-assembler-loong64-inl.h | 4 +-
 .../mips64/baseline-assembler-mips64-inl.h | 4 +-
 src/baseline/ppc/baseline-assembler-ppc-inl.h | 4 +-
 .../riscv/baseline-assembler-riscv-inl.h | 2 +-
 .../s390/baseline-assembler-s390-inl.h | 4 +-
 src/baseline/x64/baseline-assembler-x64-inl.h | 4 +-
 src/builtins/arm/builtins-arm.cc | 26 +-
 src/builtins/arm64/builtins-arm64.cc | 59 +-
 src/builtins/ia32/builtins-ia32.cc | 24 +-
 src/builtins/loong64/builtins-loong64.cc | 26 +-
 src/builtins/mips64/builtins-mips64.cc | 26 +-
 src/builtins/ppc/builtins-ppc.cc | 26 +-
 src/builtins/riscv/builtins-riscv.cc | 2 +-
 src/builtins/s390/builtins-s390.cc | 26 +-
 src/builtins/x64/builtins-x64.cc | 26 +-
 src/codegen/arm/assembler-arm.h | 2 +-
 src/codegen/arm/macro-assembler-arm.cc | 306 +++---
 src/codegen/arm/macro-assembler-arm.h | 83 +-
 src/codegen/arm64/macro-assembler-arm64-inl.h | 308 +++---
 src/codegen/arm64/macro-assembler-arm64.cc | 393 +++----
 src/codegen/arm64/macro-assembler-arm64.h | 192 ++--
 src/codegen/ia32/macro-assembler-ia32.cc | 180 ++--
 src/codegen/ia32/macro-assembler-ia32.h | 24 +-
 .../loong64/macro-assembler-loong64.cc | 440 ++++----
 src/codegen/loong64/macro-assembler-loong64.h | 79 +-
 ...o-assembler.cc => macro-assembler-base.cc} | 22 +-
 ...rbo-assembler.h => macro-assembler-base.h} | 34 +-
 src/codegen/macro-assembler.h | 24 +-
 src/codegen/mips64/assembler-mips64.cc | 2 +-
 src/codegen/mips64/assembler-mips64.h | 2 +-
 src/codegen/mips64/macro-assembler-mips64.cc | 602 +++++------
 src/codegen/mips64/macro-assembler-mips64.h | 145 ++-
 src/codegen/ppc/assembler-ppc.h | 2 +-
 src/codegen/ppc/macro-assembler-ppc.cc | 692 ++++++-------
 src/codegen/ppc/macro-assembler-ppc.h | 25 +-
 src/codegen/riscv/macro-assembler-riscv.cc | 742 ++++++-------
 src/codegen/riscv/macro-assembler-riscv.h | 129 ++-
 src/codegen/s390/assembler-s390.h | 2 +-
 src/codegen/s390/macro-assembler-s390.cc | 976 +++++++++---------
 src/codegen/s390/macro-assembler-s390.h | 28 +-
 .../macro-assembler-shared-ia32-x64.cc | 288 +++---
 .../macro-assembler-shared-ia32-x64.h | 41 +-
 src/codegen/x64/macro-assembler-x64.cc | 500 ++++-----
 src/codegen/x64/macro-assembler-x64.h | 35 +-
 .../backend/arm/code-generator-arm.cc | 202 ++--
 .../backend/arm/instruction-selector-arm.cc | 4 +-
 .../backend/arm64/code-generator-arm64.cc | 190 ++--
 .../arm64/instruction-selector-arm64.cc | 4 +-
 src/compiler/backend/code-generator-impl.h | 4 +-
 src/compiler/backend/code-generator.cc | 132 +--
 src/compiler/backend/code-generator.h | 4 +-
 .../backend/ia32/code-generator-ia32.cc | 105 +-
 .../backend/ia32/instruction-selector-ia32.cc | 4 +-
 src/compiler/backend/instruction-selector.cc | 2 +-
 .../backend/loong64/code-generator-loong64.cc | 80 +-
 .../loong64/instruction-selector-loong64.cc | 4 +-
 .../backend/mips64/code-generator-mips64.cc | 486 ++++-----
 .../mips64/instruction-scheduler-mips64.cc | 2 +-
 .../backend/ppc/code-generator-ppc.cc | 54 +-
 .../backend/riscv/code-generator-riscv.cc | 54 +-
 .../riscv/instruction-scheduler-riscv.cc | 2 +-
 .../riscv/instruction-selector-riscv32.cc | 2 +-
 .../riscv/instruction-selector-riscv64.cc | 2 +-
 .../backend/s390/code-generator-s390.cc | 44 +-
 .../backend/x64/code-generator-x64.cc | 232 ++---
 .../backend/x64/instruction-selector-x64.cc | 6 +-
 src/compiler/basic-block-instrumentor.cc | 2 +-
 src/compiler/pipeline.cc | 8 +-
 src/diagnostics/unwinding-info-win64.cc | 2 +-
 src/execution/isolate-data.h | 4 +-
 src/maglev/arm64/maglev-ir-arm64.cc | 2 +-
 src/maglev/x64/maglev-ir-x64.cc | 4 +-
 .../arm64/regexp-macro-assembler-arm64.cc | 8 +-
 src/wasm/baseline/arm/liftoff-assembler-arm.h | 46 +-
 .../baseline/arm64/liftoff-assembler-arm64.h | 4 +-
 .../baseline/ia32/liftoff-assembler-ia32.h | 54 +-
 src/wasm/baseline/liftoff-assembler.cc | 2 +-
 src/wasm/baseline/liftoff-assembler.h | 2 +-
 .../loong64/liftoff-assembler-loong64.h | 302 +++---
 .../mips64/liftoff-assembler-mips64.h | 368 +++----
 src/wasm/baseline/ppc/liftoff-assembler-ppc.h | 30 +-
 .../baseline/riscv/liftoff-assembler-riscv.h | 30 +-
 .../riscv/liftoff-assembler-riscv32.h | 188 ++--
 .../riscv/liftoff-assembler-riscv64.h | 156 +--
 .../baseline/s390/liftoff-assembler-s390.h | 6 +-
 src/wasm/baseline/x64/liftoff-assembler-x64.h | 55 +-
 src/wasm/jump-table-assembler.cc | 2 +-
 src/wasm/jump-table-assembler.h | 4 +-
 test/cctest/compiler/test-code-generator.cc | 12 +-
 test/cctest/test-assembler-arm64.cc | 12 +-
 test/cctest/test-assembler-mips64.cc | 10 +-
 test/unittests/BUILD.gn | 18 +-
 ...est.cc => macro-assembler-arm-unittest.cc} | 34 +-
 .../macro-assembler-arm64-unittest.cc | 331 ++++--
 ...st.cc => macro-assembler-ia32-unittest.cc} | 16 +-
 ...cc => macro-assembler-loong64-unittest.cc} | 16 +-
 ....cc => macro-assembler-mips64-unittest.cc} | 16 +-
 ...est.cc => macro-assembler-ppc-unittest.cc} | 75 +-
 ...t.cc => macro-assembler-riscv-unittest.cc} | 16 +-
 ...st.cc => macro-assembler-s390-unittest.cc} | 16 +-
 .../assembler/macro-assembler-x64-unittest.cc | 53 +-
 .../turbo-assembler-arm64-unittest.cc | 254 -----
 .../assembler/turbo-assembler-x64-unittest.cc | 65 --
 108 files changed, 5101 insertions(+), 5322 deletions(-)
 rename src/codegen/{turbo-assembler.cc => macro-assembler-base.cc} (87%)
 rename src/codegen/{turbo-assembler.h => macro-assembler-base.h} (81%)
 rename test/unittests/assembler/{turbo-assembler-arm-unittest.cc => macro-assembler-arm-unittest.cc} (86%)
 rename test/unittests/assembler/{turbo-assembler-ia32-unittest.cc => macro-assembler-ia32-unittest.cc} (82%)
 rename test/unittests/assembler/{turbo-assembler-loong64-unittest.cc => macro-assembler-loong64-unittest.cc} (83%)
 rename test/unittests/assembler/{turbo-assembler-mips64-unittest.cc => macro-assembler-mips64-unittest.cc} (83%)
 rename test/unittests/assembler/{turbo-assembler-ppc-unittest.cc => macro-assembler-ppc-unittest.cc} (58%)
 rename test/unittests/assembler/{turbo-assembler-riscv-unittest.cc => macro-assembler-riscv-unittest.cc} (83%)
 rename test/unittests/assembler/{turbo-assembler-s390-unittest.cc => macro-assembler-s390-unittest.cc} (83%)
 delete mode 100644 test/unittests/assembler/turbo-assembler-arm64-unittest.cc
 delete mode 100644 test/unittests/assembler/turbo-assembler-x64-unittest.cc

diff --git a/BUILD.bazel b/BUILD.bazel
index 7f7441ab55..5fd776494f 100644
--- a/BUILD.bazel
+++ b/BUILD.bazel
@@ -1243,8 +1243,8 @@ filegroup(
         "src/codegen/tick-counter.h",
         "src/codegen/tnode.cc",
         "src/codegen/tnode.h",
-        "src/codegen/turbo-assembler.cc",
-        "src/codegen/turbo-assembler.h",
+        "src/codegen/macro-assembler-base.cc",
+        "src/codegen/macro-assembler-base.h",
         "src/codegen/unoptimized-compilation-info.cc",
         "src/codegen/unoptimized-compilation-info.h",
         "src/common/assert-scope.cc",
diff --git a/BUILD.gn b/BUILD.gn
index 8d1a7b496d..024740c429 100644
--- a/BUILD.gn
+++ b/BUILD.gn
@@ -2833,6 +2833,7 @@ v8_header_set("v8_internal_headers") {
     "src/codegen/interface-descriptors.h",
     "src/codegen/label.h",
     "src/codegen/machine-type.h",
+    "src/codegen/macro-assembler-base.h",
     "src/codegen/macro-assembler-inl.h",
     "src/codegen/macro-assembler.h",
     "src/codegen/maglev-safepoint-table.h",
@@ -2853,7 +2854,6 @@ v8_header_set("v8_internal_headers") {
     "src/codegen/source-position.h",
     "src/codegen/tick-counter.h",
     "src/codegen/tnode.h",
-    "src/codegen/turbo-assembler.h",
     "src/codegen/unoptimized-compilation-info.h",
     "src/common/assert-scope.h",
     "src/common/checks.h",
@@ -4581,6 +4581,7 @@ v8_source_set("v8_base_without_compiler") {
     "src/codegen/handler-table.cc",
     "src/codegen/interface-descriptors.cc",
     "src/codegen/machine-type.cc",
+    "src/codegen/macro-assembler-base.cc",
     "src/codegen/maglev-safepoint-table.cc",
     "src/codegen/optimized-compilation-info.cc",
     "src/codegen/pending-optimization-table.cc",
@@ -4591,7 +4592,6 @@ v8_source_set("v8_base_without_compiler") {
     "src/codegen/source-position.cc",
     "src/codegen/tick-counter.cc",
     "src/codegen/tnode.cc",
-    "src/codegen/turbo-assembler.cc",
     "src/codegen/unoptimized-compilation-info.cc",
     "src/common/assert-scope.cc",
     "src/common/code-memory-access.cc",
diff --git a/src/baseline/arm/baseline-assembler-arm-inl.h b/src/baseline/arm/baseline-assembler-arm-inl.h
index 15ee64f686..5d6d70c558 100644
--- a/src/baseline/arm/baseline-assembler-arm-inl.h
+++ b/src/baseline/arm/baseline-assembler-arm-inl.h
@@ -570,8 +570,8 @@ void BaselineAssembler::EmitReturn(MacroAssembler* masm) {
   __ masm()->LeaveFrame(StackFrame::BASELINE);
   // Drop receiver + arguments.
- __ masm()->DropArguments(params_size, TurboAssembler::kCountIsInteger, - TurboAssembler::kCountIncludesReceiver); + __ masm()->DropArguments(params_size, MacroAssembler::kCountIsInteger, + MacroAssembler::kCountIncludesReceiver); __ masm()->Ret(); } diff --git a/src/baseline/arm64/baseline-assembler-arm64-inl.h b/src/baseline/arm64/baseline-assembler-arm64-inl.h index 02256fbd11..108a8fee4d 100644 --- a/src/baseline/arm64/baseline-assembler-arm64-inl.h +++ b/src/baseline/arm64/baseline-assembler-arm64-inl.h @@ -571,7 +571,7 @@ void BaselineAssembler::Switch(Register reg, int case_value_base, { const int instruction_count = num_labels * instructions_per_label + instructions_per_jump_target; - TurboAssembler::BlockPoolsScope block_pools(masm_, + MacroAssembler::BlockPoolsScope block_pools(masm_, instruction_count * kInstrSize); __ Bind(&table); for (int i = 0; i < num_labels; ++i) { @@ -630,7 +630,7 @@ void BaselineAssembler::EmitReturn(MacroAssembler* masm) { __ masm()->LeaveFrame(StackFrame::BASELINE); // Drop receiver + arguments. - __ masm()->DropArguments(params_size, TurboAssembler::kCountIncludesReceiver); + __ masm()->DropArguments(params_size, MacroAssembler::kCountIncludesReceiver); __ masm()->Ret(); } diff --git a/src/baseline/ia32/baseline-assembler-ia32-inl.h b/src/baseline/ia32/baseline-assembler-ia32-inl.h index 68aa0eeab6..6a1178a144 100644 --- a/src/baseline/ia32/baseline-assembler-ia32-inl.h +++ b/src/baseline/ia32/baseline-assembler-ia32-inl.h @@ -539,8 +539,8 @@ void BaselineAssembler::EmitReturn(MacroAssembler* masm) { // Drop receiver + arguments. __ masm()->DropArguments(params_size, scratch, - TurboAssembler::kCountIsInteger, - TurboAssembler::kCountIncludesReceiver); + MacroAssembler::kCountIsInteger, + MacroAssembler::kCountIncludesReceiver); __ masm()->Ret(); } diff --git a/src/baseline/loong64/baseline-assembler-loong64-inl.h b/src/baseline/loong64/baseline-assembler-loong64-inl.h index 546854e73f..ec2d34d121 100644 --- a/src/baseline/loong64/baseline-assembler-loong64-inl.h +++ b/src/baseline/loong64/baseline-assembler-loong64-inl.h @@ -533,8 +533,8 @@ void BaselineAssembler::EmitReturn(MacroAssembler* masm) { __ masm()->LeaveFrame(StackFrame::BASELINE); // Drop receiver + arguments. - __ masm()->DropArguments(params_size, TurboAssembler::kCountIsInteger, - TurboAssembler::kCountIncludesReceiver); + __ masm()->DropArguments(params_size, MacroAssembler::kCountIsInteger, + MacroAssembler::kCountIncludesReceiver); __ masm()->Ret(); } diff --git a/src/baseline/mips64/baseline-assembler-mips64-inl.h b/src/baseline/mips64/baseline-assembler-mips64-inl.h index 522efd23c8..9aef927dbd 100644 --- a/src/baseline/mips64/baseline-assembler-mips64-inl.h +++ b/src/baseline/mips64/baseline-assembler-mips64-inl.h @@ -544,8 +544,8 @@ void BaselineAssembler::EmitReturn(MacroAssembler* masm) { __ masm()->LeaveFrame(StackFrame::BASELINE); // Drop receiver + arguments. 
- __ masm()->DropArguments(params_size, TurboAssembler::kCountIsInteger, - TurboAssembler::kCountIncludesReceiver); + __ masm()->DropArguments(params_size, MacroAssembler::kCountIsInteger, + MacroAssembler::kCountIncludesReceiver); __ masm()->Ret(); } diff --git a/src/baseline/ppc/baseline-assembler-ppc-inl.h b/src/baseline/ppc/baseline-assembler-ppc-inl.h index 4196551aa1..a952192196 100644 --- a/src/baseline/ppc/baseline-assembler-ppc-inl.h +++ b/src/baseline/ppc/baseline-assembler-ppc-inl.h @@ -684,8 +684,8 @@ void BaselineAssembler::EmitReturn(MacroAssembler* masm) { __ masm()->LeaveFrame(StackFrame::BASELINE); // Drop receiver + arguments. - __ masm()->DropArguments(params_size, TurboAssembler::kCountIsInteger, - TurboAssembler::kCountIncludesReceiver); + __ masm()->DropArguments(params_size, MacroAssembler::kCountIsInteger, + MacroAssembler::kCountIncludesReceiver); __ masm()->Ret(); } diff --git a/src/baseline/riscv/baseline-assembler-riscv-inl.h b/src/baseline/riscv/baseline-assembler-riscv-inl.h index 9b5a1e6450..c735251a6c 100644 --- a/src/baseline/riscv/baseline-assembler-riscv-inl.h +++ b/src/baseline/riscv/baseline-assembler-riscv-inl.h @@ -508,7 +508,7 @@ void BaselineAssembler::Switch(Register reg, int case_value_base, __ CalcScaledAddress(t6, t6, reg, entry_size_log2); __ Jump(t6); { - TurboAssembler::BlockTrampolinePoolScope(masm()); + MacroAssembler::BlockTrampolinePoolScope(masm()); __ BlockTrampolinePoolFor(num_labels * kInstrSize * 2); __ bind(&table); for (int i = 0; i < num_labels; ++i) { diff --git a/src/baseline/s390/baseline-assembler-s390-inl.h b/src/baseline/s390/baseline-assembler-s390-inl.h index 087c4f1b12..b183fda183 100644 --- a/src/baseline/s390/baseline-assembler-s390-inl.h +++ b/src/baseline/s390/baseline-assembler-s390-inl.h @@ -692,8 +692,8 @@ void BaselineAssembler::EmitReturn(MacroAssembler* masm) { __ masm()->LeaveFrame(StackFrame::BASELINE); // Drop receiver + arguments. - __ masm()->DropArguments(params_size, TurboAssembler::kCountIsInteger, - TurboAssembler::kCountIncludesReceiver); + __ masm()->DropArguments(params_size, MacroAssembler::kCountIsInteger, + MacroAssembler::kCountIncludesReceiver); __ masm()->Ret(); } diff --git a/src/baseline/x64/baseline-assembler-x64-inl.h b/src/baseline/x64/baseline-assembler-x64-inl.h index fe57df754a..8b12a9ab2c 100644 --- a/src/baseline/x64/baseline-assembler-x64-inl.h +++ b/src/baseline/x64/baseline-assembler-x64-inl.h @@ -587,8 +587,8 @@ void BaselineAssembler::EmitReturn(MacroAssembler* masm) { // Drop receiver + arguments. __ masm()->DropArguments(params_size, scratch, - TurboAssembler::kCountIsInteger, - TurboAssembler::kCountIncludesReceiver); + MacroAssembler::kCountIsInteger, + MacroAssembler::kCountIncludesReceiver); __ masm()->Ret(); } diff --git a/src/builtins/arm/builtins-arm.cc b/src/builtins/arm/builtins-arm.cc index be2d6505e2..bdf9df508d 100644 --- a/src/builtins/arm/builtins-arm.cc +++ b/src/builtins/arm/builtins-arm.cc @@ -130,8 +130,8 @@ void Generate_JSBuiltinsConstructStubHelper(MacroAssembler* masm) { } // Remove caller arguments from the stack and return. - __ DropArguments(scratch, TurboAssembler::kCountIsSmi, - TurboAssembler::kCountIncludesReceiver); + __ DropArguments(scratch, MacroAssembler::kCountIsSmi, + MacroAssembler::kCountIncludesReceiver); __ Jump(lr); __ bind(&stack_overflow); @@ -278,8 +278,8 @@ void Builtins::Generate_JSConstructStubGeneric(MacroAssembler* masm) { __ LeaveFrame(StackFrame::CONSTRUCT); // Remove caller arguments from the stack and return. 
- __ DropArguments(r1, TurboAssembler::kCountIsSmi, - TurboAssembler::kCountIncludesReceiver); + __ DropArguments(r1, MacroAssembler::kCountIsSmi, + MacroAssembler::kCountIncludesReceiver); __ Jump(lr); __ bind(&check_receiver); @@ -826,8 +826,8 @@ static void LeaveInterpreterFrame(MacroAssembler* masm, Register scratch1, __ LeaveFrame(StackFrame::INTERPRETED); // Drop receiver + arguments. - __ DropArguments(params_size, TurboAssembler::kCountIsBytes, - TurboAssembler::kCountIncludesReceiver); + __ DropArguments(params_size, MacroAssembler::kCountIsBytes, + MacroAssembler::kCountIncludesReceiver); } // Advance the current bytecode offset. This simulates what all bytecode @@ -1352,7 +1352,7 @@ static void GenerateInterpreterPushArgs(MacroAssembler* masm, Register num_args, __ sub(start_address, start_address, scratch); // Push the arguments. __ PushArray(start_address, num_args, scratch, - TurboAssembler::PushArrayOrder::kReverse); + MacroAssembler::PushArrayOrder::kReverse); } // static @@ -1820,8 +1820,8 @@ void Builtins::Generate_FunctionPrototypeApply(MacroAssembler* masm) { __ ldr(r5, MemOperand(sp, kSystemPointerSize), ge); // thisArg __ cmp(r0, Operand(JSParameterCount(2)), ge); __ ldr(r2, MemOperand(sp, 2 * kSystemPointerSize), ge); // argArray - __ DropArgumentsAndPushNewReceiver(r0, r5, TurboAssembler::kCountIsInteger, - TurboAssembler::kCountIncludesReceiver); + __ DropArgumentsAndPushNewReceiver(r0, r5, MacroAssembler::kCountIsInteger, + MacroAssembler::kCountIncludesReceiver); } // ----------- S t a t e ------------- @@ -1897,8 +1897,8 @@ void Builtins::Generate_ReflectApply(MacroAssembler* masm) { __ ldr(r5, MemOperand(sp, 2 * kSystemPointerSize), ge); // thisArgument __ cmp(r0, Operand(JSParameterCount(3)), ge); __ ldr(r2, MemOperand(sp, 3 * kSystemPointerSize), ge); // argumentsList - __ DropArgumentsAndPushNewReceiver(r0, r5, TurboAssembler::kCountIsInteger, - TurboAssembler::kCountIncludesReceiver); + __ DropArgumentsAndPushNewReceiver(r0, r5, MacroAssembler::kCountIsInteger, + MacroAssembler::kCountIncludesReceiver); } // ----------- S t a t e ------------- @@ -1940,8 +1940,8 @@ void Builtins::Generate_ReflectConstruct(MacroAssembler* masm) { __ ldr(r2, MemOperand(sp, 2 * kSystemPointerSize), ge); // argumentsList __ cmp(r0, Operand(JSParameterCount(3)), ge); __ ldr(r3, MemOperand(sp, 3 * kSystemPointerSize), ge); // new.target - __ DropArgumentsAndPushNewReceiver(r0, r4, TurboAssembler::kCountIsInteger, - TurboAssembler::kCountIncludesReceiver); + __ DropArgumentsAndPushNewReceiver(r0, r4, MacroAssembler::kCountIsInteger, + MacroAssembler::kCountIncludesReceiver); } // ----------- S t a t e ------------- diff --git a/src/builtins/arm64/builtins-arm64.cc b/src/builtins/arm64/builtins-arm64.cc index 79ce842737..b905186f53 100644 --- a/src/builtins/arm64/builtins-arm64.cc +++ b/src/builtins/arm64/builtins-arm64.cc @@ -163,7 +163,7 @@ void Generate_JSBuiltinsConstructStubHelper(MacroAssembler* masm) { } // Remove caller arguments from the stack and return. - __ DropArguments(x1, TurboAssembler::kCountIncludesReceiver); + __ DropArguments(x1, MacroAssembler::kCountIncludesReceiver); __ Ret(); __ Bind(&stack_overflow); @@ -348,7 +348,7 @@ void Builtins::Generate_JSConstructStubGeneric(MacroAssembler* masm) { // Leave construct frame. __ LeaveFrame(StackFrame::CONSTRUCT); // Remove caller arguments from the stack and return. 
- __ DropArguments(x1, TurboAssembler::kCountIncludesReceiver); + __ DropArguments(x1, MacroAssembler::kCountIncludesReceiver); __ Ret(); // Otherwise we do a smi check and fall through to check if the return value @@ -1205,7 +1205,7 @@ void Builtins::Generate_BaselineOutOfLinePrologue(MacroAssembler* masm) { { ASM_CODE_COMMENT_STRING(masm, "Optimized marker check"); // Drop the frame created by the baseline call. - __ Pop(fp, lr); + __ Pop(fp, lr); __ OptimizeCodeOrTailCallOptimizedCodeSlot(flags, feedback_vector); __ Trap(); } @@ -1330,7 +1330,7 @@ void Builtins::Generate_InterpreterEntryTrampoline( // the frame (that is done below). __ Bind(&push_stack_frame); FrameScope frame_scope(masm, StackFrame::MANUAL); - __ Push(lr, fp); + __ Push(lr, fp); __ mov(fp, sp); __ Push(cp, closure); @@ -1342,7 +1342,7 @@ void Builtins::Generate_InterpreterEntryTrampoline( // Push actual argument count, bytecode array, Smi tagged bytecode array // offset and an undefined (to properly align the stack pointer). - static_assert(TurboAssembler::kExtraSlotClaimedByPrologue == 1); + static_assert(MacroAssembler::kExtraSlotClaimedByPrologue == 1); __ SmiTag(x6, kInterpreterBytecodeOffsetRegister); __ Push(kJavaScriptCallArgCountRegister, kInterpreterBytecodeArrayRegister); __ LoadRoot(kInterpreterAccumulatorRegister, RootIndex::kUndefinedValue); @@ -1582,7 +1582,7 @@ static void GenerateInterpreterPushArgs(MacroAssembler* masm, Register num_args, } __ CopyDoubleWords(stack_addr, last_arg_addr, slots_to_copy, - TurboAssembler::kDstLessThanSrcAndReverse); + MacroAssembler::kDstLessThanSrcAndReverse); if (receiver_mode == ConvertReceiverMode::kNullOrUndefined) { // Store "undefined" as the receiver arg if we need to. @@ -1882,7 +1882,7 @@ void Generate_ContinueToBuiltinHelper(MacroAssembler* masm, // Restore fp, lr. __ Mov(sp, fp); - __ Pop(fp, lr); + __ Pop(fp, lr); __ LoadEntryFromBuiltinIndex(builtin); __ Jump(builtin); @@ -2069,7 +2069,7 @@ void Builtins::Generate_FunctionPrototypeApply(MacroAssembler* masm) { __ Peek(arg_array, 2 * kSystemPointerSize); __ bind(&done); } - __ DropArguments(argc, TurboAssembler::kCountIncludesReceiver); + __ DropArguments(argc, MacroAssembler::kCountIncludesReceiver); __ PushArgument(this_arg); // ----------- S t a t e ------------- @@ -2158,7 +2158,7 @@ void Builtins::Generate_FunctionPrototypeCall(MacroAssembler* masm) { __ SlotAddress(copy_from, count); __ Add(copy_to, copy_from, kSystemPointerSize); __ CopyDoubleWords(copy_to, copy_from, count, - TurboAssembler::kSrcLessThanDst); + MacroAssembler::kSrcLessThanDst); __ Drop(2); } @@ -2206,7 +2206,7 @@ void Builtins::Generate_ReflectApply(MacroAssembler* masm) { __ Peek(arguments_list, 3 * kSystemPointerSize); __ bind(&done); } - __ DropArguments(argc, TurboAssembler::kCountIncludesReceiver); + __ DropArguments(argc, MacroAssembler::kCountIncludesReceiver); __ PushArgument(this_argument); // ----------- S t a t e ------------- @@ -2264,7 +2264,7 @@ void Builtins::Generate_ReflectConstruct(MacroAssembler* masm) { __ bind(&done); } - __ DropArguments(argc, TurboAssembler::kCountIncludesReceiver); + __ DropArguments(argc, MacroAssembler::kCountIncludesReceiver); // Push receiver (undefined). 
__ PushArgument(undefined_value); @@ -2662,7 +2662,7 @@ void Generate_PushBoundArguments(MacroAssembler* masm) { __ SlotAddress(copy_to, total_argc); __ Sub(copy_from, copy_to, kSystemPointerSize); __ CopyDoubleWords(copy_to, copy_from, argc, - TurboAssembler::kSrcLessThanDst); + MacroAssembler::kSrcLessThanDst); } } @@ -2996,7 +2996,7 @@ void Builtins::Generate_WasmLiftoffFrameSetup(MacroAssembler* masm) { // Save registers. __ PushXRegList(kSavedGpRegs); __ PushQRegList(kSavedFpRegs); - __ Push(lr, xzr); // xzr is for alignment. + __ Push(lr, xzr); // xzr is for alignment. // Arguments to the runtime function: instance, func_index, and an // additional stack slot for the NativeModule. The first pushed register @@ -3008,7 +3008,7 @@ void Builtins::Generate_WasmLiftoffFrameSetup(MacroAssembler* masm) { __ Mov(vector, kReturnRegister0); // Restore registers and frame type. - __ Pop(xzr, lr); + __ Pop(xzr, lr); __ PopQRegList(kSavedFpRegs); __ PopXRegList(kSavedGpRegs); // Restore the instance from the frame. @@ -3263,8 +3263,8 @@ void ReloadParentContinuation(MacroAssembler* masm, Register wasm_instance, // Update active continuation root. int32_t active_continuation_offset = - TurboAssembler::RootRegisterOffsetForRootIndex( - RootIndex::kActiveContinuation); + MacroAssembler::RootRegisterOffsetForRootIndex( + RootIndex::kActiveContinuation); __ Str(parent, MemOperand(kRootRegister, active_continuation_offset)); jmpbuf = parent; __ LoadExternalPointerField( @@ -3313,8 +3313,8 @@ void RestoreParentSuspender(MacroAssembler* masm, Register tmp1, __ StoreTaggedField(tmp2, state_loc); __ bind(&undefined); int32_t active_suspender_offset = - TurboAssembler::RootRegisterOffsetForRootIndex( - RootIndex::kActiveSuspender); + MacroAssembler::RootRegisterOffsetForRootIndex( + RootIndex::kActiveSuspender); __ Str(suspender, MemOperand(kRootRegister, active_suspender_offset)); } @@ -4317,7 +4317,7 @@ void GenericJSToWasmWrapperHelper(MacroAssembler* masm, bool stack_switch) { // expected to be on the top of the stack). // We cannot use just the ret instruction for this, because we cannot pass // the number of slots to remove in a Register as an argument. 
- __ DropArguments(param_count, TurboAssembler::kCountExcludesReceiver); + __ DropArguments(param_count, MacroAssembler::kCountExcludesReceiver); __ Ret(lr); // ------------------------------------------- @@ -4522,14 +4522,15 @@ void Builtins::Generate_WasmSuspend(MacroAssembler* masm) { FieldMemOperand(suspender_continuation, WasmContinuationObject::kParentOffset)); int32_t active_continuation_offset = - TurboAssembler::RootRegisterOffsetForRootIndex( - RootIndex::kActiveContinuation); + MacroAssembler::RootRegisterOffsetForRootIndex( + RootIndex::kActiveContinuation); __ Str(caller, MemOperand(kRootRegister, active_continuation_offset)); DEFINE_REG(parent); __ LoadAnyTaggedField( parent, FieldMemOperand(suspender, WasmSuspenderObject::kParentOffset)); int32_t active_suspender_offset = - TurboAssembler::RootRegisterOffsetForRootIndex(RootIndex::kActiveSuspender); + MacroAssembler::RootRegisterOffsetForRootIndex( + RootIndex::kActiveSuspender); __ Str(parent, MemOperand(kRootRegister, active_suspender_offset)); regs.ResetExcept(promise, caller); @@ -4660,8 +4661,8 @@ void Generate_WasmResumeHelper(MacroAssembler* masm, wasm::OnResume on_resume) { scratch, FieldMemOperand(suspender, WasmSuspenderObject::kStateOffset)); int32_t active_suspender_offset = - TurboAssembler::RootRegisterOffsetForRootIndex( - RootIndex::kActiveSuspender); + MacroAssembler::RootRegisterOffsetForRootIndex( + RootIndex::kActiveSuspender); __ Str(suspender, MemOperand(kRootRegister, active_suspender_offset)); // Next line we are going to load a field from suspender, but we have to use @@ -4685,8 +4686,8 @@ void Generate_WasmResumeHelper(MacroAssembler* masm, wasm::OnResume on_resume) { active_continuation, kLRHasBeenSaved, SaveFPRegsMode::kIgnore); FREE_REG(active_continuation); int32_t active_continuation_offset = - TurboAssembler::RootRegisterOffsetForRootIndex( - RootIndex::kActiveContinuation); + MacroAssembler::RootRegisterOffsetForRootIndex( + RootIndex::kActiveContinuation); __ Str(target_continuation, MemOperand(kRootRegister, active_continuation_offset)); @@ -4731,7 +4732,7 @@ void Generate_WasmResumeHelper(MacroAssembler* masm, wasm::OnResume on_resume) { __ bind(&suspend); __ LeaveFrame(StackFrame::STACK_SWITCH); // Pop receiver + parameter. - __ DropArguments(2, TurboAssembler::kCountIncludesReceiver); + __ DropArguments(2, MacroAssembler::kCountIncludesReceiver); __ Ret(lr); } } // namespace @@ -5384,9 +5385,9 @@ void Builtins::Generate_DirectCEntry(MacroAssembler* masm) { // DirectCEntry places the return address on the stack (updated by the GC), // making the call GC safe. The irregexp backend relies on this. - __ Poke(lr, 0); // Store the return address. + __ Poke(lr, 0); // Store the return address. __ Blr(x10); // Call the C++ function. - __ Peek(lr, 0); // Return to calling code. + __ Peek(lr, 0); // Return to calling code. __ AssertFPCRState(); __ Ret(); } diff --git a/src/builtins/ia32/builtins-ia32.cc b/src/builtins/ia32/builtins-ia32.cc index 6dbdc29c89..04205c34f5 100644 --- a/src/builtins/ia32/builtins-ia32.cc +++ b/src/builtins/ia32/builtins-ia32.cc @@ -125,8 +125,8 @@ void Generate_JSBuiltinsConstructStubHelper(MacroAssembler* masm) { } // Remove caller arguments from the stack and return. 
- __ DropArguments(edx, ecx, TurboAssembler::kCountIsSmi, - TurboAssembler::kCountIncludesReceiver); + __ DropArguments(edx, ecx, MacroAssembler::kCountIsSmi, + MacroAssembler::kCountIncludesReceiver); __ ret(0); __ bind(&stack_overflow); @@ -280,8 +280,8 @@ void Builtins::Generate_JSConstructStubGeneric(MacroAssembler* masm) { __ LeaveFrame(StackFrame::CONSTRUCT); // Remove caller arguments from the stack and return. - __ DropArguments(edx, ecx, TurboAssembler::kCountIsSmi, - TurboAssembler::kCountIncludesReceiver); + __ DropArguments(edx, ecx, MacroAssembler::kCountIsSmi, + MacroAssembler::kCountIncludesReceiver); __ ret(0); // Otherwise we do a smi check and fall through to check if the return value @@ -768,8 +768,8 @@ static void LeaveInterpreterFrame(MacroAssembler* masm, Register scratch1, __ leave(); // Drop receiver + arguments. - __ DropArguments(params_size, scratch2, TurboAssembler::kCountIsBytes, - TurboAssembler::kCountIncludesReceiver); + __ DropArguments(params_size, scratch2, MacroAssembler::kCountIsBytes, + MacroAssembler::kCountIncludesReceiver); } // Advance the current bytecode offset. This simulates what all bytecode @@ -1810,8 +1810,8 @@ void Builtins::Generate_FunctionPrototypeApply(MacroAssembler* masm) { } __ bind(&no_this_arg); __ DropArgumentsAndPushNewReceiver(eax, edi, ecx, - TurboAssembler::kCountIsInteger, - TurboAssembler::kCountIncludesReceiver); + MacroAssembler::kCountIsInteger, + MacroAssembler::kCountIncludesReceiver); // Restore receiver to edi. __ movd(edi, xmm0); @@ -1919,8 +1919,8 @@ void Builtins::Generate_ReflectApply(MacroAssembler* masm) { __ movd(xmm0, edx); __ DropArgumentsAndPushNewReceiver(eax, ecx, edx, - TurboAssembler::kCountIsInteger, - TurboAssembler::kCountIncludesReceiver); + MacroAssembler::kCountIsInteger, + MacroAssembler::kCountIncludesReceiver); // Restore argumentsList. __ movd(edx, xmm0); @@ -1978,8 +1978,8 @@ void Builtins::Generate_ReflectConstruct(MacroAssembler* masm) { __ DropArgumentsAndPushNewReceiver( eax, masm->RootAsOperand(RootIndex::kUndefinedValue), ecx, - TurboAssembler::kCountIsInteger, - TurboAssembler::kCountIncludesReceiver); + MacroAssembler::kCountIsInteger, + MacroAssembler::kCountIncludesReceiver); // Restore argumentsList. __ movd(ecx, xmm0); diff --git a/src/builtins/loong64/builtins-loong64.cc b/src/builtins/loong64/builtins-loong64.cc index 2fe4a2a914..c5d18055d0 100644 --- a/src/builtins/loong64/builtins-loong64.cc +++ b/src/builtins/loong64/builtins-loong64.cc @@ -112,8 +112,8 @@ void Generate_JSBuiltinsConstructStubHelper(MacroAssembler* masm) { } // Remove caller arguments from the stack and return. - __ DropArguments(t3, TurboAssembler::kCountIsSmi, - TurboAssembler::kCountIncludesReceiver, t3); + __ DropArguments(t3, MacroAssembler::kCountIsSmi, + MacroAssembler::kCountIncludesReceiver, t3); __ Ret(); } @@ -267,8 +267,8 @@ void Builtins::Generate_JSConstructStubGeneric(MacroAssembler* masm) { __ LeaveFrame(StackFrame::CONSTRUCT); // Remove caller arguments from the stack and return. - __ DropArguments(a1, TurboAssembler::kCountIsSmi, - TurboAssembler::kCountIncludesReceiver, a4); + __ DropArguments(a1, MacroAssembler::kCountIsSmi, + MacroAssembler::kCountIncludesReceiver, a4); __ Ret(); __ bind(&check_receiver); @@ -803,8 +803,8 @@ static void LeaveInterpreterFrame(MacroAssembler* masm, Register scratch1, __ LeaveFrame(StackFrame::INTERPRETED); // Drop receiver + arguments. 
- __ DropArguments(params_size, TurboAssembler::kCountIsBytes, - TurboAssembler::kCountIncludesReceiver); + __ DropArguments(params_size, MacroAssembler::kCountIsBytes, + MacroAssembler::kCountIncludesReceiver); } // Advance the current bytecode offset. This simulates what all bytecode @@ -1328,7 +1328,7 @@ static void GenerateInterpreterPushArgs(MacroAssembler* masm, Register num_args, // Push the arguments. __ PushArray(start_address, num_args, scratch, scratch2, - TurboAssembler::PushArrayOrder::kReverse); + MacroAssembler::PushArrayOrder::kReverse); } // static @@ -1794,8 +1794,8 @@ void Builtins::Generate_FunctionPrototypeApply(MacroAssembler* masm) { __ Movz(arg_array, undefined_value, scratch); // if argc == 1 __ Ld_d(receiver, MemOperand(sp, 0)); __ DropArgumentsAndPushNewReceiver(argc, this_arg, - TurboAssembler::kCountIsInteger, - TurboAssembler::kCountIncludesReceiver); + MacroAssembler::kCountIsInteger, + MacroAssembler::kCountIncludesReceiver); } // ----------- S t a t e ------------- @@ -1889,8 +1889,8 @@ void Builtins::Generate_ReflectApply(MacroAssembler* masm) { __ Movz(arguments_list, undefined_value, scratch); // if argc == 2 __ DropArgumentsAndPushNewReceiver(argc, this_argument, - TurboAssembler::kCountIsInteger, - TurboAssembler::kCountIncludesReceiver); + MacroAssembler::kCountIsInteger, + MacroAssembler::kCountIncludesReceiver); } // ----------- S t a t e ------------- @@ -1949,8 +1949,8 @@ void Builtins::Generate_ReflectConstruct(MacroAssembler* masm) { __ Movz(new_target, target, scratch); // if argc == 2 __ DropArgumentsAndPushNewReceiver(argc, undefined_value, - TurboAssembler::kCountIsInteger, - TurboAssembler::kCountIncludesReceiver); + MacroAssembler::kCountIsInteger, + MacroAssembler::kCountIncludesReceiver); } // ----------- S t a t e ------------- diff --git a/src/builtins/mips64/builtins-mips64.cc b/src/builtins/mips64/builtins-mips64.cc index 3329065e97..cf40b1062f 100644 --- a/src/builtins/mips64/builtins-mips64.cc +++ b/src/builtins/mips64/builtins-mips64.cc @@ -112,8 +112,8 @@ void Generate_JSBuiltinsConstructStubHelper(MacroAssembler* masm) { } // Remove caller arguments from the stack and return. - __ DropArguments(t3, TurboAssembler::kCountIsSmi, - TurboAssembler::kCountIncludesReceiver, t3); + __ DropArguments(t3, MacroAssembler::kCountIsSmi, + MacroAssembler::kCountIncludesReceiver, t3); __ Ret(); } @@ -267,8 +267,8 @@ void Builtins::Generate_JSConstructStubGeneric(MacroAssembler* masm) { __ LeaveFrame(StackFrame::CONSTRUCT); // Remove caller arguments from the stack and return. - __ DropArguments(a1, TurboAssembler::kCountIsSmi, - TurboAssembler::kCountIncludesReceiver, a4); + __ DropArguments(a1, MacroAssembler::kCountIsSmi, + MacroAssembler::kCountIncludesReceiver, a4); __ Ret(); __ bind(&check_receiver); @@ -804,8 +804,8 @@ static void LeaveInterpreterFrame(MacroAssembler* masm, Register scratch1, __ LeaveFrame(StackFrame::INTERPRETED); // Drop receiver + arguments. - __ DropArguments(params_size, TurboAssembler::kCountIsBytes, - TurboAssembler::kCountIncludesReceiver); + __ DropArguments(params_size, MacroAssembler::kCountIsBytes, + MacroAssembler::kCountIncludesReceiver); } // Advance the current bytecode offset. This simulates what all bytecode @@ -1320,7 +1320,7 @@ static void GenerateInterpreterPushArgs(MacroAssembler* masm, Register num_args, // Push the arguments. 
__ PushArray(start_address, num_args, scratch, scratch2, - TurboAssembler::PushArrayOrder::kReverse); + MacroAssembler::PushArrayOrder::kReverse); } // static @@ -1784,8 +1784,8 @@ void Builtins::Generate_FunctionPrototypeApply(MacroAssembler* masm) { __ Movz(arg_array, undefined_value, scratch); // if argc == 1 __ Ld(receiver, MemOperand(sp)); __ DropArgumentsAndPushNewReceiver(argc, this_arg, - TurboAssembler::kCountIsInteger, - TurboAssembler::kCountIncludesReceiver); + MacroAssembler::kCountIsInteger, + MacroAssembler::kCountIncludesReceiver); } // ----------- S t a t e ------------- @@ -1881,8 +1881,8 @@ void Builtins::Generate_ReflectApply(MacroAssembler* masm) { __ Movz(arguments_list, undefined_value, scratch); // if argc == 2 __ DropArgumentsAndPushNewReceiver(argc, this_argument, - TurboAssembler::kCountIsInteger, - TurboAssembler::kCountIncludesReceiver); + MacroAssembler::kCountIsInteger, + MacroAssembler::kCountIncludesReceiver); } // ----------- S t a t e ------------- @@ -1941,8 +1941,8 @@ void Builtins::Generate_ReflectConstruct(MacroAssembler* masm) { __ Movz(new_target, target, scratch); // if argc == 2 __ DropArgumentsAndPushNewReceiver(argc, undefined_value, - TurboAssembler::kCountIsInteger, - TurboAssembler::kCountIncludesReceiver); + MacroAssembler::kCountIsInteger, + MacroAssembler::kCountIncludesReceiver); } // ----------- S t a t e ------------- diff --git a/src/builtins/ppc/builtins-ppc.cc b/src/builtins/ppc/builtins-ppc.cc index a1c992864f..4e99cfc074 100644 --- a/src/builtins/ppc/builtins-ppc.cc +++ b/src/builtins/ppc/builtins-ppc.cc @@ -361,8 +361,8 @@ void Generate_JSBuiltinsConstructStubHelper(MacroAssembler* masm) { // Leave construct frame. } // Remove caller arguments from the stack and return. - __ DropArguments(scratch, TurboAssembler::kCountIsSmi, - TurboAssembler::kCountIncludesReceiver); + __ DropArguments(scratch, MacroAssembler::kCountIsSmi, + MacroAssembler::kCountIncludesReceiver); __ blr(); __ bind(&stack_overflow); @@ -611,8 +611,8 @@ void Builtins::Generate_JSConstructStubGeneric(MacroAssembler* masm) { __ LeaveFrame(StackFrame::CONSTRUCT); // Remove caller arguments from the stack and return. - __ DropArguments(r4, TurboAssembler::kCountIsSmi, - TurboAssembler::kCountIncludesReceiver); + __ DropArguments(r4, MacroAssembler::kCountIsSmi, + MacroAssembler::kCountIncludesReceiver); __ blr(); __ bind(&check_receiver); @@ -1119,8 +1119,8 @@ static void LeaveInterpreterFrame(MacroAssembler* masm, Register scratch1, // Leave the frame (also dropping the register file). __ LeaveFrame(StackFrame::INTERPRETED); - __ DropArguments(params_size, TurboAssembler::kCountIsBytes, - TurboAssembler::kCountIncludesReceiver); + __ DropArguments(params_size, MacroAssembler::kCountIsBytes, + MacroAssembler::kCountIncludesReceiver); } // Advance the current bytecode offset. This simulates what all bytecode @@ -1636,7 +1636,7 @@ static void GenerateInterpreterPushArgs(MacroAssembler* masm, Register num_args, __ sub(start_address, start_address, scratch); // Push the arguments. 
__ PushArray(start_address, num_args, scratch, r0, - TurboAssembler::PushArrayOrder::kReverse); + MacroAssembler::PushArrayOrder::kReverse); } // static @@ -2027,8 +2027,8 @@ void Builtins::Generate_FunctionPrototypeApply(MacroAssembler* masm) { __ LoadU64(r5, MemOperand(sp, 2 * kSystemPointerSize)); // argArray __ bind(&done); - __ DropArgumentsAndPushNewReceiver(r3, r8, TurboAssembler::kCountIsInteger, - TurboAssembler::kCountIncludesReceiver); + __ DropArgumentsAndPushNewReceiver(r3, r8, MacroAssembler::kCountIsInteger, + MacroAssembler::kCountIncludesReceiver); } // ----------- S t a t e ------------- @@ -2111,8 +2111,8 @@ void Builtins::Generate_ReflectApply(MacroAssembler* masm) { __ LoadU64(r5, MemOperand(sp, 3 * kSystemPointerSize)); // argArray __ bind(&done); - __ DropArgumentsAndPushNewReceiver(r3, r8, TurboAssembler::kCountIsInteger, - TurboAssembler::kCountIncludesReceiver); + __ DropArgumentsAndPushNewReceiver(r3, r8, MacroAssembler::kCountIsInteger, + MacroAssembler::kCountIncludesReceiver); } // ----------- S t a t e ------------- @@ -2160,8 +2160,8 @@ void Builtins::Generate_ReflectConstruct(MacroAssembler* masm) { __ blt(&done); __ LoadU64(r6, MemOperand(sp, 3 * kSystemPointerSize)); // argArray __ bind(&done); - __ DropArgumentsAndPushNewReceiver(r3, r7, TurboAssembler::kCountIsInteger, - TurboAssembler::kCountIncludesReceiver); + __ DropArgumentsAndPushNewReceiver(r3, r7, MacroAssembler::kCountIsInteger, + MacroAssembler::kCountIncludesReceiver); } // ----------- S t a t e ------------- diff --git a/src/builtins/riscv/builtins-riscv.cc b/src/builtins/riscv/builtins-riscv.cc index 030595c51d..982c0154e5 100644 --- a/src/builtins/riscv/builtins-riscv.cc +++ b/src/builtins/riscv/builtins-riscv.cc @@ -1381,7 +1381,7 @@ static void GenerateInterpreterPushArgs(MacroAssembler* masm, Register num_args, // Push the arguments. __ PushArray(start_address, num_args, - TurboAssembler::PushArrayOrder::kReverse); + MacroAssembler::PushArrayOrder::kReverse); } // static diff --git a/src/builtins/s390/builtins-s390.cc b/src/builtins/s390/builtins-s390.cc index 7cb579da48..0727b7462d 100644 --- a/src/builtins/s390/builtins-s390.cc +++ b/src/builtins/s390/builtins-s390.cc @@ -428,8 +428,8 @@ void Generate_JSBuiltinsConstructStubHelper(MacroAssembler* masm) { // Leave construct frame. } // Remove caller arguments from the stack and return. - __ DropArguments(scratch, TurboAssembler::kCountIsSmi, - TurboAssembler::kCountIncludesReceiver); + __ DropArguments(scratch, MacroAssembler::kCountIsSmi, + MacroAssembler::kCountIncludesReceiver); __ Ret(); __ bind(&stack_overflow); @@ -584,8 +584,8 @@ void Builtins::Generate_JSConstructStubGeneric(MacroAssembler* masm) { __ LeaveFrame(StackFrame::CONSTRUCT); // Remove caller arguments from the stack and return. - __ DropArguments(r3, TurboAssembler::kCountIsSmi, - TurboAssembler::kCountIncludesReceiver); + __ DropArguments(r3, MacroAssembler::kCountIsSmi, + MacroAssembler::kCountIncludesReceiver); __ Ret(); __ bind(&check_receiver); @@ -1148,8 +1148,8 @@ static void LeaveInterpreterFrame(MacroAssembler* masm, Register scratch1, // Leave the frame (also dropping the register file). __ LeaveFrame(StackFrame::INTERPRETED); - __ DropArguments(params_size, TurboAssembler::kCountIsBytes, - TurboAssembler::kCountIncludesReceiver); + __ DropArguments(params_size, MacroAssembler::kCountIsBytes, + MacroAssembler::kCountIncludesReceiver); } // Advance the current bytecode offset. 
This simulates what all bytecode @@ -1657,7 +1657,7 @@ static void GenerateInterpreterPushArgs(MacroAssembler* masm, Register num_args, __ SubS64(start_address, start_address, scratch); // Push the arguments. __ PushArray(start_address, num_args, r1, scratch, - TurboAssembler::PushArrayOrder::kReverse); + MacroAssembler::PushArrayOrder::kReverse); } // static @@ -2022,8 +2022,8 @@ void Builtins::Generate_FunctionPrototypeApply(MacroAssembler* masm) { __ LoadU64(r4, MemOperand(sp, 2 * kSystemPointerSize)); // argArray __ bind(&done); - __ DropArgumentsAndPushNewReceiver(r2, r7, TurboAssembler::kCountIsInteger, - TurboAssembler::kCountIncludesReceiver); + __ DropArgumentsAndPushNewReceiver(r2, r7, MacroAssembler::kCountIsInteger, + MacroAssembler::kCountIncludesReceiver); } // ----------- S t a t e ------------- @@ -2107,8 +2107,8 @@ void Builtins::Generate_ReflectApply(MacroAssembler* masm) { __ LoadU64(r4, MemOperand(sp, 3 * kSystemPointerSize)); // argArray __ bind(&done); - __ DropArgumentsAndPushNewReceiver(r2, r7, TurboAssembler::kCountIsInteger, - TurboAssembler::kCountIncludesReceiver); + __ DropArgumentsAndPushNewReceiver(r2, r7, MacroAssembler::kCountIsInteger, + MacroAssembler::kCountIncludesReceiver); } // ----------- S t a t e ------------- @@ -2157,8 +2157,8 @@ void Builtins::Generate_ReflectConstruct(MacroAssembler* masm) { __ blt(&done); __ LoadU64(r5, MemOperand(sp, 3 * kSystemPointerSize)); // argArray __ bind(&done); - __ DropArgumentsAndPushNewReceiver(r2, r6, TurboAssembler::kCountIsInteger, - TurboAssembler::kCountIncludesReceiver); + __ DropArgumentsAndPushNewReceiver(r2, r6, MacroAssembler::kCountIsInteger, + MacroAssembler::kCountIncludesReceiver); } // ----------- S t a t e ------------- diff --git a/src/builtins/x64/builtins-x64.cc b/src/builtins/x64/builtins-x64.cc index c93632d92d..6ebe0ab579 100644 --- a/src/builtins/x64/builtins-x64.cc +++ b/src/builtins/x64/builtins-x64.cc @@ -125,7 +125,7 @@ void Generate_JSBuiltinsConstructStubHelper(MacroAssembler* masm) { // Remove caller arguments from the stack and return. __ DropArguments(rbx, rcx, MacroAssembler::kCountIsSmi, - TurboAssembler::kCountIncludesReceiver); + MacroAssembler::kCountIncludesReceiver); __ ret(0); @@ -282,7 +282,7 @@ void Builtins::Generate_JSConstructStubGeneric(MacroAssembler* masm) { __ LeaveFrame(StackFrame::CONSTRUCT); // Remove caller arguments from the stack and return. __ DropArguments(rbx, rcx, MacroAssembler::kCountIsSmi, - TurboAssembler::kCountIncludesReceiver); + MacroAssembler::kCountIncludesReceiver); __ ret(0); // If the result is a smi, it is *not* an object in the ECMA sense. @@ -890,8 +890,8 @@ static void LeaveInterpreterFrame(MacroAssembler* masm, Register scratch1, __ leave(); // Drop receiver + arguments. - __ DropArguments(params_size, scratch2, TurboAssembler::kCountIsBytes, - TurboAssembler::kCountIncludesReceiver); + __ DropArguments(params_size, scratch2, MacroAssembler::kCountIsBytes, + MacroAssembler::kCountIncludesReceiver); } // Tail-call |function_id| if |actual_state| == |expected_state| @@ -1265,7 +1265,7 @@ static void GenerateInterpreterPushArgs(MacroAssembler* masm, Register num_args, kSystemPointerSize)); // Push the arguments. 
__ PushArray(start_address, num_args, scratch, - TurboAssembler::PushArrayOrder::kReverse); + MacroAssembler::PushArrayOrder::kReverse); } // static @@ -1814,8 +1814,8 @@ void Builtins::Generate_FunctionPrototypeApply(MacroAssembler* masm) { } __ bind(&no_this_arg); __ DropArgumentsAndPushNewReceiver(rax, rdx, rcx, - TurboAssembler::kCountIsInteger, - TurboAssembler::kCountIncludesReceiver); + MacroAssembler::kCountIsInteger, + MacroAssembler::kCountIncludesReceiver); } // ----------- S t a t e ------------- @@ -1919,8 +1919,8 @@ void Builtins::Generate_ReflectApply(MacroAssembler* masm) { __ movq(rbx, args[3]); // argumentsList __ bind(&done); __ DropArgumentsAndPushNewReceiver(rax, rdx, rcx, - TurboAssembler::kCountIsInteger, - TurboAssembler::kCountIncludesReceiver); + MacroAssembler::kCountIsInteger, + MacroAssembler::kCountIncludesReceiver); } // ----------- S t a t e ------------- @@ -1971,8 +1971,8 @@ void Builtins::Generate_ReflectConstruct(MacroAssembler* masm) { __ bind(&done); __ DropArgumentsAndPushNewReceiver( rax, masm->RootAsOperand(RootIndex::kUndefinedValue), rcx, - TurboAssembler::kCountIsInteger, - TurboAssembler::kCountIncludesReceiver); + MacroAssembler::kCountIsInteger, + MacroAssembler::kCountIncludesReceiver); } // ----------- S t a t e ------------- @@ -3812,8 +3812,8 @@ void GenericJSToWasmWrapperHelper(MacroAssembler* masm, bool stack_switch) { // expected to be on the top of the stack). // We cannot use just the ret instruction for this, because we cannot pass the // number of slots to remove in a Register as an argument. - __ DropArguments(param_count, rbx, TurboAssembler::kCountIsInteger, - TurboAssembler::kCountExcludesReceiver); + __ DropArguments(param_count, rbx, MacroAssembler::kCountIsInteger, + MacroAssembler::kCountExcludesReceiver); __ ret(0); // -------------------------------------------------------------------------- diff --git a/src/codegen/arm/assembler-arm.h b/src/codegen/arm/assembler-arm.h index 592491db4d..cd68628b24 100644 --- a/src/codegen/arm/assembler-arm.h +++ b/src/codegen/arm/assembler-arm.h @@ -1435,7 +1435,7 @@ class V8_EXPORT_PRIVATE V8_NODISCARD UseScratchRegisterScope { private: friend class Assembler; - friend class TurboAssembler; + friend class MacroAssembler; template bool CanAcquireVfp() const; diff --git a/src/codegen/arm/macro-assembler-arm.cc b/src/codegen/arm/macro-assembler-arm.cc index 2055bd8157..2588de4784 100644 --- a/src/codegen/arm/macro-assembler-arm.cc +++ b/src/codegen/arm/macro-assembler-arm.cc @@ -42,7 +42,7 @@ namespace v8 { namespace internal { -int TurboAssembler::RequiredStackSizeForCallerSaved(SaveFPRegsMode fp_mode, +int MacroAssembler::RequiredStackSizeForCallerSaved(SaveFPRegsMode fp_mode, Register exclusion1, Register exclusion2, Register exclusion3) const { @@ -59,7 +59,7 @@ int TurboAssembler::RequiredStackSizeForCallerSaved(SaveFPRegsMode fp_mode, return bytes; } -int TurboAssembler::PushCallerSaved(SaveFPRegsMode fp_mode, Register exclusion1, +int MacroAssembler::PushCallerSaved(SaveFPRegsMode fp_mode, Register exclusion1, Register exclusion2, Register exclusion3) { ASM_CODE_COMMENT(this); int bytes = 0; @@ -77,7 +77,7 @@ int TurboAssembler::PushCallerSaved(SaveFPRegsMode fp_mode, Register exclusion1, return bytes; } -int TurboAssembler::PopCallerSaved(SaveFPRegsMode fp_mode, Register exclusion1, +int MacroAssembler::PopCallerSaved(SaveFPRegsMode fp_mode, Register exclusion1, Register exclusion2, Register exclusion3) { ASM_CODE_COMMENT(this); int bytes = 0; @@ -95,7 +95,7 @@ int 
TurboAssembler::PopCallerSaved(SaveFPRegsMode fp_mode, Register exclusion1, return bytes; } -void TurboAssembler::LoadFromConstantsTable(Register destination, +void MacroAssembler::LoadFromConstantsTable(Register destination, int constant_index) { DCHECK(RootsTable::IsImmortalImmovable(RootIndex::kBuiltinsConstantsTable)); @@ -106,11 +106,11 @@ void TurboAssembler::LoadFromConstantsTable(Register destination, ldr(destination, MemOperand(destination, offset)); } -void TurboAssembler::LoadRootRelative(Register destination, int32_t offset) { +void MacroAssembler::LoadRootRelative(Register destination, int32_t offset) { ldr(destination, MemOperand(kRootRegister, offset)); } -void TurboAssembler::LoadRootRegisterOffset(Register destination, +void MacroAssembler::LoadRootRegisterOffset(Register destination, intptr_t offset) { if (offset == 0) { Move(destination, kRootRegister); @@ -119,7 +119,7 @@ void TurboAssembler::LoadRootRegisterOffset(Register destination, } } -MemOperand TurboAssembler::ExternalReferenceAsOperand( +MemOperand MacroAssembler::ExternalReferenceAsOperand( ExternalReference reference, Register scratch) { if (root_array_available_ && options().enable_root_relative_access) { int64_t offset = @@ -148,20 +148,20 @@ MemOperand TurboAssembler::ExternalReferenceAsOperand( return MemOperand(scratch, 0); } -void TurboAssembler::Jump(Register target, Condition cond) { bx(target, cond); } +void MacroAssembler::Jump(Register target, Condition cond) { bx(target, cond); } -void TurboAssembler::Jump(intptr_t target, RelocInfo::Mode rmode, +void MacroAssembler::Jump(intptr_t target, RelocInfo::Mode rmode, Condition cond) { mov(pc, Operand(target, rmode), LeaveCC, cond); } -void TurboAssembler::Jump(Address target, RelocInfo::Mode rmode, +void MacroAssembler::Jump(Address target, RelocInfo::Mode rmode, Condition cond) { DCHECK(!RelocInfo::IsCodeTarget(rmode)); Jump(static_cast(target), rmode, cond); } -void TurboAssembler::Jump(Handle code, RelocInfo::Mode rmode, +void MacroAssembler::Jump(Handle code, RelocInfo::Mode rmode, Condition cond) { DCHECK(RelocInfo::IsCodeTarget(rmode)); DCHECK_IMPLIES(options().isolate_independent_code, @@ -177,20 +177,20 @@ void TurboAssembler::Jump(Handle code, RelocInfo::Mode rmode, Jump(static_cast(code.address()), rmode, cond); } -void TurboAssembler::Jump(const ExternalReference& reference) { +void MacroAssembler::Jump(const ExternalReference& reference) { UseScratchRegisterScope temps(this); Register scratch = temps.Acquire(); Move(scratch, reference); Jump(scratch); } -void TurboAssembler::Call(Register target, Condition cond) { +void MacroAssembler::Call(Register target, Condition cond) { // Block constant pool for the call instruction sequence. BlockConstPoolScope block_const_pool(this); blx(target, cond); } -void TurboAssembler::Call(Address target, RelocInfo::Mode rmode, Condition cond, +void MacroAssembler::Call(Address target, RelocInfo::Mode rmode, Condition cond, TargetAddressStorageMode mode, bool check_constant_pool) { // Check if we have to emit the constant pool before we block it. 
@@ -225,7 +225,7 @@ void TurboAssembler::Call(Address target, RelocInfo::Mode rmode, Condition cond, } } -void TurboAssembler::Call(Handle code, RelocInfo::Mode rmode, +void MacroAssembler::Call(Handle code, RelocInfo::Mode rmode, Condition cond, TargetAddressStorageMode mode, bool check_constant_pool) { DCHECK(RelocInfo::IsCodeTarget(rmode)); @@ -242,7 +242,7 @@ void TurboAssembler::Call(Handle code, RelocInfo::Mode rmode, Call(code.address(), rmode, cond, mode); } -void TurboAssembler::LoadEntryFromBuiltinIndex(Register builtin_index) { +void MacroAssembler::LoadEntryFromBuiltinIndex(Register builtin_index) { ASM_CODE_COMMENT(this); static_assert(kSystemPointerSize == 4); static_assert(kSmiShiftSize == 0); @@ -258,25 +258,25 @@ void TurboAssembler::LoadEntryFromBuiltinIndex(Register builtin_index) { ldr(builtin_index, MemOperand(kRootRegister, builtin_index)); } -void TurboAssembler::CallBuiltinByIndex(Register builtin_index) { +void MacroAssembler::CallBuiltinByIndex(Register builtin_index) { LoadEntryFromBuiltinIndex(builtin_index); Call(builtin_index); } -void TurboAssembler::LoadEntryFromBuiltin(Builtin builtin, +void MacroAssembler::LoadEntryFromBuiltin(Builtin builtin, Register destination) { ASM_CODE_COMMENT(this); ldr(destination, EntryFromBuiltinAsOperand(builtin)); } -MemOperand TurboAssembler::EntryFromBuiltinAsOperand(Builtin builtin) { +MemOperand MacroAssembler::EntryFromBuiltinAsOperand(Builtin builtin) { ASM_CODE_COMMENT(this); DCHECK(root_array_available()); return MemOperand(kRootRegister, IsolateData::BuiltinEntrySlotOffset(builtin)); } -void TurboAssembler::CallBuiltin(Builtin builtin, Condition cond) { +void MacroAssembler::CallBuiltin(Builtin builtin, Condition cond) { ASM_CODE_COMMENT_STRING(this, CommentForOffHeapTrampoline("call", builtin)); // Use ip directly instead of using UseScratchRegisterScope, as we do not // preserve scratch registers across calls. @@ -307,7 +307,7 @@ void TurboAssembler::CallBuiltin(Builtin builtin, Condition cond) { } } -void TurboAssembler::TailCallBuiltin(Builtin builtin, Condition cond) { +void MacroAssembler::TailCallBuiltin(Builtin builtin, Condition cond) { ASM_CODE_COMMENT_STRING(this, CommentForOffHeapTrampoline("tail call", builtin)); // Use ip directly instead of using UseScratchRegisterScope, as we do not @@ -339,12 +339,12 @@ void TurboAssembler::TailCallBuiltin(Builtin builtin, Condition cond) { } } -void TurboAssembler::LoadCodeEntry(Register destination, Register code_object) { +void MacroAssembler::LoadCodeEntry(Register destination, Register code_object) { ASM_CODE_COMMENT(this); ldr(destination, FieldMemOperand(code_object, Code::kCodeEntryPointOffset)); } -void TurboAssembler::LoadCodeInstructionStreamNonBuiltin(Register destination, +void MacroAssembler::LoadCodeInstructionStreamNonBuiltin(Register destination, Register code_object) { ASM_CODE_COMMENT(this); // Compute the InstructionStream object pointer from the code entry point. 
@@ -353,20 +353,20 @@ void TurboAssembler::LoadCodeInstructionStreamNonBuiltin(Register destination, Operand(InstructionStream::kHeaderSize - kHeapObjectTag)); } -void TurboAssembler::CallCodeObject(Register code_object) { +void MacroAssembler::CallCodeObject(Register code_object) { ASM_CODE_COMMENT(this); LoadCodeEntry(code_object, code_object); Call(code_object); } -void TurboAssembler::JumpCodeObject(Register code_object, JumpMode jump_mode) { +void MacroAssembler::JumpCodeObject(Register code_object, JumpMode jump_mode) { ASM_CODE_COMMENT(this); DCHECK_EQ(JumpMode::kJump, jump_mode); LoadCodeEntry(code_object, code_object); Jump(code_object); } -void TurboAssembler::StoreReturnAddressAndCall(Register target) { +void MacroAssembler::StoreReturnAddressAndCall(Register target) { ASM_CODE_COMMENT(this); // This generates the final instruction sequence for calls to C functions // once an exit frame has been constructed. @@ -384,15 +384,15 @@ void TurboAssembler::StoreReturnAddressAndCall(Register target) { Call(target); } -void TurboAssembler::Ret(Condition cond) { bx(lr, cond); } +void MacroAssembler::Ret(Condition cond) { bx(lr, cond); } -void TurboAssembler::Drop(int count, Condition cond) { +void MacroAssembler::Drop(int count, Condition cond) { if (count > 0) { add(sp, sp, Operand(count * kPointerSize), LeaveCC, cond); } } -void TurboAssembler::Drop(Register count, Condition cond) { +void MacroAssembler::Drop(Register count, Condition cond) { add(sp, sp, Operand(count, LSL, kPointerSizeLog2), LeaveCC, cond); } @@ -407,23 +407,23 @@ Operand MacroAssembler::ClearedValue() const { static_cast(HeapObjectReference::ClearedValue(isolate()).ptr())); } -void TurboAssembler::Call(Label* target) { bl(target); } +void MacroAssembler::Call(Label* target) { bl(target); } -void TurboAssembler::Push(Handle handle) { +void MacroAssembler::Push(Handle handle) { UseScratchRegisterScope temps(this); Register scratch = temps.Acquire(); mov(scratch, Operand(handle)); push(scratch); } -void TurboAssembler::Push(Smi smi) { +void MacroAssembler::Push(Smi smi) { UseScratchRegisterScope temps(this); Register scratch = temps.Acquire(); mov(scratch, Operand(smi)); push(scratch); } -void TurboAssembler::PushArray(Register array, Register size, Register scratch, +void MacroAssembler::PushArray(Register array, Register size, Register scratch, PushArrayOrder order) { ASM_CODE_COMMENT(this); UseScratchRegisterScope temps(this); @@ -453,9 +453,9 @@ void TurboAssembler::PushArray(Register array, Register size, Register scratch, } } -void TurboAssembler::Move(Register dst, Smi smi) { mov(dst, Operand(smi)); } +void MacroAssembler::Move(Register dst, Smi smi) { mov(dst, Operand(smi)); } -void TurboAssembler::Move(Register dst, Handle value) { +void MacroAssembler::Move(Register dst, Handle value) { // TODO(jgruber,v8:8887): Also consider a root-relative load when generating // non-isolate-independent code. In many cases it might be cheaper than // embedding the relocatable value. @@ -466,7 +466,7 @@ void TurboAssembler::Move(Register dst, Handle value) { mov(dst, Operand(value)); } -void TurboAssembler::Move(Register dst, ExternalReference reference) { +void MacroAssembler::Move(Register dst, ExternalReference reference) { // TODO(jgruber,v8:8887): Also consider a root-relative load when generating // non-isolate-independent code. In many cases it might be cheaper than // embedding the relocatable value. 
@@ -477,33 +477,33 @@ void TurboAssembler::Move(Register dst, ExternalReference reference) { mov(dst, Operand(reference)); } -void TurboAssembler::Move(Register dst, Register src, Condition cond) { +void MacroAssembler::Move(Register dst, Register src, Condition cond) { if (dst != src) { mov(dst, src, LeaveCC, cond); } } -void TurboAssembler::Move(SwVfpRegister dst, SwVfpRegister src, +void MacroAssembler::Move(SwVfpRegister dst, SwVfpRegister src, Condition cond) { if (dst != src) { vmov(dst, src, cond); } } -void TurboAssembler::Move(DwVfpRegister dst, DwVfpRegister src, +void MacroAssembler::Move(DwVfpRegister dst, DwVfpRegister src, Condition cond) { if (dst != src) { vmov(dst, src, cond); } } -void TurboAssembler::Move(QwNeonRegister dst, QwNeonRegister src) { +void MacroAssembler::Move(QwNeonRegister dst, QwNeonRegister src) { if (dst != src) { vmov(dst, src); } } -void TurboAssembler::MovePair(Register dst0, Register src0, Register dst1, +void MacroAssembler::MovePair(Register dst0, Register src0, Register dst1, Register src1) { DCHECK_NE(dst0, dst1); if (dst0 != src1) { @@ -519,7 +519,7 @@ void TurboAssembler::MovePair(Register dst0, Register src0, Register dst1, } } -void TurboAssembler::Swap(Register srcdst0, Register srcdst1) { +void MacroAssembler::Swap(Register srcdst0, Register srcdst1) { DCHECK(srcdst0 != srcdst1); UseScratchRegisterScope temps(this); Register scratch = temps.Acquire(); @@ -528,7 +528,7 @@ void TurboAssembler::Swap(Register srcdst0, Register srcdst1) { mov(srcdst1, scratch); } -void TurboAssembler::Swap(DwVfpRegister srcdst0, DwVfpRegister srcdst1) { +void MacroAssembler::Swap(DwVfpRegister srcdst0, DwVfpRegister srcdst1) { DCHECK(srcdst0 != srcdst1); DCHECK(VfpRegisterIsAvailable(srcdst0)); DCHECK(VfpRegisterIsAvailable(srcdst1)); @@ -544,7 +544,7 @@ void TurboAssembler::Swap(DwVfpRegister srcdst0, DwVfpRegister srcdst1) { } } -void TurboAssembler::Swap(QwNeonRegister srcdst0, QwNeonRegister srcdst1) { +void MacroAssembler::Swap(QwNeonRegister srcdst0, QwNeonRegister srcdst1) { DCHECK(srcdst0 != srcdst1); vswp(srcdst0, srcdst1); } @@ -617,7 +617,7 @@ void MacroAssembler::Sbfx(Register dst, Register src1, int lsb, int width, } } -void TurboAssembler::Bfc(Register dst, Register src, int lsb, int width, +void MacroAssembler::Bfc(Register dst, Register src, int lsb, int width, Condition cond) { DCHECK_LT(lsb, 32); if (!CpuFeatures::IsSupported(ARMv7) || predictable_code_size()) { @@ -630,7 +630,7 @@ void TurboAssembler::Bfc(Register dst, Register src, int lsb, int width, } } -void TurboAssembler::LoadRoot(Register destination, RootIndex index, +void MacroAssembler::LoadRoot(Register destination, RootIndex index, Condition cond) { ldr(destination, MemOperand(kRootRegister, RootRegisterOffsetForRootIndex(index)), cond); @@ -674,19 +674,19 @@ void MacroAssembler::RecordWriteField(Register object, int offset, bind(&done); } -void TurboAssembler::MaybeSaveRegisters(RegList registers) { +void MacroAssembler::MaybeSaveRegisters(RegList registers) { if (registers.is_empty()) return; ASM_CODE_COMMENT(this); stm(db_w, sp, registers); } -void TurboAssembler::MaybeRestoreRegisters(RegList registers) { +void MacroAssembler::MaybeRestoreRegisters(RegList registers) { if (registers.is_empty()) return; ASM_CODE_COMMENT(this); ldm(ia_w, sp, registers); } -void TurboAssembler::CallEphemeronKeyBarrier(Register object, Operand offset, +void MacroAssembler::CallEphemeronKeyBarrier(Register object, Operand offset, SaveFPRegsMode fp_mode) { ASM_CODE_COMMENT(this); RegList registers = 
WriteBarrierDescriptor::ComputeSavedRegisters(object); @@ -703,7 +703,7 @@ void TurboAssembler::CallEphemeronKeyBarrier(Register object, Operand offset, MaybeRestoreRegisters(registers); } -void TurboAssembler::CallRecordWriteStubSaveRegisters(Register object, +void MacroAssembler::CallRecordWriteStubSaveRegisters(Register object, Operand offset, SaveFPRegsMode fp_mode, StubCallMode mode) { @@ -721,7 +721,7 @@ void TurboAssembler::CallRecordWriteStubSaveRegisters(Register object, MaybeRestoreRegisters(registers); } -void TurboAssembler::CallRecordWriteStub(Register object, Register slot_address, +void MacroAssembler::CallRecordWriteStub(Register object, Register slot_address, SaveFPRegsMode fp_mode, StubCallMode mode) { ASM_CODE_COMMENT(this); @@ -740,7 +740,7 @@ void TurboAssembler::CallRecordWriteStub(Register object, Register slot_address, } } -void TurboAssembler::MoveObjectAndSlot(Register dst_object, Register dst_slot, +void MacroAssembler::MoveObjectAndSlot(Register dst_object, Register dst_slot, Register object, Operand offset) { DCHECK_NE(dst_object, dst_slot); DCHECK(offset.IsRegister() || offset.IsImmediate()); @@ -828,7 +828,7 @@ void MacroAssembler::RecordWrite(Register object, Operand offset, bind(&done); } -void TurboAssembler::PushCommonFrame(Register marker_reg) { +void MacroAssembler::PushCommonFrame(Register marker_reg) { ASM_CODE_COMMENT(this); if (marker_reg.is_valid()) { if (marker_reg.code() > fp.code()) { @@ -845,7 +845,7 @@ void TurboAssembler::PushCommonFrame(Register marker_reg) { } } -void TurboAssembler::PushStandardFrame(Register function_reg) { +void MacroAssembler::PushStandardFrame(Register function_reg) { ASM_CODE_COMMENT(this); DCHECK(!function_reg.is_valid() || function_reg.code() < cp.code()); stm(db_w, sp, {function_reg, cp, fp, lr}); @@ -855,7 +855,7 @@ void TurboAssembler::PushStandardFrame(Register function_reg) { Push(kJavaScriptCallArgCountRegister); } -void TurboAssembler::VFPCanonicalizeNaN(const DwVfpRegister dst, +void MacroAssembler::VFPCanonicalizeNaN(const DwVfpRegister dst, const DwVfpRegister src, const Condition cond) { // Subtracting 0.0 preserves all inputs except for signalling NaNs, which @@ -864,35 +864,35 @@ void TurboAssembler::VFPCanonicalizeNaN(const DwVfpRegister dst, vsub(dst, src, kDoubleRegZero, cond); } -void TurboAssembler::VFPCompareAndSetFlags(const SwVfpRegister src1, +void MacroAssembler::VFPCompareAndSetFlags(const SwVfpRegister src1, const SwVfpRegister src2, const Condition cond) { // Compare and move FPSCR flags to the normal condition flags. VFPCompareAndLoadFlags(src1, src2, pc, cond); } -void TurboAssembler::VFPCompareAndSetFlags(const SwVfpRegister src1, +void MacroAssembler::VFPCompareAndSetFlags(const SwVfpRegister src1, const float src2, const Condition cond) { // Compare and move FPSCR flags to the normal condition flags. VFPCompareAndLoadFlags(src1, src2, pc, cond); } -void TurboAssembler::VFPCompareAndSetFlags(const DwVfpRegister src1, +void MacroAssembler::VFPCompareAndSetFlags(const DwVfpRegister src1, const DwVfpRegister src2, const Condition cond) { // Compare and move FPSCR flags to the normal condition flags. VFPCompareAndLoadFlags(src1, src2, pc, cond); } -void TurboAssembler::VFPCompareAndSetFlags(const DwVfpRegister src1, +void MacroAssembler::VFPCompareAndSetFlags(const DwVfpRegister src1, const double src2, const Condition cond) { // Compare and move FPSCR flags to the normal condition flags. 
VFPCompareAndLoadFlags(src1, src2, pc, cond); } -void TurboAssembler::VFPCompareAndLoadFlags(const SwVfpRegister src1, +void MacroAssembler::VFPCompareAndLoadFlags(const SwVfpRegister src1, const SwVfpRegister src2, const Register fpscr_flags, const Condition cond) { @@ -901,7 +901,7 @@ void TurboAssembler::VFPCompareAndLoadFlags(const SwVfpRegister src1, vmrs(fpscr_flags, cond); } -void TurboAssembler::VFPCompareAndLoadFlags(const SwVfpRegister src1, +void MacroAssembler::VFPCompareAndLoadFlags(const SwVfpRegister src1, const float src2, const Register fpscr_flags, const Condition cond) { @@ -910,7 +910,7 @@ void TurboAssembler::VFPCompareAndLoadFlags(const SwVfpRegister src1, vmrs(fpscr_flags, cond); } -void TurboAssembler::VFPCompareAndLoadFlags(const DwVfpRegister src1, +void MacroAssembler::VFPCompareAndLoadFlags(const DwVfpRegister src1, const DwVfpRegister src2, const Register fpscr_flags, const Condition cond) { @@ -919,7 +919,7 @@ void TurboAssembler::VFPCompareAndLoadFlags(const DwVfpRegister src1, vmrs(fpscr_flags, cond); } -void TurboAssembler::VFPCompareAndLoadFlags(const DwVfpRegister src1, +void MacroAssembler::VFPCompareAndLoadFlags(const DwVfpRegister src1, const double src2, const Register fpscr_flags, const Condition cond) { @@ -928,7 +928,7 @@ void TurboAssembler::VFPCompareAndLoadFlags(const DwVfpRegister src1, vmrs(fpscr_flags, cond); } -void TurboAssembler::VmovHigh(Register dst, DwVfpRegister src) { +void MacroAssembler::VmovHigh(Register dst, DwVfpRegister src) { if (src.code() < 16) { const LowDwVfpRegister loc = LowDwVfpRegister::from_code(src.code()); vmov(dst, loc.high()); @@ -937,7 +937,7 @@ void TurboAssembler::VmovHigh(Register dst, DwVfpRegister src) { } } -void TurboAssembler::VmovHigh(DwVfpRegister dst, Register src) { +void MacroAssembler::VmovHigh(DwVfpRegister dst, Register src) { if (dst.code() < 16) { const LowDwVfpRegister loc = LowDwVfpRegister::from_code(dst.code()); vmov(loc.high(), src); @@ -946,7 +946,7 @@ void TurboAssembler::VmovHigh(DwVfpRegister dst, Register src) { } } -void TurboAssembler::VmovLow(Register dst, DwVfpRegister src) { +void MacroAssembler::VmovLow(Register dst, DwVfpRegister src) { if (src.code() < 16) { const LowDwVfpRegister loc = LowDwVfpRegister::from_code(src.code()); vmov(dst, loc.low()); @@ -955,7 +955,7 @@ void TurboAssembler::VmovLow(Register dst, DwVfpRegister src) { } } -void TurboAssembler::VmovLow(DwVfpRegister dst, Register src) { +void MacroAssembler::VmovLow(DwVfpRegister dst, Register src) { if (dst.code() < 16) { const LowDwVfpRegister loc = LowDwVfpRegister::from_code(dst.code()); vmov(loc.low(), src); @@ -964,7 +964,7 @@ void TurboAssembler::VmovLow(DwVfpRegister dst, Register src) { } } -void TurboAssembler::VmovExtended(Register dst, int src_code) { +void MacroAssembler::VmovExtended(Register dst, int src_code) { DCHECK_LE(SwVfpRegister::kNumRegisters, src_code); DCHECK_GT(SwVfpRegister::kNumRegisters * 2, src_code); if (src_code & 0x1) { @@ -974,7 +974,7 @@ void TurboAssembler::VmovExtended(Register dst, int src_code) { } } -void TurboAssembler::VmovExtended(int dst_code, Register src) { +void MacroAssembler::VmovExtended(int dst_code, Register src) { DCHECK_LE(SwVfpRegister::kNumRegisters, dst_code); DCHECK_GT(SwVfpRegister::kNumRegisters * 2, dst_code); if (dst_code & 0x1) { @@ -984,7 +984,7 @@ void TurboAssembler::VmovExtended(int dst_code, Register src) { } } -void TurboAssembler::VmovExtended(int dst_code, int src_code) { +void MacroAssembler::VmovExtended(int dst_code, int src_code) { if 
(src_code == dst_code) return; if (src_code < SwVfpRegister::kNumRegisters && @@ -1054,7 +1054,7 @@ void TurboAssembler::VmovExtended(int dst_code, int src_code) { } } -void TurboAssembler::VmovExtended(int dst_code, const MemOperand& src) { +void MacroAssembler::VmovExtended(int dst_code, const MemOperand& src) { if (dst_code < SwVfpRegister::kNumRegisters) { vldr(SwVfpRegister::from_code(dst_code), src); } else { @@ -1068,7 +1068,7 @@ void TurboAssembler::VmovExtended(int dst_code, const MemOperand& src) { } } -void TurboAssembler::VmovExtended(const MemOperand& dst, int src_code) { +void MacroAssembler::VmovExtended(const MemOperand& dst, int src_code) { if (src_code < SwVfpRegister::kNumRegisters) { vstr(SwVfpRegister::from_code(src_code), dst); } else { @@ -1081,7 +1081,7 @@ void TurboAssembler::VmovExtended(const MemOperand& dst, int src_code) { } } -void TurboAssembler::ExtractLane(Register dst, QwNeonRegister src, +void MacroAssembler::ExtractLane(Register dst, QwNeonRegister src, NeonDataType dt, int lane) { int size = NeonSz(dt); // 0, 1, 2 int byte = lane << size; @@ -1093,7 +1093,7 @@ void TurboAssembler::ExtractLane(Register dst, QwNeonRegister src, vmov(dt, dst, double_source, double_lane); } -void TurboAssembler::ExtractLane(Register dst, DwVfpRegister src, +void MacroAssembler::ExtractLane(Register dst, DwVfpRegister src, NeonDataType dt, int lane) { int size = NeonSz(dt); // 0, 1, 2 int byte = lane << size; @@ -1102,19 +1102,19 @@ void TurboAssembler::ExtractLane(Register dst, DwVfpRegister src, vmov(dt, dst, src, double_lane); } -void TurboAssembler::ExtractLane(SwVfpRegister dst, QwNeonRegister src, +void MacroAssembler::ExtractLane(SwVfpRegister dst, QwNeonRegister src, int lane) { int s_code = src.code() * 4 + lane; VmovExtended(dst.code(), s_code); } -void TurboAssembler::ExtractLane(DwVfpRegister dst, QwNeonRegister src, +void MacroAssembler::ExtractLane(DwVfpRegister dst, QwNeonRegister src, int lane) { DwVfpRegister double_dst = DwVfpRegister::from_code(src.code() * 2 + lane); vmov(dst, double_dst); } -void TurboAssembler::ReplaceLane(QwNeonRegister dst, QwNeonRegister src, +void MacroAssembler::ReplaceLane(QwNeonRegister dst, QwNeonRegister src, Register src_lane, NeonDataType dt, int lane) { Move(dst, src); int size = NeonSz(dt); // 0, 1, 2 @@ -1127,21 +1127,21 @@ void TurboAssembler::ReplaceLane(QwNeonRegister dst, QwNeonRegister src, vmov(dt, double_dst, double_lane, src_lane); } -void TurboAssembler::ReplaceLane(QwNeonRegister dst, QwNeonRegister src, +void MacroAssembler::ReplaceLane(QwNeonRegister dst, QwNeonRegister src, SwVfpRegister src_lane, int lane) { Move(dst, src); int s_code = dst.code() * 4 + lane; VmovExtended(s_code, src_lane.code()); } -void TurboAssembler::ReplaceLane(QwNeonRegister dst, QwNeonRegister src, +void MacroAssembler::ReplaceLane(QwNeonRegister dst, QwNeonRegister src, DwVfpRegister src_lane, int lane) { Move(dst, src); DwVfpRegister double_dst = DwVfpRegister::from_code(dst.code() * 2 + lane); vmov(double_dst, src_lane); } -void TurboAssembler::LoadLane(NeonSize sz, NeonListOperand dst_list, +void MacroAssembler::LoadLane(NeonSize sz, NeonListOperand dst_list, uint8_t lane, NeonMemOperand src) { if (sz == Neon64) { // vld1s is not valid for Neon64. 
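The ExtractLane and ReplaceLane overloads above all rely on the same index arithmetic: for an element type of log2-size size (0, 1 or 2, per the NeonSz comments), lane lane of a q register sits at byte lane << size, which decomposes into one of the register's two d halves plus a lane within that half. A small host-side restatement of that mapping (illustrative C++; kDoubleSize is restated here as an assumption, and the field names follow the visible locals):

#include <cassert>

// A q register is a pair of 8-byte d registers.
constexpr int kDoubleSize = 8;
constexpr int kDoubleSizeLog2 = 3;

struct DLane {
  int double_word;  // which half of the q register: 0 or 1
  int double_lane;  // lane index within that d register
};

// Mirrors the byte / double_lane computation in ExtractLane.
constexpr DLane MapLane(int size, int lane) {
  return DLane{(lane << size) >> kDoubleSizeLog2,
               ((lane << size) & (kDoubleSize - 1)) >> size};
}

int main() {
  // 32-bit lanes: lanes 0 and 1 live in the low d register, 2 and 3 in
  // the high one.
  assert(MapLane(2, 3).double_word == 1 && MapLane(2, 3).double_lane == 1);
  // 8-bit lanes: lane 11 is byte 3 of the high d register.
  assert(MapLane(0, 11).double_word == 1 && MapLane(0, 11).double_lane == 3);
  return 0;
}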
@@ -1151,7 +1151,7 @@ void TurboAssembler::LoadLane(NeonSize sz, NeonListOperand dst_list, } } -void TurboAssembler::StoreLane(NeonSize sz, NeonListOperand src_list, +void MacroAssembler::StoreLane(NeonSize sz, NeonListOperand src_list, uint8_t lane, NeonMemOperand dst) { if (sz == Neon64) { // vst1s is not valid for Neon64. @@ -1161,7 +1161,7 @@ void TurboAssembler::StoreLane(NeonSize sz, NeonListOperand src_list, } } -void TurboAssembler::LslPair(Register dst_low, Register dst_high, +void MacroAssembler::LslPair(Register dst_low, Register dst_high, Register src_low, Register src_high, Register shift) { DCHECK(!AreAliased(dst_high, src_low)); @@ -1186,7 +1186,7 @@ void TurboAssembler::LslPair(Register dst_low, Register dst_high, bind(&done); } -void TurboAssembler::LslPair(Register dst_low, Register dst_high, +void MacroAssembler::LslPair(Register dst_low, Register dst_high, Register src_low, Register src_high, uint32_t shift) { DCHECK_GE(63, shift); @@ -1209,7 +1209,7 @@ void TurboAssembler::LslPair(Register dst_low, Register dst_high, } } -void TurboAssembler::LsrPair(Register dst_low, Register dst_high, +void MacroAssembler::LsrPair(Register dst_low, Register dst_high, Register src_low, Register src_high, Register shift) { DCHECK(!AreAliased(dst_low, src_high)); @@ -1235,7 +1235,7 @@ void TurboAssembler::LsrPair(Register dst_low, Register dst_high, bind(&done); } -void TurboAssembler::LsrPair(Register dst_low, Register dst_high, +void MacroAssembler::LsrPair(Register dst_low, Register dst_high, Register src_low, Register src_high, uint32_t shift) { DCHECK_GE(63, shift); @@ -1258,7 +1258,7 @@ void TurboAssembler::LsrPair(Register dst_low, Register dst_high, } } -void TurboAssembler::AsrPair(Register dst_low, Register dst_high, +void MacroAssembler::AsrPair(Register dst_low, Register dst_high, Register src_low, Register src_high, Register shift) { DCHECK(!AreAliased(dst_low, src_high)); @@ -1283,7 +1283,7 @@ void TurboAssembler::AsrPair(Register dst_low, Register dst_high, bind(&done); } -void TurboAssembler::AsrPair(Register dst_low, Register dst_high, +void MacroAssembler::AsrPair(Register dst_low, Register dst_high, Register src_low, Register src_high, uint32_t shift) { DCHECK_GE(63, shift); @@ -1306,7 +1306,7 @@ void TurboAssembler::AsrPair(Register dst_low, Register dst_high, } } -void TurboAssembler::StubPrologue(StackFrame::Type type) { +void MacroAssembler::StubPrologue(StackFrame::Type type) { ASM_CODE_COMMENT(this); UseScratchRegisterScope temps(this); Register scratch = temps.Acquire(); @@ -1314,9 +1314,9 @@ void TurboAssembler::StubPrologue(StackFrame::Type type) { PushCommonFrame(scratch); } -void TurboAssembler::Prologue() { PushStandardFrame(r1); } +void MacroAssembler::Prologue() { PushStandardFrame(r1); } -void TurboAssembler::DropArguments(Register count, ArgumentsCountType type, +void MacroAssembler::DropArguments(Register count, ArgumentsCountType type, ArgumentsCountMode mode) { int receiver_bytes = (mode == kCountExcludesReceiver) ? 
kPointerSize : 0; switch (type) { @@ -1339,7 +1339,7 @@ void TurboAssembler::DropArguments(Register count, ArgumentsCountType type, } } -void TurboAssembler::DropArgumentsAndPushNewReceiver(Register argc, +void MacroAssembler::DropArgumentsAndPushNewReceiver(Register argc, Register receiver, ArgumentsCountType type, ArgumentsCountMode mode) { @@ -1354,7 +1354,7 @@ void TurboAssembler::DropArgumentsAndPushNewReceiver(Register argc, } } -void TurboAssembler::EnterFrame(StackFrame::Type type, +void MacroAssembler::EnterFrame(StackFrame::Type type, bool load_constant_pool_pointer_reg) { ASM_CODE_COMMENT(this); // r0-r3: preserved @@ -1370,7 +1370,7 @@ void TurboAssembler::EnterFrame(StackFrame::Type type, #endif // V8_ENABLE_WEBASSEMBLY } -int TurboAssembler::LeaveFrame(StackFrame::Type type) { +int MacroAssembler::LeaveFrame(StackFrame::Type type) { ASM_CODE_COMMENT(this); // r0: preserved // r1: preserved @@ -1385,7 +1385,7 @@ int TurboAssembler::LeaveFrame(StackFrame::Type type) { } #ifdef V8_OS_WIN -void TurboAssembler::AllocateStackSpace(Register bytes_scratch) { +void MacroAssembler::AllocateStackSpace(Register bytes_scratch) { // "Functions that allocate 4 KB or more on the stack must ensure that each // page prior to the final page is touched in order." Source: // https://docs.microsoft.com/en-us/cpp/build/overview-of-arm-abi-conventions?view=vs-2019#stack @@ -1408,7 +1408,7 @@ void TurboAssembler::AllocateStackSpace(Register bytes_scratch) { sub(sp, sp, bytes_scratch); } -void TurboAssembler::AllocateStackSpace(int bytes) { +void MacroAssembler::AllocateStackSpace(int bytes) { ASM_CODE_COMMENT(this); DCHECK_GE(bytes, 0); UseScratchRegisterScope temps(this); @@ -1470,7 +1470,7 @@ void MacroAssembler::EnterExitFrame(int stack_space, str(scratch, MemOperand(fp, ExitFrameConstants::kSPOffset)); } -int TurboAssembler::ActivationFrameAlignment() { +int MacroAssembler::ActivationFrameAlignment() { #if V8_HOST_ARCH_ARM // Running on the real platform. Use the alignment as mandated by the local // environment. @@ -1522,7 +1522,7 @@ void MacroAssembler::LeaveExitFrame(Register argument_count, } } -void TurboAssembler::MovFromFloatResult(const DwVfpRegister dst) { +void MacroAssembler::MovFromFloatResult(const DwVfpRegister dst) { if (use_eabi_hardfloat()) { Move(dst, d0); } else { @@ -1531,7 +1531,7 @@ void TurboAssembler::MovFromFloatResult(const DwVfpRegister dst) { } // On ARM this is just a synonym to make the purpose clear. -void TurboAssembler::MovFromFloatParameter(DwVfpRegister dst) { +void MacroAssembler::MovFromFloatParameter(DwVfpRegister dst) { MovFromFloatResult(dst); } @@ -1543,10 +1543,10 @@ void MacroAssembler::LoadStackLimit(Register destination, StackLimitKind kind) { kind == StackLimitKind::kRealStackLimit ? 
ExternalReference::address_of_real_jslimit(isolate) : ExternalReference::address_of_jslimit(isolate); - DCHECK(TurboAssembler::IsAddressableThroughRootRegister(isolate, limit)); + DCHECK(MacroAssembler::IsAddressableThroughRootRegister(isolate, limit)); intptr_t offset = - TurboAssembler::RootRegisterOffsetForExternalReference(isolate, limit); + MacroAssembler::RootRegisterOffsetForExternalReference(isolate, limit); CHECK(is_int32(offset)); ldr(destination, MemOperand(kRootRegister, offset)); } @@ -1841,7 +1841,7 @@ void MacroAssembler::JumpIfIsInRange(Register value, unsigned lower_limit, b(ls, on_in_range); } -void TurboAssembler::TryInlineTruncateDoubleToI(Register result, +void MacroAssembler::TryInlineTruncateDoubleToI(Register result, DwVfpRegister double_input, Label* done) { ASM_CODE_COMMENT(this); @@ -1867,7 +1867,7 @@ void TurboAssembler::TryInlineTruncateDoubleToI(Register result, b(lt, done); } -void TurboAssembler::TruncateDoubleToI(Isolate* isolate, Zone* zone, +void MacroAssembler::TruncateDoubleToI(Isolate* isolate, Zone* zone, Register result, DwVfpRegister double_input, StubCallMode stub_mode) { @@ -2121,11 +2121,11 @@ void MacroAssembler::EmitDecrementCounter(StatsCounter* counter, int value, } #ifdef V8_ENABLE_DEBUG_CODE -void TurboAssembler::Assert(Condition cond, AbortReason reason) { +void MacroAssembler::Assert(Condition cond, AbortReason reason) { if (v8_flags.debug_code) Check(cond, reason); } -void TurboAssembler::AssertUnreachable(AbortReason reason) { +void MacroAssembler::AssertUnreachable(AbortReason reason) { if (v8_flags.debug_code) Abort(reason); } @@ -2234,7 +2234,7 @@ void MacroAssembler::AssertUndefinedOrAllocationSite(Register object, } #endif // V8_ENABLE_DEBUG_CODE -void TurboAssembler::Check(Condition cond, AbortReason reason) { +void MacroAssembler::Check(Condition cond, AbortReason reason) { Label L; b(cond, &L); Abort(reason); @@ -2242,7 +2242,7 @@ void TurboAssembler::Check(Condition cond, AbortReason reason) { bind(&L); } -void TurboAssembler::Abort(AbortReason reason) { +void MacroAssembler::Abort(AbortReason reason) { ASM_CODE_COMMENT(this); Label abort_start; bind(&abort_start); @@ -2290,7 +2290,7 @@ void TurboAssembler::Abort(AbortReason reason) { // will not return here } -void TurboAssembler::LoadMap(Register destination, Register object) { +void MacroAssembler::LoadMap(Register destination, Register object) { ldr(destination, FieldMemOperand(object, HeapObject::kMapOffset)); } @@ -2307,7 +2307,7 @@ void MacroAssembler::LoadNativeContextSlot(Register dst, int index) { ldr(dst, MemOperand(dst, Context::SlotOffset(index))); } -void TurboAssembler::InitializeRootRegister() { +void MacroAssembler::InitializeRootRegister() { ASM_CODE_COMMENT(this); ExternalReference isolate_root = ExternalReference::isolate_root(isolate()); mov(kRootRegister, Operand(isolate_root)); @@ -2325,17 +2325,17 @@ void MacroAssembler::SmiTst(Register value) { tst(value, Operand(kSmiTagMask)); } -void TurboAssembler::JumpIfSmi(Register value, Label* smi_label) { +void MacroAssembler::JumpIfSmi(Register value, Label* smi_label) { tst(value, Operand(kSmiTagMask)); b(eq, smi_label); } -void TurboAssembler::JumpIfEqual(Register x, int32_t y, Label* dest) { +void MacroAssembler::JumpIfEqual(Register x, int32_t y, Label* dest) { cmp(x, Operand(y)); b(eq, dest); } -void TurboAssembler::JumpIfLessThan(Register x, int32_t y, Label* dest) { +void MacroAssembler::JumpIfLessThan(Register x, int32_t y, Label* dest) { cmp(x, Operand(y)); b(lt, dest); } @@ -2345,14 +2345,14 @@ void 
MacroAssembler::JumpIfNotSmi(Register value, Label* not_smi_label) { b(ne, not_smi_label); } -void TurboAssembler::CheckFor32DRegs(Register scratch) { +void MacroAssembler::CheckFor32DRegs(Register scratch) { ASM_CODE_COMMENT(this); Move(scratch, ExternalReference::cpu_features()); ldr(scratch, MemOperand(scratch)); tst(scratch, Operand(1u << VFP32DREGS)); } -void TurboAssembler::SaveFPRegs(Register location, Register scratch) { +void MacroAssembler::SaveFPRegs(Register location, Register scratch) { ASM_CODE_COMMENT(this); CpuFeatureScope scope(this, VFP32DREGS, CpuFeatureScope::kDontCheckSupported); CheckFor32DRegs(scratch); @@ -2361,7 +2361,7 @@ void TurboAssembler::SaveFPRegs(Register location, Register scratch) { vstm(db_w, location, d0, d15); } -void TurboAssembler::RestoreFPRegs(Register location, Register scratch) { +void MacroAssembler::RestoreFPRegs(Register location, Register scratch) { ASM_CODE_COMMENT(this); CpuFeatureScope scope(this, VFP32DREGS, CpuFeatureScope::kDontCheckSupported); CheckFor32DRegs(scratch); @@ -2370,7 +2370,7 @@ void TurboAssembler::RestoreFPRegs(Register location, Register scratch) { add(location, location, Operand(16 * kDoubleSize), LeaveCC, eq); } -void TurboAssembler::SaveFPRegsToHeap(Register location, Register scratch) { +void MacroAssembler::SaveFPRegsToHeap(Register location, Register scratch) { ASM_CODE_COMMENT(this); CpuFeatureScope scope(this, VFP32DREGS, CpuFeatureScope::kDontCheckSupported); CheckFor32DRegs(scratch); @@ -2379,7 +2379,7 @@ void TurboAssembler::SaveFPRegsToHeap(Register location, Register scratch) { add(location, location, Operand(16 * kDoubleSize), LeaveCC, eq); } -void TurboAssembler::RestoreFPRegsFromHeap(Register location, +void MacroAssembler::RestoreFPRegsFromHeap(Register location, Register scratch) { ASM_CODE_COMMENT(this); CpuFeatureScope scope(this, VFP32DREGS, CpuFeatureScope::kDontCheckSupported); @@ -2390,7 +2390,7 @@ void TurboAssembler::RestoreFPRegsFromHeap(Register location, } template <typename T> -void TurboAssembler::FloatMaxHelper(T result, T left, T right, +void MacroAssembler::FloatMaxHelper(T result, T left, T right, Label* out_of_line) { // This trivial case is caught sooner, so that the out-of-line code can be // completely avoided. @@ -2421,7 +2421,7 @@ void TurboAssembler::FloatMaxHelper(T result, T left, T right, } template <typename T> -void TurboAssembler::FloatMaxOutOfLineHelper(T result, T left, T right) { +void MacroAssembler::FloatMaxOutOfLineHelper(T result, T left, T right) { DCHECK(left != right); // ARMv8: At least one of left and right is a NaN. @@ -2434,7 +2434,7 @@ void TurboAssembler::FloatMaxOutOfLineHelper(T result, T left, T right) { } template <typename T> -void TurboAssembler::FloatMinHelper(T result, T left, T right, +void MacroAssembler::FloatMinHelper(T result, T left, T right, Label* out_of_line) { // This trivial case is caught sooner, so that the out-of-line code can be // completely avoided. @@ -2480,7 +2480,7 @@ void TurboAssembler::FloatMinHelper(T result, T left, T right, } template <typename T> -void TurboAssembler::FloatMinOutOfLineHelper(T result, T left, T right) { +void MacroAssembler::FloatMinOutOfLineHelper(T result, T left, T right) { DCHECK(left != right); // At least one of left and right is a NaN.
Use vadd to propagate the NaN @@ -2488,42 +2488,42 @@ void TurboAssembler::FloatMinOutOfLineHelper(T result, T left, T right) { vadd(result, left, right); } -void TurboAssembler::FloatMax(SwVfpRegister result, SwVfpRegister left, +void MacroAssembler::FloatMax(SwVfpRegister result, SwVfpRegister left, SwVfpRegister right, Label* out_of_line) { FloatMaxHelper(result, left, right, out_of_line); } -void TurboAssembler::FloatMin(SwVfpRegister result, SwVfpRegister left, +void MacroAssembler::FloatMin(SwVfpRegister result, SwVfpRegister left, SwVfpRegister right, Label* out_of_line) { FloatMinHelper(result, left, right, out_of_line); } -void TurboAssembler::FloatMax(DwVfpRegister result, DwVfpRegister left, +void MacroAssembler::FloatMax(DwVfpRegister result, DwVfpRegister left, DwVfpRegister right, Label* out_of_line) { FloatMaxHelper(result, left, right, out_of_line); } -void TurboAssembler::FloatMin(DwVfpRegister result, DwVfpRegister left, +void MacroAssembler::FloatMin(DwVfpRegister result, DwVfpRegister left, DwVfpRegister right, Label* out_of_line) { FloatMinHelper(result, left, right, out_of_line); } -void TurboAssembler::FloatMaxOutOfLine(SwVfpRegister result, SwVfpRegister left, +void MacroAssembler::FloatMaxOutOfLine(SwVfpRegister result, SwVfpRegister left, SwVfpRegister right) { FloatMaxOutOfLineHelper(result, left, right); } -void TurboAssembler::FloatMinOutOfLine(SwVfpRegister result, SwVfpRegister left, +void MacroAssembler::FloatMinOutOfLine(SwVfpRegister result, SwVfpRegister left, SwVfpRegister right) { FloatMinOutOfLineHelper(result, left, right); } -void TurboAssembler::FloatMaxOutOfLine(DwVfpRegister result, DwVfpRegister left, +void MacroAssembler::FloatMaxOutOfLine(DwVfpRegister result, DwVfpRegister left, DwVfpRegister right) { FloatMaxOutOfLineHelper(result, left, right); } -void TurboAssembler::FloatMinOutOfLine(DwVfpRegister result, DwVfpRegister left, +void MacroAssembler::FloatMinOutOfLine(DwVfpRegister result, DwVfpRegister left, DwVfpRegister right) { FloatMinOutOfLineHelper(result, left, right); } @@ -2532,7 +2532,7 @@ static const int kRegisterPassedArguments = 4; // The hardfloat calling convention passes double arguments in registers d0-d7. static const int kDoubleRegisterPassedArguments = 8; -int TurboAssembler::CalculateStackPassedWords(int num_reg_arguments, +int MacroAssembler::CalculateStackPassedWords(int num_reg_arguments, int num_double_arguments) { int stack_passed_words = 0; if (use_eabi_hardfloat()) { @@ -2554,7 +2554,7 @@ int TurboAssembler::CalculateStackPassedWords(int num_reg_arguments, return stack_passed_words; } -void TurboAssembler::PrepareCallCFunction(int num_reg_arguments, +void MacroAssembler::PrepareCallCFunction(int num_reg_arguments, int num_double_arguments, Register scratch) { ASM_CODE_COMMENT(this); @@ -2576,7 +2576,7 @@ void TurboAssembler::PrepareCallCFunction(int num_reg_arguments, } } -void TurboAssembler::MovToFloatParameter(DwVfpRegister src) { +void MacroAssembler::MovToFloatParameter(DwVfpRegister src) { DCHECK(src == d0); if (!use_eabi_hardfloat()) { vmov(r0, r1, src); @@ -2584,11 +2584,11 @@ void TurboAssembler::MovToFloatParameter(DwVfpRegister src) { } // On ARM this is just a synonym to make the purpose clear. 
-void TurboAssembler::MovToFloatResult(DwVfpRegister src) { +void MacroAssembler::MovToFloatResult(DwVfpRegister src) { MovToFloatParameter(src); } -void TurboAssembler::MovToFloatParameters(DwVfpRegister src1, +void MacroAssembler::MovToFloatParameters(DwVfpRegister src1, DwVfpRegister src2) { DCHECK(src1 == d0); DCHECK(src2 == d1); @@ -2598,7 +2598,7 @@ void TurboAssembler::MovToFloatParameters(DwVfpRegister src1, } } -void TurboAssembler::CallCFunction(ExternalReference function, +void MacroAssembler::CallCFunction(ExternalReference function, int num_reg_arguments, int num_double_arguments) { UseScratchRegisterScope temps(this); @@ -2607,21 +2607,21 @@ void TurboAssembler::CallCFunction(ExternalReference function, CallCFunctionHelper(scratch, num_reg_arguments, num_double_arguments); } -void TurboAssembler::CallCFunction(Register function, int num_reg_arguments, +void MacroAssembler::CallCFunction(Register function, int num_reg_arguments, int num_double_arguments) { CallCFunctionHelper(function, num_reg_arguments, num_double_arguments); } -void TurboAssembler::CallCFunction(ExternalReference function, +void MacroAssembler::CallCFunction(ExternalReference function, int num_arguments) { CallCFunction(function, num_arguments, 0); } -void TurboAssembler::CallCFunction(Register function, int num_arguments) { +void MacroAssembler::CallCFunction(Register function, int num_arguments) { CallCFunction(function, num_arguments, 0); } -void TurboAssembler::CallCFunctionHelper(Register function, +void MacroAssembler::CallCFunctionHelper(Register function, int num_reg_arguments, int num_double_arguments) { ASM_CODE_COMMENT(this); @@ -2704,7 +2704,7 @@ void TurboAssembler::CallCFunctionHelper(Register function, } } -void TurboAssembler::CheckPageFlag(Register object, int mask, Condition cc, +void MacroAssembler::CheckPageFlag(Register object, int mask, Condition cc, Label* condition_met) { ASM_CODE_COMMENT(this); UseScratchRegisterScope temps(this); @@ -2732,13 +2732,13 @@ Register GetRegisterThatIsNotOneOf(Register reg1, Register reg2, Register reg3, UNREACHABLE(); } -void TurboAssembler::ComputeCodeStartAddress(Register dst) { +void MacroAssembler::ComputeCodeStartAddress(Register dst) { ASM_CODE_COMMENT(this); // We can use the register pc - 8 for the address of the current instruction. 
sub(dst, pc, Operand(pc_offset() + Instruction::kPcLoadDelta)); } -void TurboAssembler::CallForDeoptimization(Builtin target, int, Label* exit, +void MacroAssembler::CallForDeoptimization(Builtin target, int, Label* exit, DeoptimizeKind kind, Label* ret, Label*) { ASM_CODE_COMMENT(this); @@ -2760,10 +2760,10 @@ void TurboAssembler::CallForDeoptimization(Builtin target, int, Label* exit, DCHECK(!has_pending_constants()); } -void TurboAssembler::Trap() { stop(); } -void TurboAssembler::DebugBreak() { stop(); } +void MacroAssembler::Trap() { stop(); } +void MacroAssembler::DebugBreak() { stop(); } -void TurboAssembler::I64x2BitMask(Register dst, QwNeonRegister src) { +void MacroAssembler::I64x2BitMask(Register dst, QwNeonRegister src) { UseScratchRegisterScope temps(this); QwNeonRegister tmp1 = temps.AcquireQ(); Register tmp = temps.Acquire(); @@ -2774,7 +2774,7 @@ void TurboAssembler::I64x2BitMask(Register dst, QwNeonRegister src) { add(dst, dst, Operand(tmp, LSL, 1)); } -void TurboAssembler::I64x2Eq(QwNeonRegister dst, QwNeonRegister src1, +void MacroAssembler::I64x2Eq(QwNeonRegister dst, QwNeonRegister src1, QwNeonRegister src2) { UseScratchRegisterScope temps(this); Simd128Register scratch = temps.AcquireQ(); @@ -2783,7 +2783,7 @@ void TurboAssembler::I64x2Eq(QwNeonRegister dst, QwNeonRegister src1, vand(dst, dst, scratch); } -void TurboAssembler::I64x2Ne(QwNeonRegister dst, QwNeonRegister src1, +void MacroAssembler::I64x2Ne(QwNeonRegister dst, QwNeonRegister src1, QwNeonRegister src2) { UseScratchRegisterScope temps(this); Simd128Register tmp = temps.AcquireQ(); @@ -2793,14 +2793,14 @@ void TurboAssembler::I64x2Ne(QwNeonRegister dst, QwNeonRegister src1, vorn(dst, dst, tmp); } -void TurboAssembler::I64x2GtS(QwNeonRegister dst, QwNeonRegister src1, +void MacroAssembler::I64x2GtS(QwNeonRegister dst, QwNeonRegister src1, QwNeonRegister src2) { ASM_CODE_COMMENT(this); vqsub(NeonS64, dst, src2, src1); vshr(NeonS64, dst, dst, 63); } -void TurboAssembler::I64x2GeS(QwNeonRegister dst, QwNeonRegister src1, +void MacroAssembler::I64x2GeS(QwNeonRegister dst, QwNeonRegister src1, QwNeonRegister src2) { ASM_CODE_COMMENT(this); vqsub(NeonS64, dst, src1, src2); @@ -2808,7 +2808,7 @@ void TurboAssembler::I64x2GeS(QwNeonRegister dst, QwNeonRegister src1, vmvn(dst, dst); } -void TurboAssembler::I64x2AllTrue(Register dst, QwNeonRegister src) { +void MacroAssembler::I64x2AllTrue(Register dst, QwNeonRegister src) { ASM_CODE_COMMENT(this); UseScratchRegisterScope temps(this); QwNeonRegister tmp = temps.AcquireQ(); @@ -2832,7 +2832,7 @@ void TurboAssembler::I64x2AllTrue(Register dst, QwNeonRegister src) { // = definition of i64x2.all_true.
} -void TurboAssembler::I64x2Abs(QwNeonRegister dst, QwNeonRegister src) { +void MacroAssembler::I64x2Abs(QwNeonRegister dst, QwNeonRegister src) { ASM_CODE_COMMENT(this); UseScratchRegisterScope temps(this); Simd128Register tmp = temps.AcquireQ(); @@ -2861,17 +2861,17 @@ void F64x2ConvertLowHelper(Assembler* assm, QwNeonRegister dst, } } // namespace -void TurboAssembler::F64x2ConvertLowI32x4S(QwNeonRegister dst, +void MacroAssembler::F64x2ConvertLowI32x4S(QwNeonRegister dst, QwNeonRegister src) { F64x2ConvertLowHelper(this, dst, src, &Assembler::vcvt_f64_s32); } -void TurboAssembler::F64x2ConvertLowI32x4U(QwNeonRegister dst, +void MacroAssembler::F64x2ConvertLowI32x4U(QwNeonRegister dst, QwNeonRegister src) { F64x2ConvertLowHelper(this, dst, src, &Assembler::vcvt_f64_u32); } -void TurboAssembler::F64x2PromoteLowF32x4(QwNeonRegister dst, +void MacroAssembler::F64x2PromoteLowF32x4(QwNeonRegister dst, QwNeonRegister src) { F64x2ConvertLowHelper(this, dst, src, &Assembler::vcvt_f64_f32); } diff --git a/src/codegen/arm/macro-assembler-arm.h b/src/codegen/arm/macro-assembler-arm.h index ea95b03416..fb69d20f4d 100644 --- a/src/codegen/arm/macro-assembler-arm.h +++ b/src/codegen/arm/macro-assembler-arm.h @@ -43,9 +43,9 @@ enum TargetAddressStorageMode { NEVER_INLINE_TARGET_ADDRESS }; -class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase { +class V8_EXPORT_PRIVATE MacroAssembler : public MacroAssemblerBase { public: - using TurboAssemblerBase::TurboAssemblerBase; + using MacroAssemblerBase::MacroAssemblerBase; // Activation support. void EnterFrame(StackFrame::Type type, @@ -596,49 +596,6 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase { void F64x2ConvertLowI32x4U(QwNeonRegister dst, QwNeonRegister src); void F64x2PromoteLowF32x4(QwNeonRegister dst, QwNeonRegister src); - private: - // Compare single values and then load the fpscr flags to a register. - void VFPCompareAndLoadFlags(const SwVfpRegister src1, - const SwVfpRegister src2, - const Register fpscr_flags, - const Condition cond = al); - void VFPCompareAndLoadFlags(const SwVfpRegister src1, const float src2, - const Register fpscr_flags, - const Condition cond = al); - - // Compare double values and then load the fpscr flags to a register. - void VFPCompareAndLoadFlags(const DwVfpRegister src1, - const DwVfpRegister src2, - const Register fpscr_flags, - const Condition cond = al); - void VFPCompareAndLoadFlags(const DwVfpRegister src1, const double src2, - const Register fpscr_flags, - const Condition cond = al); - - void Jump(intptr_t target, RelocInfo::Mode rmode, Condition cond = al); - - // Implementation helpers for FloatMin and FloatMax. - template <typename T> - void FloatMaxHelper(T result, T left, T right, Label* out_of_line); - template <typename T> - void FloatMinHelper(T result, T left, T right, Label* out_of_line); - template <typename T> - void FloatMaxOutOfLineHelper(T result, T left, T right); - template <typename T> - void FloatMinOutOfLineHelper(T result, T left, T right); - - int CalculateStackPassedWords(int num_reg_arguments, - int num_double_arguments); - - void CallCFunctionHelper(Register function, int num_reg_arguments, - int num_double_arguments); -}; - -// MacroAssembler implements a collection of frequently used macros.
-class V8_EXPORT_PRIVATE MacroAssembler : public TurboAssembler { - public: - using TurboAssembler::TurboAssembler; - void Mls(Register dst, Register src1, Register src2, Register srcA, Condition cond = al); void And(Register dst, Register src1, const Operand& src2, @@ -899,6 +856,42 @@ class V8_EXPORT_PRIVATE MacroAssembler : public TurboAssembler { Register actual_parameter_count, Label* done, InvokeType type); + // Compare single values and then load the fpscr flags to a register. + void VFPCompareAndLoadFlags(const SwVfpRegister src1, + const SwVfpRegister src2, + const Register fpscr_flags, + const Condition cond = al); + void VFPCompareAndLoadFlags(const SwVfpRegister src1, const float src2, + const Register fpscr_flags, + const Condition cond = al); + + // Compare double values and then load the fpscr flags to a register. + void VFPCompareAndLoadFlags(const DwVfpRegister src1, + const DwVfpRegister src2, + const Register fpscr_flags, + const Condition cond = al); + void VFPCompareAndLoadFlags(const DwVfpRegister src1, const double src2, + const Register fpscr_flags, + const Condition cond = al); + + void Jump(intptr_t target, RelocInfo::Mode rmode, Condition cond = al); + + // Implementation helpers for FloatMin and FloatMax. + template <typename T> + void FloatMaxHelper(T result, T left, T right, Label* out_of_line); + template <typename T> + void FloatMinHelper(T result, T left, T right, Label* out_of_line); + template <typename T> + void FloatMaxOutOfLineHelper(T result, T left, T right); + template <typename T> + void FloatMinOutOfLineHelper(T result, T left, T right); + + int CalculateStackPassedWords(int num_reg_arguments, + int num_double_arguments); + + void CallCFunctionHelper(Register function, int num_reg_arguments, + int num_double_arguments); + DISALLOW_IMPLICIT_CONSTRUCTORS(MacroAssembler); }; diff --git a/src/codegen/arm64/macro-assembler-arm64-inl.h b/src/codegen/arm64/macro-assembler-arm64-inl.h index 6a4cc044ee..55e416957a 100644 --- a/src/codegen/arm64/macro-assembler-arm64-inl.h +++ b/src/codegen/arm64/macro-assembler-arm64-inl.h @@ -21,26 +21,26 @@ MemOperand FieldMemOperand(Register object, int offset) { return MemOperand(object, offset - kHeapObjectTag); } -void TurboAssembler::And(const Register& rd, const Register& rn, +void MacroAssembler::And(const Register& rd, const Register& rn, const Operand& operand) { DCHECK(allow_macro_instructions()); DCHECK(!rd.IsZero()); LogicalMacro(rd, rn, operand, AND); } -void TurboAssembler::Ands(const Register& rd, const Register& rn, +void MacroAssembler::Ands(const Register& rd, const Register& rn, const Operand& operand) { DCHECK(allow_macro_instructions()); DCHECK(!rd.IsZero()); LogicalMacro(rd, rn, operand, ANDS); } -void TurboAssembler::Tst(const Register& rn, const Operand& operand) { +void MacroAssembler::Tst(const Register& rn, const Operand& operand) { DCHECK(allow_macro_instructions()); LogicalMacro(AppropriateZeroRegFor(rn), rn, operand, ANDS); } -void TurboAssembler::Bic(const Register& rd, const Register& rn, +void MacroAssembler::Bic(const Register& rd, const Register& rn, const Operand& operand) { DCHECK(allow_macro_instructions()); DCHECK(!rd.IsZero()); @@ -54,35 +54,35 @@ void MacroAssembler::Bics(const Register& rd, const Register& rn, LogicalMacro(rd, rn, operand, BICS); } -void TurboAssembler::Orr(const Register& rd, const Register& rn, +void MacroAssembler::Orr(const Register& rd, const Register& rn, const Operand& operand) { DCHECK(allow_macro_instructions()); DCHECK(!rd.IsZero()); LogicalMacro(rd, rn, operand, ORR); } -void TurboAssembler::Orn(const
Register& rd, const Register& rn, +void MacroAssembler::Orn(const Register& rd, const Register& rn, const Operand& operand) { DCHECK(allow_macro_instructions()); DCHECK(!rd.IsZero()); LogicalMacro(rd, rn, operand, ORN); } -void TurboAssembler::Eor(const Register& rd, const Register& rn, +void MacroAssembler::Eor(const Register& rd, const Register& rn, const Operand& operand) { DCHECK(allow_macro_instructions()); DCHECK(!rd.IsZero()); LogicalMacro(rd, rn, operand, EOR); } -void TurboAssembler::Eon(const Register& rd, const Register& rn, +void MacroAssembler::Eon(const Register& rd, const Register& rn, const Operand& operand) { DCHECK(allow_macro_instructions()); DCHECK(!rd.IsZero()); LogicalMacro(rd, rn, operand, EON); } -void TurboAssembler::Ccmp(const Register& rn, const Operand& operand, +void MacroAssembler::Ccmp(const Register& rn, const Operand& operand, StatusFlags nzcv, Condition cond) { DCHECK(allow_macro_instructions()); if (operand.IsImmediate() && (operand.ImmediateValue() < 0)) { @@ -92,7 +92,7 @@ void TurboAssembler::Ccmp(const Register& rn, const Operand& operand, } } -void TurboAssembler::CcmpTagged(const Register& rn, const Operand& operand, +void MacroAssembler::CcmpTagged(const Register& rn, const Operand& operand, StatusFlags nzcv, Condition cond) { if (COMPRESS_POINTERS_BOOL) { Ccmp(rn.W(), operand.ToW(), nzcv, cond); @@ -101,7 +101,7 @@ void TurboAssembler::CcmpTagged(const Register& rn, const Operand& operand, } } -void TurboAssembler::Ccmn(const Register& rn, const Operand& operand, +void MacroAssembler::Ccmn(const Register& rn, const Operand& operand, StatusFlags nzcv, Condition cond) { DCHECK(allow_macro_instructions()); if (operand.IsImmediate() && (operand.ImmediateValue() < 0)) { @@ -111,7 +111,7 @@ void TurboAssembler::Ccmn(const Register& rn, const Operand& operand, } } -void TurboAssembler::Add(const Register& rd, const Register& rn, +void MacroAssembler::Add(const Register& rd, const Register& rn, const Operand& operand) { DCHECK(allow_macro_instructions()); if (operand.IsImmediate() && (operand.ImmediateValue() < 0) && @@ -122,7 +122,7 @@ void TurboAssembler::Add(const Register& rd, const Register& rn, } } -void TurboAssembler::Adds(const Register& rd, const Register& rn, +void MacroAssembler::Adds(const Register& rd, const Register& rn, const Operand& operand) { DCHECK(allow_macro_instructions()); if (operand.IsImmediate() && (operand.ImmediateValue() < 0) && @@ -133,7 +133,7 @@ void TurboAssembler::Adds(const Register& rd, const Register& rn, } } -void TurboAssembler::Sub(const Register& rd, const Register& rn, +void MacroAssembler::Sub(const Register& rd, const Register& rn, const Operand& operand) { DCHECK(allow_macro_instructions()); if (operand.IsImmediate() && (operand.ImmediateValue() < 0) && @@ -144,7 +144,7 @@ void TurboAssembler::Sub(const Register& rd, const Register& rn, } } -void TurboAssembler::Subs(const Register& rd, const Register& rn, +void MacroAssembler::Subs(const Register& rd, const Register& rn, const Operand& operand) { DCHECK(allow_macro_instructions()); if (operand.IsImmediate() && (operand.ImmediateValue() < 0) && @@ -155,17 +155,17 @@ void TurboAssembler::Subs(const Register& rd, const Register& rn, } } -void TurboAssembler::Cmn(const Register& rn, const Operand& operand) { +void MacroAssembler::Cmn(const Register& rn, const Operand& operand) { DCHECK(allow_macro_instructions()); Adds(AppropriateZeroRegFor(rn), rn, operand); } -void TurboAssembler::Cmp(const Register& rn, const Operand& operand) { +void MacroAssembler::Cmp(const 
Register& rn, const Operand& operand) { DCHECK(allow_macro_instructions()); Subs(AppropriateZeroRegFor(rn), rn, operand); } -void TurboAssembler::CmpTagged(const Register& rn, const Operand& operand) { +void MacroAssembler::CmpTagged(const Register& rn, const Operand& operand) { if (COMPRESS_POINTERS_BOOL) { Cmp(rn.W(), operand.ToW()); } else { @@ -173,7 +173,7 @@ void TurboAssembler::CmpTagged(const Register& rn, const Operand& operand) { } } -void TurboAssembler::Neg(const Register& rd, const Operand& operand) { +void MacroAssembler::Neg(const Register& rd, const Operand& operand) { DCHECK(allow_macro_instructions()); DCHECK(!rd.IsZero()); if (operand.IsImmediate()) { @@ -183,12 +183,12 @@ void TurboAssembler::Neg(const Register& rd, const Operand& operand) { } } -void TurboAssembler::Negs(const Register& rd, const Operand& operand) { +void MacroAssembler::Negs(const Register& rd, const Operand& operand) { DCHECK(allow_macro_instructions()); Subs(rd, AppropriateZeroRegFor(rd), operand); } -void TurboAssembler::Adc(const Register& rd, const Register& rn, +void MacroAssembler::Adc(const Register& rd, const Register& rn, const Operand& operand) { DCHECK(allow_macro_instructions()); DCHECK(!rd.IsZero()); @@ -230,14 +230,14 @@ void MacroAssembler::Ngcs(const Register& rd, const Operand& operand) { Sbcs(rd, zr, operand); } -void TurboAssembler::Mvn(const Register& rd, uint64_t imm) { +void MacroAssembler::Mvn(const Register& rd, uint64_t imm) { DCHECK(allow_macro_instructions()); DCHECK(!rd.IsZero()); Mov(rd, ~imm); } #define DEFINE_FUNCTION(FN, REGTYPE, REG, OP) \ - void TurboAssembler::FN(const REGTYPE REG, const MemOperand& addr) { \ + void MacroAssembler::FN(const REGTYPE REG, const MemOperand& addr) { \ DCHECK(allow_macro_instructions()); \ LoadStoreMacro(REG, addr, OP); \ } @@ -245,7 +245,7 @@ LS_MACRO_LIST(DEFINE_FUNCTION) #undef DEFINE_FUNCTION #define DEFINE_FUNCTION(FN, REGTYPE, REG, REG2, OP) \ - void TurboAssembler::FN(const REGTYPE REG, const REGTYPE REG2, \ + void MacroAssembler::FN(const REGTYPE REG, const REGTYPE REG2, \ const MemOperand& addr) { \ DCHECK(allow_macro_instructions()); \ LoadStorePairMacro(REG, REG2, addr, OP); \ @@ -254,7 +254,7 @@ LSPAIR_MACRO_LIST(DEFINE_FUNCTION) #undef DEFINE_FUNCTION #define DECLARE_FUNCTION(FN, OP) \ - void TurboAssembler::FN(const Register& rt, const Register& rn) { \ + void MacroAssembler::FN(const Register& rt, const Register& rn) { \ DCHECK(allow_macro_instructions()); \ OP(rt, rn); \ } @@ -270,32 +270,32 @@ LDA_STL_MACRO_LIST(DECLARE_FUNCTION) STLX_MACRO_LIST(DECLARE_FUNCTION) #undef DECLARE_FUNCTION -void TurboAssembler::Asr(const Register& rd, const Register& rn, +void MacroAssembler::Asr(const Register& rd, const Register& rn, unsigned shift) { DCHECK(allow_macro_instructions()); DCHECK(!rd.IsZero()); asr(rd, rn, shift); } -void TurboAssembler::Asr(const Register& rd, const Register& rn, +void MacroAssembler::Asr(const Register& rd, const Register& rn, const Register& rm) { DCHECK(allow_macro_instructions()); DCHECK(!rd.IsZero()); asrv(rd, rn, rm); } -void TurboAssembler::B(Label* label) { +void MacroAssembler::B(Label* label) { DCHECK(allow_macro_instructions()); b(label); CheckVeneerPool(false, false); } -void TurboAssembler::B(Condition cond, Label* label) { +void MacroAssembler::B(Condition cond, Label* label) { DCHECK(allow_macro_instructions()); B(label, cond); } -void TurboAssembler::Bfi(const Register& rd, const Register& rn, unsigned lsb, +void MacroAssembler::Bfi(const Register& rd, const Register& rn, unsigned lsb, 
unsigned width) { DCHECK(allow_macro_instructions()); DCHECK(!rd.IsZero()); @@ -309,7 +309,7 @@ void MacroAssembler::Bfxil(const Register& rd, const Register& rn, unsigned lsb, bfxil(rd, rn, lsb, width); } -void TurboAssembler::Bind(Label* label, BranchTargetIdentifier id) { +void MacroAssembler::Bind(Label* label, BranchTargetIdentifier id) { DCHECK(allow_macro_instructions()); if (id == BranchTargetIdentifier::kNone) { bind(label); @@ -326,21 +326,21 @@ void TurboAssembler::Bind(Label* label, BranchTargetIdentifier id) { } } -void TurboAssembler::CodeEntry() { CallTarget(); } +void MacroAssembler::CodeEntry() { CallTarget(); } -void TurboAssembler::ExceptionHandler() { JumpTarget(); } +void MacroAssembler::ExceptionHandler() { JumpTarget(); } -void TurboAssembler::BindExceptionHandler(Label* label) { +void MacroAssembler::BindExceptionHandler(Label* label) { BindJumpTarget(label); } -void TurboAssembler::JumpTarget() { +void MacroAssembler::JumpTarget() { #ifdef V8_ENABLE_CONTROL_FLOW_INTEGRITY bti(BranchTargetIdentifier::kBtiJump); #endif } -void TurboAssembler::BindJumpTarget(Label* label) { +void MacroAssembler::BindJumpTarget(Label* label) { #ifdef V8_ENABLE_CONTROL_FLOW_INTEGRITY Bind(label, BranchTargetIdentifier::kBtiJump); #else @@ -348,19 +348,19 @@ void TurboAssembler::BindJumpTarget(Label* label) { #endif } -void TurboAssembler::CallTarget() { +void MacroAssembler::CallTarget() { #ifdef V8_ENABLE_CONTROL_FLOW_INTEGRITY bti(BranchTargetIdentifier::kBtiCall); #endif } -void TurboAssembler::JumpOrCallTarget() { +void MacroAssembler::JumpOrCallTarget() { #ifdef V8_ENABLE_CONTROL_FLOW_INTEGRITY bti(BranchTargetIdentifier::kBtiJumpCall); #endif } -void TurboAssembler::BindJumpOrCallTarget(Label* label) { +void MacroAssembler::BindJumpOrCallTarget(Label* label) { #ifdef V8_ENABLE_CONTROL_FLOW_INTEGRITY Bind(label, BranchTargetIdentifier::kBtiJumpCall); #else @@ -368,24 +368,24 @@ void TurboAssembler::BindJumpOrCallTarget(Label* label) { #endif } -void TurboAssembler::Bl(Label* label) { +void MacroAssembler::Bl(Label* label) { DCHECK(allow_macro_instructions()); bl(label); } -void TurboAssembler::Blr(const Register& xn) { +void MacroAssembler::Blr(const Register& xn) { DCHECK(allow_macro_instructions()); DCHECK(!xn.IsZero()); blr(xn); } -void TurboAssembler::Br(const Register& xn) { +void MacroAssembler::Br(const Register& xn) { DCHECK(allow_macro_instructions()); DCHECK(!xn.IsZero()); br(xn); } -void TurboAssembler::Brk(int code) { +void MacroAssembler::Brk(int code) { DCHECK(allow_macro_instructions()); brk(code); } @@ -406,19 +406,19 @@ void MacroAssembler::Cinv(const Register& rd, const Register& rn, cinv(rd, rn, cond); } -void TurboAssembler::Cls(const Register& rd, const Register& rn) { +void MacroAssembler::Cls(const Register& rd, const Register& rn) { DCHECK(allow_macro_instructions()); DCHECK(!rd.IsZero()); cls(rd, rn); } -void TurboAssembler::Clz(const Register& rd, const Register& rn) { +void MacroAssembler::Clz(const Register& rd, const Register& rn) { DCHECK(allow_macro_instructions()); DCHECK(!rd.IsZero()); clz(rd, rn); } -void TurboAssembler::Cneg(const Register& rd, const Register& rn, +void MacroAssembler::Cneg(const Register& rd, const Register& rn, Condition cond) { DCHECK(allow_macro_instructions()); DCHECK(!rd.IsZero()); @@ -437,7 +437,7 @@ void MacroAssembler::CzeroX(const Register& rd, Condition cond) { // Conditionally move a value into the destination register. Only X registers // are supported due to the truncation side-effect when used on W registers. 
-void TurboAssembler::CmovX(const Register& rd, const Register& rn, +void MacroAssembler::CmovX(const Register& rd, const Register& rn, Condition cond) { DCHECK(allow_macro_instructions()); DCHECK(!rd.IsSP()); @@ -448,26 +448,26 @@ void TurboAssembler::CmovX(const Register& rd, const Register& rn, } } -void TurboAssembler::Csdb() { +void MacroAssembler::Csdb() { DCHECK(allow_macro_instructions()); csdb(); } -void TurboAssembler::Cset(const Register& rd, Condition cond) { +void MacroAssembler::Cset(const Register& rd, Condition cond) { DCHECK(allow_macro_instructions()); DCHECK(!rd.IsZero()); DCHECK((cond != al) && (cond != nv)); cset(rd, cond); } -void TurboAssembler::Csetm(const Register& rd, Condition cond) { +void MacroAssembler::Csetm(const Register& rd, Condition cond) { DCHECK(allow_macro_instructions()); DCHECK(!rd.IsZero()); DCHECK((cond != al) && (cond != nv)); csetm(rd, cond); } -void TurboAssembler::Csinc(const Register& rd, const Register& rn, +void MacroAssembler::Csinc(const Register& rd, const Register& rn, const Register& rm, Condition cond) { DCHECK(allow_macro_instructions()); DCHECK(!rd.IsZero()); @@ -491,17 +491,17 @@ void MacroAssembler::Csneg(const Register& rd, const Register& rn, csneg(rd, rn, rm, cond); } -void TurboAssembler::Dmb(BarrierDomain domain, BarrierType type) { +void MacroAssembler::Dmb(BarrierDomain domain, BarrierType type) { DCHECK(allow_macro_instructions()); dmb(domain, type); } -void TurboAssembler::Dsb(BarrierDomain domain, BarrierType type) { +void MacroAssembler::Dsb(BarrierDomain domain, BarrierType type) { DCHECK(allow_macro_instructions()); dsb(domain, type); } -void TurboAssembler::Debug(const char* message, uint32_t code, Instr params) { +void MacroAssembler::Debug(const char* message, uint32_t code, Instr params) { DCHECK(allow_macro_instructions()); debug(message, code, params); } @@ -513,25 +513,25 @@ void MacroAssembler::Extr(const Register& rd, const Register& rn, extr(rd, rn, rm, lsb); } -void TurboAssembler::Fabs(const VRegister& fd, const VRegister& fn) { +void MacroAssembler::Fabs(const VRegister& fd, const VRegister& fn) { DCHECK(allow_macro_instructions()); fabs(fd, fn); } -void TurboAssembler::Fadd(const VRegister& fd, const VRegister& fn, +void MacroAssembler::Fadd(const VRegister& fd, const VRegister& fn, const VRegister& fm) { DCHECK(allow_macro_instructions()); fadd(fd, fn, fm); } -void TurboAssembler::Fccmp(const VRegister& fn, const VRegister& fm, +void MacroAssembler::Fccmp(const VRegister& fn, const VRegister& fm, StatusFlags nzcv, Condition cond) { DCHECK(allow_macro_instructions()); DCHECK((cond != al) && (cond != nv)); fccmp(fn, fm, nzcv, cond); } -void TurboAssembler::Fccmp(const VRegister& fn, const double value, +void MacroAssembler::Fccmp(const VRegister& fn, const double value, StatusFlags nzcv, Condition cond) { DCHECK(allow_macro_instructions()); UseScratchRegisterScope temps(this); @@ -540,12 +540,12 @@ void TurboAssembler::Fccmp(const VRegister& fn, const double value, Fccmp(fn, tmp, nzcv, cond); } -void TurboAssembler::Fcmp(const VRegister& fn, const VRegister& fm) { +void MacroAssembler::Fcmp(const VRegister& fn, const VRegister& fm) { DCHECK(allow_macro_instructions()); fcmp(fn, fm); } -void TurboAssembler::Fcmp(const VRegister& fn, double value) { +void MacroAssembler::Fcmp(const VRegister& fn, double value) { DCHECK(allow_macro_instructions()); if (value != 0.0) { UseScratchRegisterScope temps(this); @@ -557,66 +557,66 @@ void TurboAssembler::Fcmp(const VRegister& fn, double value) { } } -void 
TurboAssembler::Fcsel(const VRegister& fd, const VRegister& fn, +void MacroAssembler::Fcsel(const VRegister& fd, const VRegister& fn, const VRegister& fm, Condition cond) { DCHECK(allow_macro_instructions()); DCHECK((cond != al) && (cond != nv)); fcsel(fd, fn, fm, cond); } -void TurboAssembler::Fcvt(const VRegister& fd, const VRegister& fn) { +void MacroAssembler::Fcvt(const VRegister& fd, const VRegister& fn) { DCHECK(allow_macro_instructions()); fcvt(fd, fn); } -void TurboAssembler::Fcvtas(const Register& rd, const VRegister& fn) { +void MacroAssembler::Fcvtas(const Register& rd, const VRegister& fn) { DCHECK(allow_macro_instructions()); DCHECK(!rd.IsZero()); fcvtas(rd, fn); } -void TurboAssembler::Fcvtau(const Register& rd, const VRegister& fn) { +void MacroAssembler::Fcvtau(const Register& rd, const VRegister& fn) { DCHECK(allow_macro_instructions()); DCHECK(!rd.IsZero()); fcvtau(rd, fn); } -void TurboAssembler::Fcvtms(const Register& rd, const VRegister& fn) { +void MacroAssembler::Fcvtms(const Register& rd, const VRegister& fn) { DCHECK(allow_macro_instructions()); DCHECK(!rd.IsZero()); fcvtms(rd, fn); } -void TurboAssembler::Fcvtmu(const Register& rd, const VRegister& fn) { +void MacroAssembler::Fcvtmu(const Register& rd, const VRegister& fn) { DCHECK(allow_macro_instructions()); DCHECK(!rd.IsZero()); fcvtmu(rd, fn); } -void TurboAssembler::Fcvtns(const Register& rd, const VRegister& fn) { +void MacroAssembler::Fcvtns(const Register& rd, const VRegister& fn) { DCHECK(allow_macro_instructions()); DCHECK(!rd.IsZero()); fcvtns(rd, fn); } -void TurboAssembler::Fcvtnu(const Register& rd, const VRegister& fn) { +void MacroAssembler::Fcvtnu(const Register& rd, const VRegister& fn) { DCHECK(allow_macro_instructions()); DCHECK(!rd.IsZero()); fcvtnu(rd, fn); } -void TurboAssembler::Fcvtzs(const Register& rd, const VRegister& fn) { +void MacroAssembler::Fcvtzs(const Register& rd, const VRegister& fn) { DCHECK(allow_macro_instructions()); DCHECK(!rd.IsZero()); fcvtzs(rd, fn); } -void TurboAssembler::Fcvtzu(const Register& rd, const VRegister& fn) { +void MacroAssembler::Fcvtzu(const Register& rd, const VRegister& fn) { DCHECK(allow_macro_instructions()); DCHECK(!rd.IsZero()); fcvtzu(rd, fn); } -void TurboAssembler::Fdiv(const VRegister& fd, const VRegister& fn, +void MacroAssembler::Fdiv(const VRegister& fd, const VRegister& fn, const VRegister& fm) { DCHECK(allow_macro_instructions()); fdiv(fd, fn, fm); @@ -628,7 +628,7 @@ void MacroAssembler::Fmadd(const VRegister& fd, const VRegister& fn, fmadd(fd, fn, fm, fa); } -void TurboAssembler::Fmax(const VRegister& fd, const VRegister& fn, +void MacroAssembler::Fmax(const VRegister& fd, const VRegister& fn, const VRegister& fm) { DCHECK(allow_macro_instructions()); fmax(fd, fn, fm); @@ -640,7 +640,7 @@ void MacroAssembler::Fmaxnm(const VRegister& fd, const VRegister& fn, fmaxnm(fd, fn, fm); } -void TurboAssembler::Fmin(const VRegister& fd, const VRegister& fn, +void MacroAssembler::Fmin(const VRegister& fd, const VRegister& fn, const VRegister& fm) { DCHECK(allow_macro_instructions()); fmin(fd, fn, fm); @@ -652,7 +652,7 @@ void MacroAssembler::Fminnm(const VRegister& fd, const VRegister& fn, fminnm(fd, fn, fm); } -void TurboAssembler::Fmov(VRegister fd, VRegister fn) { +void MacroAssembler::Fmov(VRegister fd, VRegister fn) { DCHECK(allow_macro_instructions()); // Only emit an instruction if fd and fn are different, and they are both D // registers. 
fmov(s0, s0) is not a no-op because it clears the top word of @@ -663,12 +663,12 @@ void TurboAssembler::Fmov(VRegister fd, VRegister fn) { } } -void TurboAssembler::Fmov(VRegister fd, Register rn) { +void MacroAssembler::Fmov(VRegister fd, Register rn) { DCHECK(allow_macro_instructions()); fmov(fd, rn); } -void TurboAssembler::Fmov(VRegister vd, double imm) { +void MacroAssembler::Fmov(VRegister vd, double imm) { DCHECK(allow_macro_instructions()); if (vd.Is1S() || vd.Is2S() || vd.Is4S()) { @@ -696,7 +696,7 @@ void TurboAssembler::Fmov(VRegister vd, double imm) { } } -void TurboAssembler::Fmov(VRegister vd, float imm) { +void MacroAssembler::Fmov(VRegister vd, float imm) { DCHECK(allow_macro_instructions()); if (vd.Is1D() || vd.Is2D()) { Fmov(vd, static_cast<double>(imm)); @@ -723,7 +723,7 @@ void TurboAssembler::Fmov(VRegister vd, float imm) { } } -void TurboAssembler::Fmov(Register rd, VRegister fn) { +void MacroAssembler::Fmov(Register rd, VRegister fn) { DCHECK(allow_macro_instructions()); DCHECK(!rd.IsZero()); fmov(rd, fn); @@ -735,7 +735,7 @@ void MacroAssembler::Fmsub(const VRegister& fd, const VRegister& fn, fmsub(fd, fn, fm, fa); } -void TurboAssembler::Fmul(const VRegister& fd, const VRegister& fn, +void MacroAssembler::Fmul(const VRegister& fd, const VRegister& fn, const VRegister& fm) { DCHECK(allow_macro_instructions()); fmul(fd, fn, fm); @@ -753,7 +753,7 @@ void MacroAssembler::Fnmsub(const VRegister& fd, const VRegister& fn, fnmsub(fd, fn, fm, fa); } -void TurboAssembler::Fsub(const VRegister& fd, const VRegister& fn, +void MacroAssembler::Fsub(const VRegister& fd, const VRegister& fn, const VRegister& fm) { DCHECK(allow_macro_instructions()); fsub(fd, fn, fm); @@ -769,52 +769,52 @@ void MacroAssembler::Hlt(int code) { hlt(code); } -void TurboAssembler::Isb() { +void MacroAssembler::Isb() { DCHECK(allow_macro_instructions()); isb(); } -void TurboAssembler::Ldr(const CPURegister& rt, const Operand& operand) { +void MacroAssembler::Ldr(const CPURegister& rt, const Operand& operand) { DCHECK(allow_macro_instructions()); ldr(rt, operand); } -void TurboAssembler::Lsl(const Register& rd, const Register& rn, +void MacroAssembler::Lsl(const Register& rd, const Register& rn, unsigned shift) { DCHECK(allow_macro_instructions()); DCHECK(!rd.IsZero()); lsl(rd, rn, shift); } -void TurboAssembler::Lsl(const Register& rd, const Register& rn, +void MacroAssembler::Lsl(const Register& rd, const Register& rn, const Register& rm) { DCHECK(allow_macro_instructions()); DCHECK(!rd.IsZero()); lslv(rd, rn, rm); } -void TurboAssembler::Lsr(const Register& rd, const Register& rn, +void MacroAssembler::Lsr(const Register& rd, const Register& rn, unsigned shift) { DCHECK(allow_macro_instructions()); DCHECK(!rd.IsZero()); lsr(rd, rn, shift); } -void TurboAssembler::Lsr(const Register& rd, const Register& rn, +void MacroAssembler::Lsr(const Register& rd, const Register& rn, const Register& rm) { DCHECK(allow_macro_instructions()); DCHECK(!rd.IsZero()); lsrv(rd, rn, rm); } -void TurboAssembler::Madd(const Register& rd, const Register& rn, +void MacroAssembler::Madd(const Register& rd, const Register& rn, const Register& rm, const Register& ra) { DCHECK(allow_macro_instructions()); DCHECK(!rd.IsZero()); madd(rd, rn, rm, ra); } -void TurboAssembler::Mneg(const Register& rd, const Register& rn, +void MacroAssembler::Mneg(const Register& rd, const Register& rn, const Register& rm) { DCHECK(allow_macro_instructions()); DCHECK(!rd.IsZero()); @@ -827,44 +827,38 @@ void MacroAssembler::Movk(const Register& rd, uint64_t
imm, int shift) { movk(rd, imm, shift); } -void TurboAssembler::Mrs(const Register& rt, SystemRegister sysreg) { +void MacroAssembler::Mrs(const Register& rt, SystemRegister sysreg) { DCHECK(allow_macro_instructions()); DCHECK(!rt.IsZero()); mrs(rt, sysreg); } -void TurboAssembler::Msr(SystemRegister sysreg, const Register& rt) { +void MacroAssembler::Msr(SystemRegister sysreg, const Register& rt) { DCHECK(allow_macro_instructions()); msr(sysreg, rt); } -void TurboAssembler::Msub(const Register& rd, const Register& rn, +void MacroAssembler::Msub(const Register& rd, const Register& rn, const Register& rm, const Register& ra) { DCHECK(allow_macro_instructions()); DCHECK(!rd.IsZero()); msub(rd, rn, rm, ra); } -void TurboAssembler::Mul(const Register& rd, const Register& rn, +void MacroAssembler::Mul(const Register& rd, const Register& rn, const Register& rm) { DCHECK(allow_macro_instructions()); DCHECK(!rd.IsZero()); mul(rd, rn, rm); } -void TurboAssembler::Rbit(const Register& rd, const Register& rn) { +void MacroAssembler::Rbit(const Register& rd, const Register& rn) { DCHECK(allow_macro_instructions()); DCHECK(!rd.IsZero()); rbit(rd, rn); } -void TurboAssembler::Rev(const Register& rd, const Register& rn) { - DCHECK(allow_macro_instructions()); - DCHECK(!rd.IsZero()); - rev(rd, rn); -} - -void TurboAssembler::Ret(const Register& xn) { +void MacroAssembler::Ret(const Register& xn) { DCHECK(allow_macro_instructions()); DCHECK(!xn.IsZero()); ret(xn); @@ -877,46 +871,46 @@ void MacroAssembler::Rev(const Register& rd, const Register& rn) { rev(rd, rn); } -void TurboAssembler::Rev16(const Register& rd, const Register& rn) { +void MacroAssembler::Rev16(const Register& rd, const Register& rn) { DCHECK(allow_macro_instructions()); DCHECK(!rd.IsZero()); rev16(rd, rn); } -void TurboAssembler::Rev32(const Register& rd, const Register& rn) { +void MacroAssembler::Rev32(const Register& rd, const Register& rn) { DCHECK(allow_macro_instructions()); DCHECK(!rd.IsZero()); rev32(rd, rn); } -void TurboAssembler::Ror(const Register& rd, const Register& rs, +void MacroAssembler::Ror(const Register& rd, const Register& rs, unsigned shift) { DCHECK(allow_macro_instructions()); DCHECK(!rd.IsZero()); ror(rd, rs, shift); } -void TurboAssembler::Ror(const Register& rd, const Register& rn, +void MacroAssembler::Ror(const Register& rd, const Register& rn, const Register& rm) { DCHECK(allow_macro_instructions()); DCHECK(!rd.IsZero()); rorv(rd, rn, rm); } -void TurboAssembler::Sbfx(const Register& rd, const Register& rn, unsigned lsb, +void MacroAssembler::Sbfx(const Register& rd, const Register& rn, unsigned lsb, unsigned width) { DCHECK(allow_macro_instructions()); DCHECK(!rd.IsZero()); sbfx(rd, rn, lsb, width); } -void TurboAssembler::Scvtf(const VRegister& fd, const Register& rn, +void MacroAssembler::Scvtf(const VRegister& fd, const Register& rn, unsigned fbits) { DCHECK(allow_macro_instructions()); scvtf(fd, rn, fbits); } -void TurboAssembler::Sdiv(const Register& rd, const Register& rn, +void MacroAssembler::Sdiv(const Register& rd, const Register& rn, const Register& rm) { DCHECK(allow_macro_instructions()); DCHECK(!rd.IsZero()); @@ -937,80 +931,80 @@ void MacroAssembler::Smsubl(const Register& rd, const Register& rn, smsubl(rd, rn, rm, ra); } -void TurboAssembler::Smull(const Register& rd, const Register& rn, +void MacroAssembler::Smull(const Register& rd, const Register& rn, const Register& rm) { DCHECK(allow_macro_instructions()); DCHECK(!rd.IsZero()); smull(rd, rn, rm); } -void TurboAssembler::Smulh(const 
Register& rd, const Register& rn, +void MacroAssembler::Smulh(const Register& rd, const Register& rn, const Register& rm) { DCHECK(allow_macro_instructions()); DCHECK(!rd.IsZero()); smulh(rd, rn, rm); } -void TurboAssembler::Umull(const Register& rd, const Register& rn, +void MacroAssembler::Umull(const Register& rd, const Register& rn, const Register& rm) { DCHECK(allow_macro_instructions()); DCHECK(!rd.IsZero()); umaddl(rd, rn, rm, xzr); } -void TurboAssembler::Umulh(const Register& rd, const Register& rn, +void MacroAssembler::Umulh(const Register& rd, const Register& rn, const Register& rm) { DCHECK(allow_macro_instructions()); DCHECK(!rd.IsZero()); umulh(rd, rn, rm); } -void TurboAssembler::Sxtb(const Register& rd, const Register& rn) { +void MacroAssembler::Sxtb(const Register& rd, const Register& rn) { DCHECK(allow_macro_instructions()); DCHECK(!rd.IsZero()); sxtb(rd, rn); } -void TurboAssembler::Sxth(const Register& rd, const Register& rn) { +void MacroAssembler::Sxth(const Register& rd, const Register& rn) { DCHECK(allow_macro_instructions()); DCHECK(!rd.IsZero()); sxth(rd, rn); } -void TurboAssembler::Sxtw(const Register& rd, const Register& rn) { +void MacroAssembler::Sxtw(const Register& rd, const Register& rn) { DCHECK(allow_macro_instructions()); DCHECK(!rd.IsZero()); sxtw(rd, rn); } -void TurboAssembler::Ubfiz(const Register& rd, const Register& rn, unsigned lsb, +void MacroAssembler::Ubfiz(const Register& rd, const Register& rn, unsigned lsb, unsigned width) { DCHECK(allow_macro_instructions()); DCHECK(!rd.IsZero()); ubfiz(rd, rn, lsb, width); } -void TurboAssembler::Sbfiz(const Register& rd, const Register& rn, unsigned lsb, +void MacroAssembler::Sbfiz(const Register& rd, const Register& rn, unsigned lsb, unsigned width) { DCHECK(allow_macro_instructions()); DCHECK(!rd.IsZero()); sbfiz(rd, rn, lsb, width); } -void TurboAssembler::Ubfx(const Register& rd, const Register& rn, unsigned lsb, +void MacroAssembler::Ubfx(const Register& rd, const Register& rn, unsigned lsb, unsigned width) { DCHECK(allow_macro_instructions()); DCHECK(!rd.IsZero()); ubfx(rd, rn, lsb, width); } -void TurboAssembler::Ucvtf(const VRegister& fd, const Register& rn, +void MacroAssembler::Ucvtf(const VRegister& fd, const Register& rn, unsigned fbits) { DCHECK(allow_macro_instructions()); ucvtf(fd, rn, fbits); } -void TurboAssembler::Udiv(const Register& rd, const Register& rn, +void MacroAssembler::Udiv(const Register& rd, const Register& rn, const Register& rm) { DCHECK(allow_macro_instructions()); DCHECK(!rd.IsZero()); @@ -1031,25 +1025,25 @@ void MacroAssembler::Umsubl(const Register& rd, const Register& rn, umsubl(rd, rn, rm, ra); } -void TurboAssembler::Uxtb(const Register& rd, const Register& rn) { +void MacroAssembler::Uxtb(const Register& rd, const Register& rn) { DCHECK(allow_macro_instructions()); DCHECK(!rd.IsZero()); uxtb(rd, rn); } -void TurboAssembler::Uxth(const Register& rd, const Register& rn) { +void MacroAssembler::Uxth(const Register& rd, const Register& rn) { DCHECK(allow_macro_instructions()); DCHECK(!rd.IsZero()); uxth(rd, rn); } -void TurboAssembler::Uxtw(const Register& rd, const Register& rn) { +void MacroAssembler::Uxtw(const Register& rd, const Register& rn) { DCHECK(allow_macro_instructions()); DCHECK(!rd.IsZero()); uxtw(rd, rn); } -void TurboAssembler::InitializeRootRegister() { +void MacroAssembler::InitializeRootRegister() { ExternalReference isolate_root = ExternalReference::isolate_root(isolate()); Mov(kRootRegister, Operand(isolate_root)); #ifdef 
V8_COMPRESS_POINTERS_IN_SHARED_CAGE @@ -1057,15 +1051,15 @@ void TurboAssembler::InitializeRootRegister() { #endif } -void TurboAssembler::SmiTag(Register dst, Register src) { +void MacroAssembler::SmiTag(Register dst, Register src) { DCHECK(dst.Is64Bits() && src.Is64Bits()); DCHECK(SmiValuesAre32Bits() || SmiValuesAre31Bits()); Lsl(dst, src, kSmiShift); } -void TurboAssembler::SmiTag(Register smi) { SmiTag(smi, smi); } +void MacroAssembler::SmiTag(Register smi) { SmiTag(smi, smi); } -void TurboAssembler::SmiUntag(Register dst, Register src) { +void MacroAssembler::SmiUntag(Register dst, Register src) { DCHECK(dst.Is64Bits() && src.Is64Bits()); if (v8_flags.enable_slow_asserts) { AssertSmi(src); @@ -1078,7 +1072,7 @@ void TurboAssembler::SmiUntag(Register dst, Register src) { } } -void TurboAssembler::SmiUntag(Register dst, const MemOperand& src) { +void MacroAssembler::SmiUntag(Register dst, const MemOperand& src) { DCHECK(dst.Is64Bits()); if (SmiValuesAre32Bits()) { if (src.IsImmediateOffset() && src.shift_amount() == 0) { @@ -1104,11 +1098,11 @@ void TurboAssembler::SmiUntag(Register dst, const MemOperand& src) { } } -void TurboAssembler::SmiUntag(Register smi) { SmiUntag(smi, smi); } +void MacroAssembler::SmiUntag(Register smi) { SmiUntag(smi, smi); } -void TurboAssembler::SmiToInt32(Register smi) { SmiToInt32(smi, smi); } +void MacroAssembler::SmiToInt32(Register smi) { SmiToInt32(smi, smi); } -void TurboAssembler::SmiToInt32(Register dst, Register smi) { +void MacroAssembler::SmiToInt32(Register dst, Register smi) { DCHECK(dst.Is64Bits()); if (v8_flags.enable_slow_asserts) { AssertSmi(smi); @@ -1121,7 +1115,7 @@ void TurboAssembler::SmiToInt32(Register dst, Register smi) { } } -void TurboAssembler::JumpIfSmi(Register value, Label* smi_label, +void MacroAssembler::JumpIfSmi(Register value, Label* smi_label, Label* not_smi_label) { static_assert((kSmiTagSize == 1) && (kSmiTag == 0)); // Check if the tag bit is set. 
@@ -1136,11 +1130,11 @@ void TurboAssembler::JumpIfSmi(Register value, Label* smi_label, } } -void TurboAssembler::JumpIfEqual(Register x, int32_t y, Label* dest) { +void MacroAssembler::JumpIfEqual(Register x, int32_t y, Label* dest) { CompareAndBranch(x, y, eq, dest); } -void TurboAssembler::JumpIfLessThan(Register x, int32_t y, Label* dest) { +void MacroAssembler::JumpIfLessThan(Register x, int32_t y, Label* dest) { CompareAndBranch(x, y, lt, dest); } @@ -1154,10 +1148,10 @@ inline void MacroAssembler::AssertFeedbackVector(Register object) { AssertFeedbackVector(object, scratch); } -void TurboAssembler::jmp(Label* L) { B(L); } +void MacroAssembler::jmp(Label* L) { B(L); } -template <TurboAssembler::StoreLRMode lr_mode> -void TurboAssembler::Push(const CPURegister& src0, const CPURegister& src1, +template <MacroAssembler::StoreLRMode lr_mode> +void MacroAssembler::Push(const CPURegister& src0, const CPURegister& src1, const CPURegister& src2, const CPURegister& src3) { DCHECK(AreSameSizeAndType(src0, src1, src2, src3)); DCHECK_IMPLIES((lr_mode == kSignLR), ((src0 == lr) || (src1 == lr) || @@ -1178,8 +1172,8 @@ void TurboAssembler::Push(const CPURegister& src0, const CPURegister& src1, PushHelper(count, size, src0, src1, src2, src3); } -template <TurboAssembler::StoreLRMode lr_mode> -void TurboAssembler::Push(const Register& src0, const VRegister& src1) { +template <MacroAssembler::StoreLRMode lr_mode> +void MacroAssembler::Push(const Register& src0, const VRegister& src1) { DCHECK_IMPLIES((lr_mode == kSignLR), ((src0 == lr) || (src1 == lr))); DCHECK_IMPLIES((lr_mode == kDontStoreLR), ((src0 != lr) && (src1 != lr))); #ifdef V8_ENABLE_CONTROL_FLOW_INTEGRITY @@ -1197,8 +1191,8 @@ void TurboAssembler::Push(const Register& src0, const VRegister& src1) { str(src0, MemOperand(sp, src1.SizeInBytes())); } -template <TurboAssembler::LoadLRMode lr_mode> -void TurboAssembler::Pop(const CPURegister& dst0, const CPURegister& dst1, +template <MacroAssembler::LoadLRMode lr_mode> +void MacroAssembler::Pop(const CPURegister& dst0, const CPURegister& dst1, const CPURegister& dst2, const CPURegister& dst3) { // It is not valid to pop into the same register more than once in one // instruction, not even into the zero register.
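A note on the two Push overloads and the Pop just above: the lr_mode template parameter (StoreLRMode for stores, LoadLRMode for loads) makes every caller state whether the link register is being saved or restored, and under V8_ENABLE_CONTROL_FLOW_INTEGRITY it gates pointer authentication of lr. A self-contained sketch of that dispatch pattern follows; it is an illustration under assumed semantics, not code from this patch:

    #include <iostream>
    // Hypothetical mock of the lr_mode dispatch; the enumerator names mirror
    // the DCHECKs above, everything else is invented for illustration.
    enum StoreLRMode { kSignLR, kDontStoreLR };
    template <StoreLRMode lr_mode>
    void EmitPush(const char* regs) {
      if (lr_mode == kSignLR) {
        std::cout << "pacibsp\n";  // sign lr against sp before storing it
      }
      std::cout << "stp " << regs << ", [sp, #-16]!\n";  // pre-indexed store pair
    }
    int main() {
      EmitPush<kSignLR>("lr, fp");       // prologue-style push; lr gets signed
      EmitPush<kDontStoreLR>("x0, x1");  // ordinary operands; no signing
    }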
@@ -1224,8 +1218,8 @@ void TurboAssembler::Pop(const CPURegister& dst0, const CPURegister& dst1, #endif } -template <TurboAssembler::StoreLRMode lr_mode> -void TurboAssembler::Poke(const CPURegister& src, const Operand& offset) { +template <MacroAssembler::StoreLRMode lr_mode> +void MacroAssembler::Poke(const CPURegister& src, const Operand& offset) { DCHECK_IMPLIES((lr_mode == kSignLR), (src == lr)); DCHECK_IMPLIES((lr_mode == kDontStoreLR), (src != lr)); #ifdef V8_ENABLE_CONTROL_FLOW_INTEGRITY @@ -1244,8 +1238,8 @@ void TurboAssembler::Poke(const CPURegister& src, const Operand& offset) { Str(src, MemOperand(sp, offset)); } -template <TurboAssembler::LoadLRMode lr_mode> -void TurboAssembler::Peek(const CPURegister& dst, const Operand& offset) { +template <MacroAssembler::LoadLRMode lr_mode> +void MacroAssembler::Peek(const CPURegister& dst, const Operand& offset) { if (offset.IsImmediate()) { DCHECK_GE(offset.ImmediateValue(), 0); } else if (v8_flags.debug_code) { @@ -1264,7 +1258,7 @@ void TurboAssembler::Peek(const CPURegister& dst, const Operand& offset) { #endif } -void TurboAssembler::Claim(int64_t count, uint64_t unit_size) { +void MacroAssembler::Claim(int64_t count, uint64_t unit_size) { DCHECK_GE(count, 0); uint64_t size = count * unit_size; @@ -1282,7 +1276,7 @@ void TurboAssembler::Claim(int64_t count, uint64_t unit_size) { Sub(sp, sp, size); } -void TurboAssembler::Claim(const Register& count, uint64_t unit_size) { +void MacroAssembler::Claim(const Register& count, uint64_t unit_size) { if (unit_size == 0) return; DCHECK(base::bits::IsPowerOfTwo(unit_size)); @@ -1323,7 +1317,7 @@ void TurboAssembler::Claim(const Register& count, uint64_t unit_size) { #endif } -void TurboAssembler::Drop(int64_t count, uint64_t unit_size) { +void MacroAssembler::Drop(int64_t count, uint64_t unit_size) { DCHECK_GE(count, 0); uint64_t size = count * unit_size; @@ -1335,7 +1329,7 @@ void TurboAssembler::Drop(int64_t count, uint64_t unit_size) { DCHECK_EQ(size % 16, 0); } -void TurboAssembler::Drop(const Register& count, uint64_t unit_size) { +void MacroAssembler::Drop(const Register& count, uint64_t unit_size) { if (unit_size == 0) return; DCHECK(base::bits::IsPowerOfTwo(unit_size)); @@ -1350,7 +1344,7 @@ void TurboAssembler::Drop(const Register& count, uint64_t unit_size) { Add(sp, sp, size); } -void TurboAssembler::DropArguments(const Register& count, +void MacroAssembler::DropArguments(const Register& count, ArgumentsCountMode mode) { int extra_slots = 1; // Padding slot. if (mode == kCountExcludesReceiver) { @@ -1364,7 +1358,7 @@ void TurboAssembler::DropArguments(const Register& count, Drop(tmp, kXRegSize); } -void TurboAssembler::DropArguments(int64_t count, ArgumentsCountMode mode) { +void MacroAssembler::DropArguments(int64_t count, ArgumentsCountMode mode) { if (mode == kCountExcludesReceiver) { // Add a slot for the receiver.
++count; @@ -1372,13 +1366,13 @@ void TurboAssembler::DropArguments(int64_t count, ArgumentsCountMode mode) { Drop(RoundUp(count, 2), kXRegSize); } -void TurboAssembler::DropSlots(int64_t count) { +void MacroAssembler::DropSlots(int64_t count) { Drop(RoundUp(count, 2), kXRegSize); } -void TurboAssembler::PushArgument(const Register& arg) { Push(padreg, arg); } +void MacroAssembler::PushArgument(const Register& arg) { Push(padreg, arg); } -void TurboAssembler::CompareAndBranch(const Register& lhs, const Operand& rhs, +void MacroAssembler::CompareAndBranch(const Register& lhs, const Operand& rhs, Condition cond, Label* label) { if (rhs.IsImmediate() && (rhs.ImmediateValue() == 0) && ((cond == eq) || (cond == ne) || (cond == hi) || (cond == ls))) { @@ -1393,7 +1387,7 @@ void TurboAssembler::CompareAndBranch(const Register& lhs, const Operand& rhs, } } -void TurboAssembler::CompareTaggedAndBranch(const Register& lhs, +void MacroAssembler::CompareTaggedAndBranch(const Register& lhs, const Operand& rhs, Condition cond, Label* label) { if (COMPRESS_POINTERS_BOOL) { @@ -1403,7 +1397,7 @@ void TurboAssembler::CompareTaggedAndBranch(const Register& lhs, } } -void TurboAssembler::TestAndBranchIfAnySet(const Register& reg, +void MacroAssembler::TestAndBranchIfAnySet(const Register& reg, const uint64_t bit_pattern, Label* label) { int bits = reg.SizeInBits(); @@ -1416,7 +1410,7 @@ void TurboAssembler::TestAndBranchIfAnySet(const Register& reg, } } -void TurboAssembler::TestAndBranchIfAllClear(const Register& reg, +void MacroAssembler::TestAndBranchIfAllClear(const Register& reg, const uint64_t bit_pattern, Label* label) { int bits = reg.SizeInBits(); @@ -1429,7 +1423,7 @@ void TurboAssembler::TestAndBranchIfAllClear(const Register& reg, } } -void TurboAssembler::MoveHeapNumber(Register dst, double value) { +void MacroAssembler::MoveHeapNumber(Register dst, double value) { Mov(dst, Operand::EmbeddedHeapNumber(value)); } diff --git a/src/codegen/arm64/macro-assembler-arm64.cc b/src/codegen/arm64/macro-assembler-arm64.cc index c7f875d266..7523df9a60 100644 --- a/src/codegen/arm64/macro-assembler-arm64.cc +++ b/src/codegen/arm64/macro-assembler-arm64.cc @@ -39,9 +39,9 @@ namespace v8 { namespace internal { -CPURegList TurboAssembler::DefaultTmpList() { return CPURegList(ip0, ip1); } +CPURegList MacroAssembler::DefaultTmpList() { return CPURegList(ip0, ip1); } -CPURegList TurboAssembler::DefaultFPTmpList() { +CPURegList MacroAssembler::DefaultFPTmpList() { return CPURegList(fp_scratch1, fp_scratch2); } @@ -57,7 +57,7 @@ constexpr int kStackSavedSavedFPSizeInBits = kDRegSizeInBits; } // namespace -void TurboAssembler::PushCPURegList(CPURegList registers) { +void MacroAssembler::PushCPURegList(CPURegList registers) { // If LR was stored here, we would need to sign it if // V8_ENABLE_CONTROL_FLOW_INTEGRITY is on. 
DCHECK(!registers.IncludesAliasOf(lr)); @@ -77,7 +77,7 @@ void TurboAssembler::PushCPURegList(CPURegList registers) { } } -void TurboAssembler::PopCPURegList(CPURegList registers) { +void MacroAssembler::PopCPURegList(CPURegList registers) { int size = registers.RegisterSizeInBytes(); DCHECK_EQ(0, (size * registers.Count()) % 16); @@ -139,7 +139,7 @@ void MacroAssembler::PopAll(RegList reglist) { } } -int TurboAssembler::RequiredStackSizeForCallerSaved(SaveFPRegsMode fp_mode, +int MacroAssembler::RequiredStackSizeForCallerSaved(SaveFPRegsMode fp_mode, Register exclusion) const { auto list = kCallerSaved; list.Remove(exclusion); @@ -155,7 +155,7 @@ int TurboAssembler::RequiredStackSizeForCallerSaved(SaveFPRegsMode fp_mode, return bytes; } -int TurboAssembler::PushCallerSaved(SaveFPRegsMode fp_mode, +int MacroAssembler::PushCallerSaved(SaveFPRegsMode fp_mode, Register exclusion) { ASM_CODE_COMMENT(this); auto list = kCallerSaved; @@ -175,7 +175,7 @@ int TurboAssembler::PushCallerSaved(SaveFPRegsMode fp_mode, return bytes; } -int TurboAssembler::PopCallerSaved(SaveFPRegsMode fp_mode, Register exclusion) { +int MacroAssembler::PopCallerSaved(SaveFPRegsMode fp_mode, Register exclusion) { ASM_CODE_COMMENT(this); int bytes = 0; if (fp_mode == SaveFPRegsMode::kSave) { @@ -195,7 +195,7 @@ int TurboAssembler::PopCallerSaved(SaveFPRegsMode fp_mode, Register exclusion) { return bytes; } -void TurboAssembler::LogicalMacro(const Register& rd, const Register& rn, +void MacroAssembler::LogicalMacro(const Register& rd, const Register& rn, const Operand& operand, LogicalOp op) { ASM_CODE_COMMENT(this); UseScratchRegisterScope temps(this); @@ -303,7 +303,7 @@ void TurboAssembler::LogicalMacro(const Register& rd, const Register& rn, } } -void TurboAssembler::Mov(const Register& rd, uint64_t imm) { +void MacroAssembler::Mov(const Register& rd, uint64_t imm) { DCHECK(allow_macro_instructions()); DCHECK(is_uint32(imm) || is_int32(imm) || rd.Is64Bits()); DCHECK(!rd.IsZero()); @@ -379,7 +379,7 @@ void TurboAssembler::Mov(const Register& rd, uint64_t imm) { } } -void TurboAssembler::Mov(const Register& rd, const Operand& operand, +void MacroAssembler::Mov(const Register& rd, const Operand& operand, DiscardMoveMode discard_mode) { DCHECK(allow_macro_instructions()); DCHECK(!rd.IsZero()); @@ -447,11 +447,11 @@ void TurboAssembler::Mov(const Register& rd, const Operand& operand, } } -void TurboAssembler::Mov(const Register& rd, Smi smi) { +void MacroAssembler::Mov(const Register& rd, Smi smi) { return Mov(rd, Operand(smi)); } -void TurboAssembler::Movi16bitHelper(const VRegister& vd, uint64_t imm) { +void MacroAssembler::Movi16bitHelper(const VRegister& vd, uint64_t imm) { DCHECK(is_uint16(imm)); int byte1 = (imm & 0xFF); int byte2 = ((imm >> 8) & 0xFF); @@ -473,7 +473,7 @@ void TurboAssembler::Movi16bitHelper(const VRegister& vd, uint64_t imm) { } } -void TurboAssembler::Movi32bitHelper(const VRegister& vd, uint64_t imm) { +void MacroAssembler::Movi32bitHelper(const VRegister& vd, uint64_t imm) { DCHECK(is_uint32(imm)); uint8_t bytes[sizeof(imm)]; @@ -550,7 +550,7 @@ void TurboAssembler::Movi32bitHelper(const VRegister& vd, uint64_t imm) { } } -void TurboAssembler::Movi64bitHelper(const VRegister& vd, uint64_t imm) { +void MacroAssembler::Movi64bitHelper(const VRegister& vd, uint64_t imm) { // All bytes are either 0x00 or 0xFF. 
{ bool all0orff = true; @@ -586,7 +586,7 @@ void TurboAssembler::Movi64bitHelper(const VRegister& vd, uint64_t imm) { } } -void TurboAssembler::Movi(const VRegister& vd, uint64_t imm, Shift shift, +void MacroAssembler::Movi(const VRegister& vd, uint64_t imm, Shift shift, int shift_amount) { DCHECK(allow_macro_instructions()); if (shift_amount != 0 || shift != LSL) { @@ -607,7 +607,7 @@ void TurboAssembler::Movi(const VRegister& vd, uint64_t imm, Shift shift, } } -void TurboAssembler::Movi(const VRegister& vd, uint64_t hi, uint64_t lo) { +void MacroAssembler::Movi(const VRegister& vd, uint64_t hi, uint64_t lo) { // TODO(v8:11033): Move 128-bit values in a more efficient way. DCHECK(vd.Is128Bits()); Movi(vd.V2D(), lo); @@ -619,7 +619,7 @@ void TurboAssembler::Movi(const VRegister& vd, uint64_t hi, uint64_t lo) { } } -void TurboAssembler::Mvn(const Register& rd, const Operand& operand) { +void MacroAssembler::Mvn(const Register& rd, const Operand& operand) { DCHECK(allow_macro_instructions()); if (operand.NeedsRelocation(this)) { @@ -642,7 +642,7 @@ void TurboAssembler::Mvn(const Register& rd, const Operand& operand) { } } -unsigned TurboAssembler::CountSetHalfWords(uint64_t imm, unsigned reg_size) { +unsigned MacroAssembler::CountSetHalfWords(uint64_t imm, unsigned reg_size) { DCHECK_EQ(reg_size % 16, 0); #define HALFWORD(idx) (((imm >> ((idx)*16)) & 0xFFFF) ? 1u : 0u) @@ -660,18 +660,18 @@ unsigned TurboAssembler::CountSetHalfWords(uint64_t imm, unsigned reg_size) { // The movz instruction can generate immediates containing an arbitrary 16-bit // half-word, with remaining bits clear, eg. 0x00001234, 0x0000123400000000. -bool TurboAssembler::IsImmMovz(uint64_t imm, unsigned reg_size) { +bool MacroAssembler::IsImmMovz(uint64_t imm, unsigned reg_size) { DCHECK((reg_size == kXRegSizeInBits) || (reg_size == kWRegSizeInBits)); return CountSetHalfWords(imm, reg_size) <= 1; } // The movn instruction can generate immediates containing an arbitrary 16-bit // half-word, with remaining bits set, eg. 0xFFFF1234, 0xFFFF1234FFFFFFFF. 
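The half-word rule in these two comments can be checked concretely; the following lines are an illustration, not part of the patch:

    #include <cstdint>
    // 0x0000123400000000 has exactly one non-zero 16-bit half-word, so a
    // single movz materializes it; 0xFFFF1234FFFFFFFF qualifies for movn
    // because its complement has exactly one non-zero half-word:
    static_assert(~uint64_t{0xFFFF1234FFFFFFFF} == uint64_t{0x0000EDCB00000000},
                  "exactly one non-zero half-word remains after complementing");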
-bool TurboAssembler::IsImmMovn(uint64_t imm, unsigned reg_size) { +bool MacroAssembler::IsImmMovn(uint64_t imm, unsigned reg_size) { return IsImmMovz(~imm, reg_size); } -void TurboAssembler::ConditionalCompareMacro(const Register& rn, +void MacroAssembler::ConditionalCompareMacro(const Register& rn, const Operand& operand, StatusFlags nzcv, Condition cond, ConditionalCompareOp op) { @@ -699,7 +699,7 @@ void TurboAssembler::ConditionalCompareMacro(const Register& rn, } } -void TurboAssembler::Csel(const Register& rd, const Register& rn, +void MacroAssembler::Csel(const Register& rd, const Register& rn, const Operand& operand, Condition cond) { DCHECK(allow_macro_instructions()); DCHECK(!rd.IsZero()); @@ -733,7 +733,7 @@ void TurboAssembler::Csel(const Register& rd, const Register& rn, } } -bool TurboAssembler::TryOneInstrMoveImmediate(const Register& dst, +bool MacroAssembler::TryOneInstrMoveImmediate(const Register& dst, int64_t imm) { unsigned n, imm_s, imm_r; int reg_size = dst.SizeInBits(); @@ -755,7 +755,7 @@ bool TurboAssembler::TryOneInstrMoveImmediate(const Register& dst, return false; } -Operand TurboAssembler::MoveImmediateForShiftedOp(const Register& dst, +Operand MacroAssembler::MoveImmediateForShiftedOp(const Register& dst, int64_t imm, PreShiftImmMode mode) { int reg_size = dst.SizeInBits(); @@ -805,7 +805,7 @@ Operand TurboAssembler::MoveImmediateForShiftedOp(const Register& dst, return Operand(dst); } -void TurboAssembler::AddSubMacro(const Register& rd, const Register& rn, +void MacroAssembler::AddSubMacro(const Register& rd, const Register& rn, const Operand& operand, FlagsUpdate S, AddSubOp op) { if (operand.IsZero() && rd == rn && rd.Is64Bits() && rn.Is64Bits() && @@ -851,7 +851,7 @@ void TurboAssembler::AddSubMacro(const Register& rd, const Register& rn, } } -void TurboAssembler::AddSubWithCarryMacro(const Register& rd, +void MacroAssembler::AddSubWithCarryMacro(const Register& rd, const Register& rn, const Operand& operand, FlagsUpdate S, AddSubWithCarryOp op) { @@ -900,7 +900,7 @@ void TurboAssembler::AddSubWithCarryMacro(const Register& rd, } } -void TurboAssembler::LoadStoreMacro(const CPURegister& rt, +void MacroAssembler::LoadStoreMacro(const CPURegister& rt, const MemOperand& addr, LoadStoreOp op) { int64_t offset = addr.offset(); unsigned size = CalcLSDataSize(op); @@ -930,7 +930,7 @@ void TurboAssembler::LoadStoreMacro(const CPURegister& rt, } } -void TurboAssembler::LoadStorePairMacro(const CPURegister& rt, +void MacroAssembler::LoadStorePairMacro(const CPURegister& rt, const CPURegister& rt2, const MemOperand& addr, LoadStorePairOp op) { @@ -963,7 +963,7 @@ void TurboAssembler::LoadStorePairMacro(const CPURegister& rt, } } -bool TurboAssembler::NeedExtraInstructionsOrRegisterBranch( +bool MacroAssembler::NeedExtraInstructionsOrRegisterBranch( Label* label, ImmBranchType b_type) { bool need_longer_range = false; // There are two situations in which we care about the offset being out of @@ -986,7 +986,7 @@ bool TurboAssembler::NeedExtraInstructionsOrRegisterBranch( return need_longer_range; } -void TurboAssembler::Adr(const Register& rd, Label* label, AdrHint hint) { +void MacroAssembler::Adr(const Register& rd, Label* label, AdrHint hint) { DCHECK(allow_macro_instructions()); DCHECK(!rd.IsZero()); @@ -1020,7 +1020,7 @@ void TurboAssembler::Adr(const Register& rd, Label* label, AdrHint hint) { } } -void TurboAssembler::B(Label* label, BranchType type, Register reg, int bit) { +void MacroAssembler::B(Label* label, BranchType type, Register reg, int bit) { 
DCHECK((reg == NoReg || type >= kBranchTypeFirstUsingReg) && (bit == -1 || type >= kBranchTypeFirstUsingBit)); if (kBranchTypeFirstCondition <= type && type <= kBranchTypeLastCondition) { @@ -1050,7 +1050,7 @@ void TurboAssembler::B(Label* label, BranchType type, Register reg, int bit) { } } -void TurboAssembler::B(Label* label, Condition cond) { +void MacroAssembler::B(Label* label, Condition cond) { DCHECK(allow_macro_instructions()); DCHECK((cond != al) && (cond != nv)); @@ -1067,7 +1067,7 @@ void TurboAssembler::B(Label* label, Condition cond) { bind(&done); } -void TurboAssembler::Tbnz(const Register& rt, unsigned bit_pos, Label* label) { +void MacroAssembler::Tbnz(const Register& rt, unsigned bit_pos, Label* label) { DCHECK(allow_macro_instructions()); Label done; @@ -1083,7 +1083,7 @@ void TurboAssembler::Tbnz(const Register& rt, unsigned bit_pos, Label* label) { bind(&done); } -void TurboAssembler::Tbz(const Register& rt, unsigned bit_pos, Label* label) { +void MacroAssembler::Tbz(const Register& rt, unsigned bit_pos, Label* label) { DCHECK(allow_macro_instructions()); Label done; @@ -1099,7 +1099,7 @@ void TurboAssembler::Tbz(const Register& rt, unsigned bit_pos, Label* label) { bind(&done); } -void TurboAssembler::Cbnz(const Register& rt, Label* label) { +void MacroAssembler::Cbnz(const Register& rt, Label* label) { DCHECK(allow_macro_instructions()); Label done; @@ -1115,7 +1115,7 @@ void TurboAssembler::Cbnz(const Register& rt, Label* label) { bind(&done); } -void TurboAssembler::Cbz(const Register& rt, Label* label) { +void MacroAssembler::Cbz(const Register& rt, Label* label) { DCHECK(allow_macro_instructions()); Label done; @@ -1133,7 +1133,7 @@ void TurboAssembler::Cbz(const Register& rt, Label* label) { // Pseudo-instructions. -void TurboAssembler::Abs(const Register& rd, const Register& rm, +void MacroAssembler::Abs(const Register& rd, const Register& rm, Label* is_not_representable, Label* is_representable) { DCHECK(allow_macro_instructions()); DCHECK(AreSameSizeAndType(rd, rm)); @@ -1154,7 +1154,7 @@ void TurboAssembler::Abs(const Register& rd, const Register& rm, } } -void TurboAssembler::Switch(Register scratch, Register value, +void MacroAssembler::Switch(Register scratch, Register value, int case_value_base, Label** labels, int num_labels) { Register table = scratch; @@ -1178,7 +1178,7 @@ void TurboAssembler::Switch(Register scratch, Register value, // Abstracted stack operations. 
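All of the abstracted stack operations that follow preserve one invariant worth keeping in mind: on arm64 the active sp must stay 16-byte aligned, which is why odd register counts are padded (compare PushArgument's Push(padreg, arg) earlier in the diff). A small self-contained illustration of the arithmetic, with invented names:

    constexpr int kXRegBytes = 8;  // one 64-bit register slot
    constexpr bool KeepsSpAligned(int reg_count) {
      return (reg_count * kXRegBytes) % 16 == 0;
    }
    static_assert(KeepsSpAligned(2) && KeepsSpAligned(4) && !KeepsSpAligned(1),
                  "odd pushes need a padding slot to keep sp 16-byte aligned");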
-void TurboAssembler::Push(const CPURegister& src0, const CPURegister& src1, +void MacroAssembler::Push(const CPURegister& src0, const CPURegister& src1, const CPURegister& src2, const CPURegister& src3, const CPURegister& src4, const CPURegister& src5, const CPURegister& src6, const CPURegister& src7) { @@ -1192,7 +1192,7 @@ void TurboAssembler::Push(const CPURegister& src0, const CPURegister& src1, PushHelper(count - 4, size, src4, src5, src6, src7); } -void TurboAssembler::Pop(const CPURegister& dst0, const CPURegister& dst1, +void MacroAssembler::Pop(const CPURegister& dst0, const CPURegister& dst1, const CPURegister& dst2, const CPURegister& dst3, const CPURegister& dst4, const CPURegister& dst5, const CPURegister& dst6, const CPURegister& dst7) { @@ -1238,7 +1238,7 @@ void MacroAssembler::PushMultipleTimes(CPURegister src, Register count) { Bind(&done); } -void TurboAssembler::PushHelper(int count, int size, const CPURegister& src0, +void MacroAssembler::PushHelper(int count, int size, const CPURegister& src0, const CPURegister& src1, const CPURegister& src2, const CPURegister& src3) { @@ -1276,7 +1276,7 @@ void TurboAssembler::PushHelper(int count, int size, const CPURegister& src0, } } -void TurboAssembler::PopHelper(int count, int size, const CPURegister& dst0, +void MacroAssembler::PopHelper(int count, int size, const CPURegister& dst0, const CPURegister& dst1, const CPURegister& dst2, const CPURegister& dst3) { // Ensure that we don't unintentially modify scratch or debug registers. @@ -1314,7 +1314,7 @@ void TurboAssembler::PopHelper(int count, int size, const CPURegister& dst0, } } -void TurboAssembler::PokePair(const CPURegister& src1, const CPURegister& src2, +void MacroAssembler::PokePair(const CPURegister& src1, const CPURegister& src2, int offset) { DCHECK(AreSameSizeAndType(src1, src2)); DCHECK((offset >= 0) && ((offset % src1.SizeInBytes()) == 0)); @@ -1529,14 +1529,14 @@ void MacroAssembler::OptimizeCodeOrTailCallOptimizedCodeSlot( TailCallOptimizedCodeSlot(this, optimized_code_entry, x4); } -Condition TurboAssembler::CheckSmi(Register object) { +Condition MacroAssembler::CheckSmi(Register object) { static_assert(kSmiTag == 0); Tst(object, kSmiTagMask); return eq; } #ifdef V8_ENABLE_DEBUG_CODE -void TurboAssembler::AssertSpAligned() { +void MacroAssembler::AssertSpAligned() { if (!v8_flags.debug_code) return; ASM_CODE_COMMENT(this); HardAbortScope hard_abort(this); // Avoid calls to Abort. 
@@ -1549,7 +1549,7 @@ void TurboAssembler::AssertSpAligned() { Check(eq, AbortReason::kUnexpectedStackPointer); } -void TurboAssembler::AssertFPCRState(Register fpcr) { +void MacroAssembler::AssertFPCRState(Register fpcr) { if (!v8_flags.debug_code) return; ASM_CODE_COMMENT(this); Label unexpected_mode, done; @@ -1573,7 +1573,7 @@ void TurboAssembler::AssertFPCRState(Register fpcr) { Bind(&done); } -void TurboAssembler::AssertSmi(Register object, AbortReason reason) { +void MacroAssembler::AssertSmi(Register object, AbortReason reason) { if (!v8_flags.debug_code) return; ASM_CODE_COMMENT(this); static_assert(kSmiTag == 0); @@ -1581,7 +1581,7 @@ void TurboAssembler::AssertSmi(Register object, AbortReason reason) { Check(eq, reason); } -void TurboAssembler::AssertNotSmi(Register object, AbortReason reason) { +void MacroAssembler::AssertNotSmi(Register object, AbortReason reason) { if (!v8_flags.debug_code) return; ASM_CODE_COMMENT(this); static_assert(kSmiTag == 0); @@ -1589,7 +1589,7 @@ void TurboAssembler::AssertNotSmi(Register object, AbortReason reason) { Check(ne, reason); } -void TurboAssembler::AssertZeroExtended(Register int32_register) { +void MacroAssembler::AssertZeroExtended(Register int32_register) { if (!v8_flags.debug_code) return; ASM_CODE_COMMENT(this); Tst(int32_register.X(), kMaxUInt32); @@ -1704,7 +1704,7 @@ void MacroAssembler::AssertUndefinedOrAllocationSite(Register object) { Bind(&done_checking); } -void TurboAssembler::AssertPositiveOrZero(Register value) { +void MacroAssembler::AssertPositiveOrZero(Register value) { if (!v8_flags.debug_code) return; ASM_CODE_COMMENT(this); Label done; @@ -1714,18 +1714,18 @@ void TurboAssembler::AssertPositiveOrZero(Register value) { Bind(&done); } -void TurboAssembler::Assert(Condition cond, AbortReason reason) { +void MacroAssembler::Assert(Condition cond, AbortReason reason) { if (v8_flags.debug_code) { Check(cond, reason); } } -void TurboAssembler::AssertUnreachable(AbortReason reason) { +void MacroAssembler::AssertUnreachable(AbortReason reason) { if (v8_flags.debug_code) Abort(reason); } #endif // V8_ENABLE_DEBUG_CODE -void TurboAssembler::CopySlots(int dst, Register src, Register slot_count) { +void MacroAssembler::CopySlots(int dst, Register src, Register slot_count) { DCHECK(!src.IsZero()); UseScratchRegisterScope scope(this); Register dst_reg = scope.AcquireX(); @@ -1734,7 +1734,7 @@ void TurboAssembler::CopySlots(int dst, Register src, Register slot_count) { CopyDoubleWords(dst_reg, src, slot_count); } -void TurboAssembler::CopySlots(Register dst, Register src, +void MacroAssembler::CopySlots(Register dst, Register src, Register slot_count) { DCHECK(!dst.IsZero() && !src.IsZero()); SlotAddress(dst, dst); @@ -1742,7 +1742,7 @@ void TurboAssembler::CopySlots(Register dst, Register src, CopyDoubleWords(dst, src, slot_count); } -void TurboAssembler::CopyDoubleWords(Register dst, Register src, Register count, +void MacroAssembler::CopyDoubleWords(Register dst, Register src, Register count, CopyDoubleWordsMode mode) { ASM_CODE_COMMENT(this); DCHECK(!AreAliased(dst, src, count)); @@ -1813,15 +1813,15 @@ void TurboAssembler::CopyDoubleWords(Register dst, Register src, Register count, Bind(&done); } -void TurboAssembler::SlotAddress(Register dst, int slot_offset) { +void MacroAssembler::SlotAddress(Register dst, int slot_offset) { Add(dst, sp, slot_offset << kSystemPointerSizeLog2); } -void TurboAssembler::SlotAddress(Register dst, Register slot_offset) { +void MacroAssembler::SlotAddress(Register dst, Register slot_offset) { 
Add(dst, sp, Operand(slot_offset, LSL, kSystemPointerSizeLog2)); } -void TurboAssembler::CanonicalizeNaN(const VRegister& dst, +void MacroAssembler::CanonicalizeNaN(const VRegister& dst, const VRegister& src) { AssertFPCRState(); @@ -1831,7 +1831,7 @@ void TurboAssembler::CanonicalizeNaN(const VRegister& dst, Fsub(dst, src, fp_zero); } -void TurboAssembler::LoadTaggedRoot(Register destination, RootIndex index) { +void MacroAssembler::LoadTaggedRoot(Register destination, RootIndex index) { ASM_CODE_COMMENT(this); if (V8_STATIC_ROOTS_BOOL && RootsTable::IsReadOnly(index)) { Mov(destination, @@ -1841,7 +1841,7 @@ void TurboAssembler::LoadTaggedRoot(Register destination, RootIndex index) { LoadRoot(destination, index); } -void TurboAssembler::LoadRoot(Register destination, RootIndex index) { +void MacroAssembler::LoadRoot(Register destination, RootIndex index) { ASM_CODE_COMMENT(this); // TODO(v8:13466, olivf): With static roots we could use // DecompressTaggedPointer here. However, currently all roots have addresses @@ -1852,7 +1852,7 @@ void TurboAssembler::LoadRoot(Register destination, RootIndex index) { MemOperand(kRootRegister, RootRegisterOffsetForRootIndex(index))); } -void TurboAssembler::PushRoot(RootIndex index) { +void MacroAssembler::PushRoot(RootIndex index) { ASM_CODE_COMMENT(this); UseScratchRegisterScope temps(this); Register tmp = temps.AcquireX(); @@ -1860,14 +1860,14 @@ void TurboAssembler::PushRoot(RootIndex index) { Push(tmp); } -void TurboAssembler::Move(Register dst, Smi src) { Mov(dst, src); } -void TurboAssembler::Move(Register dst, MemOperand src) { Ldr(dst, src); } -void TurboAssembler::Move(Register dst, Register src) { +void MacroAssembler::Move(Register dst, Smi src) { Mov(dst, src); } +void MacroAssembler::Move(Register dst, MemOperand src) { Ldr(dst, src); } +void MacroAssembler::Move(Register dst, Register src) { if (dst == src) return; Mov(dst, src); } -void TurboAssembler::MovePair(Register dst0, Register src0, Register dst1, +void MacroAssembler::MovePair(Register dst0, Register src0, Register dst1, Register src1) { DCHECK_NE(dst0, dst1); if (dst0 != src1) { @@ -1883,7 +1883,7 @@ void TurboAssembler::MovePair(Register dst0, Register src0, Register dst1, } } -void TurboAssembler::Swap(Register lhs, Register rhs) { +void MacroAssembler::Swap(Register lhs, Register rhs) { DCHECK(lhs.IsSameSizeAndType(rhs)); DCHECK_NE(lhs, rhs); UseScratchRegisterScope temps(this); @@ -1893,7 +1893,7 @@ void TurboAssembler::Swap(Register lhs, Register rhs) { Mov(lhs, temp); } -void TurboAssembler::Swap(VRegister lhs, VRegister rhs) { +void MacroAssembler::Swap(VRegister lhs, VRegister rhs) { DCHECK(lhs.IsSameSizeAndType(rhs)); DCHECK_NE(lhs, rhs); UseScratchRegisterScope temps(this); @@ -1957,7 +1957,7 @@ void MacroAssembler::TailCallRuntime(Runtime::FunctionId fid) { JumpToExternalReference(ExternalReference::Create(fid)); } -int TurboAssembler::ActivationFrameAlignment() { +int MacroAssembler::ActivationFrameAlignment() { #if V8_HOST_ARCH_ARM64 // Running on the real platform. Use the alignment as mandated by the local // environment. 
@@ -1973,12 +1973,12 @@ int TurboAssembler::ActivationFrameAlignment() { #endif // V8_HOST_ARCH_ARM64 } -void TurboAssembler::CallCFunction(ExternalReference function, +void MacroAssembler::CallCFunction(ExternalReference function, int num_of_reg_args) { CallCFunction(function, num_of_reg_args, 0); } -void TurboAssembler::CallCFunction(ExternalReference function, +void MacroAssembler::CallCFunction(ExternalReference function, int num_of_reg_args, int num_of_double_args) { ASM_CODE_COMMENT(this); @@ -1991,7 +1991,7 @@ void TurboAssembler::CallCFunction(ExternalReference function, static const int kRegisterPassedArguments = 8; static const int kFPRegisterPassedArguments = 8; -void TurboAssembler::CallCFunction(Register function, int num_of_reg_args, +void MacroAssembler::CallCFunction(Register function, int num_of_reg_args, int num_of_double_args) { ASM_CODE_COMMENT(this); DCHECK_LE(num_of_reg_args + num_of_double_args, kMaxCParameters); @@ -2056,7 +2056,7 @@ void TurboAssembler::CallCFunction(Register function, int num_of_reg_args, } } -void TurboAssembler::LoadFromConstantsTable(Register destination, +void MacroAssembler::LoadFromConstantsTable(Register destination, int constant_index) { ASM_CODE_COMMENT(this); DCHECK(RootsTable::IsImmortalImmovable(RootIndex::kBuiltinsConstantsTable)); @@ -2066,11 +2066,11 @@ void TurboAssembler::LoadFromConstantsTable(Register destination, constant_index))); } -void TurboAssembler::LoadRootRelative(Register destination, int32_t offset) { +void MacroAssembler::LoadRootRelative(Register destination, int32_t offset) { Ldr(destination, MemOperand(kRootRegister, offset)); } -void TurboAssembler::LoadRootRegisterOffset(Register destination, +void MacroAssembler::LoadRootRegisterOffset(Register destination, intptr_t offset) { if (offset == 0) { Mov(destination, kRootRegister); @@ -2079,7 +2079,7 @@ void TurboAssembler::LoadRootRegisterOffset(Register destination, } } -MemOperand TurboAssembler::ExternalReferenceAsOperand( +MemOperand MacroAssembler::ExternalReferenceAsOperand( ExternalReference reference, Register scratch) { if (root_array_available_ && options().enable_root_relative_access) { int64_t offset = @@ -2108,7 +2108,7 @@ MemOperand TurboAssembler::ExternalReferenceAsOperand( return MemOperand(scratch, 0); } -void TurboAssembler::Jump(Register target, Condition cond) { +void MacroAssembler::Jump(Register target, Condition cond) { if (cond == nv) return; Label done; if (cond != al) B(NegateCondition(cond), &done); @@ -2116,7 +2116,7 @@ void TurboAssembler::Jump(Register target, Condition cond) { Bind(&done); } -void TurboAssembler::JumpHelper(int64_t offset, RelocInfo::Mode rmode, +void MacroAssembler::JumpHelper(int64_t offset, RelocInfo::Mode rmode, Condition cond) { if (cond == nv) return; Label done; @@ -2138,7 +2138,7 @@ void TurboAssembler::JumpHelper(int64_t offset, RelocInfo::Mode rmode, // * the 'target' input unmodified if this is a Wasm call, or // * the offset of the target from the current PC, in instructions, for any // other type of call. 
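Context for the call emitters that follow: an arm64 B/BL instruction encodes a signed 26-bit offset counted in 4-byte instructions, so near calls reach about 128 MB in either direction; IsNearCallOffset further below is simply is_int26 on such an offset. A standalone sketch of the bound (illustrative, not patch code):

    #include <cstdint>
    constexpr bool FitsInt26(int64_t insn_offset) {
      // 2^25 instructions of 4 bytes each = 128 MB of reach each way.
      return insn_offset >= -(int64_t{1} << 25) &&
             insn_offset < (int64_t{1} << 25);
    }
    static_assert(FitsInt26((int64_t{1} << 25) - 1) &&
                      !FitsInt26(int64_t{1} << 25),
                  "26-bit signed range is [-2^25, 2^25 - 1]");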
-int64_t TurboAssembler::CalculateTargetOffset(Address target, +int64_t MacroAssembler::CalculateTargetOffset(Address target, RelocInfo::Mode rmode, byte* pc) { int64_t offset = static_cast<int64_t>(target); if (rmode == RelocInfo::WASM_CALL || rmode == RelocInfo::WASM_STUB_CALL) { @@ -2152,13 +2152,13 @@ int64_t TurboAssembler::CalculateTargetOffset(Address target, return offset; } -void TurboAssembler::Jump(Address target, RelocInfo::Mode rmode, +void MacroAssembler::Jump(Address target, RelocInfo::Mode rmode, Condition cond) { int64_t offset = CalculateTargetOffset(target, rmode, pc_); JumpHelper(offset, rmode, cond); } -void TurboAssembler::Jump(Handle<Code> code, RelocInfo::Mode rmode, +void MacroAssembler::Jump(Handle<Code> code, RelocInfo::Mode rmode, Condition cond) { DCHECK(RelocInfo::IsCodeTarget(rmode)); DCHECK_IMPLIES(options().isolate_independent_code, @@ -2179,19 +2179,19 @@ void TurboAssembler::Jump(Handle<Code> code, RelocInfo::Mode rmode, } } -void TurboAssembler::Jump(const ExternalReference& reference) { +void MacroAssembler::Jump(const ExternalReference& reference) { UseScratchRegisterScope temps(this); Register scratch = temps.AcquireX(); Mov(scratch, reference); Jump(scratch); } -void TurboAssembler::Call(Register target) { +void MacroAssembler::Call(Register target) { BlockPoolsScope scope(this); Blr(target); } -void TurboAssembler::Call(Address target, RelocInfo::Mode rmode) { +void MacroAssembler::Call(Address target, RelocInfo::Mode rmode) { BlockPoolsScope scope(this); if (CanUseNearCallOrJump(rmode)) { int64_t offset = CalculateTargetOffset(target, rmode, pc_); @@ -2202,7 +2202,7 @@ void TurboAssembler::Call(Address target, RelocInfo::Mode rmode) { } } -void TurboAssembler::Call(Handle<Code> code, RelocInfo::Mode rmode) { +void MacroAssembler::Call(Handle<Code> code, RelocInfo::Mode rmode) { DCHECK_IMPLIES(options().isolate_independent_code, Builtins::IsIsolateIndependentBuiltin(*code)); BlockPoolsScope scope(this); @@ -2224,14 +2224,14 @@ void TurboAssembler::Call(Handle<Code> code, RelocInfo::Mode rmode) { } } -void TurboAssembler::Call(ExternalReference target) { +void MacroAssembler::Call(ExternalReference target) { UseScratchRegisterScope temps(this); Register temp = temps.AcquireX(); Mov(temp, target); Call(temp); } -void TurboAssembler::LoadEntryFromBuiltinIndex(Register builtin_index) { +void MacroAssembler::LoadEntryFromBuiltinIndex(Register builtin_index) { ASM_CODE_COMMENT(this); // The builtin_index register contains the builtin index as a Smi. // Untagging is folded into the indexing operand below.
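The folding mentioned in that comment merges Smi untagging with slot scaling into a single addressing operand: with 64-bit Smis the 32-bit payload sits in the upper word, so the byte offset is (smi >> kSmiShift) << kSystemPointerSizeLog2. A worked sketch with the shift constants assumed rather than quoted from V8:

    #include <cstdint>
    constexpr int kSmiShift_ = 32;              // assumed: payload in upper word
    constexpr int kSystemPointerSizeLog2_ = 3;  // assumed: 8-byte table entries
    constexpr int64_t BuiltinSlotOffset(int64_t smi) {
      return (smi >> kSmiShift_) << kSystemPointerSizeLog2_;
    }
    static_assert(BuiltinSlotOffset(int64_t{5} << 32) == 40,
                  "builtin index 5 selects the sixth 8-byte entry");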
@@ -2254,25 +2254,25 @@ void TurboAssembler::LoadEntryFromBuiltinIndex(Register builtin_index) { } } -void TurboAssembler::LoadEntryFromBuiltin(Builtin builtin, +void MacroAssembler::LoadEntryFromBuiltin(Builtin builtin, Register destination) { Ldr(destination, EntryFromBuiltinAsOperand(builtin)); } -MemOperand TurboAssembler::EntryFromBuiltinAsOperand(Builtin builtin) { +MemOperand MacroAssembler::EntryFromBuiltinAsOperand(Builtin builtin) { ASM_CODE_COMMENT(this); DCHECK(root_array_available()); return MemOperand(kRootRegister, IsolateData::BuiltinEntrySlotOffset(builtin)); } -void TurboAssembler::CallBuiltinByIndex(Register builtin_index) { +void MacroAssembler::CallBuiltinByIndex(Register builtin_index) { ASM_CODE_COMMENT(this); LoadEntryFromBuiltinIndex(builtin_index); Call(builtin_index); } -void TurboAssembler::CallBuiltin(Builtin builtin) { +void MacroAssembler::CallBuiltin(Builtin builtin) { ASM_CODE_COMMENT_STRING(this, CommentForOffHeapTrampoline("call", builtin)); switch (options().builtin_call_jump_mode) { case BuiltinCallJumpMode::kAbsolute: { @@ -2310,15 +2310,15 @@ void TurboAssembler::CallBuiltin(Builtin builtin) { } // TODO(ishell): remove cond parameter from here to simplify things. -void TurboAssembler::TailCallBuiltin(Builtin builtin, Condition cond) { +void MacroAssembler::TailCallBuiltin(Builtin builtin, Condition cond) { ASM_CODE_COMMENT_STRING(this, CommentForOffHeapTrampoline("tail call", builtin)); // The control flow integrity (CFI) feature allows us to "sign" code entry // points as a target for calls, jumps or both. Arm64 has special // instructions for this purpose, so-called "landing pads" (see - // TurboAssembler::CallTarget(), TurboAssembler::JumpTarget() and - // TurboAssembler::JumpOrCallTarget()). Currently, we generate "Call" + // MacroAssembler::CallTarget(), MacroAssembler::JumpTarget() and + // MacroAssembler::JumpOrCallTarget()). Currently, we generate "Call" // landing pads for CPP builtins. In order to allow tail calling to those // builtins we have to use a workaround. // x17 is used to allow using "Call" (i.e. `bti c`) rather than "Jump" @@ -2360,12 +2360,12 @@ void TurboAssembler::TailCallBuiltin(Builtin builtin, Condition cond) { } } -void TurboAssembler::LoadCodeEntry(Register destination, Register code_object) { +void MacroAssembler::LoadCodeEntry(Register destination, Register code_object) { ASM_CODE_COMMENT(this); Ldr(destination, FieldMemOperand(code_object, Code::kCodeEntryPointOffset)); } -void TurboAssembler::LoadCodeInstructionStreamNonBuiltin(Register destination, +void MacroAssembler::LoadCodeInstructionStreamNonBuiltin(Register destination, Register code_object) { ASM_CODE_COMMENT(this); // Compute the InstructionStream object pointer from the code entry point. 
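The comment closing this hunk is completed in the next one: the tagged InstructionStream pointer is recovered by subtracting InstructionStream::kHeaderSize - kHeapObjectTag from the code entry point. Worked arithmetic below; the header size is a hypothetical stand-in, only the tag value of 1 is V8's:

    #include <cstdint>
    constexpr int64_t kHeapObjectTag_ = 1;   // V8 heap pointers carry tag 1
    constexpr int64_t kStreamHeaderSize_ = 64;  // hypothetical header size
    constexpr int64_t StreamFromEntry(int64_t entry_point) {
      return entry_point - (kStreamHeaderSize_ - kHeapObjectTag_);
    }
    static_assert(StreamFromEntry(0x10040) == 0x10001,
                  "entry minus (header - tag) yields the tagged object pointer");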
@@ -2374,13 +2374,13 @@ void TurboAssembler::LoadCodeInstructionStreamNonBuiltin(Register destination, Immediate(InstructionStream::kHeaderSize - kHeapObjectTag)); } -void TurboAssembler::CallCodeObject(Register code_object) { +void MacroAssembler::CallCodeObject(Register code_object) { ASM_CODE_COMMENT(this); LoadCodeEntry(code_object, code_object); Call(code_object); } -void TurboAssembler::JumpCodeObject(Register code_object, JumpMode jump_mode) { +void MacroAssembler::JumpCodeObject(Register code_object, JumpMode jump_mode) { ASM_CODE_COMMENT(this); DCHECK_EQ(JumpMode::kJump, jump_mode); LoadCodeEntry(code_object, code_object); @@ -2392,7 +2392,7 @@ void TurboAssembler::JumpCodeObject(Register code_object, JumpMode jump_mode) { Jump(x17); } -void TurboAssembler::StoreReturnAddressAndCall(Register target) { +void MacroAssembler::StoreReturnAddressAndCall(Register target) { ASM_CODE_COMMENT(this); // This generates the final instruction sequence for calls to C functions // once an exit frame has been constructed. @@ -2426,7 +2426,7 @@ void TurboAssembler::StoreReturnAddressAndCall(Register target) { Bind(&return_location); } -void TurboAssembler::IndirectCall(Address target, RelocInfo::Mode rmode) { +void MacroAssembler::IndirectCall(Address target, RelocInfo::Mode rmode) { ASM_CODE_COMMENT(this); UseScratchRegisterScope temps(this); Register temp = temps.AcquireX(); @@ -2434,7 +2434,7 @@ void TurboAssembler::IndirectCall(Address target, RelocInfo::Mode rmode) { Blr(temp); } -bool TurboAssembler::IsNearCallOffset(int64_t offset) { +bool MacroAssembler::IsNearCallOffset(int64_t offset) { return is_int26(offset); } @@ -2445,7 +2445,7 @@ bool TurboAssembler::IsNearCallOffset(int64_t offset) { // the flags in the referenced {Code} object; // 2. test kMarkedForDeoptimizationBit in those flags; and // 3. if it is not zero then it jumps to the builtin. -void TurboAssembler::BailoutIfDeoptimized() { +void MacroAssembler::BailoutIfDeoptimized() { UseScratchRegisterScope temps(this); Register scratch = temps.AcquireX(); int offset = InstructionStream::kCodeOffset - InstructionStream::kHeaderSize; @@ -2460,7 +2460,7 @@ void TurboAssembler::BailoutIfDeoptimized() { Bind(¬_deoptimized); } -void TurboAssembler::CallForDeoptimization( +void MacroAssembler::CallForDeoptimization( Builtin target, int deopt_id, Label* exit, DeoptimizeKind kind, Label* ret, Label* jump_deoptimization_entry_label) { ASM_CODE_COMMENT(this); @@ -2479,10 +2479,10 @@ void MacroAssembler::LoadStackLimit(Register destination, StackLimitKind kind) { kind == StackLimitKind::kRealStackLimit ? 
ExternalReference::address_of_real_jslimit(isolate) : ExternalReference::address_of_jslimit(isolate); - DCHECK(TurboAssembler::IsAddressableThroughRootRegister(isolate, limit)); + DCHECK(MacroAssembler::IsAddressableThroughRootRegister(isolate, limit)); intptr_t offset = - TurboAssembler::RootRegisterOffsetForExternalReference(isolate, limit); + MacroAssembler::RootRegisterOffsetForExternalReference(isolate, limit); Ldr(destination, MemOperand(kRootRegister, offset)); } @@ -2751,7 +2751,7 @@ void MacroAssembler::InvokeFunction(Register function, actual_parameter_count, type); } -void TurboAssembler::TryConvertDoubleToInt64(Register result, +void MacroAssembler::TryConvertDoubleToInt64(Register result, DoubleRegister double_input, Label* done) { ASM_CODE_COMMENT(this); @@ -2776,7 +2776,7 @@ void TurboAssembler::TryConvertDoubleToInt64(Register result, B(vc, done); } -void TurboAssembler::TruncateDoubleToI(Isolate* isolate, Zone* zone, +void MacroAssembler::TruncateDoubleToI(Isolate* isolate, Zone* zone, Register result, DoubleRegister double_input, StubCallMode stub_mode, @@ -2795,9 +2795,9 @@ void TurboAssembler::TruncateDoubleToI(Isolate* isolate, Zone* zone, // If we fell through then inline version didn't succeed - call stub instead. if (lr_status == kLRHasNotBeenSaved) { - Push<TurboAssembler::kSignLR>(lr, double_input); + Push<MacroAssembler::kSignLR>(lr, double_input); } else { - Push<TurboAssembler::kDontStoreLR>(xzr, double_input); + Push<MacroAssembler::kDontStoreLR>(xzr, double_input); } // DoubleToI preserves any registers it needs to clobber. @@ -2817,7 +2817,7 @@ if (lr_status == kLRHasNotBeenSaved) { // Pop into xzr here to drop the double input on the stack: - Pop<TurboAssembler::kAuthLR>(xzr, lr); + Pop<MacroAssembler::kAuthLR>(xzr, lr); } else { Drop(2); } @@ -2827,21 +2827,21 @@ Uxtw(result.W(), result.W()); } -void TurboAssembler::Prologue() { +void MacroAssembler::Prologue() { ASM_CODE_COMMENT(this); - Push<TurboAssembler::kSignLR>(lr, fp); + Push<MacroAssembler::kSignLR>(lr, fp); mov(fp, sp); static_assert(kExtraSlotClaimedByPrologue == 1); Push(cp, kJSFunctionRegister, kJavaScriptCallArgCountRegister, padreg); } -void TurboAssembler::EnterFrame(StackFrame::Type type) { +void MacroAssembler::EnterFrame(StackFrame::Type type) { UseScratchRegisterScope temps(this); if (StackFrame::IsJavaScript(type)) { // Just push a minimal "machine frame", saving the frame pointer and return // address, without any markers. - Push<TurboAssembler::kSignLR>(lr, fp); + Push<MacroAssembler::kSignLR>(lr, fp); Mov(fp, sp); // sp[1] : lr // sp[0] : fp @@ -2860,7 +2860,7 @@ } else { fourth_reg = padreg; } - Push<TurboAssembler::kSignLR>(lr, fp, type_reg, fourth_reg); + Push<MacroAssembler::kSignLR>(lr, fp, type_reg, fourth_reg); static constexpr int kSPToFPDelta = 2 * kSystemPointerSize; Add(fp, sp, kSPToFPDelta); // sp[3] : lr @@ -2870,12 +2870,12 @@ } } -void TurboAssembler::LeaveFrame(StackFrame::Type type) { +void MacroAssembler::LeaveFrame(StackFrame::Type type) { ASM_CODE_COMMENT(this); // Drop the execution stack down to the frame pointer and restore // the caller frame pointer and return address. Mov(sp, fp); - Pop<TurboAssembler::kAuthLR>(fp, lr); + Pop<MacroAssembler::kAuthLR>(fp, lr); } void MacroAssembler::EnterExitFrame(const Register& scratch, int extra_space, @@ -2885,7 +2885,7 @@ void MacroAssembler::EnterExitFrame(const Register& scratch, int extra_space, frame_type == StackFrame::BUILTIN_EXIT); // Set up the new stack frame.
- Push<TurboAssembler::kSignLR>(lr, fp); + Push<MacroAssembler::kSignLR>(lr, fp); Mov(fp, sp); Mov(scratch, StackFrame::TypeToMarker(frame_type)); Push(scratch, xzr); @@ -2961,7 +2961,7 @@ void MacroAssembler::LeaveExitFrame(const Register& scratch, // fp -> fp[0]: CallerFP (old fp) // fp[...]: The rest of the frame. Mov(sp, fp); - Pop<TurboAssembler::kAuthLR>(fp, lr); + Pop<MacroAssembler::kAuthLR>(fp, lr); } void MacroAssembler::LoadGlobalProxy(Register dst) { @@ -3010,7 +3010,7 @@ void MacroAssembler::CompareObjectType(Register object, Register map, CompareInstanceType(map, type_reg, type); } -void TurboAssembler::LoadMap(Register dst, Register object) { +void MacroAssembler::LoadMap(Register dst, Register object) { ASM_CODE_COMMENT(this); LoadTaggedPointerField(dst, FieldMemOperand(object, HeapObject::kMapOffset)); } @@ -3086,7 +3086,7 @@ void MacroAssembler::JumpIfIsInRange(const Register& value, } } -void TurboAssembler::LoadTaggedPointerField(const Register& destination, +void MacroAssembler::LoadTaggedPointerField(const Register& destination, const MemOperand& field_operand) { if (COMPRESS_POINTERS_BOOL) { DecompressTaggedPointer(destination, field_operand); @@ -3095,7 +3095,7 @@ void TurboAssembler::LoadTaggedPointerField(const Register& destination, } } -void TurboAssembler::LoadAnyTaggedField(const Register& destination, +void MacroAssembler::LoadAnyTaggedField(const Register& destination, const MemOperand& field_operand) { if (COMPRESS_POINTERS_BOOL) { DecompressAnyTagged(destination, field_operand); @@ -3104,7 +3104,7 @@ void TurboAssembler::LoadAnyTaggedField(const Register& destination, } } -void TurboAssembler::LoadTaggedSignedField(const Register& destination, +void MacroAssembler::LoadTaggedSignedField(const Register& destination, const MemOperand& field_operand) { if (COMPRESS_POINTERS_BOOL) { DecompressTaggedSigned(destination, field_operand); @@ -3113,11 +3113,11 @@ void TurboAssembler::LoadTaggedSignedField(const Register& destination, } } -void TurboAssembler::SmiUntagField(Register dst, const MemOperand& src) { +void MacroAssembler::SmiUntagField(Register dst, const MemOperand& src) { SmiUntag(dst, src); } -void TurboAssembler::StoreTaggedField(const Register& value, +void MacroAssembler::StoreTaggedField(const Register& value, const MemOperand& dst_field_operand) { if (COMPRESS_POINTERS_BOOL) { Str(value.W(), dst_field_operand); @@ -3126,7 +3126,7 @@ void TurboAssembler::StoreTaggedField(const Register& value, } } -void TurboAssembler::AtomicStoreTaggedField(const Register& value, +void MacroAssembler::AtomicStoreTaggedField(const Register& value, const Register& dst_base, const Register& dst_index, const Register& temp) { @@ -3138,7 +3138,7 @@ void TurboAssembler::AtomicStoreTaggedField(const Register& value, } } -void TurboAssembler::DecompressTaggedSigned(const Register& destination, +void MacroAssembler::DecompressTaggedSigned(const Register& destination, const MemOperand& field_operand) { ASM_CODE_COMMENT(this); Ldr(destination.W(), field_operand); @@ -3149,20 +3149,20 @@ void TurboAssembler::DecompressTaggedSigned(const Register& destination, } } -void TurboAssembler::DecompressTaggedPointer(const Register& destination, +void MacroAssembler::DecompressTaggedPointer(const Register& destination, const MemOperand& field_operand) { ASM_CODE_COMMENT(this); Ldr(destination.W(), field_operand); Add(destination, kPtrComprCageBaseRegister, destination); } -void TurboAssembler::DecompressTaggedPointer(const Register& destination, +void MacroAssembler::DecompressTaggedPointer(const Register& destination, const Register& source) { ASM_CODE_COMMENT(this);
Add(destination, kPtrComprCageBaseRegister, Operand(source, UXTW)); } -void TurboAssembler::DecompressTaggedPointer(const Register& destination, +void MacroAssembler::DecompressTaggedPointer(const Register& destination, Tagged_t immediate) { ASM_CODE_COMMENT(this); if (IsImmAddSub(immediate)) { @@ -3178,14 +3178,14 @@ void TurboAssembler::DecompressTaggedPointer(const Register& destination, } } -void TurboAssembler::DecompressAnyTagged(const Register& destination, +void MacroAssembler::DecompressAnyTagged(const Register& destination, const MemOperand& field_operand) { ASM_CODE_COMMENT(this); Ldr(destination.W(), field_operand); Add(destination, kPtrComprCageBaseRegister, destination); } -void TurboAssembler::AtomicDecompressTaggedSigned(const Register& destination, +void MacroAssembler::AtomicDecompressTaggedSigned(const Register& destination, const Register& base, const Register& index, const Register& temp) { @@ -3199,7 +3199,7 @@ void TurboAssembler::AtomicDecompressTaggedSigned(const Register& destination, } } -void TurboAssembler::AtomicDecompressTaggedPointer(const Register& destination, +void MacroAssembler::AtomicDecompressTaggedPointer(const Register& destination, const Register& base, const Register& index, const Register& temp) { @@ -3209,7 +3209,7 @@ void TurboAssembler::AtomicDecompressTaggedPointer(const Register& destination, Add(destination, kPtrComprCageBaseRegister, destination); } -void TurboAssembler::AtomicDecompressAnyTagged(const Register& destination, +void MacroAssembler::AtomicDecompressAnyTagged(const Register& destination, const Register& base, const Register& index, const Register& temp) { @@ -3219,7 +3219,7 @@ void TurboAssembler::AtomicDecompressAnyTagged(const Register& destination, Add(destination, kPtrComprCageBaseRegister, destination); } -void TurboAssembler::CheckPageFlag(const Register& object, int mask, +void MacroAssembler::CheckPageFlag(const Register& object, int mask, Condition cc, Label* condition_met) { ASM_CODE_COMMENT(this); UseScratchRegisterScope temps(this); @@ -3273,7 +3273,7 @@ void MacroAssembler::RecordWriteField(Register object, int offset, Bind(&done); } -void TurboAssembler::DecodeSandboxedPointer(const Register& value) { +void MacroAssembler::DecodeSandboxedPointer(const Register& value) { ASM_CODE_COMMENT(this); #ifdef V8_ENABLE_SANDBOX Add(value, kPtrComprCageBaseRegister, @@ -3283,7 +3283,7 @@ void TurboAssembler::DecodeSandboxedPointer(const Register& value) { #endif } -void TurboAssembler::LoadSandboxedPointerField( +void MacroAssembler::LoadSandboxedPointerField( const Register& destination, const MemOperand& field_operand) { #ifdef V8_ENABLE_SANDBOX ASM_CODE_COMMENT(this); @@ -3294,7 +3294,7 @@ void TurboAssembler::LoadSandboxedPointerField( #endif } -void TurboAssembler::StoreSandboxedPointerField( +void MacroAssembler::StoreSandboxedPointerField( const Register& value, const MemOperand& dst_field_operand) { #ifdef V8_ENABLE_SANDBOX ASM_CODE_COMMENT(this); @@ -3308,7 +3308,7 @@ void TurboAssembler::StoreSandboxedPointerField( #endif } -void TurboAssembler::LoadExternalPointerField(Register destination, +void MacroAssembler::LoadExternalPointerField(Register destination, MemOperand field_operand, ExternalPointerTag tag, Register isolate_root) { @@ -3340,66 +3340,67 @@ void TurboAssembler::LoadExternalPointerField(Register destination, #endif // V8_ENABLE_SANDBOX } -void TurboAssembler::MaybeSaveRegisters(RegList registers) { - if (registers.is_empty()) return; - ASM_CODE_COMMENT(this); - CPURegList regs(kXRegSizeInBits, 
registers); - // If we were saving LR, we might need to sign it. - DCHECK(!regs.IncludesAliasOf(lr)); - regs.Align(); - PushCPURegList(regs); +void MacroAssembler::MaybeSaveRegisters(RegList registers) { + if (registers.is_empty()) return; + ASM_CODE_COMMENT(this); + CPURegList regs(kXRegSizeInBits, registers); + // If we were saving LR, we might need to sign it. + DCHECK(!regs.IncludesAliasOf(lr)); + regs.Align(); + PushCPURegList(regs); } -void TurboAssembler::MaybeRestoreRegisters(RegList registers) { - if (registers.is_empty()) return; - ASM_CODE_COMMENT(this); - CPURegList regs(kXRegSizeInBits, registers); - // If we were saving LR, we might need to sign it. - DCHECK(!regs.IncludesAliasOf(lr)); - regs.Align(); - PopCPURegList(regs); +void MacroAssembler::MaybeRestoreRegisters(RegList registers) { + if (registers.is_empty()) return; + ASM_CODE_COMMENT(this); + CPURegList regs(kXRegSizeInBits, registers); + // If we were saving LR, we might need to sign it. + DCHECK(!regs.IncludesAliasOf(lr)); + regs.Align(); + PopCPURegList(regs); } -void TurboAssembler::CallEphemeronKeyBarrier(Register object, Operand offset, +void MacroAssembler::CallEphemeronKeyBarrier(Register object, Operand offset, SaveFPRegsMode fp_mode) { - ASM_CODE_COMMENT(this); - RegList registers = WriteBarrierDescriptor::ComputeSavedRegisters(object); - MaybeSaveRegisters(registers); + ASM_CODE_COMMENT(this); + RegList registers = WriteBarrierDescriptor::ComputeSavedRegisters(object); + MaybeSaveRegisters(registers); - MoveObjectAndSlot(WriteBarrierDescriptor::ObjectRegister(), - WriteBarrierDescriptor::SlotAddressRegister(), object, - offset); + MoveObjectAndSlot(WriteBarrierDescriptor::ObjectRegister(), + WriteBarrierDescriptor::SlotAddressRegister(), object, + offset); - Call(isolate()->builtins()->code_handle( - Builtins::GetEphemeronKeyBarrierStub(fp_mode)), - RelocInfo::CODE_TARGET); - MaybeRestoreRegisters(registers); + Call(isolate()->builtins()->code_handle( + Builtins::GetEphemeronKeyBarrierStub(fp_mode)), + RelocInfo::CODE_TARGET); + MaybeRestoreRegisters(registers); } -void TurboAssembler::CallRecordWriteStubSaveRegisters(Register object, +void MacroAssembler::CallRecordWriteStubSaveRegisters(Register object, Operand offset, SaveFPRegsMode fp_mode, StubCallMode mode) { - ASM_CODE_COMMENT(this); - RegList registers = WriteBarrierDescriptor::ComputeSavedRegisters(object); - MaybeSaveRegisters(registers); + ASM_CODE_COMMENT(this); + RegList registers = WriteBarrierDescriptor::ComputeSavedRegisters(object); + MaybeSaveRegisters(registers); - Register object_parameter = WriteBarrierDescriptor::ObjectRegister(); - Register slot_address_parameter = - WriteBarrierDescriptor::SlotAddressRegister(); - MoveObjectAndSlot(object_parameter, slot_address_parameter, object, offset); + Register object_parameter = WriteBarrierDescriptor::ObjectRegister(); + Register slot_address_parameter = + WriteBarrierDescriptor::SlotAddressRegister(); + MoveObjectAndSlot(object_parameter, slot_address_parameter, object, offset); - CallRecordWriteStub(object_parameter, slot_address_parameter, fp_mode, mode); + CallRecordWriteStub(object_parameter, slot_address_parameter, fp_mode, + mode); - MaybeRestoreRegisters(registers); + MaybeRestoreRegisters(registers); } -void TurboAssembler::CallRecordWriteStub(Register object, Register slot_address, +void MacroAssembler::CallRecordWriteStub(Register object, Register slot_address, SaveFPRegsMode fp_mode, StubCallMode mode) { - ASM_CODE_COMMENT(this); - 
DCHECK_EQ(WriteBarrierDescriptor::ObjectRegister(), object); - DCHECK_EQ(WriteBarrierDescriptor::SlotAddressRegister(), slot_address); + ASM_CODE_COMMENT(this); + DCHECK_EQ(WriteBarrierDescriptor::ObjectRegister(), object); + DCHECK_EQ(WriteBarrierDescriptor::SlotAddressRegister(), slot_address); #if V8_ENABLE_WEBASSEMBLY if (mode == StubCallMode::kCallWasmRuntimeStub) { auto wasm_target = wasm::WasmCode::GetRecordWriteStub(fp_mode); @@ -3413,7 +3414,7 @@ void TurboAssembler::CallRecordWriteStub(Register object, Register slot_address, } } -void TurboAssembler::MoveObjectAndSlot(Register dst_object, Register dst_slot, +void MacroAssembler::MoveObjectAndSlot(Register dst_object, Register dst_slot, Register object, Operand offset) { ASM_CODE_COMMENT(this); DCHECK_NE(dst_object, dst_slot); @@ -3490,7 +3491,7 @@ void MacroAssembler::RecordWrite(Register object, Operand offset, // Record the actual write. if (lr_status == kLRHasNotBeenSaved) { - Push(padreg, lr); + Push(padreg, lr); } Register slot_address = WriteBarrierDescriptor::SlotAddressRegister(); DCHECK(!AreAliased(object, slot_address, value)); @@ -3499,14 +3500,14 @@ void MacroAssembler::RecordWrite(Register object, Operand offset, Add(slot_address, object, offset); CallRecordWriteStub(object, slot_address, fp_mode); if (lr_status == kLRHasNotBeenSaved) { - Pop(lr, padreg); + Pop(lr, padreg); } if (v8_flags.debug_code) Mov(slot_address, Operand(kZapValue)); Bind(&done); } -void TurboAssembler::Check(Condition cond, AbortReason reason) { +void MacroAssembler::Check(Condition cond, AbortReason reason) { Label ok; B(cond, &ok); Abort(reason); @@ -3514,10 +3515,10 @@ void TurboAssembler::Check(Condition cond, AbortReason reason) { Bind(&ok); } -void TurboAssembler::Trap() { Brk(0); } -void TurboAssembler::DebugBreak() { Debug("DebugBreak", 0, BREAK); } +void MacroAssembler::Trap() { Brk(0); } +void MacroAssembler::DebugBreak() { Debug("DebugBreak", 0, BREAK); } -void TurboAssembler::Abort(AbortReason reason) { +void MacroAssembler::Abort(AbortReason reason) { ASM_CODE_COMMENT(this); if (v8_flags.code_comments) { RecordComment("Abort message: "); @@ -3610,7 +3611,7 @@ void MacroAssembler::TryLoadOptimizedOsrCode(Register scratch_and_result, // This is the main Printf implementation. All other Printf variants call // PrintfNoPreserve after setting up one or more PreserveRegisterScopes. -void TurboAssembler::PrintfNoPreserve(const char* format, +void MacroAssembler::PrintfNoPreserve(const char* format, const CPURegister& arg0, const CPURegister& arg1, const CPURegister& arg2, @@ -3644,7 +3645,7 @@ void TurboAssembler::PrintfNoPreserve(const char* format, fp_tmp_list.Remove(kPCSVarargsFP); fp_tmp_list.Remove(arg0, arg1, arg2, arg3); - // Override the TurboAssembler's scratch register list. The lists will be + // Override the MacroAssembler's scratch register list. The lists will be // reset automatically at the end of the UseScratchRegisterScope. 
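// (A minimal sketch of the scope contract relied on here, not code from
// this CL; "masm" is a stand-in for any MacroAssembler*:
//   UseScratchRegisterScope temps(masm);  // snapshots TmpList()/FPTmpList()
//   Register scratch = temps.AcquireX();  // borrow one scratch register
//   ...                                   // scratch is valid only in scope
// On scope exit the saved lists are restored, so the override below stays
// strictly local to this function.)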
UseScratchRegisterScope temps(this); TmpList()->set_bits(tmp_list.bits()); @@ -3760,7 +3761,7 @@ void TurboAssembler::PrintfNoPreserve(const char* format, CallPrintf(arg_count, pcs); } -void TurboAssembler::CallPrintf(int arg_count, const CPURegister* args) { +void MacroAssembler::CallPrintf(int arg_count, const CPURegister* args) { ASM_CODE_COMMENT(this); // A call to printf needs special handling for the simulator, since the system // printf function will use a different instruction set and the procedure-call @@ -3790,7 +3791,7 @@ void TurboAssembler::CallPrintf(int arg_count, const CPURegister* args) { Call(ExternalReference::printf_function()); } -void TurboAssembler::Printf(const char* format, CPURegister arg0, +void MacroAssembler::Printf(const char* format, CPURegister arg0, CPURegister arg1, CPURegister arg2, CPURegister arg3) { ASM_CODE_COMMENT(this); @@ -3889,12 +3890,12 @@ CPURegister UseScratchRegisterScope::AcquireNextAvailable( return result; } -void TurboAssembler::ComputeCodeStartAddress(const Register& rd) { +void MacroAssembler::ComputeCodeStartAddress(const Register& rd) { // We can use adr to load a pc relative location. adr(rd, -pc_offset()); } -void TurboAssembler::RestoreFPAndLR() { +void MacroAssembler::RestoreFPAndLR() { static_assert(StandardFrameConstants::kCallerFPOffset + kSystemPointerSize == StandardFrameConstants::kCallerPCOffset, "Offsets must be consecutive for ldp!"); @@ -3913,7 +3914,7 @@ void TurboAssembler::RestoreFPAndLR() { } #if V8_ENABLE_WEBASSEMBLY -void TurboAssembler::StoreReturnAddressInWasmExitFrame(Label* return_location) { +void MacroAssembler::StoreReturnAddressInWasmExitFrame(Label* return_location) { UseScratchRegisterScope temps(this); temps.Exclude(x16, x17); Adr(x17, return_location); @@ -3925,7 +3926,7 @@ void TurboAssembler::StoreReturnAddressInWasmExitFrame(Label* return_location) { } #endif // V8_ENABLE_WEBASSEMBLY -void TurboAssembler::PopcntHelper(Register dst, Register src) { +void MacroAssembler::PopcntHelper(Register dst, Register src) { UseScratchRegisterScope temps(this); VRegister scratch = temps.AcquireV(kFormat8B); VRegister tmp = src.Is32Bits() ? scratch.S() : scratch.D(); @@ -3935,7 +3936,7 @@ void TurboAssembler::PopcntHelper(Register dst, Register src) { Fmov(dst, tmp); } -void TurboAssembler::I64x2BitMask(Register dst, VRegister src) { +void MacroAssembler::I64x2BitMask(Register dst, VRegister src) { ASM_CODE_COMMENT(this); UseScratchRegisterScope scope(this); VRegister tmp1 = scope.AcquireV(kFormat2D); @@ -3946,7 +3947,7 @@ void TurboAssembler::I64x2BitMask(Register dst, VRegister src) { Add(dst.W(), dst.W(), Operand(tmp2.W(), LSL, 1)); } -void TurboAssembler::I64x2AllTrue(Register dst, VRegister src) { +void MacroAssembler::I64x2AllTrue(Register dst, VRegister src) { ASM_CODE_COMMENT(this); UseScratchRegisterScope scope(this); VRegister tmp = scope.AcquireV(kFormat2D); diff --git a/src/codegen/arm64/macro-assembler-arm64.h b/src/codegen/arm64/macro-assembler-arm64.h index b4c9060845..9c3fcc640b 100644 --- a/src/codegen/arm64/macro-assembler-arm64.h +++ b/src/codegen/arm64/macro-assembler-arm64.h @@ -146,9 +146,9 @@ enum PreShiftImmMode { // platforms are updated. 
enum class StackLimitKind { kInterruptStackLimit, kRealStackLimit }; -class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase { +class V8_EXPORT_PRIVATE MacroAssembler : public MacroAssemblerBase { public: - using TurboAssemblerBase::TurboAssemblerBase; + using MacroAssemblerBase::MacroAssemblerBase; #if DEBUG void set_allow_macro_instructions(bool value) { @@ -1484,81 +1484,6 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase { ExternalPointerTag tag, Register isolate_root = Register::no_reg()); - protected: - // The actual Push and Pop implementations. These don't generate any code - // other than that required for the push or pop. This allows - // (Push|Pop)CPURegList to bundle together run-time assertions for a large - // block of registers. - // - // Note that size is per register, and is specified in bytes. - void PushHelper(int count, int size, const CPURegister& src0, - const CPURegister& src1, const CPURegister& src2, - const CPURegister& src3); - void PopHelper(int count, int size, const CPURegister& dst0, - const CPURegister& dst1, const CPURegister& dst2, - const CPURegister& dst3); - - void ConditionalCompareMacro(const Register& rn, const Operand& operand, - StatusFlags nzcv, Condition cond, - ConditionalCompareOp op); - - void AddSubWithCarryMacro(const Register& rd, const Register& rn, - const Operand& operand, FlagsUpdate S, - AddSubWithCarryOp op); - - // Call Printf. On a native build, a simple call will be generated, but if the - // simulator is being used then a suitable pseudo-instruction is used. The - // arguments and stack must be prepared by the caller as for a normal AAPCS64 - // call to 'printf'. - // - // The 'args' argument should point to an array of variable arguments in their - // proper PCS registers (and in calling order). The argument registers can - // have mixed types. The format string (x0) should not be included. - void CallPrintf(int arg_count = 0, const CPURegister* args = nullptr); - - private: -#if DEBUG - // Tell whether any of the macro instruction can be used. When false the - // MacroAssembler will assert if a method which can emit a variable number - // of instructions is called. - bool allow_macro_instructions_ = true; -#endif - - // Scratch registers available for use by the MacroAssembler. - CPURegList tmp_list_ = DefaultTmpList(); - CPURegList fptmp_list_ = DefaultFPTmpList(); - - // Helps resolve branching to labels potentially out of range. - // If the label is not bound, it registers the information necessary to later - // be able to emit a veneer for this branch if necessary. - // If the label is bound, it returns true if the label (or the previous link - // in the label chain) is out of range. In that case the caller is responsible - // for generating appropriate code. - // Otherwise it returns false. - // This function also checks wether veneers need to be emitted. 
- bool NeedExtraInstructionsOrRegisterBranch(Label* label, - ImmBranchType branch_type); - - void Movi16bitHelper(const VRegister& vd, uint64_t imm); - void Movi32bitHelper(const VRegister& vd, uint64_t imm); - void Movi64bitHelper(const VRegister& vd, uint64_t imm); - - void LoadStoreMacro(const CPURegister& rt, const MemOperand& addr, - LoadStoreOp op); - - void LoadStorePairMacro(const CPURegister& rt, const CPURegister& rt2, - const MemOperand& addr, LoadStorePairOp op); - - int64_t CalculateTargetOffset(Address target, RelocInfo::Mode rmode, - byte* pc); - - void JumpHelper(int64_t offset, RelocInfo::Mode rmode, Condition cond = al); -}; - -class V8_EXPORT_PRIVATE MacroAssembler : public TurboAssembler { - public: - using TurboAssembler::TurboAssembler; - // Instruction set functions ------------------------------------------------ // Logical macros. inline void Bics(const Register& rd, const Register& rn, @@ -1594,18 +1519,10 @@ class V8_EXPORT_PRIVATE MacroAssembler : public TurboAssembler { Condition cond); inline void Extr(const Register& rd, const Register& rn, const Register& rm, unsigned lsb); - void Fcvtl(const VRegister& vd, const VRegister& vn) { - DCHECK(allow_macro_instructions()); - fcvtl(vd, vn); - } void Fcvtl2(const VRegister& vd, const VRegister& vn) { DCHECK(allow_macro_instructions()); fcvtl2(vd, vn); } - void Fcvtn(const VRegister& vd, const VRegister& vn) { - DCHECK(allow_macro_instructions()); - fcvtn(vd, vn); - } void Fcvtn2(const VRegister& vd, const VRegister& vn) { DCHECK(allow_macro_instructions()); fcvtn2(vd, vn); @@ -1641,7 +1558,6 @@ class V8_EXPORT_PRIVATE MacroAssembler : public TurboAssembler { DCHECK(allow_macro_instructions()); mvni(vd, imm8, shift, shift_amount); } - inline void Rev(const Register& rd, const Register& rn); inline void Smaddl(const Register& rd, const Register& rn, const Register& rm, const Register& ra); inline void Smsubl(const Register& rd, const Register& rn, const Register& rm, @@ -2139,6 +2055,76 @@ class V8_EXPORT_PRIVATE MacroAssembler : public TurboAssembler { Register feedback_vector, FeedbackSlot slot, Label* on_result, Label::Distance distance); + protected: + // The actual Push and Pop implementations. These don't generate any code + // other than that required for the push or pop. This allows + // (Push|Pop)CPURegList to bundle together run-time assertions for a large + // block of registers. + // + // Note that size is per register, and is specified in bytes. + void PushHelper(int count, int size, const CPURegister& src0, + const CPURegister& src1, const CPURegister& src2, + const CPURegister& src3); + void PopHelper(int count, int size, const CPURegister& dst0, + const CPURegister& dst1, const CPURegister& dst2, + const CPURegister& dst3); + + void ConditionalCompareMacro(const Register& rn, const Operand& operand, + StatusFlags nzcv, Condition cond, + ConditionalCompareOp op); + + void AddSubWithCarryMacro(const Register& rd, const Register& rn, + const Operand& operand, FlagsUpdate S, + AddSubWithCarryOp op); + + // Call Printf. On a native build, a simple call will be generated, but if the + // simulator is being used then a suitable pseudo-instruction is used. The + // arguments and stack must be prepared by the caller as for a normal AAPCS64 + // call to 'printf'. + // + // The 'args' argument should point to an array of variable arguments in their + // proper PCS registers (and in calling order). The argument registers can + // have mixed types. The format string (x0) should not be included. 
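// (Hypothetical call site, for illustration only: with the format string's
// address already in x0 and a single integer vararg in x1, a caller could
// emit
//   static const CPURegister kPrintfArgs[] = {x1};
//   CallPrintf(1, kPrintfArgs);
// In practice the Printf()/PrintfNoPreserve() wrappers do this setup.)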
+ void CallPrintf(int arg_count = 0, const CPURegister* args = nullptr); + + private: +#if DEBUG + // Tell whether any of the macro instruction can be used. When false the + // MacroAssembler will assert if a method which can emit a variable number + // of instructions is called. + bool allow_macro_instructions_ = true; +#endif + + // Scratch registers available for use by the MacroAssembler. + CPURegList tmp_list_ = DefaultTmpList(); + CPURegList fptmp_list_ = DefaultFPTmpList(); + + // Helps resolve branching to labels potentially out of range. + // If the label is not bound, it registers the information necessary to later + // be able to emit a veneer for this branch if necessary. + // If the label is bound, it returns true if the label (or the previous link + // in the label chain) is out of range. In that case the caller is responsible + // for generating appropriate code. + // Otherwise it returns false. + // This function also checks wether veneers need to be emitted. + bool NeedExtraInstructionsOrRegisterBranch(Label* label, + ImmBranchType branch_type); + + void Movi16bitHelper(const VRegister& vd, uint64_t imm); + void Movi32bitHelper(const VRegister& vd, uint64_t imm); + void Movi64bitHelper(const VRegister& vd, uint64_t imm); + + void LoadStoreMacro(const CPURegister& rt, const MemOperand& addr, + LoadStoreOp op); + + void LoadStorePairMacro(const CPURegister& rt, const CPURegister& rt2, + const MemOperand& addr, LoadStorePairOp op); + + int64_t CalculateTargetOffset(Address target, RelocInfo::Mode rmode, + byte* pc); + + void JumpHelper(int64_t offset, RelocInfo::Mode rmode, Condition cond = al); + DISALLOW_IMPLICIT_CONSTRUCTORS(MacroAssembler); }; @@ -2148,38 +2134,38 @@ class V8_EXPORT_PRIVATE MacroAssembler : public TurboAssembler { // emitted is what you specified when creating the scope. class V8_NODISCARD InstructionAccurateScope { public: - explicit InstructionAccurateScope(TurboAssembler* tasm, size_t count = 0) - : tasm_(tasm), - block_pool_(tasm, count * kInstrSize) + explicit InstructionAccurateScope(MacroAssembler* masm, size_t count = 0) + : masm_(masm), + block_pool_(masm, count * kInstrSize) #ifdef DEBUG , size_(count * kInstrSize) #endif { - tasm_->CheckVeneerPool(false, true, count * kInstrSize); - tasm_->StartBlockVeneerPool(); + masm_->CheckVeneerPool(false, true, count * kInstrSize); + masm_->StartBlockVeneerPool(); #ifdef DEBUG if (count != 0) { - tasm_->bind(&start_); + masm_->bind(&start_); } - previous_allow_macro_instructions_ = tasm_->allow_macro_instructions(); - tasm_->set_allow_macro_instructions(false); + previous_allow_macro_instructions_ = masm_->allow_macro_instructions(); + masm_->set_allow_macro_instructions(false); #endif } ~InstructionAccurateScope() { - tasm_->EndBlockVeneerPool(); + masm_->EndBlockVeneerPool(); #ifdef DEBUG if (start_.is_bound()) { - DCHECK(tasm_->SizeOfCodeGeneratedSince(&start_) == size_); + DCHECK(masm_->SizeOfCodeGeneratedSince(&start_) == size_); } - tasm_->set_allow_macro_instructions(previous_allow_macro_instructions_); + masm_->set_allow_macro_instructions(previous_allow_macro_instructions_); #endif } private: - TurboAssembler* tasm_; - TurboAssembler::BlockConstPoolScope block_pool_; + MacroAssembler* masm_; + MacroAssembler::BlockConstPoolScope block_pool_; #ifdef DEBUG size_t size_; Label start_; @@ -2188,7 +2174,7 @@ class V8_NODISCARD InstructionAccurateScope { }; // This scope utility allows scratch registers to be managed safely. 
The -// TurboAssembler's TmpList() (and FPTmpList()) is used as a pool of scratch +// MacroAssembler's TmpList() (and FPTmpList()) is used as a pool of scratch // registers. These registers can be allocated on demand, and will be returned // at the end of the scope. // @@ -2198,9 +2184,9 @@ class V8_NODISCARD InstructionAccurateScope { // order as the constructors. We do not have assertions for this. class V8_NODISCARD UseScratchRegisterScope { public: - explicit UseScratchRegisterScope(TurboAssembler* tasm) - : available_(tasm->TmpList()), - availablefp_(tasm->FPTmpList()), + explicit UseScratchRegisterScope(MacroAssembler* masm) + : available_(masm->TmpList()), + availablefp_(masm->FPTmpList()), old_available_(available_->bits()), old_availablefp_(availablefp_->bits()) { DCHECK_EQ(available_->type(), CPURegister::kRegister); diff --git a/src/codegen/ia32/macro-assembler-ia32.cc b/src/codegen/ia32/macro-assembler-ia32.cc index f23df0884e..994e01288c 100644 --- a/src/codegen/ia32/macro-assembler-ia32.cc +++ b/src/codegen/ia32/macro-assembler-ia32.cc @@ -21,11 +21,11 @@ #include "src/codegen/ia32/register-ia32.h" #include "src/codegen/interface-descriptors-inl.h" #include "src/codegen/label.h" +#include "src/codegen/macro-assembler-base.h" #include "src/codegen/macro-assembler.h" #include "src/codegen/register.h" #include "src/codegen/reglist.h" #include "src/codegen/reloc-info.h" -#include "src/codegen/turbo-assembler.h" #include "src/common/globals.h" #include "src/deoptimizer/deoptimizer.h" #include "src/execution/frame-constants.h" @@ -77,18 +77,18 @@ Operand StackArgumentsAccessor::GetArgumentOperand(int index) const { // ------------------------------------------------------------------------- // MacroAssembler implementation. -void TurboAssembler::InitializeRootRegister() { +void MacroAssembler::InitializeRootRegister() { ASM_CODE_COMMENT(this); ExternalReference isolate_root = ExternalReference::isolate_root(isolate()); Move(kRootRegister, Immediate(isolate_root)); } -Operand TurboAssembler::RootAsOperand(RootIndex index) { +Operand MacroAssembler::RootAsOperand(RootIndex index) { DCHECK(root_array_available()); return Operand(kRootRegister, RootRegisterOffsetForRootIndex(index)); } -void TurboAssembler::LoadRoot(Register destination, RootIndex index) { +void MacroAssembler::LoadRoot(Register destination, RootIndex index) { ASM_CODE_COMMENT(this); if (root_array_available()) { mov(destination, RootAsOperand(index)); @@ -113,7 +113,7 @@ void TurboAssembler::LoadRoot(Register destination, RootIndex index) { mov(destination, Operand(destination, RootRegisterOffsetForRootIndex(index))); } -void TurboAssembler::CompareRoot(Register with, Register scratch, +void MacroAssembler::CompareRoot(Register with, Register scratch, RootIndex index) { ASM_CODE_COMMENT(this); if (root_array_available()) { @@ -126,7 +126,7 @@ void TurboAssembler::CompareRoot(Register with, Register scratch, } } -void TurboAssembler::CompareRoot(Register with, RootIndex index) { +void MacroAssembler::CompareRoot(Register with, RootIndex index) { ASM_CODE_COMMENT(this); if (root_array_available()) { cmp(with, RootAsOperand(index)); @@ -180,7 +180,7 @@ void MacroAssembler::JumpIfIsInRange(Register value, unsigned lower_limit, j(below_equal, on_in_range, near_jump); } -void TurboAssembler::PushArray(Register array, Register size, Register scratch, +void MacroAssembler::PushArray(Register array, Register size, Register scratch, PushArrayOrder order) { ASM_CODE_COMMENT(this); DCHECK(!AreAliased(array, size, scratch)); @@ 
-206,7 +206,7 @@ void TurboAssembler::PushArray(Register array, Register size, Register scratch, } } -Operand TurboAssembler::ExternalReferenceAsOperand(ExternalReference reference, +Operand MacroAssembler::ExternalReferenceAsOperand(ExternalReference reference, Register scratch) { if (root_array_available() && options().enable_root_relative_access) { intptr_t delta = @@ -233,8 +233,8 @@ Operand TurboAssembler::ExternalReferenceAsOperand(ExternalReference reference, } // TODO(v8:6666): If possible, refactor into a platform-independent function in -// TurboAssembler. -Operand TurboAssembler::ExternalReferenceAddressAsOperand( +// MacroAssembler. +Operand MacroAssembler::ExternalReferenceAddressAsOperand( ExternalReference reference) { DCHECK(root_array_available()); DCHECK(options().isolate_independent_code); @@ -244,8 +244,8 @@ Operand TurboAssembler::ExternalReferenceAddressAsOperand( } // TODO(v8:6666): If possible, refactor into a platform-independent function in -// TurboAssembler. -Operand TurboAssembler::HeapObjectAsOperand(Handle<HeapObject> object) { +// MacroAssembler. +Operand MacroAssembler::HeapObjectAsOperand(Handle<HeapObject> object) { DCHECK(root_array_available()); Builtin builtin; @@ -264,7 +264,7 @@ Operand TurboAssembler::HeapObjectAsOperand(Handle<HeapObject> object) { } } -void TurboAssembler::LoadFromConstantsTable(Register destination, +void MacroAssembler::LoadFromConstantsTable(Register destination, int constant_index) { ASM_CODE_COMMENT(this); DCHECK(RootsTable::IsImmortalImmovable(RootIndex::kBuiltinsConstantsTable)); @@ -273,7 +273,7 @@ void TurboAssembler::LoadFromConstantsTable(Register destination, FieldOperand(destination, FixedArray::OffsetOfElementAt(constant_index))); } -void TurboAssembler::LoadRootRegisterOffset(Register destination, +void MacroAssembler::LoadRootRegisterOffset(Register destination, intptr_t offset) { ASM_CODE_COMMENT(this); DCHECK(is_int32(offset)); @@ -285,13 +285,13 @@ void TurboAssembler::LoadRootRegisterOffset(Register destination, } } -void TurboAssembler::LoadRootRelative(Register destination, int32_t offset) { +void MacroAssembler::LoadRootRelative(Register destination, int32_t offset) { ASM_CODE_COMMENT(this); DCHECK(root_array_available()); mov(destination, Operand(kRootRegister, offset)); } -void TurboAssembler::LoadAddress(Register destination, +void MacroAssembler::LoadAddress(Register destination, ExternalReference source) { // TODO(jgruber): Add support for enable_root_relative_access.
if (root_array_available() && options().isolate_independent_code) { @@ -301,7 +301,7 @@ void TurboAssembler::LoadAddress(Register destination, mov(destination, Immediate(source)); } -int TurboAssembler::RequiredStackSizeForCallerSaved(SaveFPRegsMode fp_mode, +int MacroAssembler::RequiredStackSizeForCallerSaved(SaveFPRegsMode fp_mode, Register exclusion) const { int bytes = 0; RegList saved_regs = kCallerSaved - exclusion; @@ -315,7 +315,7 @@ int TurboAssembler::RequiredStackSizeForCallerSaved(SaveFPRegsMode fp_mode, return bytes; } -int TurboAssembler::PushCallerSaved(SaveFPRegsMode fp_mode, +int MacroAssembler::PushCallerSaved(SaveFPRegsMode fp_mode, Register exclusion) { ASM_CODE_COMMENT(this); // We don't allow a GC in a write barrier slow path so there is no need to @@ -346,7 +346,7 @@ int TurboAssembler::PushCallerSaved(SaveFPRegsMode fp_mode, return bytes; } -int TurboAssembler::PopCallerSaved(SaveFPRegsMode fp_mode, Register exclusion) { +int MacroAssembler::PopCallerSaved(SaveFPRegsMode fp_mode, Register exclusion) { ASM_CODE_COMMENT(this); int bytes = 0; if (fp_mode == SaveFPRegsMode::kSave) { @@ -412,19 +412,19 @@ void MacroAssembler::RecordWriteField(Register object, int offset, } } -void TurboAssembler::MaybeSaveRegisters(RegList registers) { +void MacroAssembler::MaybeSaveRegisters(RegList registers) { for (Register reg : registers) { push(reg); } } -void TurboAssembler::MaybeRestoreRegisters(RegList registers) { +void MacroAssembler::MaybeRestoreRegisters(RegList registers) { for (Register reg : base::Reversed(registers)) { pop(reg); } } -void TurboAssembler::CallEphemeronKeyBarrier(Register object, +void MacroAssembler::CallEphemeronKeyBarrier(Register object, Register slot_address, SaveFPRegsMode fp_mode) { ASM_CODE_COMMENT(this); @@ -449,7 +449,7 @@ void TurboAssembler::CallEphemeronKeyBarrier(Register object, MaybeRestoreRegisters(registers); } -void TurboAssembler::CallRecordWriteStubSaveRegisters(Register object, +void MacroAssembler::CallRecordWriteStubSaveRegisters(Register object, Register slot_address, SaveFPRegsMode fp_mode, StubCallMode mode) { @@ -473,7 +473,7 @@ void TurboAssembler::CallRecordWriteStubSaveRegisters(Register object, MaybeRestoreRegisters(registers); } -void TurboAssembler::CallRecordWriteStub(Register object, Register slot_address, +void MacroAssembler::CallRecordWriteStub(Register object, Register slot_address, SaveFPRegsMode fp_mode, StubCallMode mode) { ASM_CODE_COMMENT(this); @@ -547,17 +547,17 @@ void MacroAssembler::RecordWrite(Register object, Register slot_address, } } -void TurboAssembler::Cvtsi2ss(XMMRegister dst, Operand src) { +void MacroAssembler::Cvtsi2ss(XMMRegister dst, Operand src) { xorps(dst, dst); cvtsi2ss(dst, src); } -void TurboAssembler::Cvtsi2sd(XMMRegister dst, Operand src) { +void MacroAssembler::Cvtsi2sd(XMMRegister dst, Operand src) { xorpd(dst, dst); cvtsi2sd(dst, src); } -void TurboAssembler::Cvtui2ss(XMMRegister dst, Operand src, Register tmp) { +void MacroAssembler::Cvtui2ss(XMMRegister dst, Operand src, Register tmp) { Label done; Register src_reg = src.is_reg_only() ? 
src.reg() : tmp; if (src_reg == tmp) mov(tmp, src); @@ -578,7 +578,7 @@ void TurboAssembler::Cvtui2ss(XMMRegister dst, Operand src, Register tmp) { bind(&done); } -void TurboAssembler::Cvttss2ui(Register dst, Operand src, XMMRegister tmp) { +void MacroAssembler::Cvttss2ui(Register dst, Operand src, XMMRegister tmp) { Label done; cvttss2si(dst, src); test(dst, dst); @@ -590,7 +590,7 @@ void TurboAssembler::Cvttss2ui(Register dst, Operand src, XMMRegister tmp) { bind(&done); } -void TurboAssembler::Cvtui2sd(XMMRegister dst, Operand src, Register scratch) { +void MacroAssembler::Cvtui2sd(XMMRegister dst, Operand src, Register scratch) { Label done; cmp(src, Immediate(0)); ExternalReference uint32_bias = ExternalReference::address_of_uint32_bias(); @@ -600,14 +600,14 @@ void TurboAssembler::Cvtui2sd(XMMRegister dst, Operand src, Register scratch) { bind(&done); } -void TurboAssembler::Cvttsd2ui(Register dst, Operand src, XMMRegister tmp) { +void MacroAssembler::Cvttsd2ui(Register dst, Operand src, XMMRegister tmp) { Move(tmp, -2147483648.0); addsd(tmp, src); cvttsd2si(dst, tmp); add(dst, Immediate(0x80000000)); } -void TurboAssembler::ShlPair(Register high, Register low, uint8_t shift) { +void MacroAssembler::ShlPair(Register high, Register low, uint8_t shift) { DCHECK_GE(63, shift); if (shift >= 32) { mov(high, low); @@ -619,7 +619,7 @@ void TurboAssembler::ShlPair(Register high, Register low, uint8_t shift) { } } -void TurboAssembler::ShlPair_cl(Register high, Register low) { +void MacroAssembler::ShlPair_cl(Register high, Register low) { ASM_CODE_COMMENT(this); shld_cl(high, low); shl_cl(low); @@ -631,7 +631,7 @@ void TurboAssembler::ShlPair_cl(Register high, Register low) { bind(&done); } -void TurboAssembler::ShrPair(Register high, Register low, uint8_t shift) { +void MacroAssembler::ShrPair(Register high, Register low, uint8_t shift) { DCHECK_GE(63, shift); if (shift >= 32) { mov(low, high); @@ -643,7 +643,7 @@ void TurboAssembler::ShrPair(Register high, Register low, uint8_t shift) { } } -void TurboAssembler::ShrPair_cl(Register high, Register low) { +void MacroAssembler::ShrPair_cl(Register high, Register low) { ASM_CODE_COMMENT(this); shrd_cl(low, high); shr_cl(high); @@ -655,7 +655,7 @@ void TurboAssembler::ShrPair_cl(Register high, Register low) { bind(&done); } -void TurboAssembler::SarPair(Register high, Register low, uint8_t shift) { +void MacroAssembler::SarPair(Register high, Register low, uint8_t shift) { ASM_CODE_COMMENT(this); DCHECK_GE(63, shift); if (shift >= 32) { @@ -668,7 +668,7 @@ void TurboAssembler::SarPair(Register high, Register low, uint8_t shift) { } } -void TurboAssembler::SarPair_cl(Register high, Register low) { +void MacroAssembler::SarPair_cl(Register high, Register low) { ASM_CODE_COMMENT(this); shrd_cl(low, high); sar_cl(high); @@ -680,7 +680,7 @@ void TurboAssembler::SarPair_cl(Register high, Register low) { bind(&done); } -void TurboAssembler::LoadMap(Register destination, Register object) { +void MacroAssembler::LoadMap(Register destination, Register object) { mov(destination, FieldOperand(object, HeapObject::kMapOffset)); } @@ -979,23 +979,23 @@ void MacroAssembler::AssertNotSmi(Register object) { } } -void TurboAssembler::Assert(Condition cc, AbortReason reason) { +void MacroAssembler::Assert(Condition cc, AbortReason reason) { if (v8_flags.debug_code) Check(cc, reason); } -void TurboAssembler::AssertUnreachable(AbortReason reason) { +void MacroAssembler::AssertUnreachable(AbortReason reason) { if (v8_flags.debug_code) Abort(reason); } #endif // 
V8_ENABLE_DEBUG_CODE -void TurboAssembler::StubPrologue(StackFrame::Type type) { +void MacroAssembler::StubPrologue(StackFrame::Type type) { ASM_CODE_COMMENT(this); push(ebp); // Caller's frame pointer. mov(ebp, esp); push(Immediate(StackFrame::TypeToMarker(type))); } -void TurboAssembler::Prologue() { +void MacroAssembler::Prologue() { ASM_CODE_COMMENT(this); push(ebp); // Caller's frame pointer. mov(ebp, esp); @@ -1004,7 +1004,7 @@ void TurboAssembler::Prologue() { push(kJavaScriptCallArgCountRegister); // Actual argument count. } -void TurboAssembler::DropArguments(Register count, ArgumentsCountType type, +void MacroAssembler::DropArguments(Register count, ArgumentsCountType type, ArgumentsCountMode mode) { int receiver_bytes = (mode == kCountExcludesReceiver) ? kSystemPointerSize : 0; @@ -1034,7 +1034,7 @@ void TurboAssembler::DropArguments(Register count, ArgumentsCountType type, } } -void TurboAssembler::DropArguments(Register count, Register scratch, +void MacroAssembler::DropArguments(Register count, Register scratch, ArgumentsCountType type, ArgumentsCountMode mode) { DCHECK(!AreAliased(count, scratch)); @@ -1043,7 +1043,7 @@ void TurboAssembler::DropArguments(Register count, Register scratch, PushReturnAddressFrom(scratch); } -void TurboAssembler::DropArgumentsAndPushNewReceiver(Register argc, +void MacroAssembler::DropArgumentsAndPushNewReceiver(Register argc, Register receiver, Register scratch, ArgumentsCountType type, @@ -1055,7 +1055,7 @@ void TurboAssembler::DropArgumentsAndPushNewReceiver(Register argc, PushReturnAddressFrom(scratch); } -void TurboAssembler::DropArgumentsAndPushNewReceiver(Register argc, +void MacroAssembler::DropArgumentsAndPushNewReceiver(Register argc, Operand receiver, Register scratch, ArgumentsCountType type, @@ -1068,7 +1068,7 @@ void TurboAssembler::DropArgumentsAndPushNewReceiver(Register argc, PushReturnAddressFrom(scratch); } -void TurboAssembler::EnterFrame(StackFrame::Type type) { +void MacroAssembler::EnterFrame(StackFrame::Type type) { ASM_CODE_COMMENT(this); push(ebp); mov(ebp, esp); @@ -1080,7 +1080,7 @@ void TurboAssembler::EnterFrame(StackFrame::Type type) { #endif // V8_ENABLE_WEBASSEMBLY } -void TurboAssembler::LeaveFrame(StackFrame::Type type) { +void MacroAssembler::LeaveFrame(StackFrame::Type type) { ASM_CODE_COMMENT(this); if (v8_flags.debug_code && !StackFrame::IsJavaScript(type)) { cmp(Operand(ebp, CommonFrameConstants::kContextOrFrameTypeOffset), @@ -1091,7 +1091,7 @@ void TurboAssembler::LeaveFrame(StackFrame::Type type) { } #ifdef V8_OS_WIN -void TurboAssembler::AllocateStackSpace(Register bytes_scratch) { +void MacroAssembler::AllocateStackSpace(Register bytes_scratch) { ASM_CODE_COMMENT(this); // In windows, we cannot increment the stack size by more than one page // (minimum page size is 4KB) without accessing at least one byte on the @@ -1113,7 +1113,7 @@ void TurboAssembler::AllocateStackSpace(Register bytes_scratch) { sub(esp, bytes_scratch); } -void TurboAssembler::AllocateStackSpace(int bytes) { +void MacroAssembler::AllocateStackSpace(int bytes) { ASM_CODE_COMMENT(this); DCHECK_GE(bytes, 0); while (bytes >= kStackPageSize) { @@ -1332,10 +1332,10 @@ void MacroAssembler::CompareStackLimit(Register with, StackLimitKind kind) { kind == StackLimitKind::kRealStackLimit ? 
ExternalReference::address_of_real_jslimit(isolate) : ExternalReference::address_of_jslimit(isolate); - DCHECK(TurboAssembler::IsAddressableThroughRootRegister(isolate, limit)); + DCHECK(MacroAssembler::IsAddressableThroughRootRegister(isolate, limit)); intptr_t offset = - TurboAssembler::RootRegisterOffsetForExternalReference(isolate, limit); + MacroAssembler::RootRegisterOffsetForExternalReference(isolate, limit); cmp(with, Operand(kRootRegister, offset)); } @@ -1565,9 +1565,9 @@ void MacroAssembler::LoadNativeContextSlot(Register destination, int index) { mov(destination, Operand(destination, Context::SlotOffset(index))); } -void TurboAssembler::Ret() { ret(0); } +void MacroAssembler::Ret() { ret(0); } -void TurboAssembler::Ret(int bytes_dropped, Register scratch) { +void MacroAssembler::Ret(int bytes_dropped, Register scratch) { if (is_uint16(bytes_dropped)) { ret(bytes_dropped); } else { @@ -1578,7 +1578,7 @@ void TurboAssembler::Ret(int bytes_dropped, Register scratch) { } } -void TurboAssembler::Push(Immediate value) { +void MacroAssembler::Push(Immediate value) { if (root_array_available() && options().isolate_independent_code) { if (value.is_embedded_object()) { Push(HeapObjectAsOperand(value.embedded_object())); @@ -1597,13 +1597,13 @@ void MacroAssembler::Drop(int stack_elements) { } } -void TurboAssembler::Move(Register dst, Register src) { +void MacroAssembler::Move(Register dst, Register src) { if (dst != src) { mov(dst, src); } } -void TurboAssembler::Move(Register dst, const Immediate& src) { +void MacroAssembler::Move(Register dst, const Immediate& src) { if (!src.is_heap_number_request() && src.is_zero()) { xor_(dst, dst); // Shorter than mov of 32-bit immediate 0. } else if (src.is_external_reference()) { @@ -1613,7 +1613,7 @@ void TurboAssembler::Move(Register dst, const Immediate& src) { } } -void TurboAssembler::Move(Operand dst, const Immediate& src) { +void MacroAssembler::Move(Operand dst, const Immediate& src) { // Since there's no scratch register available, take a detour through the // stack. 
if (root_array_available() && options().isolate_independent_code) { @@ -1632,9 +1632,9 @@ void TurboAssembler::Move(Operand dst, const Immediate& src) { } } -void TurboAssembler::Move(Register dst, Operand src) { mov(dst, src); } +void MacroAssembler::Move(Register dst, Operand src) { mov(dst, src); } -void TurboAssembler::Move(Register dst, Handle<HeapObject> src) { +void MacroAssembler::Move(Register dst, Handle<HeapObject> src) { if (root_array_available() && options().isolate_independent_code) { IndirectLoadConstant(dst, src); return; @@ -1642,7 +1642,7 @@ void TurboAssembler::Move(Register dst, Handle<HeapObject> src) { mov(dst, src); } -void TurboAssembler::Move(XMMRegister dst, uint32_t src) { +void MacroAssembler::Move(XMMRegister dst, uint32_t src) { if (src == 0) { pxor(dst, dst); } else { @@ -1666,7 +1666,7 @@ void TurboAssembler::Move(XMMRegister dst, uint32_t src) { } } -void TurboAssembler::Move(XMMRegister dst, uint64_t src) { +void MacroAssembler::Move(XMMRegister dst, uint64_t src) { if (src == 0) { pxor(dst, dst); } else { @@ -1705,7 +1705,7 @@ void TurboAssembler::Move(XMMRegister dst, uint64_t src) { } } -void TurboAssembler::PextrdPreSse41(Register dst, XMMRegister src, +void MacroAssembler::PextrdPreSse41(Register dst, XMMRegister src, uint8_t imm8) { if (imm8 == 0) { Movd(dst, src); @@ -1721,7 +1721,7 @@ void TurboAssembler::PextrdPreSse41(Register dst, XMMRegister src, add(esp, Immediate(kDoubleSize)); } -void TurboAssembler::PinsrdPreSse41(XMMRegister dst, Operand src, uint8_t imm8, +void MacroAssembler::PinsrdPreSse41(XMMRegister dst, Operand src, uint8_t imm8, uint32_t* load_pc_offset) { // Without AVX or SSE, we can only have 64-bit values in xmm registers. // We don't have an xmm scratch register, so move the data via the stack. This @@ -1742,7 +1742,7 @@ void TurboAssembler::PinsrdPreSse41(XMMRegister dst, Operand src, uint8_t imm8, add(esp, Immediate(kDoubleSize)); } -void TurboAssembler::Lzcnt(Register dst, Operand src) { +void MacroAssembler::Lzcnt(Register dst, Operand src) { if (CpuFeatures::IsSupported(LZCNT)) { CpuFeatureScope scope(this, LZCNT); lzcnt(dst, src); @@ -1756,7 +1756,7 @@ void TurboAssembler::Lzcnt(Register dst, Operand src) { xor_(dst, Immediate(31)); // for x in [0..31], 31^x == 31-x.
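// (Worked check of the identity above, for illustration: src == 0x10 sets
// bit 4, so bsr writes 4, and 31 ^ 4 == 27, exactly the leading-zero count
// of 0x10 as a 32-bit value. The xor stands in for 31 - x because 31 is
// 0b11111, and xoring x in [0..31] against all ones flips its low five
// bits.)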
} -void TurboAssembler::Tzcnt(Register dst, Operand src) { +void MacroAssembler::Tzcnt(Register dst, Operand src) { if (CpuFeatures::IsSupported(BMI1)) { CpuFeatureScope scope(this, BMI1); tzcnt(dst, src); @@ -1769,7 +1769,7 @@ void TurboAssembler::Tzcnt(Register dst, Operand src) { bind(&not_zero_src); } -void TurboAssembler::Popcnt(Register dst, Operand src) { +void MacroAssembler::Popcnt(Register dst, Operand src) { if (CpuFeatures::IsSupported(POPCNT)) { CpuFeatureScope scope(this, POPCNT); popcnt(dst, src); @@ -1816,7 +1816,7 @@ void MacroAssembler::EmitDecrementCounter(StatsCounter* counter, int value, } } -void TurboAssembler::Check(Condition cc, AbortReason reason) { +void MacroAssembler::Check(Condition cc, AbortReason reason) { Label L; j(cc, &L); Abort(reason); @@ -1824,7 +1824,7 @@ void TurboAssembler::Check(Condition cc, AbortReason reason) { bind(&L); } -void TurboAssembler::CheckStackAlignment() { +void MacroAssembler::CheckStackAlignment() { ASM_CODE_COMMENT(this); int frame_alignment = base::OS::ActivationFrameAlignment(); int frame_alignment_mask = frame_alignment - 1; @@ -1839,7 +1839,7 @@ void TurboAssembler::CheckStackAlignment() { } } -void TurboAssembler::Abort(AbortReason reason) { +void MacroAssembler::Abort(AbortReason reason) { if (v8_flags.code_comments) { const char* msg = GetAbortReason(reason); RecordComment("Abort message: "); @@ -1882,7 +1882,7 @@ void TurboAssembler::Abort(AbortReason reason) { int3(); } -void TurboAssembler::PrepareCallCFunction(int num_arguments, Register scratch) { +void MacroAssembler::PrepareCallCFunction(int num_arguments, Register scratch) { ASM_CODE_COMMENT(this); int frame_alignment = base::OS::ActivationFrameAlignment(); if (frame_alignment != 0) { @@ -1898,14 +1898,14 @@ void TurboAssembler::PrepareCallCFunction(int num_arguments, Register scratch) { } } -void TurboAssembler::CallCFunction(ExternalReference function, +void MacroAssembler::CallCFunction(ExternalReference function, int num_arguments) { // Trashing eax is ok as it will be the return value. Move(eax, Immediate(function)); CallCFunction(eax, num_arguments); } -void TurboAssembler::CallCFunction(Register function, int num_arguments) { +void MacroAssembler::CallCFunction(Register function, int num_arguments) { ASM_CODE_COMMENT(this); DCHECK_LE(num_arguments, kMaxCParameters); DCHECK(has_frame()); @@ -1956,7 +1956,7 @@ void TurboAssembler::CallCFunction(Register function, int num_arguments) { } } -void TurboAssembler::PushPC() { +void MacroAssembler::PushPC() { // Push the current PC onto the stack as "return address" via calling // the next instruction.
Label get_pc; @@ -1964,7 +1964,7 @@ bind(&get_pc); } -void TurboAssembler::Call(Handle<Code> code_object, RelocInfo::Mode rmode) { +void MacroAssembler::Call(Handle<Code> code_object, RelocInfo::Mode rmode) { ASM_CODE_COMMENT(this); DCHECK_IMPLIES(options().isolate_independent_code, Builtins::IsIsolateIndependentBuiltin(*code_object)); @@ -1977,7 +1977,7 @@ void TurboAssembler::Call(Handle<Code> code_object, RelocInfo::Mode rmode) { call(code_object, rmode); } -void TurboAssembler::LoadEntryFromBuiltinIndex(Register builtin_index) { +void MacroAssembler::LoadEntryFromBuiltinIndex(Register builtin_index) { ASM_CODE_COMMENT(this); static_assert(kSystemPointerSize == 4); static_assert(kSmiShiftSize == 0); @@ -1993,13 +1993,13 @@ void TurboAssembler::LoadEntryFromBuiltinIndex(Register builtin_index) { IsolateData::builtin_entry_table_offset())); } -void TurboAssembler::CallBuiltinByIndex(Register builtin_index) { +void MacroAssembler::CallBuiltinByIndex(Register builtin_index) { ASM_CODE_COMMENT(this); LoadEntryFromBuiltinIndex(builtin_index); call(builtin_index); } -void TurboAssembler::CallBuiltin(Builtin builtin) { +void MacroAssembler::CallBuiltin(Builtin builtin) { ASM_CODE_COMMENT_STRING(this, CommentForOffHeapTrampoline("call", builtin)); switch (options().builtin_call_jump_mode) { case BuiltinCallJumpMode::kAbsolute: { @@ -2019,7 +2019,7 @@ void TurboAssembler::CallBuiltin(Builtin builtin) { } } -void TurboAssembler::TailCallBuiltin(Builtin builtin) { +void MacroAssembler::TailCallBuiltin(Builtin builtin) { ASM_CODE_COMMENT_STRING(this, CommentForOffHeapTrampoline("tail call", builtin)); switch (options().builtin_call_jump_mode) { @@ -2040,17 +2040,17 @@ void TurboAssembler::TailCallBuiltin(Builtin builtin) { } } -Operand TurboAssembler::EntryFromBuiltinAsOperand(Builtin builtin) { +Operand MacroAssembler::EntryFromBuiltinAsOperand(Builtin builtin) { ASM_CODE_COMMENT(this); return Operand(kRootRegister, IsolateData::BuiltinEntrySlotOffset(builtin)); } -void TurboAssembler::LoadCodeEntry(Register destination, Register code_object) { +void MacroAssembler::LoadCodeEntry(Register destination, Register code_object) { ASM_CODE_COMMENT(this); mov(destination, FieldOperand(code_object, Code::kCodeEntryPointOffset)); } -void TurboAssembler::LoadCodeInstructionStreamNonBuiltin(Register destination, +void MacroAssembler::LoadCodeInstructionStreamNonBuiltin(Register destination, Register code_object) { ASM_CODE_COMMENT(this); // Compute the InstructionStream object pointer from the code entry point.
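// (Sketch of the address arithmetic, with the mov line assumed from the
// elided context; only the sub appears in the hunk below. The entry point
// addresses the first instruction, which sits InstructionStream::kHeaderSize
// bytes past the start of the InstructionStream object, so
//   mov(destination, FieldOperand(code_object, Code::kCodeEntryPointOffset));
//   sub(destination, Immediate(InstructionStream::kHeaderSize - kHeapObjectTag));
// leaves a tagged InstructionStream pointer in destination.)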
@@ -2058,12 +2058,12 @@ void TurboAssembler::LoadCodeInstructionStreamNonBuiltin(Register destination, sub(destination, Immediate(InstructionStream::kHeaderSize - kHeapObjectTag)); } -void TurboAssembler::CallCodeObject(Register code_object) { +void MacroAssembler::CallCodeObject(Register code_object) { LoadCodeEntry(code_object, code_object); call(code_object); } -void TurboAssembler::JumpCodeObject(Register code_object, JumpMode jump_mode) { +void MacroAssembler::JumpCodeObject(Register code_object, JumpMode jump_mode) { LoadCodeEntry(code_object, code_object); switch (jump_mode) { case JumpMode::kJump: @@ -2076,13 +2076,13 @@ void TurboAssembler::JumpCodeObject(Register code_object, JumpMode jump_mode) { } } -void TurboAssembler::Jump(const ExternalReference& reference) { +void MacroAssembler::Jump(const ExternalReference& reference) { DCHECK(root_array_available()); jmp(Operand(kRootRegister, RootRegisterOffsetForExternalReferenceTableEntry( isolate(), reference))); } -void TurboAssembler::Jump(Handle<Code> code_object, RelocInfo::Mode rmode) { +void MacroAssembler::Jump(Handle<Code> code_object, RelocInfo::Mode rmode) { DCHECK_IMPLIES(options().isolate_independent_code, Builtins::IsIsolateIndependentBuiltin(*code_object)); Builtin builtin = Builtin::kNoBuiltinId; @@ -2094,7 +2094,7 @@ void TurboAssembler::Jump(Handle<Code> code_object, RelocInfo::Mode rmode) { jmp(code_object, rmode); } -void TurboAssembler::CheckPageFlag(Register object, Register scratch, int mask, +void MacroAssembler::CheckPageFlag(Register object, Register scratch, int mask, Condition cc, Label* condition_met, Label::Distance condition_met_distance) { ASM_CODE_COMMENT(this); @@ -2113,7 +2113,7 @@ void TurboAssembler::CheckPageFlag(Register object, Register scratch, int mask, j(cc, condition_met, condition_met_distance); } -void TurboAssembler::ComputeCodeStartAddress(Register dst) { +void MacroAssembler::ComputeCodeStartAddress(Register dst) { ASM_CODE_COMMENT(this); // In order to get the address of the current instruction, we first need // to use a call and then use a pop, thus pushing the return address to @@ -2128,7 +2128,7 @@ void TurboAssembler::ComputeCodeStartAddress(Register dst) { } } -void TurboAssembler::CallForDeoptimization(Builtin target, int, Label* exit, +void MacroAssembler::CallForDeoptimization(Builtin target, int, Label* exit, DeoptimizeKind kind, Label* ret, Label*) { ASM_CODE_COMMENT(this); @@ -2138,8 +2138,8 @@ void TurboAssembler::CallForDeoptimization(Builtin target, int, Label* exit, : Deoptimizer::kEagerDeoptExitSize); } -void TurboAssembler::Trap() { int3(); } -void TurboAssembler::DebugBreak() { int3(); } +void MacroAssembler::Trap() { int3(); } +void MacroAssembler::DebugBreak() { int3(); } } // namespace internal } // namespace v8 diff --git a/src/codegen/ia32/macro-assembler-ia32.h b/src/codegen/ia32/macro-assembler-ia32.h index 485ed210fa..273eea23ff 100644 --- a/src/codegen/ia32/macro-assembler-ia32.h +++ b/src/codegen/ia32/macro-assembler-ia32.h @@ -21,10 +21,10 @@ #include "src/codegen/ia32/assembler-ia32.h" #include "src/codegen/ia32/register-ia32.h" #include "src/codegen/label.h" +#include "src/codegen/macro-assembler-base.h" #include "src/codegen/reglist.h" #include "src/codegen/reloc-info.h" #include "src/codegen/shared-ia32-x64/macro-assembler-shared-ia32-x64.h" -#include "src/codegen/turbo-assembler.h" #include "src/common/globals.h" #include "src/execution/frames.h" #include "src/handles/handles.h" @@ -68,10 +68,10 @@ class StackArgumentsAccessor {
DISALLOW_IMPLICIT_CONSTRUCTORS(StackArgumentsAccessor); }; -class V8_EXPORT_PRIVATE TurboAssembler - : public SharedTurboAssemblerBase<TurboAssembler> { +class V8_EXPORT_PRIVATE MacroAssembler + : public SharedMacroAssembler<MacroAssembler> { public: - using SharedTurboAssemblerBase<TurboAssembler>::SharedTurboAssemblerBase; + using SharedMacroAssembler<MacroAssembler>::SharedMacroAssembler; void CheckPageFlag(Register object, Register scratch, int mask, Condition cc, Label* condition_met, @@ -411,17 +411,6 @@ class V8_EXPORT_PRIVATE TurboAssembler // Define an exception handler and bind a label. void BindExceptionHandler(Label* label) { bind(label); } - protected: - // Drops arguments assuming that the return address was already popped. - void DropArguments(Register count, ArgumentsCountType type = kCountIsInteger, - ArgumentsCountMode mode = kCountExcludesReceiver); -}; - -// MacroAssembler implements a collection of frequently used macros. -class V8_EXPORT_PRIVATE MacroAssembler : public TurboAssembler { - public: - using TurboAssembler::TurboAssembler; - void PushRoot(RootIndex index); // Compare the object in a register to a value and jump if they are equal. @@ -671,6 +660,11 @@ class V8_EXPORT_PRIVATE MacroAssembler : public TurboAssembler { void StackOverflowCheck(Register num_args, Register scratch, Label* stack_overflow, bool include_receiver = false); + protected: + // Drops arguments assuming that the return address was already popped. + void DropArguments(Register count, ArgumentsCountType type = kCountIsInteger, + ArgumentsCountMode mode = kCountExcludesReceiver); + private: // Helper functions for generating invokes. void InvokePrologue(Register expected_parameter_count, diff --git a/src/codegen/loong64/macro-assembler-loong64.cc b/src/codegen/loong64/macro-assembler-loong64.cc index f4625dd5a8..3795e9ccac 100644 --- a/src/codegen/loong64/macro-assembler-loong64.cc +++ b/src/codegen/loong64/macro-assembler-loong64.cc @@ -48,7 +48,7 @@ static inline bool IsZero(const Operand& rk) { } } -int TurboAssembler::RequiredStackSizeForCallerSaved(SaveFPRegsMode fp_mode, +int MacroAssembler::RequiredStackSizeForCallerSaved(SaveFPRegsMode fp_mode, Register exclusion1, Register exclusion2, Register exclusion3) const { @@ -65,7 +65,7 @@ int TurboAssembler::RequiredStackSizeForCallerSaved(SaveFPRegsMode fp_mode, return bytes; } -int TurboAssembler::PushCallerSaved(SaveFPRegsMode fp_mode, Register exclusion1, +int MacroAssembler::PushCallerSaved(SaveFPRegsMode fp_mode, Register exclusion1, Register exclusion2, Register exclusion3) { ASM_CODE_COMMENT(this); int bytes = 0; @@ -83,7 +83,7 @@ int TurboAssembler::PushCallerSaved(SaveFPRegsMode fp_mode, Register exclusion1, return bytes; } -int TurboAssembler::PopCallerSaved(SaveFPRegsMode fp_mode, Register exclusion1, +int MacroAssembler::PopCallerSaved(SaveFPRegsMode fp_mode, Register exclusion1, Register exclusion2, Register exclusion3) { ASM_CODE_COMMENT(this); int bytes = 0; @@ -100,11 +100,11 @@ int TurboAssembler::PopCallerSaved(SaveFPRegsMode fp_mode, Register exclusion1, return bytes; } -void TurboAssembler::LoadRoot(Register destination, RootIndex index) { +void MacroAssembler::LoadRoot(Register destination, RootIndex index) { Ld_d(destination, MemOperand(s6, RootRegisterOffsetForRootIndex(index))); } -void TurboAssembler::PushCommonFrame(Register marker_reg) { +void MacroAssembler::PushCommonFrame(Register marker_reg) { if (marker_reg.is_valid()) { Push(ra, fp, marker_reg); Add_d(fp, sp, Operand(kPointerSize)); @@ -114,7 +114,7 @@ void TurboAssembler::PushCommonFrame(Register marker_reg) { } } -void
TurboAssembler::PushStandardFrame(Register function_reg) { +void MacroAssembler::PushStandardFrame(Register function_reg) { int offset = -StandardFrameConstants::kContextOffset; if (function_reg.is_valid()) { Push(ra, fp, cp, function_reg, kJavaScriptCallArgCountRegister); @@ -165,17 +165,17 @@ void MacroAssembler::RecordWriteField(Register object, int offset, bind(&done); } -void TurboAssembler::MaybeSaveRegisters(RegList registers) { +void MacroAssembler::MaybeSaveRegisters(RegList registers) { if (registers.is_empty()) return; MultiPush(registers); } -void TurboAssembler::MaybeRestoreRegisters(RegList registers) { +void MacroAssembler::MaybeRestoreRegisters(RegList registers) { if (registers.is_empty()) return; MultiPop(registers); } -void TurboAssembler::CallEphemeronKeyBarrier(Register object, Operand offset, +void MacroAssembler::CallEphemeronKeyBarrier(Register object, Operand offset, SaveFPRegsMode fp_mode) { ASM_CODE_COMMENT(this); RegList registers = WriteBarrierDescriptor::ComputeSavedRegisters(object); @@ -193,7 +193,7 @@ void TurboAssembler::CallEphemeronKeyBarrier(Register object, Operand offset, MaybeRestoreRegisters(registers); } -void TurboAssembler::CallRecordWriteStubSaveRegisters(Register object, +void MacroAssembler::CallRecordWriteStubSaveRegisters(Register object, Operand offset, SaveFPRegsMode fp_mode, StubCallMode mode) { @@ -212,7 +212,7 @@ void TurboAssembler::CallRecordWriteStubSaveRegisters(Register object, MaybeRestoreRegisters(registers); } -void TurboAssembler::CallRecordWriteStub(Register object, Register slot_address, +void MacroAssembler::CallRecordWriteStub(Register object, Register slot_address, SaveFPRegsMode fp_mode, StubCallMode mode) { // Use CallRecordWriteStubSaveRegisters if the object and slot registers @@ -232,7 +232,7 @@ void TurboAssembler::CallRecordWriteStub(Register object, Register slot_address, } } -void TurboAssembler::MoveObjectAndSlot(Register dst_object, Register dst_slot, +void MacroAssembler::MoveObjectAndSlot(Register dst_object, Register dst_slot, Register object, Operand offset) { ASM_CODE_COMMENT(this); DCHECK_NE(dst_object, dst_slot); @@ -325,7 +325,7 @@ void MacroAssembler::RecordWrite(Register object, Operand offset, // --------------------------------------------------------------------------- // Instruction macros. 
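// (Shared shape of the macros below, sketched for orientation; "Op"/"opi"
// are placeholder mnemonics, not real instructions, and this block is not a
// hunk from the CL:
//   void MacroAssembler::Op_d(Register rd, Register rj, const Operand& rk) {
//     if (rk.is_reg()) {
//       op_d(rd, rj, rk.rm());               // register operand
//     } else if (is_int12(rk.immediate()) && !MustUseReg(rk.rmode())) {
//       opi_d(rd, rj, static_cast<int32_t>(rk.immediate()));  // small imm
//     } else {                               // wide immediate: materialize
//       UseScratchRegisterScope temps(this);
//       Register scratch = temps.Acquire();
//       li(scratch, rk);
//       op_d(rd, rj, scratch);
//     }
//   }
// The hunks that follow only rename TurboAssembler to MacroAssembler; this
// dispatch logic is unchanged.)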
-void TurboAssembler::Add_w(Register rd, Register rj, const Operand& rk) { +void MacroAssembler::Add_w(Register rd, Register rj, const Operand& rk) { if (rk.is_reg()) { add_w(rd, rj, rk.rm()); } else { @@ -342,7 +342,7 @@ void TurboAssembler::Add_w(Register rd, Register rj, const Operand& rk) { } } -void TurboAssembler::Add_d(Register rd, Register rj, const Operand& rk) { +void MacroAssembler::Add_d(Register rd, Register rj, const Operand& rk) { if (rk.is_reg()) { add_d(rd, rj, rk.rm()); } else { @@ -359,7 +359,7 @@ void TurboAssembler::Add_d(Register rd, Register rj, const Operand& rk) { } } -void TurboAssembler::Sub_w(Register rd, Register rj, const Operand& rk) { +void MacroAssembler::Sub_w(Register rd, Register rj, const Operand& rk) { if (rk.is_reg()) { sub_w(rd, rj, rk.rm()); } else { @@ -384,7 +384,7 @@ void TurboAssembler::Sub_w(Register rd, Register rj, const Operand& rk) { } } -void TurboAssembler::Sub_d(Register rd, Register rj, const Operand& rk) { +void MacroAssembler::Sub_d(Register rd, Register rj, const Operand& rk) { if (rk.is_reg()) { sub_d(rd, rj, rk.rm()); } else if (is_int12(-rk.immediate()) && !MustUseReg(rk.rmode())) { @@ -411,7 +411,7 @@ void TurboAssembler::Sub_d(Register rd, Register rj, const Operand& rk) { } } -void TurboAssembler::Mul_w(Register rd, Register rj, const Operand& rk) { +void MacroAssembler::Mul_w(Register rd, Register rj, const Operand& rk) { if (rk.is_reg()) { mul_w(rd, rj, rk.rm()); } else { @@ -424,7 +424,7 @@ void TurboAssembler::Mul_w(Register rd, Register rj, const Operand& rk) { } } -void TurboAssembler::Mulh_w(Register rd, Register rj, const Operand& rk) { +void MacroAssembler::Mulh_w(Register rd, Register rj, const Operand& rk) { if (rk.is_reg()) { mulh_w(rd, rj, rk.rm()); } else { @@ -437,7 +437,7 @@ void TurboAssembler::Mulh_w(Register rd, Register rj, const Operand& rk) { } } -void TurboAssembler::Mulh_wu(Register rd, Register rj, const Operand& rk) { +void MacroAssembler::Mulh_wu(Register rd, Register rj, const Operand& rk) { if (rk.is_reg()) { mulh_wu(rd, rj, rk.rm()); } else { @@ -450,7 +450,7 @@ void TurboAssembler::Mulh_wu(Register rd, Register rj, const Operand& rk) { } } -void TurboAssembler::Mul_d(Register rd, Register rj, const Operand& rk) { +void MacroAssembler::Mul_d(Register rd, Register rj, const Operand& rk) { if (rk.is_reg()) { mul_d(rd, rj, rk.rm()); } else { @@ -463,7 +463,7 @@ void TurboAssembler::Mul_d(Register rd, Register rj, const Operand& rk) { } } -void TurboAssembler::Mulh_d(Register rd, Register rj, const Operand& rk) { +void MacroAssembler::Mulh_d(Register rd, Register rj, const Operand& rk) { if (rk.is_reg()) { mulh_d(rd, rj, rk.rm()); } else { @@ -476,7 +476,7 @@ void TurboAssembler::Mulh_d(Register rd, Register rj, const Operand& rk) { } } -void TurboAssembler::Mulh_du(Register rd, Register rj, const Operand& rk) { +void MacroAssembler::Mulh_du(Register rd, Register rj, const Operand& rk) { if (rk.is_reg()) { mulh_du(rd, rj, rk.rm()); } else { @@ -489,7 +489,7 @@ void TurboAssembler::Mulh_du(Register rd, Register rj, const Operand& rk) { } } -void TurboAssembler::Div_w(Register rd, Register rj, const Operand& rk) { +void MacroAssembler::Div_w(Register rd, Register rj, const Operand& rk) { if (rk.is_reg()) { div_w(rd, rj, rk.rm()); } else { @@ -502,7 +502,7 @@ void TurboAssembler::Div_w(Register rd, Register rj, const Operand& rk) { } } -void TurboAssembler::Mod_w(Register rd, Register rj, const Operand& rk) { +void MacroAssembler::Mod_w(Register rd, Register rj, const Operand& rk) { if (rk.is_reg()) { 
mod_w(rd, rj, rk.rm()); } else { @@ -515,7 +515,7 @@ void TurboAssembler::Mod_w(Register rd, Register rj, const Operand& rk) { } } -void TurboAssembler::Mod_wu(Register rd, Register rj, const Operand& rk) { +void MacroAssembler::Mod_wu(Register rd, Register rj, const Operand& rk) { if (rk.is_reg()) { mod_wu(rd, rj, rk.rm()); } else { @@ -528,7 +528,7 @@ void TurboAssembler::Mod_wu(Register rd, Register rj, const Operand& rk) { } } -void TurboAssembler::Div_d(Register rd, Register rj, const Operand& rk) { +void MacroAssembler::Div_d(Register rd, Register rj, const Operand& rk) { if (rk.is_reg()) { div_d(rd, rj, rk.rm()); } else { @@ -541,7 +541,7 @@ void TurboAssembler::Div_d(Register rd, Register rj, const Operand& rk) { } } -void TurboAssembler::Div_wu(Register rd, Register rj, const Operand& rk) { +void MacroAssembler::Div_wu(Register rd, Register rj, const Operand& rk) { if (rk.is_reg()) { div_wu(rd, rj, rk.rm()); } else { @@ -554,7 +554,7 @@ void TurboAssembler::Div_wu(Register rd, Register rj, const Operand& rk) { } } -void TurboAssembler::Div_du(Register rd, Register rj, const Operand& rk) { +void MacroAssembler::Div_du(Register rd, Register rj, const Operand& rk) { if (rk.is_reg()) { div_du(rd, rj, rk.rm()); } else { @@ -567,7 +567,7 @@ void TurboAssembler::Div_du(Register rd, Register rj, const Operand& rk) { } } -void TurboAssembler::Mod_d(Register rd, Register rj, const Operand& rk) { +void MacroAssembler::Mod_d(Register rd, Register rj, const Operand& rk) { if (rk.is_reg()) { mod_d(rd, rj, rk.rm()); } else { @@ -580,7 +580,7 @@ void TurboAssembler::Mod_d(Register rd, Register rj, const Operand& rk) { } } -void TurboAssembler::Mod_du(Register rd, Register rj, const Operand& rk) { +void MacroAssembler::Mod_du(Register rd, Register rj, const Operand& rk) { if (rk.is_reg()) { mod_du(rd, rj, rk.rm()); } else { @@ -593,7 +593,7 @@ void TurboAssembler::Mod_du(Register rd, Register rj, const Operand& rk) { } } -void TurboAssembler::And(Register rd, Register rj, const Operand& rk) { +void MacroAssembler::And(Register rd, Register rj, const Operand& rk) { if (rk.is_reg()) { and_(rd, rj, rk.rm()); } else { @@ -610,7 +610,7 @@ void TurboAssembler::And(Register rd, Register rj, const Operand& rk) { } } -void TurboAssembler::Or(Register rd, Register rj, const Operand& rk) { +void MacroAssembler::Or(Register rd, Register rj, const Operand& rk) { if (rk.is_reg()) { or_(rd, rj, rk.rm()); } else { @@ -627,7 +627,7 @@ void TurboAssembler::Or(Register rd, Register rj, const Operand& rk) { } } -void TurboAssembler::Xor(Register rd, Register rj, const Operand& rk) { +void MacroAssembler::Xor(Register rd, Register rj, const Operand& rk) { if (rk.is_reg()) { xor_(rd, rj, rk.rm()); } else { @@ -644,7 +644,7 @@ void TurboAssembler::Xor(Register rd, Register rj, const Operand& rk) { } } -void TurboAssembler::Nor(Register rd, Register rj, const Operand& rk) { +void MacroAssembler::Nor(Register rd, Register rj, const Operand& rk) { if (rk.is_reg()) { nor(rd, rj, rk.rm()); } else { @@ -657,7 +657,7 @@ void TurboAssembler::Nor(Register rd, Register rj, const Operand& rk) { } } -void TurboAssembler::Andn(Register rd, Register rj, const Operand& rk) { +void MacroAssembler::Andn(Register rd, Register rj, const Operand& rk) { if (rk.is_reg()) { andn(rd, rj, rk.rm()); } else { @@ -670,7 +670,7 @@ void TurboAssembler::Andn(Register rd, Register rj, const Operand& rk) { } } -void TurboAssembler::Orn(Register rd, Register rj, const Operand& rk) { +void MacroAssembler::Orn(Register rd, Register rj, const Operand& rk) 
{ if (rk.is_reg()) { orn(rd, rj, rk.rm()); } else { @@ -683,12 +683,12 @@ void TurboAssembler::Orn(Register rd, Register rj, const Operand& rk) { } } -void TurboAssembler::Neg(Register rj, const Operand& rk) { +void MacroAssembler::Neg(Register rj, const Operand& rk) { DCHECK(rk.is_reg()); sub_d(rj, zero_reg, rk.rm()); } -void TurboAssembler::Slt(Register rd, Register rj, const Operand& rk) { +void MacroAssembler::Slt(Register rd, Register rj, const Operand& rk) { if (rk.is_reg()) { slt(rd, rj, rk.rm()); } else { @@ -706,7 +706,7 @@ void TurboAssembler::Slt(Register rd, Register rj, const Operand& rk) { } } -void TurboAssembler::Sltu(Register rd, Register rj, const Operand& rk) { +void MacroAssembler::Sltu(Register rd, Register rj, const Operand& rk) { if (rk.is_reg()) { sltu(rd, rj, rk.rm()); } else { @@ -724,7 +724,7 @@ void TurboAssembler::Sltu(Register rd, Register rj, const Operand& rk) { } } -void TurboAssembler::Sle(Register rd, Register rj, const Operand& rk) { +void MacroAssembler::Sle(Register rd, Register rj, const Operand& rk) { if (rk.is_reg()) { slt(rd, rk.rm(), rj); } else { @@ -739,7 +739,7 @@ void TurboAssembler::Sle(Register rd, Register rj, const Operand& rk) { xori(rd, rd, 1); } -void TurboAssembler::Sleu(Register rd, Register rj, const Operand& rk) { +void MacroAssembler::Sleu(Register rd, Register rj, const Operand& rk) { if (rk.is_reg()) { sltu(rd, rk.rm(), rj); } else { @@ -754,17 +754,17 @@ void TurboAssembler::Sleu(Register rd, Register rj, const Operand& rk) { xori(rd, rd, 1); } -void TurboAssembler::Sge(Register rd, Register rj, const Operand& rk) { +void MacroAssembler::Sge(Register rd, Register rj, const Operand& rk) { Slt(rd, rj, rk); xori(rd, rd, 1); } -void TurboAssembler::Sgeu(Register rd, Register rj, const Operand& rk) { +void MacroAssembler::Sgeu(Register rd, Register rj, const Operand& rk) { Sltu(rd, rj, rk); xori(rd, rd, 1); } -void TurboAssembler::Sgt(Register rd, Register rj, const Operand& rk) { +void MacroAssembler::Sgt(Register rd, Register rj, const Operand& rk) { if (rk.is_reg()) { slt(rd, rk.rm(), rj); } else { @@ -778,7 +778,7 @@ void TurboAssembler::Sgt(Register rd, Register rj, const Operand& rk) { } } -void TurboAssembler::Sgtu(Register rd, Register rj, const Operand& rk) { +void MacroAssembler::Sgtu(Register rd, Register rj, const Operand& rk) { if (rk.is_reg()) { sltu(rd, rk.rm(), rj); } else { @@ -792,7 +792,7 @@ void TurboAssembler::Sgtu(Register rd, Register rj, const Operand& rk) { } } -void TurboAssembler::Rotr_w(Register rd, Register rj, const Operand& rk) { +void MacroAssembler::Rotr_w(Register rd, Register rj, const Operand& rk) { if (rk.is_reg()) { rotr_w(rd, rj, rk.rm()); } else { @@ -804,7 +804,7 @@ void TurboAssembler::Rotr_w(Register rd, Register rj, const Operand& rk) { } } -void TurboAssembler::Rotr_d(Register rd, Register rj, const Operand& rk) { +void MacroAssembler::Rotr_d(Register rd, Register rj, const Operand& rk) { if (rk.is_reg()) { rotr_d(rd, rj, rk.rm()); } else { @@ -814,7 +814,7 @@ void TurboAssembler::Rotr_d(Register rd, Register rj, const Operand& rk) { } } -void TurboAssembler::Alsl_w(Register rd, Register rj, Register rk, uint8_t sa, +void MacroAssembler::Alsl_w(Register rd, Register rj, Register rk, uint8_t sa, Register scratch) { DCHECK(sa >= 1 && sa <= 31); if (sa <= 4) { @@ -827,7 +827,7 @@ void TurboAssembler::Alsl_w(Register rd, Register rj, Register rk, uint8_t sa, } } -void TurboAssembler::Alsl_d(Register rd, Register rj, Register rk, uint8_t sa, +void MacroAssembler::Alsl_d(Register rd, Register 
rj, Register rk, uint8_t sa, Register scratch) { DCHECK(sa >= 1 && sa <= 63); if (sa <= 4) { @@ -843,7 +843,7 @@ void TurboAssembler::Alsl_d(Register rd, Register rj, Register rk, uint8_t sa, // ------------Pseudo-instructions------------- // Change endianness -void TurboAssembler::ByteSwapSigned(Register dest, Register src, +void MacroAssembler::ByteSwapSigned(Register dest, Register src, int operand_size) { DCHECK(operand_size == 2 || operand_size == 4 || operand_size == 8); if (operand_size == 2) { @@ -857,7 +857,7 @@ void TurboAssembler::ByteSwapSigned(Register dest, Register src, } } -void TurboAssembler::ByteSwapUnsigned(Register dest, Register src, +void MacroAssembler::ByteSwapUnsigned(Register dest, Register src, int operand_size) { DCHECK(operand_size == 2 || operand_size == 4); if (operand_size == 2) { @@ -869,7 +869,7 @@ void TurboAssembler::ByteSwapUnsigned(Register dest, Register src, } } -void TurboAssembler::Ld_b(Register rd, const MemOperand& rj) { +void MacroAssembler::Ld_b(Register rd, const MemOperand& rj) { MemOperand source = rj; AdjustBaseAndOffset(&source); if (source.hasIndexReg()) { @@ -879,7 +879,7 @@ void TurboAssembler::Ld_b(Register rd, const MemOperand& rj) { } } -void TurboAssembler::Ld_bu(Register rd, const MemOperand& rj) { +void MacroAssembler::Ld_bu(Register rd, const MemOperand& rj) { MemOperand source = rj; AdjustBaseAndOffset(&source); if (source.hasIndexReg()) { @@ -889,7 +889,7 @@ void TurboAssembler::Ld_bu(Register rd, const MemOperand& rj) { } } -void TurboAssembler::St_b(Register rd, const MemOperand& rj) { +void MacroAssembler::St_b(Register rd, const MemOperand& rj) { MemOperand source = rj; AdjustBaseAndOffset(&source); if (source.hasIndexReg()) { @@ -899,7 +899,7 @@ void TurboAssembler::St_b(Register rd, const MemOperand& rj) { } } -void TurboAssembler::Ld_h(Register rd, const MemOperand& rj) { +void MacroAssembler::Ld_h(Register rd, const MemOperand& rj) { MemOperand source = rj; AdjustBaseAndOffset(&source); if (source.hasIndexReg()) { @@ -909,7 +909,7 @@ void TurboAssembler::Ld_h(Register rd, const MemOperand& rj) { } } -void TurboAssembler::Ld_hu(Register rd, const MemOperand& rj) { +void MacroAssembler::Ld_hu(Register rd, const MemOperand& rj) { MemOperand source = rj; AdjustBaseAndOffset(&source); if (source.hasIndexReg()) { @@ -919,7 +919,7 @@ void TurboAssembler::Ld_hu(Register rd, const MemOperand& rj) { } } -void TurboAssembler::St_h(Register rd, const MemOperand& rj) { +void MacroAssembler::St_h(Register rd, const MemOperand& rj) { MemOperand source = rj; AdjustBaseAndOffset(&source); if (source.hasIndexReg()) { @@ -929,7 +929,7 @@ void TurboAssembler::St_h(Register rd, const MemOperand& rj) { } } -void TurboAssembler::Ld_w(Register rd, const MemOperand& rj) { +void MacroAssembler::Ld_w(Register rd, const MemOperand& rj) { MemOperand source = rj; if (!(source.hasIndexReg()) && is_int16(source.offset()) && @@ -946,7 +946,7 @@ void TurboAssembler::Ld_w(Register rd, const MemOperand& rj) { } } -void TurboAssembler::Ld_wu(Register rd, const MemOperand& rj) { +void MacroAssembler::Ld_wu(Register rd, const MemOperand& rj) { MemOperand source = rj; AdjustBaseAndOffset(&source); if (source.hasIndexReg()) { @@ -956,7 +956,7 @@ void TurboAssembler::Ld_wu(Register rd, const MemOperand& rj) { } } -void TurboAssembler::St_w(Register rd, const MemOperand& rj) { +void MacroAssembler::St_w(Register rd, const MemOperand& rj) { MemOperand source = rj; if (!(source.hasIndexReg()) && is_int16(source.offset()) && @@ -973,7 +973,7 @@ void 
TurboAssembler::St_w(Register rd, const MemOperand& rj) { } } -void TurboAssembler::Ld_d(Register rd, const MemOperand& rj) { +void MacroAssembler::Ld_d(Register rd, const MemOperand& rj) { MemOperand source = rj; if (!(source.hasIndexReg()) && is_int16(source.offset()) && @@ -990,7 +990,7 @@ void TurboAssembler::Ld_d(Register rd, const MemOperand& rj) { } } -void TurboAssembler::St_d(Register rd, const MemOperand& rj) { +void MacroAssembler::St_d(Register rd, const MemOperand& rj) { MemOperand source = rj; if (!(source.hasIndexReg()) && is_int16(source.offset()) && @@ -1007,7 +1007,7 @@ void TurboAssembler::St_d(Register rd, const MemOperand& rj) { } } -void TurboAssembler::Fld_s(FPURegister fd, const MemOperand& src) { +void MacroAssembler::Fld_s(FPURegister fd, const MemOperand& src) { MemOperand tmp = src; AdjustBaseAndOffset(&tmp); if (tmp.hasIndexReg()) { @@ -1017,7 +1017,7 @@ void TurboAssembler::Fld_s(FPURegister fd, const MemOperand& src) { } } -void TurboAssembler::Fst_s(FPURegister fs, const MemOperand& src) { +void MacroAssembler::Fst_s(FPURegister fs, const MemOperand& src) { MemOperand tmp = src; AdjustBaseAndOffset(&tmp); if (tmp.hasIndexReg()) { @@ -1027,7 +1027,7 @@ void TurboAssembler::Fst_s(FPURegister fs, const MemOperand& src) { } } -void TurboAssembler::Fld_d(FPURegister fd, const MemOperand& src) { +void MacroAssembler::Fld_d(FPURegister fd, const MemOperand& src) { MemOperand tmp = src; AdjustBaseAndOffset(&tmp); if (tmp.hasIndexReg()) { @@ -1037,7 +1037,7 @@ void TurboAssembler::Fld_d(FPURegister fd, const MemOperand& src) { } } -void TurboAssembler::Fst_d(FPURegister fs, const MemOperand& src) { +void MacroAssembler::Fst_d(FPURegister fs, const MemOperand& src) { MemOperand tmp = src; AdjustBaseAndOffset(&tmp); if (tmp.hasIndexReg()) { @@ -1047,7 +1047,7 @@ void TurboAssembler::Fst_d(FPURegister fs, const MemOperand& src) { } } -void TurboAssembler::Ll_w(Register rd, const MemOperand& rj) { +void MacroAssembler::Ll_w(Register rd, const MemOperand& rj) { DCHECK(!rj.hasIndexReg()); bool is_one_instruction = is_int14(rj.offset()); if (is_one_instruction) { @@ -1061,7 +1061,7 @@ void TurboAssembler::Ll_w(Register rd, const MemOperand& rj) { } } -void TurboAssembler::Ll_d(Register rd, const MemOperand& rj) { +void MacroAssembler::Ll_d(Register rd, const MemOperand& rj) { DCHECK(!rj.hasIndexReg()); bool is_one_instruction = is_int14(rj.offset()); if (is_one_instruction) { @@ -1075,7 +1075,7 @@ void TurboAssembler::Ll_d(Register rd, const MemOperand& rj) { } } -void TurboAssembler::Sc_w(Register rd, const MemOperand& rj) { +void MacroAssembler::Sc_w(Register rd, const MemOperand& rj) { DCHECK(!rj.hasIndexReg()); bool is_one_instruction = is_int14(rj.offset()); if (is_one_instruction) { @@ -1089,7 +1089,7 @@ void TurboAssembler::Sc_w(Register rd, const MemOperand& rj) { } } -void TurboAssembler::Sc_d(Register rd, const MemOperand& rj) { +void MacroAssembler::Sc_d(Register rd, const MemOperand& rj) { DCHECK(!rj.hasIndexReg()); bool is_one_instruction = is_int14(rj.offset()); if (is_one_instruction) { @@ -1103,7 +1103,7 @@ void TurboAssembler::Sc_d(Register rd, const MemOperand& rj) { } } -void TurboAssembler::li(Register dst, Handle<HeapObject> value, LiFlags mode) { +void MacroAssembler::li(Register dst, Handle<HeapObject> value, LiFlags mode) { // TODO(jgruber,v8:8887): Also consider a root-relative load when generating // non-isolate-independent code. In many cases it might be cheaper than // embedding the relocatable value.
@@ -1114,7 +1114,7 @@ void TurboAssembler::li(Register dst, Handle<HeapObject> value, LiFlags mode) { li(dst, Operand(value), mode); } -void TurboAssembler::li(Register dst, ExternalReference value, LiFlags mode) { +void MacroAssembler::li(Register dst, ExternalReference value, LiFlags mode) { // TODO(jgruber,v8:8887): Also consider a root-relative load when generating // non-isolate-independent code. In many cases it might be cheaper than // embedding the relocatable value. @@ -1134,7 +1134,7 @@ static inline int InstrCountForLiLower32Bit(int64_t value) { } } -void TurboAssembler::LiLower32BitHelper(Register rd, Operand j) { +void MacroAssembler::LiLower32BitHelper(Register rd, Operand j) { if (is_int12(static_cast<int32_t>(j.immediate()))) { addi_d(rd, zero_reg, j.immediate()); } else if (is_uint12(static_cast<int32_t>(j.immediate()))) { @@ -1147,7 +1147,7 @@ void TurboAssembler::LiLower32BitHelper(Register rd, Operand j) { } } -int TurboAssembler::InstrCountForLi64Bit(int64_t value) { +int MacroAssembler::InstrCountForLi64Bit(int64_t value) { if (is_int32(value)) { return InstrCountForLiLower32Bit(value); } else if (is_int52(value)) { @@ -1177,7 +1177,7 @@ int TurboAssembler::InstrCountForLi64Bit(int64_t value) { // All changes to if...else conditions here must be added to // InstrCountForLi64Bit as well. -void TurboAssembler::li_optimized(Register rd, Operand j, LiFlags mode) { +void MacroAssembler::li_optimized(Register rd, Operand j, LiFlags mode) { DCHECK(!j.is_reg()); DCHECK(!MustUseReg(j.rmode())); DCHECK(mode == OPTIMIZE_SIZE); @@ -1212,7 +1212,7 @@ void TurboAssembler::li_optimized(Register rd, Operand j, LiFlags mode) { } } -void TurboAssembler::li(Register rd, Operand j, LiFlags mode) { +void MacroAssembler::li(Register rd, Operand j, LiFlags mode) { DCHECK(!j.is_reg()); BlockTrampolinePoolScope block_trampoline_pool(this); if (!MustUseReg(j.rmode()) && mode == OPTIMIZE_SIZE) { @@ -1245,7 +1245,7 @@ void TurboAssembler::li(Register rd, Operand j, LiFlags mode) { } } -void TurboAssembler::MultiPush(RegList regs) { +void MacroAssembler::MultiPush(RegList regs) { int16_t stack_offset = 0; for (int16_t i = kNumRegisters - 1; i >= 0; i--) { @@ -1257,7 +1257,7 @@ void TurboAssembler::MultiPush(RegList regs) { addi_d(sp, sp, stack_offset); } -void TurboAssembler::MultiPush(RegList regs1, RegList regs2) { +void MacroAssembler::MultiPush(RegList regs1, RegList regs2) { DCHECK((regs1 & regs2).is_empty()); int16_t stack_offset = 0; @@ -1276,7 +1276,7 @@ void TurboAssembler::MultiPush(RegList regs1, RegList regs2) { addi_d(sp, sp, stack_offset); } -void TurboAssembler::MultiPush(RegList regs1, RegList regs2, RegList regs3) { +void MacroAssembler::MultiPush(RegList regs1, RegList regs2, RegList regs3) { DCHECK((regs1 & regs2).is_empty()); DCHECK((regs1 & regs3).is_empty()); DCHECK((regs2 & regs3).is_empty()); @@ -1303,7 +1303,7 @@ void TurboAssembler::MultiPush(RegList regs1, RegList regs2, RegList regs3) { addi_d(sp, sp, stack_offset); } -void TurboAssembler::MultiPop(RegList regs) { +void MacroAssembler::MultiPop(RegList regs) { int16_t stack_offset = 0; for (int16_t i = 0; i < kNumRegisters; i++) { @@ -1315,7 +1315,7 @@ void TurboAssembler::MultiPop(RegList regs) { addi_d(sp, sp, stack_offset); } -void TurboAssembler::MultiPop(RegList regs1, RegList regs2) { +void MacroAssembler::MultiPop(RegList regs1, RegList regs2) { DCHECK((regs1 & regs2).is_empty()); int16_t stack_offset = 0; @@ -1334,7 +1334,7 @@ void TurboAssembler::MultiPop(RegList regs1, RegList regs2) { addi_d(sp, sp, stack_offset); } -void
TurboAssembler::MultiPop(RegList regs1, RegList regs2, RegList regs3) { +void MacroAssembler::MultiPop(RegList regs1, RegList regs2, RegList regs3) { DCHECK((regs1 & regs2).is_empty()); DCHECK((regs1 & regs3).is_empty()); DCHECK((regs2 & regs3).is_empty()); @@ -1361,7 +1361,7 @@ void TurboAssembler::MultiPop(RegList regs1, RegList regs2, RegList regs3) { addi_d(sp, sp, stack_offset); } -void TurboAssembler::MultiPushFPU(DoubleRegList regs) { +void MacroAssembler::MultiPushFPU(DoubleRegList regs) { int16_t num_to_push = regs.Count(); int16_t stack_offset = num_to_push * kDoubleSize; @@ -1374,7 +1374,7 @@ void TurboAssembler::MultiPushFPU(DoubleRegList regs) { } } -void TurboAssembler::MultiPopFPU(DoubleRegList regs) { +void MacroAssembler::MultiPopFPU(DoubleRegList regs) { int16_t stack_offset = 0; for (int16_t i = 0; i < kNumRegisters; i++) { @@ -1386,7 +1386,7 @@ void TurboAssembler::MultiPopFPU(DoubleRegList regs) { addi_d(sp, sp, stack_offset); } -void TurboAssembler::Bstrpick_w(Register rk, Register rj, uint16_t msbw, +void MacroAssembler::Bstrpick_w(Register rk, Register rj, uint16_t msbw, uint16_t lsbw) { DCHECK_LT(lsbw, msbw); DCHECK_LT(lsbw, 32); @@ -1394,7 +1394,7 @@ void TurboAssembler::Bstrpick_w(Register rk, Register rj, uint16_t msbw, bstrpick_w(rk, rj, msbw, lsbw); } -void TurboAssembler::Bstrpick_d(Register rk, Register rj, uint16_t msbw, +void MacroAssembler::Bstrpick_d(Register rk, Register rj, uint16_t msbw, uint16_t lsbw) { DCHECK_LT(lsbw, msbw); DCHECK_LT(lsbw, 64); @@ -1402,17 +1402,17 @@ void TurboAssembler::Bstrpick_d(Register rk, Register rj, uint16_t msbw, bstrpick_d(rk, rj, msbw, lsbw); } -void TurboAssembler::Neg_s(FPURegister fd, FPURegister fj) { fneg_s(fd, fj); } +void MacroAssembler::Neg_s(FPURegister fd, FPURegister fj) { fneg_s(fd, fj); } -void TurboAssembler::Neg_d(FPURegister fd, FPURegister fj) { fneg_d(fd, fj); } +void MacroAssembler::Neg_d(FPURegister fd, FPURegister fj) { fneg_d(fd, fj); } -void TurboAssembler::Ffint_d_uw(FPURegister fd, FPURegister fj) { +void MacroAssembler::Ffint_d_uw(FPURegister fd, FPURegister fj) { BlockTrampolinePoolScope block_trampoline_pool(this); movfr2gr_s(t8, fj); Ffint_d_uw(fd, t8); } -void TurboAssembler::Ffint_d_uw(FPURegister fd, Register rj) { +void MacroAssembler::Ffint_d_uw(FPURegister fd, Register rj) { BlockTrampolinePoolScope block_trampoline_pool(this); DCHECK(rj != t7); @@ -1421,13 +1421,13 @@ void TurboAssembler::Ffint_d_uw(FPURegister fd, Register rj) { ffint_d_l(fd, fd); } -void TurboAssembler::Ffint_d_ul(FPURegister fd, FPURegister fj) { +void MacroAssembler::Ffint_d_ul(FPURegister fd, FPURegister fj) { BlockTrampolinePoolScope block_trampoline_pool(this); movfr2gr_d(t8, fj); Ffint_d_ul(fd, t8); } -void TurboAssembler::Ffint_d_ul(FPURegister fd, Register rj) { +void MacroAssembler::Ffint_d_ul(FPURegister fd, Register rj) { BlockTrampolinePoolScope block_trampoline_pool(this); DCHECK(rj != t7); @@ -1452,13 +1452,13 @@ void TurboAssembler::Ffint_d_ul(FPURegister fd, Register rj) { bind(&conversion_done); } -void TurboAssembler::Ffint_s_uw(FPURegister fd, FPURegister fj) { +void MacroAssembler::Ffint_s_uw(FPURegister fd, FPURegister fj) { BlockTrampolinePoolScope block_trampoline_pool(this); movfr2gr_d(t8, fj); Ffint_s_uw(fd, t8); } -void TurboAssembler::Ffint_s_uw(FPURegister fd, Register rj) { +void MacroAssembler::Ffint_s_uw(FPURegister fd, Register rj) { BlockTrampolinePoolScope block_trampoline_pool(this); DCHECK(rj != t7); @@ -1467,13 +1467,13 @@ void TurboAssembler::Ffint_s_uw(FPURegister fd, Register 
rj) { ffint_s_l(fd, fd); } -void TurboAssembler::Ffint_s_ul(FPURegister fd, FPURegister fj) { +void MacroAssembler::Ffint_s_ul(FPURegister fd, FPURegister fj) { BlockTrampolinePoolScope block_trampoline_pool(this); movfr2gr_d(t8, fj); Ffint_s_ul(fd, t8); } -void TurboAssembler::Ffint_s_ul(FPURegister fd, Register rj) { +void MacroAssembler::Ffint_s_ul(FPURegister fd, Register rj) { BlockTrampolinePoolScope block_trampoline_pool(this); DCHECK(rj != t7); @@ -1530,28 +1530,28 @@ void MacroAssembler::Ftintrz_l_ud(FPURegister fd, FPURegister fj, Ftintrz_l_d(fd, scratch); } -void TurboAssembler::Ftintrz_uw_d(FPURegister fd, FPURegister fj, +void MacroAssembler::Ftintrz_uw_d(FPURegister fd, FPURegister fj, FPURegister scratch) { BlockTrampolinePoolScope block_trampoline_pool(this); Ftintrz_uw_d(t8, fj, scratch); movgr2fr_w(fd, t8); } -void TurboAssembler::Ftintrz_uw_s(FPURegister fd, FPURegister fj, +void MacroAssembler::Ftintrz_uw_s(FPURegister fd, FPURegister fj, FPURegister scratch) { BlockTrampolinePoolScope block_trampoline_pool(this); Ftintrz_uw_s(t8, fj, scratch); movgr2fr_w(fd, t8); } -void TurboAssembler::Ftintrz_ul_d(FPURegister fd, FPURegister fj, +void MacroAssembler::Ftintrz_ul_d(FPURegister fd, FPURegister fj, FPURegister scratch, Register result) { BlockTrampolinePoolScope block_trampoline_pool(this); Ftintrz_ul_d(t8, fj, scratch, result); movgr2fr_d(fd, t8); } -void TurboAssembler::Ftintrz_ul_s(FPURegister fd, FPURegister fj, +void MacroAssembler::Ftintrz_ul_s(FPURegister fd, FPURegister fj, FPURegister scratch, Register result) { BlockTrampolinePoolScope block_trampoline_pool(this); Ftintrz_ul_s(t8, fj, scratch, result); @@ -1574,7 +1574,7 @@ void MacroAssembler::Ftintrp_w_d(FPURegister fd, FPURegister fj) { ftintrp_w_d(fd, fj); } -void TurboAssembler::Ftintrz_uw_d(Register rd, FPURegister fj, +void MacroAssembler::Ftintrz_uw_d(Register rd, FPURegister fj, FPURegister scratch) { DCHECK(fj != scratch); DCHECK(rd != t7); @@ -1610,7 +1610,7 @@ void TurboAssembler::Ftintrz_uw_d(Register rd, FPURegister fj, bind(&done); } -void TurboAssembler::Ftintrz_uw_s(Register rd, FPURegister fj, +void MacroAssembler::Ftintrz_uw_s(Register rd, FPURegister fj, FPURegister scratch) { DCHECK(fj != scratch); DCHECK(rd != t7); @@ -1644,7 +1644,7 @@ void TurboAssembler::Ftintrz_uw_s(Register rd, FPURegister fj, bind(&done); } -void TurboAssembler::Ftintrz_ul_d(Register rd, FPURegister fj, +void MacroAssembler::Ftintrz_ul_d(Register rd, FPURegister fj, FPURegister scratch, Register result) { DCHECK(fj != scratch); DCHECK(result.is_valid() ? !AreAliased(rd, result, t7) : !AreAliased(rd, t7)); @@ -1699,7 +1699,7 @@ void TurboAssembler::Ftintrz_ul_d(Register rd, FPURegister fj, bind(&fail); } -void TurboAssembler::Ftintrz_ul_s(Register rd, FPURegister fj, +void MacroAssembler::Ftintrz_ul_s(Register rd, FPURegister fj, FPURegister scratch, Register result) { DCHECK(fj != scratch); DCHECK(result.is_valid() ? 
!AreAliased(rd, result, t7) : !AreAliased(rd, t7)); @@ -1758,7 +1758,7 @@ void TurboAssembler::Ftintrz_ul_s(Register rd, FPURegister fj, bind(&fail); } -void TurboAssembler::RoundDouble(FPURegister dst, FPURegister src, +void MacroAssembler::RoundDouble(FPURegister dst, FPURegister src, FPURoundingMode mode) { BlockTrampolinePoolScope block_trampoline_pool(this); Register scratch = t8; @@ -1769,23 +1769,23 @@ void TurboAssembler::RoundDouble(FPURegister dst, FPURegister src, movgr2fcsr(scratch); } -void TurboAssembler::Floor_d(FPURegister dst, FPURegister src) { +void MacroAssembler::Floor_d(FPURegister dst, FPURegister src) { RoundDouble(dst, src, mode_floor); } -void TurboAssembler::Ceil_d(FPURegister dst, FPURegister src) { +void MacroAssembler::Ceil_d(FPURegister dst, FPURegister src) { RoundDouble(dst, src, mode_ceil); } -void TurboAssembler::Trunc_d(FPURegister dst, FPURegister src) { +void MacroAssembler::Trunc_d(FPURegister dst, FPURegister src) { RoundDouble(dst, src, mode_trunc); } -void TurboAssembler::Round_d(FPURegister dst, FPURegister src) { +void MacroAssembler::Round_d(FPURegister dst, FPURegister src) { RoundDouble(dst, src, mode_round); } -void TurboAssembler::RoundFloat(FPURegister dst, FPURegister src, +void MacroAssembler::RoundFloat(FPURegister dst, FPURegister src, FPURoundingMode mode) { BlockTrampolinePoolScope block_trampoline_pool(this); Register scratch = t8; @@ -1796,23 +1796,23 @@ void TurboAssembler::RoundFloat(FPURegister dst, FPURegister src, movgr2fcsr(scratch); } -void TurboAssembler::Floor_s(FPURegister dst, FPURegister src) { +void MacroAssembler::Floor_s(FPURegister dst, FPURegister src) { RoundFloat(dst, src, mode_floor); } -void TurboAssembler::Ceil_s(FPURegister dst, FPURegister src) { +void MacroAssembler::Ceil_s(FPURegister dst, FPURegister src) { RoundFloat(dst, src, mode_ceil); } -void TurboAssembler::Trunc_s(FPURegister dst, FPURegister src) { +void MacroAssembler::Trunc_s(FPURegister dst, FPURegister src) { RoundFloat(dst, src, mode_trunc); } -void TurboAssembler::Round_s(FPURegister dst, FPURegister src) { +void MacroAssembler::Round_s(FPURegister dst, FPURegister src) { RoundFloat(dst, src, mode_round); } -void TurboAssembler::CompareF(FPURegister cmp1, FPURegister cmp2, +void MacroAssembler::CompareF(FPURegister cmp1, FPURegister cmp2, FPUCondition cc, CFRegister cd, bool f32) { if (f32) { fcmp_cond_s(cc, cmp1, cmp2, cd); @@ -1821,20 +1821,20 @@ void TurboAssembler::CompareF(FPURegister cmp1, FPURegister cmp2, } } -void TurboAssembler::CompareIsNanF(FPURegister cmp1, FPURegister cmp2, +void MacroAssembler::CompareIsNanF(FPURegister cmp1, FPURegister cmp2, CFRegister cd, bool f32) { CompareF(cmp1, cmp2, CUN, cd, f32); } -void TurboAssembler::BranchTrueShortF(Label* target, CFRegister cj) { +void MacroAssembler::BranchTrueShortF(Label* target, CFRegister cj) { bcnez(cj, target); } -void TurboAssembler::BranchFalseShortF(Label* target, CFRegister cj) { +void MacroAssembler::BranchFalseShortF(Label* target, CFRegister cj) { bceqz(cj, target); } -void TurboAssembler::BranchTrueF(Label* target, CFRegister cj) { +void MacroAssembler::BranchTrueF(Label* target, CFRegister cj) { // TODO(yuyin): can be optimized bool long_branch = target->is_bound() ? !is_near(target, OffsetSize::kOffset21) : is_trampoline_emitted(); @@ -1849,7 +1849,7 @@ void TurboAssembler::BranchTrueF(Label* target, CFRegister cj) { } } -void TurboAssembler::BranchFalseF(Label* target, CFRegister cj) { +void MacroAssembler::BranchFalseF(Label* target, CFRegister cj) { bool long_branch = target->is_bound() ?
!is_near(target, OffsetSize::kOffset21) : is_trampoline_emitted(); @@ -1863,7 +1863,7 @@ void TurboAssembler::BranchFalseF(Label* target, CFRegister cj) { } } -void TurboAssembler::FmoveLow(FPURegister dst, Register src_low) { +void MacroAssembler::FmoveLow(FPURegister dst, Register src_low) { UseScratchRegisterScope temps(this); Register scratch = temps.Acquire(); DCHECK(src_low != scratch); @@ -1872,14 +1872,14 @@ void TurboAssembler::FmoveLow(FPURegister dst, Register src_low) { movgr2frh_w(dst, scratch); } -void TurboAssembler::Move(FPURegister dst, uint32_t src) { +void MacroAssembler::Move(FPURegister dst, uint32_t src) { UseScratchRegisterScope temps(this); Register scratch = temps.Acquire(); li(scratch, Operand(static_cast<int32_t>(src))); movgr2fr_w(dst, scratch); } -void TurboAssembler::Move(FPURegister dst, uint64_t src) { +void MacroAssembler::Move(FPURegister dst, uint64_t src) { // Handle special values first. if (src == base::bit_cast<uint64_t>(0.0) && has_double_zero_reg_set_) { fmov_d(dst, kDoubleRegZero); @@ -1895,7 +1895,7 @@ void TurboAssembler::Move(FPURegister dst, uint64_t src) { } } -void TurboAssembler::Movz(Register rd, Register rj, Register rk) { +void MacroAssembler::Movz(Register rd, Register rj, Register rk) { UseScratchRegisterScope temps(this); Register scratch = temps.Acquire(); masknez(scratch, rj, rk); @@ -1903,7 +1903,7 @@ void TurboAssembler::Movz(Register rd, Register rj, Register rk) { or_(rd, rd, scratch); } -void TurboAssembler::Movn(Register rd, Register rj, Register rk) { +void MacroAssembler::Movn(Register rd, Register rj, Register rk) { UseScratchRegisterScope temps(this); Register scratch = temps.Acquire(); maskeqz(scratch, rj, rk); @@ -1911,7 +1911,7 @@ void TurboAssembler::Movn(Register rd, Register rj, Register rk) { or_(rd, rd, scratch); } -void TurboAssembler::LoadZeroOnCondition(Register rd, Register rj, +void MacroAssembler::LoadZeroOnCondition(Register rd, Register rj, const Operand& rk, Condition cond) { BlockTrampolinePoolScope block_trampoline_pool(this); switch (cond) { @@ -1995,40 +1995,40 @@ void TurboAssembler::LoadZeroOnCondition(Register rd, Register rj, } // namespace internal -void TurboAssembler::LoadZeroIfConditionNotZero(Register dest, +void MacroAssembler::LoadZeroIfConditionNotZero(Register dest, Register condition) { masknez(dest, dest, condition); } -void TurboAssembler::LoadZeroIfConditionZero(Register dest, +void MacroAssembler::LoadZeroIfConditionZero(Register dest, Register condition) { maskeqz(dest, dest, condition); } -void TurboAssembler::LoadZeroIfFPUCondition(Register dest, CFRegister cc) { +void MacroAssembler::LoadZeroIfFPUCondition(Register dest, CFRegister cc) { UseScratchRegisterScope temps(this); Register scratch = temps.Acquire(); movcf2gr(scratch, cc); LoadZeroIfConditionNotZero(dest, scratch); } -void TurboAssembler::LoadZeroIfNotFPUCondition(Register dest, CFRegister cc) { +void MacroAssembler::LoadZeroIfNotFPUCondition(Register dest, CFRegister cc) { UseScratchRegisterScope temps(this); Register scratch = temps.Acquire(); movcf2gr(scratch, cc); LoadZeroIfConditionZero(dest, scratch); } -void TurboAssembler::Clz_w(Register rd, Register rj) { clz_w(rd, rj); } +void MacroAssembler::Clz_w(Register rd, Register rj) { clz_w(rd, rj); } -void TurboAssembler::Clz_d(Register rd, Register rj) { clz_d(rd, rj); } +void MacroAssembler::Clz_d(Register rd, Register rj) { clz_d(rd, rj); } -void TurboAssembler::Ctz_w(Register rd, Register rj) { ctz_w(rd, rj); } +void MacroAssembler::Ctz_w(Register rd, Register
rj) { ctz_w(rd, rj); } -void TurboAssembler::Ctz_d(Register rd, Register rj) { ctz_d(rd, rj); } +void MacroAssembler::Ctz_d(Register rd, Register rj) { ctz_d(rd, rj); } // TODO(LOONG_dev): Optimize like arm64, use simd instruction -void TurboAssembler::Popcnt_w(Register rd, Register rj) { +void MacroAssembler::Popcnt_w(Register rd, Register rj) { ASM_CODE_COMMENT(this); // https://graphics.stanford.edu/~seander/bithacks.html#CountBitsSetParallel // @@ -2072,7 +2072,7 @@ void TurboAssembler::Popcnt_w(Register rd, Register rj) { srli_w(rd, rd, shift); } -void TurboAssembler::Popcnt_d(Register rd, Register rj) { +void MacroAssembler::Popcnt_d(Register rd, Register rj) { ASM_CODE_COMMENT(this); int64_t B0 = 0x5555555555555555l; // (T)~(T)0/3 int64_t B1 = 0x3333333333333333l; // (T)~(T)0/15*3 @@ -2102,7 +2102,7 @@ void TurboAssembler::Popcnt_d(Register rd, Register rj) { srli_d(rd, rd, shift); } -void TurboAssembler::ExtractBits(Register dest, Register source, Register pos, +void MacroAssembler::ExtractBits(Register dest, Register source, Register pos, int size, bool sign_extend) { sra_d(dest, source, pos); bstrpick_d(dest, dest, size - 1, 0); @@ -2124,7 +2124,7 @@ void TurboAssembler::ExtractBits(Register dest, Register source, Register pos, } } -void TurboAssembler::InsertBits(Register dest, Register source, Register pos, +void MacroAssembler::InsertBits(Register dest, Register source, Register pos, int size) { Rotr_d(dest, dest, pos); bstrins_d(dest, source, size - 1, 0); @@ -2136,7 +2136,7 @@ void TurboAssembler::InsertBits(Register dest, Register source, Register pos, } } -void TurboAssembler::TryInlineTruncateDoubleToI(Register result, +void MacroAssembler::TryInlineTruncateDoubleToI(Register result, DoubleRegister double_input, Label* done) { DoubleRegister single_scratch = kScratchDoubleReg.low(); @@ -2159,7 +2159,7 @@ void TurboAssembler::TryInlineTruncateDoubleToI(Register result, bcnez(FCC0, done); } -void TurboAssembler::TruncateDoubleToI(Isolate* isolate, Zone* zone, +void MacroAssembler::TruncateDoubleToI(Isolate* isolate, Zone* zone, Register result, DoubleRegister double_input, StubCallMode stub_mode) { @@ -2193,7 +2193,7 @@ void TurboAssembler::TruncateDoubleToI(Isolate* isolate, Zone* zone, DCHECK((cond == cc_always && rj == zero_reg && rk.rm() == zero_reg) || \ (cond != cc_always && (rj != zero_reg || rk.rm() != zero_reg))) -void TurboAssembler::Branch(Label* L, bool need_link) { +void MacroAssembler::Branch(Label* L, bool need_link) { int offset = GetOffset(L, OffsetSize::kOffset26); if (need_link) { bl(offset); @@ -2202,7 +2202,7 @@ void TurboAssembler::Branch(Label* L, bool need_link) { } } -void TurboAssembler::Branch(Label* L, Condition cond, Register rj, +void MacroAssembler::Branch(Label* L, Condition cond, Register rj, const Operand& rk, bool need_link) { if (L->is_bound()) { BRANCH_ARGS_CHECK(cond, rj, rk); @@ -2234,7 +2234,7 @@ void TurboAssembler::Branch(Label* L, Condition cond, Register rj, } } -void TurboAssembler::Branch(Label* L, Condition cond, Register rj, +void MacroAssembler::Branch(Label* L, Condition cond, Register rj, RootIndex index) { UseScratchRegisterScope temps(this); Register scratch = temps.Acquire(); @@ -2242,11 +2242,11 @@ void TurboAssembler::Branch(Label* L, Condition cond, Register rj, Branch(L, cond, rj, Operand(scratch)); } -int32_t TurboAssembler::GetOffset(Label* L, OffsetSize bits) { +int32_t MacroAssembler::GetOffset(Label* L, OffsetSize bits) { return branch_offset_helper(L, bits) >> 2; } -Register 
TurboAssembler::GetRkAsRegisterHelper(const Operand& rk, +Register MacroAssembler::GetRkAsRegisterHelper(const Operand& rk, Register scratch) { Register r2 = no_reg; if (rk.is_reg()) { @@ -2259,7 +2259,7 @@ Register TurboAssembler::GetRkAsRegisterHelper(const Operand& rk, return r2; } -bool TurboAssembler::BranchShortOrFallback(Label* L, Condition cond, +bool MacroAssembler::BranchShortOrFallback(Label* L, Condition cond, Register rj, const Operand& rk, bool need_link) { UseScratchRegisterScope temps(this); @@ -2490,7 +2490,7 @@ bool TurboAssembler::BranchShortOrFallback(Label* L, Condition cond, return true; } -void TurboAssembler::BranchShort(Label* L, Condition cond, Register rj, +void MacroAssembler::BranchShort(Label* L, Condition cond, Register rj, const Operand& rk, bool need_link) { BRANCH_ARGS_CHECK(cond, rj, rk); bool result = BranchShortOrFallback(L, cond, rj, rk, need_link); @@ -2498,7 +2498,7 @@ void TurboAssembler::BranchShort(Label* L, Condition cond, Register rj, USE(result); } -void TurboAssembler::LoadFromConstantsTable(Register destination, +void MacroAssembler::LoadFromConstantsTable(Register destination, int constant_index) { ASM_CODE_COMMENT(this); DCHECK(RootsTable::IsImmortalImmovable(RootIndex::kBuiltinsConstantsTable)); @@ -2508,11 +2508,11 @@ void TurboAssembler::LoadFromConstantsTable(Register destination, constant_index * kPointerSize)); } -void TurboAssembler::LoadRootRelative(Register destination, int32_t offset) { +void MacroAssembler::LoadRootRelative(Register destination, int32_t offset) { Ld_d(destination, MemOperand(kRootRegister, offset)); } -void TurboAssembler::LoadRootRegisterOffset(Register destination, +void MacroAssembler::LoadRootRegisterOffset(Register destination, intptr_t offset) { if (offset == 0) { Move(destination, kRootRegister); @@ -2521,7 +2521,7 @@ void TurboAssembler::LoadRootRegisterOffset(Register destination, } } -MemOperand TurboAssembler::ExternalReferenceAsOperand( +MemOperand MacroAssembler::ExternalReferenceAsOperand( ExternalReference reference, Register scratch) { if (root_array_available_ && options().enable_root_relative_access) { int64_t offset = @@ -2550,7 +2550,7 @@ MemOperand TurboAssembler::ExternalReferenceAsOperand( return MemOperand(scratch, 0); } -void TurboAssembler::Jump(Register target, Condition cond, Register rj, +void MacroAssembler::Jump(Register target, Condition cond, Register rj, const Operand& rk) { BlockTrampolinePoolScope block_trampoline_pool(this); if (cond == cc_always) { @@ -2564,7 +2564,7 @@ void TurboAssembler::Jump(Register target, Condition cond, Register rj, } } -void TurboAssembler::Jump(intptr_t target, RelocInfo::Mode rmode, +void MacroAssembler::Jump(intptr_t target, RelocInfo::Mode rmode, Condition cond, Register rj, const Operand& rk) { Label skip; if (cond != cc_always) { @@ -2578,13 +2578,13 @@ void TurboAssembler::Jump(intptr_t target, RelocInfo::Mode rmode, } } -void TurboAssembler::Jump(Address target, RelocInfo::Mode rmode, Condition cond, +void MacroAssembler::Jump(Address target, RelocInfo::Mode rmode, Condition cond, Register rj, const Operand& rk) { DCHECK(!RelocInfo::IsCodeTarget(rmode)); Jump(static_cast<intptr_t>(target), rmode, cond, rj, rk); } -void TurboAssembler::Jump(Handle<Code> code, RelocInfo::Mode rmode, +void MacroAssembler::Jump(Handle<Code> code, RelocInfo::Mode rmode, Condition cond, Register rj, const Operand& rk) { DCHECK(RelocInfo::IsCodeTarget(rmode)); BlockTrampolinePoolScope block_trampoline_pool(this); @@ -2604,13 +2604,13 @@ void TurboAssembler::Jump(Handle<Code> code,
RelocInfo::Mode rmode, bind(&skip); } -void TurboAssembler::Jump(const ExternalReference& reference) { +void MacroAssembler::Jump(const ExternalReference& reference) { li(t7, reference); Jump(t7); } // Note: To call gcc-compiled C code on loongarch, you must call through t[0-8]. -void TurboAssembler::Call(Register target, Condition cond, Register rj, +void MacroAssembler::Call(Register target, Condition cond, Register rj, const Operand& rk) { BlockTrampolinePoolScope block_trampoline_pool(this); if (cond == cc_always) { @@ -2639,7 +2639,7 @@ void MacroAssembler::JumpIfIsInRange(Register value, unsigned lower_limit, } } -void TurboAssembler::Call(Address target, RelocInfo::Mode rmode, Condition cond, +void MacroAssembler::Call(Address target, RelocInfo::Mode rmode, Condition cond, Register rj, const Operand& rk) { BlockTrampolinePoolScope block_trampoline_pool(this); Label skip; @@ -2659,7 +2659,7 @@ void TurboAssembler::Call(Address target, RelocInfo::Mode rmode, Condition cond, bind(&skip); } -void TurboAssembler::Call(Handle<Code> code, RelocInfo::Mode rmode, +void MacroAssembler::Call(Handle<Code> code, RelocInfo::Mode rmode, Condition cond, Register rj, const Operand& rk) { BlockTrampolinePoolScope block_trampoline_pool(this); Builtin builtin = Builtin::kNoBuiltinId; @@ -2672,7 +2672,7 @@ void TurboAssembler::Call(Handle<Code> code, RelocInfo::Mode rmode, Call(code.address(), rmode, cond, rj, rk); } -void TurboAssembler::LoadEntryFromBuiltinIndex(Register builtin_index) { +void MacroAssembler::LoadEntryFromBuiltinIndex(Register builtin_index) { ASM_CODE_COMMENT(this); static_assert(kSystemPointerSize == 8); static_assert(kSmiTagSize == 1); @@ -2686,22 +2686,22 @@ void TurboAssembler::LoadEntryFromBuiltinIndex(Register builtin_index) { MemOperand(builtin_index, IsolateData::builtin_entry_table_offset())); } -void TurboAssembler::LoadEntryFromBuiltin(Builtin builtin, +void MacroAssembler::LoadEntryFromBuiltin(Builtin builtin, Register destination) { Ld_d(destination, EntryFromBuiltinAsOperand(builtin)); } -MemOperand TurboAssembler::EntryFromBuiltinAsOperand(Builtin builtin) { +MemOperand MacroAssembler::EntryFromBuiltinAsOperand(Builtin builtin) { DCHECK(root_array_available()); return MemOperand(kRootRegister, IsolateData::BuiltinEntrySlotOffset(builtin)); } -void TurboAssembler::CallBuiltinByIndex(Register builtin_index) { +void MacroAssembler::CallBuiltinByIndex(Register builtin_index) { ASM_CODE_COMMENT(this); LoadEntryFromBuiltinIndex(builtin_index); Call(builtin_index); } -void TurboAssembler::CallBuiltin(Builtin builtin) { +void MacroAssembler::CallBuiltin(Builtin builtin) { ASM_CODE_COMMENT_STRING(this, CommentForOffHeapTrampoline("call", builtin)); UseScratchRegisterScope temps(this); Register temp = temps.Acquire(); @@ -2735,7 +2735,7 @@ void TurboAssembler::CallBuiltin(Builtin builtin) { } } -void TurboAssembler::TailCallBuiltin(Builtin builtin) { +void MacroAssembler::TailCallBuiltin(Builtin builtin) { ASM_CODE_COMMENT_STRING(this, CommentForOffHeapTrampoline("tail call", builtin)); UseScratchRegisterScope temps(this); @@ -2769,7 +2769,7 @@ void TurboAssembler::TailCallBuiltin(Builtin builtin) { } } -void TurboAssembler::PatchAndJump(Address target) { +void MacroAssembler::PatchAndJump(Address target) { ASM_CODE_COMMENT(this); UseScratchRegisterScope temps(this); Register scratch = temps.Acquire(); @@ -2782,7 +2782,7 @@ void TurboAssembler::PatchAndJump(Address target) { pc_ += sizeof(uint64_t); } -void TurboAssembler::StoreReturnAddressAndCall(Register target) { +void
MacroAssembler::StoreReturnAddressAndCall(Register target) { ASM_CODE_COMMENT(this); // This generates the final instruction sequence for calls to C functions // once an exit frame has been constructed. @@ -2810,7 +2810,7 @@ void TurboAssembler::StoreReturnAddressAndCall(Register target) { DCHECK_EQ(kNumInstructionsToJump, InstructionsGeneratedSince(&find_ra)); } -void TurboAssembler::DropArguments(Register count, ArgumentsCountType type, +void MacroAssembler::DropArguments(Register count, ArgumentsCountType type, ArgumentsCountMode mode, Register scratch) { switch (type) { case kCountIsInteger: { @@ -2834,7 +2834,7 @@ void TurboAssembler::DropArguments(Register count, ArgumentsCountType type, } } -void TurboAssembler::DropArgumentsAndPushNewReceiver(Register argc, +void MacroAssembler::DropArgumentsAndPushNewReceiver(Register argc, Register receiver, ArgumentsCountType type, ArgumentsCountMode mode, @@ -2850,11 +2850,11 @@ void TurboAssembler::DropArgumentsAndPushNewReceiver(Register argc, } } -void TurboAssembler::Ret(Condition cond, Register rj, const Operand& rk) { +void MacroAssembler::Ret(Condition cond, Register rj, const Operand& rk) { Jump(ra, cond, rj, rk); } -void TurboAssembler::Drop(int count, Condition cond, Register reg, +void MacroAssembler::Drop(int count, Condition cond, Register reg, const Operand& op) { if (count <= 0) { return; @@ -2885,23 +2885,23 @@ void MacroAssembler::Swap(Register reg1, Register reg2, Register scratch) { } } -void TurboAssembler::Call(Label* target) { Branch(target, true); } +void MacroAssembler::Call(Label* target) { Branch(target, true); } -void TurboAssembler::Push(Smi smi) { +void MacroAssembler::Push(Smi smi) { UseScratchRegisterScope temps(this); Register scratch = temps.Acquire(); li(scratch, Operand(smi)); Push(scratch); } -void TurboAssembler::Push(Handle<HeapObject> handle) { +void MacroAssembler::Push(Handle<HeapObject> handle) { UseScratchRegisterScope temps(this); Register scratch = temps.Acquire(); li(scratch, Operand(handle)); Push(scratch); } -void TurboAssembler::PushArray(Register array, Register size, Register scratch, +void MacroAssembler::PushArray(Register array, Register size, Register scratch, Register scratch2, PushArrayOrder order) { DCHECK(!AreAliased(array, size, scratch, scratch2)); Label loop, entry; @@ -2961,7 +2961,7 @@ void MacroAssembler::PopStackHandler() { St_d(a1, MemOperand(scratch, 0)); } -void TurboAssembler::FPUCanonicalizeNaN(const DoubleRegister dst, +void MacroAssembler::FPUCanonicalizeNaN(const DoubleRegister dst, const DoubleRegister src) { fsub_d(dst, src, kDoubleRegZero); } @@ -2977,10 +2977,10 @@ void MacroAssembler::LoadStackLimit(Register destination, StackLimitKind kind) { kind == StackLimitKind::kRealStackLimit ? ExternalReference::address_of_real_jslimit(isolate) : ExternalReference::address_of_jslimit(isolate); - DCHECK(TurboAssembler::IsAddressableThroughRootRegister(isolate, limit)); + DCHECK(MacroAssembler::IsAddressableThroughRootRegister(isolate, limit)); intptr_t offset = - TurboAssembler::RootRegisterOffsetForExternalReference(isolate, limit); + MacroAssembler::RootRegisterOffsetForExternalReference(isolate, limit); CHECK(is_int32(offset)); Ld_d(destination, MemOperand(kRootRegister, static_cast<int32_t>(offset))); } @@ -3227,7 +3227,7 @@ void MacroAssembler::GetInstanceTypeRange(Register map, Register type_reg, // ----------------------------------------------------------------------------- // Runtime calls.
-void TurboAssembler::AddOverflow_d(Register dst, Register left, +void MacroAssembler::AddOverflow_d(Register dst, Register left, const Operand& right, Register overflow) { ASM_CODE_COMMENT(this); BlockTrampolinePoolScope block_trampoline_pool(this); @@ -3260,7 +3260,7 @@ void TurboAssembler::AddOverflow_d(Register dst, Register left, } } -void TurboAssembler::SubOverflow_d(Register dst, Register left, +void MacroAssembler::SubOverflow_d(Register dst, Register left, const Operand& right, Register overflow) { ASM_CODE_COMMENT(this); BlockTrampolinePoolScope block_trampoline_pool(this); @@ -3293,7 +3293,7 @@ void TurboAssembler::SubOverflow_d(Register dst, Register left, } } -void TurboAssembler::MulOverflow_w(Register dst, Register left, +void MacroAssembler::MulOverflow_w(Register dst, Register left, const Operand& right, Register overflow) { ASM_CODE_COMMENT(this); BlockTrampolinePoolScope block_trampoline_pool(this); @@ -3325,7 +3325,7 @@ void TurboAssembler::MulOverflow_w(Register dst, Register left, xor_(overflow, overflow, scratch2); } -void TurboAssembler::MulOverflow_d(Register dst, Register left, +void MacroAssembler::MulOverflow_d(Register dst, Register left, const Operand& right, Register overflow) { ASM_CODE_COMMENT(this); BlockTrampolinePoolScope block_trampoline_pool(this); @@ -3441,10 +3441,10 @@ void MacroAssembler::EmitDecrementCounter(StatsCounter* counter, int value, // ----------------------------------------------------------------------------- // Debugging. -void TurboAssembler::Trap() { stop(); } -void TurboAssembler::DebugBreak() { stop(); } +void MacroAssembler::Trap() { stop(); } +void MacroAssembler::DebugBreak() { stop(); } -void TurboAssembler::Check(Condition cc, AbortReason reason, Register rj, +void MacroAssembler::Check(Condition cc, AbortReason reason, Register rj, Operand rk) { Label L; Branch(&L, cc, rj, rk); @@ -3453,7 +3453,7 @@ void TurboAssembler::Check(Condition cc, AbortReason reason, Register rj, bind(&L); } -void TurboAssembler::Abort(AbortReason reason) { +void MacroAssembler::Abort(AbortReason reason) { Label abort_start; bind(&abort_start); if (v8_flags.code_comments) { @@ -3511,7 +3511,7 @@ void TurboAssembler::Abort(AbortReason reason) { } } -void TurboAssembler::LoadMap(Register destination, Register object) { +void MacroAssembler::LoadMap(Register destination, Register object) { Ld_d(destination, FieldMemOperand(object, HeapObject::kMapOffset)); } @@ -3522,16 +3522,16 @@ void MacroAssembler::LoadNativeContextSlot(Register dst, int index) { Ld_d(dst, MemOperand(dst, Context::SlotOffset(index))); } -void TurboAssembler::StubPrologue(StackFrame::Type type) { +void MacroAssembler::StubPrologue(StackFrame::Type type) { UseScratchRegisterScope temps(this); Register scratch = temps.Acquire(); li(scratch, Operand(StackFrame::TypeToMarker(type))); PushCommonFrame(scratch); } -void TurboAssembler::Prologue() { PushStandardFrame(a1); } +void MacroAssembler::Prologue() { PushStandardFrame(a1); } -void TurboAssembler::EnterFrame(StackFrame::Type type) { +void MacroAssembler::EnterFrame(StackFrame::Type type) { ASM_CODE_COMMENT(this); BlockTrampolinePoolScope block_trampoline_pool(this); Push(ra, fp); @@ -3546,7 +3546,7 @@ void TurboAssembler::EnterFrame(StackFrame::Type type) { #endif // V8_ENABLE_WEBASSEMBLY } -void TurboAssembler::LeaveFrame(StackFrame::Type type) { +void MacroAssembler::LeaveFrame(StackFrame::Type type) { ASM_CODE_COMMENT(this); addi_d(sp, fp, 2 * kPointerSize); Ld_d(ra, MemOperand(fp, 1 * kPointerSize)); @@ -3662,7 +3662,7 @@ void 
MacroAssembler::LeaveExitFrame(Register argument_count, bool do_return, } } -int TurboAssembler::ActivationFrameAlignment() { +int MacroAssembler::ActivationFrameAlignment() { #if V8_HOST_ARCH_LOONG64 // Running on the real platform. Use the alignment as mandated by the local // environment. @@ -3678,7 +3678,7 @@ int TurboAssembler::ActivationFrameAlignment() { #endif // V8_HOST_ARCH_LOONG64 } -void TurboAssembler::SmiUntag(Register dst, const MemOperand& src) { +void MacroAssembler::SmiUntag(Register dst, const MemOperand& src) { if (SmiValuesAre32Bits()) { Ld_w(dst, MemOperand(src.base(), SmiWordOffset(src.offset()))); } else { @@ -3688,7 +3688,7 @@ void TurboAssembler::SmiUntag(Register dst, const MemOperand& src) { } } -void TurboAssembler::JumpIfSmi(Register value, Label* smi_label) { +void MacroAssembler::JumpIfSmi(Register value, Label* smi_label) { DCHECK_EQ(0, kSmiTag); UseScratchRegisterScope temps(this); Register scratch = temps.Acquire(); @@ -3706,12 +3706,12 @@ void MacroAssembler::JumpIfNotSmi(Register value, Label* not_smi_label) { #ifdef V8_ENABLE_DEBUG_CODE -void TurboAssembler::Assert(Condition cc, AbortReason reason, Register rs, +void MacroAssembler::Assert(Condition cc, AbortReason reason, Register rs, Operand rk) { if (v8_flags.debug_code) Check(cc, reason, rs, rk); } -void TurboAssembler::AssertNotSmi(Register object) { +void MacroAssembler::AssertNotSmi(Register object) { if (v8_flags.debug_code) { ASM_CODE_COMMENT(this); static_assert(kSmiTag == 0); @@ -3722,7 +3722,7 @@ void TurboAssembler::AssertNotSmi(Register object) { } } -void TurboAssembler::AssertSmi(Register object) { +void MacroAssembler::AssertSmi(Register object) { if (v8_flags.debug_code) { ASM_CODE_COMMENT(this); static_assert(kSmiTag == 0); @@ -3852,7 +3852,7 @@ void MacroAssembler::AssertUndefinedOrAllocationSite(Register object, #endif // V8_ENABLE_DEBUG_CODE -void TurboAssembler::Float32Max(FPURegister dst, FPURegister src1, +void MacroAssembler::Float32Max(FPURegister dst, FPURegister src1, FPURegister src2, Label* out_of_line) { ASM_CODE_COMMENT(this); if (src1 == src2) { @@ -3867,12 +3867,12 @@ void TurboAssembler::Float32Max(FPURegister dst, FPURegister src1, fmax_s(dst, src1, src2); } -void TurboAssembler::Float32MaxOutOfLine(FPURegister dst, FPURegister src1, +void MacroAssembler::Float32MaxOutOfLine(FPURegister dst, FPURegister src1, FPURegister src2) { fadd_s(dst, src1, src2); } -void TurboAssembler::Float32Min(FPURegister dst, FPURegister src1, +void MacroAssembler::Float32Min(FPURegister dst, FPURegister src1, FPURegister src2, Label* out_of_line) { ASM_CODE_COMMENT(this); if (src1 == src2) { @@ -3887,12 +3887,12 @@ void TurboAssembler::Float32Min(FPURegister dst, FPURegister src1, fmin_s(dst, src1, src2); } -void TurboAssembler::Float32MinOutOfLine(FPURegister dst, FPURegister src1, +void MacroAssembler::Float32MinOutOfLine(FPURegister dst, FPURegister src1, FPURegister src2) { fadd_s(dst, src1, src2); } -void TurboAssembler::Float64Max(FPURegister dst, FPURegister src1, +void MacroAssembler::Float64Max(FPURegister dst, FPURegister src1, FPURegister src2, Label* out_of_line) { ASM_CODE_COMMENT(this); if (src1 == src2) { @@ -3907,12 +3907,12 @@ void TurboAssembler::Float64Max(FPURegister dst, FPURegister src1, fmax_d(dst, src1, src2); } -void TurboAssembler::Float64MaxOutOfLine(FPURegister dst, FPURegister src1, +void MacroAssembler::Float64MaxOutOfLine(FPURegister dst, FPURegister src1, FPURegister src2) { fadd_d(dst, src1, src2); } -void TurboAssembler::Float64Min(FPURegister dst, 
FPURegister src1, +void MacroAssembler::Float64Min(FPURegister dst, FPURegister src1, FPURegister src2, Label* out_of_line) { ASM_CODE_COMMENT(this); if (src1 == src2) { @@ -3927,7 +3927,7 @@ void TurboAssembler::Float64Min(FPURegister dst, FPURegister src1, fmin_d(dst, src1, src2); } -void TurboAssembler::Float64MinOutOfLine(FPURegister dst, FPURegister src1, +void MacroAssembler::Float64MinOutOfLine(FPURegister dst, FPURegister src1, FPURegister src2) { fadd_d(dst, src1, src2); } @@ -3935,7 +3935,7 @@ void TurboAssembler::Float64MinOutOfLine(FPURegister dst, FPURegister src1, static const int kRegisterPassedArguments = 8; static const int kFPRegisterPassedArguments = 8; -int TurboAssembler::CalculateStackPassedWords(int num_reg_arguments, +int MacroAssembler::CalculateStackPassedWords(int num_reg_arguments, int num_double_arguments) { int stack_passed_words = 0; @@ -3955,7 +3955,7 @@ int TurboAssembler::CalculateStackPassedWords(int num_reg_arguments, return stack_passed_words; } -void TurboAssembler::PrepareCallCFunction(int num_reg_arguments, +void MacroAssembler::PrepareCallCFunction(int num_reg_arguments, int num_double_arguments, Register scratch) { ASM_CODE_COMMENT(this); @@ -3978,12 +3978,12 @@ void TurboAssembler::PrepareCallCFunction(int num_reg_arguments, } } -void TurboAssembler::PrepareCallCFunction(int num_reg_arguments, +void MacroAssembler::PrepareCallCFunction(int num_reg_arguments, Register scratch) { PrepareCallCFunction(num_reg_arguments, 0, scratch); } -void TurboAssembler::CallCFunction(ExternalReference function, +void MacroAssembler::CallCFunction(ExternalReference function, int num_reg_arguments, int num_double_arguments) { ASM_CODE_COMMENT(this); @@ -3992,22 +3992,22 @@ void TurboAssembler::CallCFunction(ExternalReference function, CallCFunctionHelper(t7, num_reg_arguments, num_double_arguments); } -void TurboAssembler::CallCFunction(Register function, int num_reg_arguments, +void MacroAssembler::CallCFunction(Register function, int num_reg_arguments, int num_double_arguments) { ASM_CODE_COMMENT(this); CallCFunctionHelper(function, num_reg_arguments, num_double_arguments); } -void TurboAssembler::CallCFunction(ExternalReference function, +void MacroAssembler::CallCFunction(ExternalReference function, int num_arguments) { CallCFunction(function, num_arguments, 0); } -void TurboAssembler::CallCFunction(Register function, int num_arguments) { +void MacroAssembler::CallCFunction(Register function, int num_arguments) { CallCFunction(function, num_arguments, 0); } -void TurboAssembler::CallCFunctionHelper(Register function, +void MacroAssembler::CallCFunctionHelper(Register function, int num_reg_arguments, int num_double_arguments) { DCHECK_LE(num_reg_arguments + num_double_arguments, kMaxCParameters); @@ -4096,7 +4096,7 @@ void TurboAssembler::CallCFunctionHelper(Register function, #undef BRANCH_ARGS_CHECK -void TurboAssembler::CheckPageFlag(const Register& object, int mask, +void MacroAssembler::CheckPageFlag(const Register& object, int mask, Condition cc, Label* condition_met) { ASM_CODE_COMMENT(this); UseScratchRegisterScope temps(this); @@ -4123,12 +4123,12 @@ Register GetRegisterThatIsNotOneOf(Register reg1, Register reg2, Register reg3, UNREACHABLE(); } -void TurboAssembler::ComputeCodeStartAddress(Register dst) { +void MacroAssembler::ComputeCodeStartAddress(Register dst) { // TODO(LOONG_dev): range check, add Pcadd macro function? 
pcaddi(dst, -pc_offset() >> 2); } -void TurboAssembler::CallForDeoptimization(Builtin target, int, Label* exit, +void MacroAssembler::CallForDeoptimization(Builtin target, int, Label* exit, DeoptimizeKind kind, Label* ret, Label*) { ASM_CODE_COMMENT(this); @@ -4141,14 +4141,14 @@ void TurboAssembler::CallForDeoptimization(Builtin target, int, Label* exit, : Deoptimizer::kEagerDeoptExitSize); } -void TurboAssembler::LoadCodeEntry(Register destination, +void MacroAssembler::LoadCodeEntry(Register destination, Register code_data_container_object) { ASM_CODE_COMMENT(this); Ld_d(destination, FieldMemOperand(code_data_container_object, Code::kCodeEntryPointOffset)); } -void TurboAssembler::LoadCodeInstructionStreamNonBuiltin( +void MacroAssembler::LoadCodeInstructionStreamNonBuiltin( Register destination, Register code_data_container_object) { ASM_CODE_COMMENT(this); // Compute the InstructionStream object pointer from the code entry point. @@ -4158,13 +4158,13 @@ void TurboAssembler::LoadCodeInstructionStreamNonBuiltin( Operand(InstructionStream::kHeaderSize - kHeapObjectTag)); } -void TurboAssembler::CallCodeObject(Register code_data_container_object) { +void MacroAssembler::CallCodeObject(Register code_data_container_object) { ASM_CODE_COMMENT(this); LoadCodeEntry(code_data_container_object, code_data_container_object); Call(code_data_container_object); } -void TurboAssembler::JumpCodeObject(Register code_data_container_object, +void MacroAssembler::JumpCodeObject(Register code_data_container_object, JumpMode jump_mode) { ASM_CODE_COMMENT(this); DCHECK_EQ(JumpMode::kJump, jump_mode); diff --git a/src/codegen/loong64/macro-assembler-loong64.h b/src/codegen/loong64/macro-assembler-loong64.h index e4a7d9c1fa..2fcc1af3fc 100644 --- a/src/codegen/loong64/macro-assembler-loong64.h +++ b/src/codegen/loong64/macro-assembler-loong64.h @@ -59,9 +59,9 @@ inline MemOperand FieldMemOperand(Register object, int offset) { return MemOperand(object, offset - kHeapObjectTag); } -class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase { +class V8_EXPORT_PRIVATE MacroAssembler : public MacroAssemblerBase { public: - using TurboAssemblerBase::TurboAssemblerBase; + using MacroAssemblerBase::MacroAssemblerBase; // Activation support. void EnterFrame(StackFrame::Type type); @@ -773,46 +773,6 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase { // Define an exception handler and bind a label. void BindExceptionHandler(Label* label) { bind(label); } - protected: - inline Register GetRkAsRegisterHelper(const Operand& rk, Register scratch); - inline int32_t GetOffset(Label* L, OffsetSize bits); - - private: - bool has_double_zero_reg_set_ = false; - - // Performs a truncating conversion of a floating point number as used by - // the JS bitwise operations. See ECMA-262 9.5: ToInt32. Goes to 'done' if it - // succeeds, otherwise falls through if result is saturated. On return - // 'result' either holds answer, or is clobbered on fall through. 
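// A sketch of the contract described in the comment above (hypothetical
// caller; the saturating fall-through path would normally call the
// DoubleToI stub before rejoining):
//   Label done;
//   masm->TryInlineTruncateDoubleToI(result, input, &done);
//   // fall-through: inline conversion saturated, emit the slow path here
//   masm->bind(&done);  // result now holds ToInt32(input)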
- void TryInlineTruncateDoubleToI(Register result, DoubleRegister input, - Label* done); - - bool BranchShortOrFallback(Label* L, Condition cond, Register rj, - const Operand& rk, bool need_link); - - // f32 or f64 - void CompareF(FPURegister cmp1, FPURegister cmp2, FPUCondition cc, - CFRegister cd, bool f32 = true); - - void CompareIsNanF(FPURegister cmp1, FPURegister cmp2, CFRegister cd, - bool f32 = true); - - void CallCFunctionHelper(Register function, int num_reg_arguments, - int num_double_arguments); - - void RoundDouble(FPURegister dst, FPURegister src, FPURoundingMode mode); - - void RoundFloat(FPURegister dst, FPURegister src, FPURoundingMode mode); - - // Push a fixed frame, consisting of ra, fp. - void PushCommonFrame(Register marker_reg = no_reg); -}; - -// MacroAssembler implements a collection of frequently used macros. -class V8_EXPORT_PRIVATE MacroAssembler : public TurboAssembler { - public: - using TurboAssembler::TurboAssembler; - // It assumes that the arguments are located below the stack pointer. // argc is the number of arguments not including the receiver. // TODO(LOONG_dev): LOONG64: Remove this function once we stick with the @@ -1079,17 +1039,50 @@ class V8_EXPORT_PRIVATE MacroAssembler : public TurboAssembler { DecodeField(reg, reg); } + protected: + inline Register GetRkAsRegisterHelper(const Operand& rk, Register scratch); + inline int32_t GetOffset(Label* L, OffsetSize bits); + private: + bool has_double_zero_reg_set_ = false; + // Helper functions for generating invokes. void InvokePrologue(Register expected_parameter_count, Register actual_parameter_count, Label* done, InvokeType type); + // Performs a truncating conversion of a floating point number as used by + // the JS bitwise operations. See ECMA-262 9.5: ToInt32. Goes to 'done' if it + // succeeds, otherwise falls through if result is saturated. On return + // 'result' either holds answer, or is clobbered on fall through. + void TryInlineTruncateDoubleToI(Register result, DoubleRegister input, + Label* done); + + bool BranchShortOrFallback(Label* L, Condition cond, Register rj, + const Operand& rk, bool need_link); + + // f32 or f64 + void CompareF(FPURegister cmp1, FPURegister cmp2, FPUCondition cc, + CFRegister cd, bool f32 = true); + + void CompareIsNanF(FPURegister cmp1, FPURegister cmp2, CFRegister cd, + bool f32 = true); + + void CallCFunctionHelper(Register function, int num_reg_arguments, + int num_double_arguments); + + void RoundDouble(FPURegister dst, FPURegister src, FPURoundingMode mode); + + void RoundFloat(FPURegister dst, FPURegister src, FPURoundingMode mode); + + // Push a fixed frame, consisting of ra, fp. + void PushCommonFrame(Register marker_reg = no_reg); + DISALLOW_IMPLICIT_CONSTRUCTORS(MacroAssembler); }; template <typename Func> -void TurboAssembler::GenerateSwitchTable(Register index, size_t case_count, +void MacroAssembler::GenerateSwitchTable(Register index, size_t case_count, Func GetLabelFunction) { UseScratchRegisterScope scope(this); Register scratch = scope.Acquire(); diff --git a/src/codegen/turbo-assembler.cc b/src/codegen/macro-assembler-base.cc similarity index 87% rename from src/codegen/turbo-assembler.cc rename to src/codegen/macro-assembler-base.cc index 3c0033c7da..fce27a1e34 100644 --- a/src/codegen/turbo-assembler.cc +++ b/src/codegen/macro-assembler-base.cc @@ -2,7 +2,7 @@ // Use of this source code is governed by a BSD-style license that can be // found in the LICENSE file.
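// A usage sketch for GenerateSwitchTable above (hypothetical code-generation
// site; `labels` is assumed to be an array holding one Label* per case, and
// the caller is expected to bounds-check `index_reg` first):
//   masm->GenerateSwitchTable(index_reg, case_count,
//                             [&labels](size_t i) { return labels[i]; });
// Each emitted table slot holds the address of the Label returned for its
// index.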
-#include "src/codegen/turbo-assembler.h" +#include "src/codegen/macro-assembler-base.h" #include "src/builtins/builtins.h" #include "src/builtins/constants-table-builder.h" @@ -15,7 +15,7 @@ namespace v8 { namespace internal { -TurboAssemblerBase::TurboAssemblerBase(Isolate* isolate, +MacroAssemblerBase::MacroAssemblerBase(Isolate* isolate, const AssemblerOptions& options, CodeObjectRequired create_code_object, std::unique_ptr buffer) @@ -26,7 +26,7 @@ TurboAssemblerBase::TurboAssemblerBase(Isolate* isolate, } } -Address TurboAssemblerBase::BuiltinEntry(Builtin builtin) { +Address MacroAssemblerBase::BuiltinEntry(Builtin builtin) { DCHECK(Builtins::IsBuiltinId(builtin)); if (isolate_ != nullptr) { Address entry = isolate_->builtin_entry_table()[Builtins::ToInt(builtin)]; @@ -38,7 +38,7 @@ Address TurboAssemblerBase::BuiltinEntry(Builtin builtin) { return d.InstructionStartOfBuiltin(builtin); } -void TurboAssemblerBase::IndirectLoadConstant(Register destination, +void MacroAssemblerBase::IndirectLoadConstant(Register destination, Handle object) { CHECK(root_array_available_); @@ -71,7 +71,7 @@ void TurboAssemblerBase::IndirectLoadConstant(Register destination, } } -void TurboAssemblerBase::IndirectLoadExternalReference( +void MacroAssemblerBase::IndirectLoadExternalReference( Register destination, ExternalReference reference) { CHECK(root_array_available_); @@ -90,24 +90,24 @@ void TurboAssemblerBase::IndirectLoadExternalReference( } // static -int32_t TurboAssemblerBase::RootRegisterOffsetForRootIndex( +int32_t MacroAssemblerBase::RootRegisterOffsetForRootIndex( RootIndex root_index) { return IsolateData::root_slot_offset(root_index); } // static -int32_t TurboAssemblerBase::RootRegisterOffsetForBuiltin(Builtin builtin) { +int32_t MacroAssemblerBase::RootRegisterOffsetForBuiltin(Builtin builtin) { return IsolateData::BuiltinSlotOffset(builtin); } // static -intptr_t TurboAssemblerBase::RootRegisterOffsetForExternalReference( +intptr_t MacroAssemblerBase::RootRegisterOffsetForExternalReference( Isolate* isolate, const ExternalReference& reference) { return static_cast(reference.address() - isolate->isolate_root()); } // static -int32_t TurboAssemblerBase::RootRegisterOffsetForExternalReferenceTableEntry( +int32_t MacroAssemblerBase::RootRegisterOffsetForExternalReferenceTableEntry( Isolate* isolate, const ExternalReference& reference) { // Encode as an index into the external reference table stored on the // isolate. @@ -120,13 +120,13 @@ int32_t TurboAssemblerBase::RootRegisterOffsetForExternalReferenceTableEntry( } // static -bool TurboAssemblerBase::IsAddressableThroughRootRegister( +bool MacroAssemblerBase::IsAddressableThroughRootRegister( Isolate* isolate, const ExternalReference& reference) { Address address = reference.address(); return isolate->root_register_addressable_region().contains(address); } -Tagged_t TurboAssemblerBase::ReadOnlyRootPtr(RootIndex index) { +Tagged_t MacroAssemblerBase::ReadOnlyRootPtr(RootIndex index) { DCHECK(RootsTable::IsReadOnly(index)); CHECK(V8_STATIC_ROOTS_BOOL); CHECK(isolate_->root(index).IsHeapObject()); diff --git a/src/codegen/turbo-assembler.h b/src/codegen/macro-assembler-base.h similarity index 81% rename from src/codegen/turbo-assembler.h rename to src/codegen/macro-assembler-base.h index f9d55bba96..976b154d37 100644 --- a/src/codegen/turbo-assembler.h +++ b/src/codegen/macro-assembler-base.h @@ -2,8 +2,8 @@ // Use of this source code is governed by a BSD-style license that can be // found in the LICENSE file. 
-#ifndef V8_CODEGEN_TURBO_ASSEMBLER_H_ -#define V8_CODEGEN_TURBO_ASSEMBLER_H_ +#ifndef V8_CODEGEN_MACRO_ASSEMBLER_BASE_H_ +#define V8_CODEGEN_MACRO_ASSEMBLER_BASE_H_ #include <memory> @@ -15,30 +15,24 @@ namespace v8 { namespace internal { -// Common base class for platform-specific TurboAssemblers containing +// Common base class for platform-specific MacroAssemblers containing // platform-independent bits. -// You will encounter two subclasses, TurboAssembler (derives from -// TurboAssemblerBase), and MacroAssembler (derives from TurboAssembler). The -// main difference is that MacroAssembler is allowed to access the isolate, and -// TurboAssembler accesses the isolate in a very limited way. TurboAssembler -// contains all the functionality that is used by Turbofan, and does not expect -// to be running on the main thread. -class V8_EXPORT_PRIVATE TurboAssemblerBase : public Assembler { +// TODO(victorgomes): We should use LocalIsolate instead of Isolate in the +// methods of this class. +class V8_EXPORT_PRIVATE MacroAssemblerBase : public Assembler { public: // Constructors are declared public to inherit them in derived classes // with `using` directive. - TurboAssemblerBase(Isolate* isolate, CodeObjectRequired create_code_object, + MacroAssemblerBase(Isolate* isolate, CodeObjectRequired create_code_object, std::unique_ptr<AssemblerBuffer> buffer = {}) - : TurboAssemblerBase(isolate, AssemblerOptions::Default(isolate), + : MacroAssemblerBase(isolate, AssemblerOptions::Default(isolate), create_code_object, std::move(buffer)) {} - TurboAssemblerBase(Isolate* isolate, const AssemblerOptions& options, + MacroAssemblerBase(Isolate* isolate, const AssemblerOptions& options, CodeObjectRequired create_code_object, std::unique_ptr<AssemblerBuffer> buffer = {}); - Isolate* isolate() const { - return isolate_; - } + Isolate* isolate() const { return isolate_; } Handle<HeapObject> CodeObject() const { DCHECK(!code_object_.is_null()); @@ -135,25 +129,25 @@ class V8_EXPORT_PRIVATE TurboAssemblerBase : public Assembler { int comment_depth_ = 0; - DISALLOW_IMPLICIT_CONSTRUCTORS(TurboAssemblerBase); + DISALLOW_IMPLICIT_CONSTRUCTORS(MacroAssemblerBase); }; // Avoids emitting calls to the {Builtin::kAbort} builtin when emitting // debug code during the lifetime of this scope object.
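// A minimal usage sketch for the scope declared just below (hypothetical
// call site; the scope only flips should_abort_hard() for its lifetime):
//   {
//     HardAbortScope hard_abort(masm);  // failing checks trap instead of
//     masm->AssertSmi(value_reg);       // calling the Abort builtin
//   }                                   // previous behavior restored here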
class V8_NODISCARD HardAbortScope { public: - explicit HardAbortScope(TurboAssemblerBase* assembler) + explicit HardAbortScope(MacroAssemblerBase* assembler) : assembler_(assembler), old_value_(assembler->should_abort_hard()) { assembler_->set_abort_hard(true); } ~HardAbortScope() { assembler_->set_abort_hard(old_value_); } private: - TurboAssemblerBase* assembler_; + MacroAssemblerBase* assembler_; bool old_value_; }; } // namespace internal } // namespace v8 -#endif // V8_CODEGEN_TURBO_ASSEMBLER_H_ +#endif // V8_CODEGEN_MACRO_ASSEMBLER_BASE_H_ diff --git a/src/codegen/macro-assembler.h b/src/codegen/macro-assembler.h index 61b26a320f..3e5d83806d 100644 --- a/src/codegen/macro-assembler.h +++ b/src/codegen/macro-assembler.h @@ -5,7 +5,7 @@ #ifndef V8_CODEGEN_MACRO_ASSEMBLER_H_ #define V8_CODEGEN_MACRO_ASSEMBLER_H_ -#include "src/codegen/turbo-assembler.h" +#include "src/codegen/macro-assembler-base.h" #include "src/execution/frames.h" #include "src/heap/heap.h" @@ -82,25 +82,25 @@ static constexpr int kMaxCParameters = 256; class V8_NODISCARD FrameScope { public: - explicit FrameScope(TurboAssembler* tasm, StackFrame::Type type) + explicit FrameScope(MacroAssembler* masm, StackFrame::Type type) : #ifdef V8_CODE_COMMENTS - comment_(tasm, frame_name(type)), + comment_(masm, frame_name(type)), #endif - tasm_(tasm), + masm_(masm), type_(type), - old_has_frame_(tasm->has_frame()) { - tasm->set_has_frame(true); + old_has_frame_(masm->has_frame()) { + masm->set_has_frame(true); if (type != StackFrame::MANUAL && type_ != StackFrame::NO_FRAME_TYPE) { - tasm->EnterFrame(type); + masm->EnterFrame(type); } } ~FrameScope() { if (type_ != StackFrame::MANUAL && type_ != StackFrame::NO_FRAME_TYPE) { - tasm_->LeaveFrame(type_); + masm_->LeaveFrame(type_); } - tasm_->set_has_frame(old_has_frame_); + masm_->set_has_frame(old_has_frame_); } private: @@ -125,7 +125,7 @@ class V8_NODISCARD FrameScope { Assembler::CodeComment comment_; #endif // V8_CODE_COMMENTS - TurboAssembler* tasm_; + MacroAssembler* masm_; StackFrame::Type const type_; bool const old_has_frame_; }; @@ -198,7 +198,7 @@ class V8_NODISCARD AllowExternalCallThatCantCauseGC : public FrameScope { // scope object. class V8_NODISCARD NoRootArrayScope { public: - explicit NoRootArrayScope(TurboAssembler* masm) + explicit NoRootArrayScope(MacroAssembler* masm) : masm_(masm), old_value_(masm->root_array_available()) { masm->set_root_array_available(false); } @@ -206,7 +206,7 @@ class V8_NODISCARD NoRootArrayScope { ~NoRootArrayScope() { masm_->set_root_array_available(old_value_); } private: - TurboAssembler* masm_; + MacroAssembler* masm_; bool old_value_; }; diff --git a/src/codegen/mips64/assembler-mips64.cc b/src/codegen/mips64/assembler-mips64.cc index e95a07dc84..2e91386ad6 100644 --- a/src/codegen/mips64/assembler-mips64.cc +++ b/src/codegen/mips64/assembler-mips64.cc @@ -819,7 +819,7 @@ void Assembler::target_at_put(int pos, int target_pos, bool is_internal) { Instr instr_b = REGIMM | BGEZAL; // Branch and link. instr_b = SetBranchOffset(pos, target_pos, instr_b); // Correct ra register to point to one instruction after jalr from - // TurboAssembler::BranchAndLinkLong. + // MacroAssembler::BranchAndLinkLong. 
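// Typical use of the FrameScope above after the rename (sketch; assumes a
// code-generation function with a MacroAssembler* masm in scope):
//   {
//     FrameScope scope(masm, StackFrame::INTERNAL);
//     // calls that require a frame are safe in here
//   }  // the matching LeaveFrame is emitted when the scope dies
// StackFrame::MANUAL and NO_FRAME_TYPE skip EnterFrame/LeaveFrame but still
// maintain has_frame(), exactly as the constructor and destructor show.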
Instr instr_a = DADDIU | ra.code() << kRsShift | ra.code() << kRtShift | kOptimizedBranchAndLinkLongReturnOffset; diff --git a/src/codegen/mips64/assembler-mips64.h b/src/codegen/mips64/assembler-mips64.h index 02b77ee767..3e4c5239f0 100644 --- a/src/codegen/mips64/assembler-mips64.h +++ b/src/codegen/mips64/assembler-mips64.h @@ -294,7 +294,7 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase { // Adjust ra register in branch delay slot of bal instruction so to skip // instructions not needed after optimization of PIC in - // TurboAssembler::BranchAndLink method. + // MacroAssembler::BranchAndLink method. static constexpr int kOptimizedBranchAndLinkLongReturnOffset = 4 * kInstrSize; diff --git a/src/codegen/mips64/macro-assembler-mips64.cc b/src/codegen/mips64/macro-assembler-mips64.cc index 017fa215df..df96b7b92a 100644 --- a/src/codegen/mips64/macro-assembler-mips64.cc +++ b/src/codegen/mips64/macro-assembler-mips64.cc @@ -48,7 +48,7 @@ static inline bool IsZero(const Operand& rt) { } } -int TurboAssembler::RequiredStackSizeForCallerSaved(SaveFPRegsMode fp_mode, +int MacroAssembler::RequiredStackSizeForCallerSaved(SaveFPRegsMode fp_mode, Register exclusion1, Register exclusion2, Register exclusion3) const { @@ -64,7 +64,7 @@ int TurboAssembler::RequiredStackSizeForCallerSaved(SaveFPRegsMode fp_mode, return bytes; } -int TurboAssembler::PushCallerSaved(SaveFPRegsMode fp_mode, Register exclusion1, +int MacroAssembler::PushCallerSaved(SaveFPRegsMode fp_mode, Register exclusion1, Register exclusion2, Register exclusion3) { ASM_CODE_COMMENT(this); int bytes = 0; @@ -81,7 +81,7 @@ int TurboAssembler::PushCallerSaved(SaveFPRegsMode fp_mode, Register exclusion1, return bytes; } -int TurboAssembler::PopCallerSaved(SaveFPRegsMode fp_mode, Register exclusion1, +int MacroAssembler::PopCallerSaved(SaveFPRegsMode fp_mode, Register exclusion1, Register exclusion2, Register exclusion3) { ASM_CODE_COMMENT(this); int bytes = 0; @@ -98,18 +98,18 @@ int TurboAssembler::PopCallerSaved(SaveFPRegsMode fp_mode, Register exclusion1, return bytes; } -void TurboAssembler::LoadRoot(Register destination, RootIndex index) { +void MacroAssembler::LoadRoot(Register destination, RootIndex index) { Ld(destination, MemOperand(s6, RootRegisterOffsetForRootIndex(index))); } -void TurboAssembler::LoadRoot(Register destination, RootIndex index, +void MacroAssembler::LoadRoot(Register destination, RootIndex index, Condition cond, Register src1, const Operand& src2) { Branch(2, NegateCondition(cond), src1, src2); Ld(destination, MemOperand(s6, RootRegisterOffsetForRootIndex(index))); } -void TurboAssembler::PushCommonFrame(Register marker_reg) { +void MacroAssembler::PushCommonFrame(Register marker_reg) { if (marker_reg.is_valid()) { Push(ra, fp, marker_reg); Daddu(fp, sp, Operand(kPointerSize)); @@ -119,7 +119,7 @@ void TurboAssembler::PushCommonFrame(Register marker_reg) { } } -void TurboAssembler::PushStandardFrame(Register function_reg) { +void MacroAssembler::PushStandardFrame(Register function_reg) { int offset = -StandardFrameConstants::kContextOffset; if (function_reg.is_valid()) { Push(ra, fp, cp, function_reg, kJavaScriptCallArgCountRegister); @@ -176,17 +176,17 @@ void MacroAssembler::RecordWriteField(Register object, int offset, } } -void TurboAssembler::MaybeSaveRegisters(RegList registers) { +void MacroAssembler::MaybeSaveRegisters(RegList registers) { if (registers.is_empty()) return; MultiPush(registers); } -void TurboAssembler::MaybeRestoreRegisters(RegList registers) { +void 
MacroAssembler::MaybeRestoreRegisters(RegList registers) { if (registers.is_empty()) return; MultiPop(registers); } -void TurboAssembler::CallEphemeronKeyBarrier(Register object, +void MacroAssembler::CallEphemeronKeyBarrier(Register object, Register slot_address, SaveFPRegsMode fp_mode) { ASM_CODE_COMMENT(this); @@ -210,7 +210,7 @@ void TurboAssembler::CallEphemeronKeyBarrier(Register object, MaybeRestoreRegisters(registers); } -void TurboAssembler::CallRecordWriteStubSaveRegisters(Register object, +void MacroAssembler::CallRecordWriteStubSaveRegisters(Register object, Register slot_address, SaveFPRegsMode fp_mode, StubCallMode mode) { @@ -233,7 +233,7 @@ void TurboAssembler::CallRecordWriteStubSaveRegisters(Register object, MaybeRestoreRegisters(registers); } -void TurboAssembler::CallRecordWriteStub(Register object, Register slot_address, +void MacroAssembler::CallRecordWriteStub(Register object, Register slot_address, SaveFPRegsMode fp_mode, StubCallMode mode) { // Use CallRecordWriteStubSaveRegisters if the object and slot registers @@ -320,7 +320,7 @@ void MacroAssembler::RecordWrite(Register object, Register address, // --------------------------------------------------------------------------- // Instruction macros. -void TurboAssembler::Addu(Register rd, Register rs, const Operand& rt) { +void MacroAssembler::Addu(Register rd, Register rs, const Operand& rt) { if (rt.is_reg()) { addu(rd, rs, rt.rm()); } else { @@ -337,7 +337,7 @@ void TurboAssembler::Addu(Register rd, Register rs, const Operand& rt) { } } -void TurboAssembler::Daddu(Register rd, Register rs, const Operand& rt) { +void MacroAssembler::Daddu(Register rd, Register rs, const Operand& rt) { if (rt.is_reg()) { daddu(rd, rs, rt.rm()); } else { @@ -354,7 +354,7 @@ void TurboAssembler::Daddu(Register rd, Register rs, const Operand& rt) { } } -void TurboAssembler::Subu(Register rd, Register rs, const Operand& rt) { +void MacroAssembler::Subu(Register rd, Register rs, const Operand& rt) { if (rt.is_reg()) { subu(rd, rs, rt.rm()); } else { @@ -380,7 +380,7 @@ void TurboAssembler::Subu(Register rd, Register rs, const Operand& rt) { } } -void TurboAssembler::Dsubu(Register rd, Register rs, const Operand& rt) { +void MacroAssembler::Dsubu(Register rd, Register rs, const Operand& rt) { if (rt.is_reg()) { dsubu(rd, rs, rt.rm()); } else if (is_int16(-rt.immediate()) && !MustUseReg(rt.rmode())) { @@ -408,7 +408,7 @@ void TurboAssembler::Dsubu(Register rd, Register rs, const Operand& rt) { } } -void TurboAssembler::Mul(Register rd, Register rs, const Operand& rt) { +void MacroAssembler::Mul(Register rd, Register rs, const Operand& rt) { if (rt.is_reg()) { mul(rd, rs, rt.rm()); } else { @@ -421,7 +421,7 @@ void TurboAssembler::Mul(Register rd, Register rs, const Operand& rt) { } } -void TurboAssembler::Mulh(Register rd, Register rs, const Operand& rt) { +void MacroAssembler::Mulh(Register rd, Register rs, const Operand& rt) { if (rt.is_reg()) { if (kArchVariant != kMips64r6) { mult(rs, rt.rm()); @@ -444,7 +444,7 @@ void TurboAssembler::Mulh(Register rd, Register rs, const Operand& rt) { } } -void TurboAssembler::Mulhu(Register rd, Register rs, const Operand& rt) { +void MacroAssembler::Mulhu(Register rd, Register rs, const Operand& rt) { if (rt.is_reg()) { if (kArchVariant != kMips64r6) { multu(rs, rt.rm()); @@ -467,7 +467,7 @@ void TurboAssembler::Mulhu(Register rd, Register rs, const Operand& rt) { } } -void TurboAssembler::Dmul(Register rd, Register rs, const Operand& rt) { +void MacroAssembler::Dmul(Register rd, Register rs, const 
Operand& rt) { if (rt.is_reg()) { if (kArchVariant == kMips64r6) { dmul(rd, rs, rt.rm()); @@ -490,7 +490,7 @@ void TurboAssembler::Dmul(Register rd, Register rs, const Operand& rt) { } } -void TurboAssembler::Dmulh(Register rd, Register rs, const Operand& rt) { +void MacroAssembler::Dmulh(Register rd, Register rs, const Operand& rt) { if (rt.is_reg()) { if (kArchVariant == kMips64r6) { dmuh(rd, rs, rt.rm()); @@ -513,7 +513,7 @@ void TurboAssembler::Dmulh(Register rd, Register rs, const Operand& rt) { } } -void TurboAssembler::Dmulhu(Register rd, Register rs, const Operand& rt) { +void MacroAssembler::Dmulhu(Register rd, Register rs, const Operand& rt) { if (rt.is_reg()) { if (kArchVariant == kMips64r6) { dmuhu(rd, rs, rt.rm()); @@ -536,7 +536,7 @@ void TurboAssembler::Dmulhu(Register rd, Register rs, const Operand& rt) { } } -void TurboAssembler::Mult(Register rs, const Operand& rt) { +void MacroAssembler::Mult(Register rs, const Operand& rt) { if (rt.is_reg()) { mult(rs, rt.rm()); } else { @@ -549,7 +549,7 @@ void TurboAssembler::Mult(Register rs, const Operand& rt) { } } -void TurboAssembler::Dmult(Register rs, const Operand& rt) { +void MacroAssembler::Dmult(Register rs, const Operand& rt) { if (rt.is_reg()) { dmult(rs, rt.rm()); } else { @@ -562,7 +562,7 @@ void TurboAssembler::Dmult(Register rs, const Operand& rt) { } } -void TurboAssembler::Multu(Register rs, const Operand& rt) { +void MacroAssembler::Multu(Register rs, const Operand& rt) { if (rt.is_reg()) { multu(rs, rt.rm()); } else { @@ -575,7 +575,7 @@ void TurboAssembler::Multu(Register rs, const Operand& rt) { } } -void TurboAssembler::Dmultu(Register rs, const Operand& rt) { +void MacroAssembler::Dmultu(Register rs, const Operand& rt) { if (rt.is_reg()) { dmultu(rs, rt.rm()); } else { @@ -588,7 +588,7 @@ void TurboAssembler::Dmultu(Register rs, const Operand& rt) { } } -void TurboAssembler::Div(Register rs, const Operand& rt) { +void MacroAssembler::Div(Register rs, const Operand& rt) { if (rt.is_reg()) { div(rs, rt.rm()); } else { @@ -601,7 +601,7 @@ void TurboAssembler::Div(Register rs, const Operand& rt) { } } -void TurboAssembler::Div(Register res, Register rs, const Operand& rt) { +void MacroAssembler::Div(Register res, Register rs, const Operand& rt) { if (rt.is_reg()) { if (kArchVariant != kMips64r6) { div(rs, rt.rm()); @@ -624,7 +624,7 @@ void TurboAssembler::Div(Register res, Register rs, const Operand& rt) { } } -void TurboAssembler::Mod(Register rd, Register rs, const Operand& rt) { +void MacroAssembler::Mod(Register rd, Register rs, const Operand& rt) { if (rt.is_reg()) { if (kArchVariant != kMips64r6) { div(rs, rt.rm()); @@ -647,7 +647,7 @@ void TurboAssembler::Mod(Register rd, Register rs, const Operand& rt) { } } -void TurboAssembler::Modu(Register rd, Register rs, const Operand& rt) { +void MacroAssembler::Modu(Register rd, Register rs, const Operand& rt) { if (rt.is_reg()) { if (kArchVariant != kMips64r6) { divu(rs, rt.rm()); @@ -670,7 +670,7 @@ void TurboAssembler::Modu(Register rd, Register rs, const Operand& rt) { } } -void TurboAssembler::Ddiv(Register rs, const Operand& rt) { +void MacroAssembler::Ddiv(Register rs, const Operand& rt) { if (rt.is_reg()) { ddiv(rs, rt.rm()); } else { @@ -683,7 +683,7 @@ void TurboAssembler::Ddiv(Register rs, const Operand& rt) { } } -void TurboAssembler::Ddiv(Register rd, Register rs, const Operand& rt) { +void MacroAssembler::Ddiv(Register rd, Register rs, const Operand& rt) { if (kArchVariant != kMips64r6) { if (rt.is_reg()) { ddiv(rs, rt.rm()); @@ -711,7 +711,7 @@ void 
TurboAssembler::Ddiv(Register rd, Register rs, const Operand& rt) { } } -void TurboAssembler::Divu(Register rs, const Operand& rt) { +void MacroAssembler::Divu(Register rs, const Operand& rt) { if (rt.is_reg()) { divu(rs, rt.rm()); } else { @@ -724,7 +724,7 @@ void TurboAssembler::Divu(Register rs, const Operand& rt) { } } -void TurboAssembler::Divu(Register res, Register rs, const Operand& rt) { +void MacroAssembler::Divu(Register res, Register rs, const Operand& rt) { if (rt.is_reg()) { if (kArchVariant != kMips64r6) { divu(rs, rt.rm()); @@ -747,7 +747,7 @@ void TurboAssembler::Divu(Register res, Register rs, const Operand& rt) { } } -void TurboAssembler::Ddivu(Register rs, const Operand& rt) { +void MacroAssembler::Ddivu(Register rs, const Operand& rt) { if (rt.is_reg()) { ddivu(rs, rt.rm()); } else { @@ -760,7 +760,7 @@ void TurboAssembler::Ddivu(Register rs, const Operand& rt) { } } -void TurboAssembler::Ddivu(Register res, Register rs, const Operand& rt) { +void MacroAssembler::Ddivu(Register res, Register rs, const Operand& rt) { if (rt.is_reg()) { if (kArchVariant != kMips64r6) { ddivu(rs, rt.rm()); @@ -783,7 +783,7 @@ void TurboAssembler::Ddivu(Register res, Register rs, const Operand& rt) { } } -void TurboAssembler::Dmod(Register rd, Register rs, const Operand& rt) { +void MacroAssembler::Dmod(Register rd, Register rs, const Operand& rt) { if (kArchVariant != kMips64r6) { if (rt.is_reg()) { ddiv(rs, rt.rm()); @@ -811,7 +811,7 @@ void TurboAssembler::Dmod(Register rd, Register rs, const Operand& rt) { } } -void TurboAssembler::Dmodu(Register rd, Register rs, const Operand& rt) { +void MacroAssembler::Dmodu(Register rd, Register rs, const Operand& rt) { if (kArchVariant != kMips64r6) { if (rt.is_reg()) { ddivu(rs, rt.rm()); @@ -839,7 +839,7 @@ void TurboAssembler::Dmodu(Register rd, Register rs, const Operand& rt) { } } -void TurboAssembler::And(Register rd, Register rs, const Operand& rt) { +void MacroAssembler::And(Register rd, Register rs, const Operand& rt) { if (rt.is_reg()) { and_(rd, rs, rt.rm()); } else { @@ -856,7 +856,7 @@ void TurboAssembler::And(Register rd, Register rs, const Operand& rt) { } } -void TurboAssembler::Or(Register rd, Register rs, const Operand& rt) { +void MacroAssembler::Or(Register rd, Register rs, const Operand& rt) { if (rt.is_reg()) { or_(rd, rs, rt.rm()); } else { @@ -873,7 +873,7 @@ void TurboAssembler::Or(Register rd, Register rs, const Operand& rt) { } } -void TurboAssembler::Xor(Register rd, Register rs, const Operand& rt) { +void MacroAssembler::Xor(Register rd, Register rs, const Operand& rt) { if (rt.is_reg()) { xor_(rd, rs, rt.rm()); } else { @@ -890,7 +890,7 @@ void TurboAssembler::Xor(Register rd, Register rs, const Operand& rt) { } } -void TurboAssembler::Nor(Register rd, Register rs, const Operand& rt) { +void MacroAssembler::Nor(Register rd, Register rs, const Operand& rt) { if (rt.is_reg()) { nor(rd, rs, rt.rm()); } else { @@ -903,11 +903,11 @@ void TurboAssembler::Nor(Register rd, Register rs, const Operand& rt) { } } -void TurboAssembler::Neg(Register rs, const Operand& rt) { +void MacroAssembler::Neg(Register rs, const Operand& rt) { dsubu(rs, zero_reg, rt.rm()); } -void TurboAssembler::Slt(Register rd, Register rs, const Operand& rt) { +void MacroAssembler::Slt(Register rd, Register rs, const Operand& rt) { if (rt.is_reg()) { slt(rd, rs, rt.rm()); } else { @@ -925,7 +925,7 @@ void TurboAssembler::Slt(Register rd, Register rs, const Operand& rt) { } } -void TurboAssembler::Sltu(Register rd, Register rs, const Operand& rt) { +void 
MacroAssembler::Sltu(Register rd, Register rs, const Operand& rt) { if (rt.is_reg()) { sltu(rd, rs, rt.rm()); } else { @@ -949,7 +949,7 @@ void TurboAssembler::Sltu(Register rd, Register rs, const Operand& rt) { } } -void TurboAssembler::Sle(Register rd, Register rs, const Operand& rt) { +void MacroAssembler::Sle(Register rd, Register rs, const Operand& rt) { if (rt.is_reg()) { slt(rd, rt.rm(), rs); } else { @@ -964,7 +964,7 @@ void TurboAssembler::Sle(Register rd, Register rs, const Operand& rt) { xori(rd, rd, 1); } -void TurboAssembler::Sleu(Register rd, Register rs, const Operand& rt) { +void MacroAssembler::Sleu(Register rd, Register rs, const Operand& rt) { if (rt.is_reg()) { sltu(rd, rt.rm(), rs); } else { @@ -979,17 +979,17 @@ void TurboAssembler::Sleu(Register rd, Register rs, const Operand& rt) { xori(rd, rd, 1); } -void TurboAssembler::Sge(Register rd, Register rs, const Operand& rt) { +void MacroAssembler::Sge(Register rd, Register rs, const Operand& rt) { Slt(rd, rs, rt); xori(rd, rd, 1); } -void TurboAssembler::Sgeu(Register rd, Register rs, const Operand& rt) { +void MacroAssembler::Sgeu(Register rd, Register rs, const Operand& rt) { Sltu(rd, rs, rt); xori(rd, rd, 1); } -void TurboAssembler::Sgt(Register rd, Register rs, const Operand& rt) { +void MacroAssembler::Sgt(Register rd, Register rs, const Operand& rt) { if (rt.is_reg()) { slt(rd, rt.rm(), rs); } else { @@ -1003,7 +1003,7 @@ void TurboAssembler::Sgt(Register rd, Register rs, const Operand& rt) { } } -void TurboAssembler::Sgtu(Register rd, Register rs, const Operand& rt) { +void MacroAssembler::Sgtu(Register rd, Register rs, const Operand& rt) { if (rt.is_reg()) { sltu(rd, rt.rm(), rs); } else { @@ -1017,7 +1017,7 @@ void TurboAssembler::Sgtu(Register rd, Register rs, const Operand& rt) { } } -void TurboAssembler::Ror(Register rd, Register rs, const Operand& rt) { +void MacroAssembler::Ror(Register rd, Register rs, const Operand& rt) { if (rt.is_reg()) { rotrv(rd, rs, rt.rm()); } else { @@ -1029,7 +1029,7 @@ void TurboAssembler::Ror(Register rd, Register rs, const Operand& rt) { } } -void TurboAssembler::Dror(Register rd, Register rs, const Operand& rt) { +void MacroAssembler::Dror(Register rd, Register rs, const Operand& rt) { if (rt.is_reg()) { drotrv(rd, rs, rt.rm()); } else { @@ -1047,7 +1047,7 @@ void MacroAssembler::Pref(int32_t hint, const MemOperand& rs) { pref(hint, rs); } -void TurboAssembler::Lsa(Register rd, Register rt, Register rs, uint8_t sa, +void MacroAssembler::Lsa(Register rd, Register rt, Register rs, uint8_t sa, Register scratch) { DCHECK(sa >= 1 && sa <= 31); if (kArchVariant == kMips64r6 && sa <= 4) { @@ -1060,7 +1060,7 @@ void TurboAssembler::Lsa(Register rd, Register rt, Register rs, uint8_t sa, } } -void TurboAssembler::Dlsa(Register rd, Register rt, Register rs, uint8_t sa, +void MacroAssembler::Dlsa(Register rd, Register rt, Register rs, uint8_t sa, Register scratch) { DCHECK(sa >= 1 && sa <= 63); if (kArchVariant == kMips64r6 && sa <= 4) { @@ -1076,7 +1076,7 @@ void TurboAssembler::Dlsa(Register rd, Register rt, Register rs, uint8_t sa, } } -void TurboAssembler::Bovc(Register rs, Register rt, Label* L) { +void MacroAssembler::Bovc(Register rs, Register rt, Label* L) { if (is_trampoline_emitted()) { Label skip; bnvc(rs, rt, &skip); @@ -1087,7 +1087,7 @@ void TurboAssembler::Bovc(Register rs, Register rt, Label* L) { } } -void TurboAssembler::Bnvc(Register rs, Register rt, Label* L) { +void MacroAssembler::Bnvc(Register rs, Register rt, Label* L) { if (is_trampoline_emitted()) { Label skip; 
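// Sketch of the scaled-address helpers Lsa/Dlsa above (operand order as I
// read the implementation, an assumption worth checking: rd = rt + (rs << sa)):
//   masm->Dlsa(addr, base, index, kPointerSizeLog2);  // addr = base + index*8
// On r6 this is a single [d]lsa instruction; pre-r6 it falls back to a shift
// plus an add through a scratch register.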
bovc(rs, rt, &skip); @@ -1101,7 +1101,7 @@ void TurboAssembler::Bnvc(Register rs, Register rt, Label* L) { // ------------Pseudo-instructions------------- // Change endianness -void TurboAssembler::ByteSwapSigned(Register dest, Register src, +void MacroAssembler::ByteSwapSigned(Register dest, Register src, int operand_size) { DCHECK(operand_size == 2 || operand_size == 4 || operand_size == 8); DCHECK(kArchVariant == kMips64r6 || kArchVariant == kMips64r2); @@ -1117,7 +1117,7 @@ void TurboAssembler::ByteSwapSigned(Register dest, Register src, } } -void TurboAssembler::ByteSwapUnsigned(Register dest, Register src, +void MacroAssembler::ByteSwapUnsigned(Register dest, Register src, int operand_size) { DCHECK(operand_size == 2 || operand_size == 4); if (operand_size == 2) { @@ -1130,7 +1130,7 @@ void TurboAssembler::ByteSwapUnsigned(Register dest, Register src, } } -void TurboAssembler::Ulw(Register rd, const MemOperand& rs) { +void MacroAssembler::Ulw(Register rd, const MemOperand& rs) { DCHECK(rd != at); DCHECK(rs.rm() != at); if (kArchVariant == kMips64r6) { @@ -1154,7 +1154,7 @@ void TurboAssembler::Ulw(Register rd, const MemOperand& rs) { } } -void TurboAssembler::Ulwu(Register rd, const MemOperand& rs) { +void MacroAssembler::Ulwu(Register rd, const MemOperand& rs) { if (kArchVariant == kMips64r6) { Lwu(rd, rs); } else { @@ -1164,7 +1164,7 @@ void TurboAssembler::Ulwu(Register rd, const MemOperand& rs) { } } -void TurboAssembler::Usw(Register rd, const MemOperand& rs) { +void MacroAssembler::Usw(Register rd, const MemOperand& rs) { DCHECK(rd != at); DCHECK(rs.rm() != at); DCHECK(rd != rs.rm()); @@ -1181,7 +1181,7 @@ void TurboAssembler::Usw(Register rd, const MemOperand& rs) { } } -void TurboAssembler::Ulh(Register rd, const MemOperand& rs) { +void MacroAssembler::Ulh(Register rd, const MemOperand& rs) { DCHECK(rd != at); DCHECK(rs.rm() != at); if (kArchVariant == kMips64r6) { @@ -1215,7 +1215,7 @@ void TurboAssembler::Ulh(Register rd, const MemOperand& rs) { } } -void TurboAssembler::Ulhu(Register rd, const MemOperand& rs) { +void MacroAssembler::Ulhu(Register rd, const MemOperand& rs) { DCHECK(rd != at); DCHECK(rs.rm() != at); if (kArchVariant == kMips64r6) { @@ -1249,7 +1249,7 @@ void TurboAssembler::Ulhu(Register rd, const MemOperand& rs) { } } -void TurboAssembler::Ush(Register rd, const MemOperand& rs, Register scratch) { +void MacroAssembler::Ush(Register rd, const MemOperand& rs, Register scratch) { DCHECK(rd != at); DCHECK(rs.rm() != at); DCHECK(rs.rm() != scratch); @@ -1278,7 +1278,7 @@ void TurboAssembler::Ush(Register rd, const MemOperand& rs, Register scratch) { } } -void TurboAssembler::Uld(Register rd, const MemOperand& rs) { +void MacroAssembler::Uld(Register rd, const MemOperand& rs) { DCHECK(rd != at); DCHECK(rs.rm() != at); if (kArchVariant == kMips64r6) { @@ -1313,7 +1313,7 @@ void MacroAssembler::LoadWordPair(Register rd, const MemOperand& rs, Daddu(rd, rd, scratch); } -void TurboAssembler::Usd(Register rd, const MemOperand& rs) { +void MacroAssembler::Usd(Register rd, const MemOperand& rs) { DCHECK(rd != at); DCHECK(rs.rm() != at); if (kArchVariant == kMips64r6) { @@ -1337,7 +1337,7 @@ void MacroAssembler::StoreWordPair(Register rd, const MemOperand& rs, Sw(scratch, MemOperand(rs.rm(), rs.offset() + kPointerSize / 2)); } -void TurboAssembler::Ulwc1(FPURegister fd, const MemOperand& rs, +void MacroAssembler::Ulwc1(FPURegister fd, const MemOperand& rs, Register scratch) { if (kArchVariant == kMips64r6) { Lwc1(fd, rs); @@ -1348,7 +1348,7 @@ void 
TurboAssembler::Ulwc1(FPURegister fd, const MemOperand& rs, } } -void TurboAssembler::Uswc1(FPURegister fd, const MemOperand& rs, +void MacroAssembler::Uswc1(FPURegister fd, const MemOperand& rs, Register scratch) { if (kArchVariant == kMips64r6) { Swc1(fd, rs); @@ -1359,7 +1359,7 @@ void TurboAssembler::Uswc1(FPURegister fd, const MemOperand& rs, } } -void TurboAssembler::Uldc1(FPURegister fd, const MemOperand& rs, +void MacroAssembler::Uldc1(FPURegister fd, const MemOperand& rs, Register scratch) { DCHECK(scratch != at); if (kArchVariant == kMips64r6) { @@ -1371,7 +1371,7 @@ void TurboAssembler::Uldc1(FPURegister fd, const MemOperand& rs, } } -void TurboAssembler::Usdc1(FPURegister fd, const MemOperand& rs, +void MacroAssembler::Usdc1(FPURegister fd, const MemOperand& rs, Register scratch) { DCHECK(scratch != at); if (kArchVariant == kMips64r6) { @@ -1383,97 +1383,97 @@ void TurboAssembler::Usdc1(FPURegister fd, const MemOperand& rs, } } -void TurboAssembler::Lb(Register rd, const MemOperand& rs) { +void MacroAssembler::Lb(Register rd, const MemOperand& rs) { MemOperand source = rs; AdjustBaseAndOffset(&source); lb(rd, source); } -void TurboAssembler::Lbu(Register rd, const MemOperand& rs) { +void MacroAssembler::Lbu(Register rd, const MemOperand& rs) { MemOperand source = rs; AdjustBaseAndOffset(&source); lbu(rd, source); } -void TurboAssembler::Sb(Register rd, const MemOperand& rs) { +void MacroAssembler::Sb(Register rd, const MemOperand& rs) { MemOperand source = rs; AdjustBaseAndOffset(&source); sb(rd, source); } -void TurboAssembler::Lh(Register rd, const MemOperand& rs) { +void MacroAssembler::Lh(Register rd, const MemOperand& rs) { MemOperand source = rs; AdjustBaseAndOffset(&source); lh(rd, source); } -void TurboAssembler::Lhu(Register rd, const MemOperand& rs) { +void MacroAssembler::Lhu(Register rd, const MemOperand& rs) { MemOperand source = rs; AdjustBaseAndOffset(&source); lhu(rd, source); } -void TurboAssembler::Sh(Register rd, const MemOperand& rs) { +void MacroAssembler::Sh(Register rd, const MemOperand& rs) { MemOperand source = rs; AdjustBaseAndOffset(&source); sh(rd, source); } -void TurboAssembler::Lw(Register rd, const MemOperand& rs) { +void MacroAssembler::Lw(Register rd, const MemOperand& rs) { MemOperand source = rs; AdjustBaseAndOffset(&source); lw(rd, source); } -void TurboAssembler::Lwu(Register rd, const MemOperand& rs) { +void MacroAssembler::Lwu(Register rd, const MemOperand& rs) { MemOperand source = rs; AdjustBaseAndOffset(&source); lwu(rd, source); } -void TurboAssembler::Sw(Register rd, const MemOperand& rs) { +void MacroAssembler::Sw(Register rd, const MemOperand& rs) { MemOperand source = rs; AdjustBaseAndOffset(&source); sw(rd, source); } -void TurboAssembler::Ld(Register rd, const MemOperand& rs) { +void MacroAssembler::Ld(Register rd, const MemOperand& rs) { MemOperand source = rs; AdjustBaseAndOffset(&source); ld(rd, source); } -void TurboAssembler::Sd(Register rd, const MemOperand& rs) { +void MacroAssembler::Sd(Register rd, const MemOperand& rs) { MemOperand source = rs; AdjustBaseAndOffset(&source); sd(rd, source); } -void TurboAssembler::Lwc1(FPURegister fd, const MemOperand& src) { +void MacroAssembler::Lwc1(FPURegister fd, const MemOperand& src) { MemOperand tmp = src; AdjustBaseAndOffset(&tmp); lwc1(fd, tmp); } -void TurboAssembler::Swc1(FPURegister fs, const MemOperand& src) { +void MacroAssembler::Swc1(FPURegister fs, const MemOperand& src) { MemOperand tmp = src; AdjustBaseAndOffset(&tmp); swc1(fs, tmp); } -void 
TurboAssembler::Ldc1(FPURegister fd, const MemOperand& src) { +void MacroAssembler::Ldc1(FPURegister fd, const MemOperand& src) { MemOperand tmp = src; AdjustBaseAndOffset(&tmp); ldc1(fd, tmp); } -void TurboAssembler::Sdc1(FPURegister fs, const MemOperand& src) { +void MacroAssembler::Sdc1(FPURegister fs, const MemOperand& src) { MemOperand tmp = src; AdjustBaseAndOffset(&tmp); sdc1(fs, tmp); } -void TurboAssembler::Ll(Register rd, const MemOperand& rs) { +void MacroAssembler::Ll(Register rd, const MemOperand& rs) { bool is_one_instruction = (kArchVariant == kMips64r6) ? is_int9(rs.offset()) : is_int16(rs.offset()); if (is_one_instruction) { @@ -1487,7 +1487,7 @@ void TurboAssembler::Ll(Register rd, const MemOperand& rs) { } } -void TurboAssembler::Lld(Register rd, const MemOperand& rs) { +void MacroAssembler::Lld(Register rd, const MemOperand& rs) { bool is_one_instruction = (kArchVariant == kMips64r6) ? is_int9(rs.offset()) : is_int16(rs.offset()); if (is_one_instruction) { @@ -1501,7 +1501,7 @@ void TurboAssembler::Lld(Register rd, const MemOperand& rs) { } } -void TurboAssembler::Sc(Register rd, const MemOperand& rs) { +void MacroAssembler::Sc(Register rd, const MemOperand& rs) { bool is_one_instruction = (kArchVariant == kMips64r6) ? is_int9(rs.offset()) : is_int16(rs.offset()); if (is_one_instruction) { @@ -1515,7 +1515,7 @@ void TurboAssembler::Sc(Register rd, const MemOperand& rs) { } } -void TurboAssembler::Scd(Register rd, const MemOperand& rs) { +void MacroAssembler::Scd(Register rd, const MemOperand& rs) { bool is_one_instruction = (kArchVariant == kMips64r6) ? is_int9(rs.offset()) : is_int16(rs.offset()); if (is_one_instruction) { @@ -1529,7 +1529,7 @@ void TurboAssembler::Scd(Register rd, const MemOperand& rs) { } } -void TurboAssembler::li(Register dst, Handle<HeapObject> value, LiFlags mode) { +void MacroAssembler::li(Register dst, Handle<HeapObject> value, LiFlags mode) { // TODO(jgruber,v8:8887): Also consider a root-relative load when generating // non-isolate-independent code. In many cases it might be cheaper than // embedding the relocatable value. @@ -1540,7 +1540,7 @@ void TurboAssembler::li(Register dst, Handle<HeapObject> value, LiFlags mode) { li(dst, Operand(value), mode); } -void TurboAssembler::li(Register dst, ExternalReference value, LiFlags mode) { +void MacroAssembler::li(Register dst, ExternalReference value, LiFlags mode) { // TODO(jgruber,v8:8887): Also consider a root-relative load when generating // non-isolate-independent code. In many cases it might be cheaper than // embedding the relocatable value. @@ -1560,7 +1560,7 @@ static inline int InstrCountForLiLower32Bit(int64_t value) { } } -void TurboAssembler::LiLower32BitHelper(Register rd, Operand j) { +void MacroAssembler::LiLower32BitHelper(Register rd, Operand j) { if (is_int16(static_cast<int32_t>(j.immediate()))) { daddiu(rd, zero_reg, (j.immediate() & kImm16Mask)); } else if (!(j.immediate() & kUpper16MaskOf64)) { @@ -1584,7 +1584,7 @@ static inline int InstrCountForLoadReplicatedConst32(int64_t value) { return INT_MAX; } -int TurboAssembler::InstrCountForLi64Bit(int64_t value) { +int MacroAssembler::InstrCountForLi64Bit(int64_t value) { if (is_int32(value)) { return InstrCountForLiLower32Bit(value); } else { @@ -1679,7 +1679,7 @@ int TurboAssembler::InstrCountForLi64Bit(int64_t value) { // All changes to if...else conditions here must be added to // InstrCountForLi64Bit as well.
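// Sketch of the li flavors above (hypothetical values; LiFlags picks the
// strategy, and ADDRESS_LOAD is an assumed flag name for the fixed-length,
// patchable form):
//   masm->li(t0, Operand(0x2B));                 // OPTIMIZE_SIZE: shortest
//   masm->li(t1, Operand(imm64), ADDRESS_LOAD);  // constant-size sequence
// InstrCountForLi64Bit mirrors li_optimized, per the comment above, so branch
// range bookkeeping can know the sequence length without emitting it.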
-void TurboAssembler::li_optimized(Register rd, Operand j, LiFlags mode) { +void MacroAssembler::li_optimized(Register rd, Operand j, LiFlags mode) { DCHECK(!j.is_reg()); DCHECK(!MustUseReg(j.rmode())); DCHECK(mode == OPTIMIZE_SIZE); @@ -1857,7 +1857,7 @@ void TurboAssembler::li_optimized(Register rd, Operand j, LiFlags mode) { } } -void TurboAssembler::li(Register rd, Operand j, LiFlags mode) { +void MacroAssembler::li(Register rd, Operand j, LiFlags mode) { DCHECK(!j.is_reg()); BlockTrampolinePoolScope block_trampoline_pool(this); if (!MustUseReg(j.rmode()) && mode == OPTIMIZE_SIZE) { @@ -1919,7 +1919,7 @@ void TurboAssembler::li(Register rd, Operand j, LiFlags mode) { } } -void TurboAssembler::MultiPush(RegList regs) { +void MacroAssembler::MultiPush(RegList regs) { int16_t num_to_push = regs.Count(); int16_t stack_offset = num_to_push * kPointerSize; @@ -1932,7 +1932,7 @@ void TurboAssembler::MultiPush(RegList regs) { } } -void TurboAssembler::MultiPop(RegList regs) { +void MacroAssembler::MultiPop(RegList regs) { int16_t stack_offset = 0; for (int16_t i = 0; i < kNumRegisters; i++) { @@ -1944,7 +1944,7 @@ void TurboAssembler::MultiPop(RegList regs) { daddiu(sp, sp, stack_offset); } -void TurboAssembler::MultiPushFPU(DoubleRegList regs) { +void MacroAssembler::MultiPushFPU(DoubleRegList regs) { int16_t num_to_push = regs.Count(); int16_t stack_offset = num_to_push * kDoubleSize; @@ -1957,7 +1957,7 @@ void TurboAssembler::MultiPushFPU(DoubleRegList regs) { } } -void TurboAssembler::MultiPopFPU(DoubleRegList regs) { +void MacroAssembler::MultiPopFPU(DoubleRegList regs) { int16_t stack_offset = 0; for (int16_t i = 0; i < kNumRegisters; i++) { @@ -1969,7 +1969,7 @@ void TurboAssembler::MultiPopFPU(DoubleRegList regs) { daddiu(sp, sp, stack_offset); } -void TurboAssembler::MultiPushMSA(DoubleRegList regs) { +void MacroAssembler::MultiPushMSA(DoubleRegList regs) { int16_t num_to_push = regs.Count(); int16_t stack_offset = num_to_push * kSimd128Size; @@ -1982,7 +1982,7 @@ void TurboAssembler::MultiPushMSA(DoubleRegList regs) { } } -void TurboAssembler::MultiPopMSA(DoubleRegList regs) { +void MacroAssembler::MultiPopMSA(DoubleRegList regs) { int16_t stack_offset = 0; for (int16_t i = 0; i < kNumRegisters; i++) { @@ -1994,14 +1994,14 @@ void TurboAssembler::MultiPopMSA(DoubleRegList regs) { daddiu(sp, sp, stack_offset); } -void TurboAssembler::Ext(Register rt, Register rs, uint16_t pos, +void MacroAssembler::Ext(Register rt, Register rs, uint16_t pos, uint16_t size) { DCHECK_LT(pos, 32); DCHECK_LT(pos + size, 33); ext_(rt, rs, pos, size); } -void TurboAssembler::Dext(Register rt, Register rs, uint16_t pos, +void MacroAssembler::Dext(Register rt, Register rs, uint16_t pos, uint16_t size) { DCHECK(pos < 64 && 0 < size && size <= 64 && 0 < pos + size && pos + size <= 64); @@ -2014,7 +2014,7 @@ void TurboAssembler::Dext(Register rt, Register rs, uint16_t pos, } } -void TurboAssembler::Ins(Register rt, Register rs, uint16_t pos, +void MacroAssembler::Ins(Register rt, Register rs, uint16_t pos, uint16_t size) { DCHECK_LT(pos, 32); DCHECK_LE(pos + size, 32); @@ -2022,7 +2022,7 @@ void TurboAssembler::Ins(Register rt, Register rs, uint16_t pos, ins_(rt, rs, pos, size); } -void TurboAssembler::Dins(Register rt, Register rs, uint16_t pos, +void MacroAssembler::Dins(Register rt, Register rs, uint16_t pos, uint16_t size) { DCHECK(pos < 64 && 0 < size && size <= 64 && 0 < pos + size && pos + size <= 64); @@ -2035,7 +2035,7 @@ void TurboAssembler::Dins(Register rt, Register rs, uint16_t pos, } } -void 
TurboAssembler::ExtractBits(Register dest, Register source, Register pos, +void MacroAssembler::ExtractBits(Register dest, Register source, Register pos, int size, bool sign_extend) { dsrav(dest, source, pos); Dext(dest, dest, 0, size); @@ -2057,7 +2057,7 @@ void TurboAssembler::ExtractBits(Register dest, Register source, Register pos, } } -void TurboAssembler::InsertBits(Register dest, Register source, Register pos, +void MacroAssembler::InsertBits(Register dest, Register source, Register pos, int size) { Dror(dest, dest, pos); Dins(dest, source, 0, size); @@ -2069,7 +2069,7 @@ void TurboAssembler::InsertBits(Register dest, Register source, Register pos, } } -void TurboAssembler::Neg_s(FPURegister fd, FPURegister fs) { +void MacroAssembler::Neg_s(FPURegister fd, FPURegister fs) { if (kArchVariant == kMips64r6) { // r6 neg_s changes the sign for NaN-like operands as well. neg_s(fd, fs); @@ -2094,7 +2094,7 @@ void TurboAssembler::Neg_s(FPURegister fd, FPURegister fs) { } } -void TurboAssembler::Neg_d(FPURegister fd, FPURegister fs) { +void MacroAssembler::Neg_d(FPURegister fd, FPURegister fs) { if (kArchVariant == kMips64r6) { // r6 neg_d changes the sign for NaN-like operands as well. neg_d(fd, fs); @@ -2119,14 +2119,14 @@ void TurboAssembler::Neg_d(FPURegister fd, FPURegister fs) { } } -void TurboAssembler::Cvt_d_uw(FPURegister fd, FPURegister fs) { +void MacroAssembler::Cvt_d_uw(FPURegister fd, FPURegister fs) { // Move the data from fs to t8. BlockTrampolinePoolScope block_trampoline_pool(this); mfc1(t8, fs); Cvt_d_uw(fd, t8); } -void TurboAssembler::Cvt_d_uw(FPURegister fd, Register rs) { +void MacroAssembler::Cvt_d_uw(FPURegister fd, Register rs) { BlockTrampolinePoolScope block_trampoline_pool(this); // Convert rs to a FP value in fd. @@ -2139,14 +2139,14 @@ void TurboAssembler::Cvt_d_uw(FPURegister fd, Register rs) { cvt_d_l(fd, fd); } -void TurboAssembler::Cvt_d_ul(FPURegister fd, FPURegister fs) { +void MacroAssembler::Cvt_d_ul(FPURegister fd, FPURegister fs) { BlockTrampolinePoolScope block_trampoline_pool(this); // Move the data from fs to t8. dmfc1(t8, fs); Cvt_d_ul(fd, t8); } -void TurboAssembler::Cvt_d_ul(FPURegister fd, Register rs) { +void MacroAssembler::Cvt_d_ul(FPURegister fd, Register rs) { BlockTrampolinePoolScope block_trampoline_pool(this); // Convert rs to a FP value in fd. @@ -2174,14 +2174,14 @@ void TurboAssembler::Cvt_d_ul(FPURegister fd, Register rs) { bind(&conversion_done); } -void TurboAssembler::Cvt_s_uw(FPURegister fd, FPURegister fs) { +void MacroAssembler::Cvt_s_uw(FPURegister fd, FPURegister fs) { BlockTrampolinePoolScope block_trampoline_pool(this); // Move the data from fs to t8. mfc1(t8, fs); Cvt_s_uw(fd, t8); } -void TurboAssembler::Cvt_s_uw(FPURegister fd, Register rs) { +void MacroAssembler::Cvt_s_uw(FPURegister fd, Register rs) { BlockTrampolinePoolScope block_trampoline_pool(this); // Convert rs to a FP value in fd. DCHECK(rs != t9); @@ -2193,14 +2193,14 @@ void TurboAssembler::Cvt_s_uw(FPURegister fd, Register rs) { cvt_s_l(fd, fd); } -void TurboAssembler::Cvt_s_ul(FPURegister fd, FPURegister fs) { +void MacroAssembler::Cvt_s_ul(FPURegister fd, FPURegister fs) { BlockTrampolinePoolScope block_trampoline_pool(this); // Move the data from fs to t8. dmfc1(t8, fs); Cvt_s_ul(fd, t8); } -void TurboAssembler::Cvt_s_ul(FPURegister fd, Register rs) { +void MacroAssembler::Cvt_s_ul(FPURegister fd, Register rs) { BlockTrampolinePoolScope block_trampoline_pool(this); // Convert rs to a FP value in fd. 
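// Bit-level equivalent of ExtractBits above, as a plain C++ sketch of what
// the emitted sequence computes (for 0 < size < 64):
//   uint64_t field = (source >> pos) & ((uint64_t{1} << size) - 1);
//   if (sign_extend)
//     field = uint64_t(int64_t(field << (64 - size)) >> (64 - size));
// InsertBits is the inverse: rotate `dest` so the field sits at bit 0,
// overwrite `size` bits from `source` with Dins, then rotate back.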
@@ -2260,28 +2260,28 @@ void MacroAssembler::Trunc_l_ud(FPURegister fd, FPURegister fs, trunc_l_d(fd, fs); } -void TurboAssembler::Trunc_uw_d(FPURegister fd, FPURegister fs, +void MacroAssembler::Trunc_uw_d(FPURegister fd, FPURegister fs, FPURegister scratch) { BlockTrampolinePoolScope block_trampoline_pool(this); Trunc_uw_d(t8, fs, scratch); mtc1(t8, fd); } -void TurboAssembler::Trunc_uw_s(FPURegister fd, FPURegister fs, +void MacroAssembler::Trunc_uw_s(FPURegister fd, FPURegister fs, FPURegister scratch) { BlockTrampolinePoolScope block_trampoline_pool(this); Trunc_uw_s(t8, fs, scratch); mtc1(t8, fd); } -void TurboAssembler::Trunc_ul_d(FPURegister fd, FPURegister fs, +void MacroAssembler::Trunc_ul_d(FPURegister fd, FPURegister fs, FPURegister scratch, Register result) { BlockTrampolinePoolScope block_trampoline_pool(this); Trunc_ul_d(t8, fs, scratch, result); dmtc1(t8, fd); } -void TurboAssembler::Trunc_ul_s(FPURegister fd, FPURegister fs, +void MacroAssembler::Trunc_ul_s(FPURegister fd, FPURegister fs, FPURegister scratch, Register result) { BlockTrampolinePoolScope block_trampoline_pool(this); Trunc_ul_s(t8, fs, scratch, result); @@ -2304,7 +2304,7 @@ void MacroAssembler::Ceil_w_d(FPURegister fd, FPURegister fs) { ceil_w_d(fd, fs); } -void TurboAssembler::Trunc_uw_d(Register rd, FPURegister fs, +void MacroAssembler::Trunc_uw_d(Register rd, FPURegister fs, FPURegister scratch) { DCHECK(fs != scratch); DCHECK(rd != at); @@ -2340,7 +2340,7 @@ void TurboAssembler::Trunc_uw_d(Register rd, FPURegister fs, bind(&done); } -void TurboAssembler::Trunc_uw_s(Register rd, FPURegister fs, +void MacroAssembler::Trunc_uw_s(Register rd, FPURegister fs, FPURegister scratch) { DCHECK(fs != scratch); DCHECK(rd != at); @@ -2375,7 +2375,7 @@ void TurboAssembler::Trunc_uw_s(Register rd, FPURegister fs, bind(&done); } -void TurboAssembler::Trunc_ul_d(Register rd, FPURegister fs, +void MacroAssembler::Trunc_ul_d(Register rd, FPURegister fs, FPURegister scratch, Register result) { DCHECK(fs != scratch); DCHECK(result.is_valid() ? !AreAliased(rd, result, at) : !AreAliased(rd, at)); @@ -2430,7 +2430,7 @@ void TurboAssembler::Trunc_ul_d(Register rd, FPURegister fs, bind(&fail); } -void TurboAssembler::Trunc_ul_s(Register rd, FPURegister fs, +void MacroAssembler::Trunc_ul_s(Register rd, FPURegister fs, FPURegister scratch, Register result) { DCHECK(fs != scratch); DCHECK(result.is_valid() ? 
!AreAliased(rd, result, at) : !AreAliased(rd, at)); @@ -2490,7 +2490,7 @@ void TurboAssembler::Trunc_ul_s(Register rd, FPURegister fs, } template <typename RoundFunc> -void TurboAssembler::RoundDouble(FPURegister dst, FPURegister src, +void MacroAssembler::RoundDouble(FPURegister dst, FPURegister src, FPURoundingMode mode, RoundFunc round) { BlockTrampolinePoolScope block_trampoline_pool(this); Register scratch = t8; @@ -2522,36 +2522,36 @@ void TurboAssembler::RoundDouble(FPURegister dst, FPURegister src, } } -void TurboAssembler::Floor_d_d(FPURegister dst, FPURegister src) { +void MacroAssembler::Floor_d_d(FPURegister dst, FPURegister src) { RoundDouble(dst, src, mode_floor, - [](TurboAssembler* tasm, FPURegister dst, FPURegister src) { - tasm->floor_l_d(dst, src); + [](MacroAssembler* masm, FPURegister dst, FPURegister src) { + masm->floor_l_d(dst, src); }); } -void TurboAssembler::Ceil_d_d(FPURegister dst, FPURegister src) { +void MacroAssembler::Ceil_d_d(FPURegister dst, FPURegister src) { RoundDouble(dst, src, mode_ceil, - [](TurboAssembler* tasm, FPURegister dst, FPURegister src) { - tasm->ceil_l_d(dst, src); + [](MacroAssembler* masm, FPURegister dst, FPURegister src) { + masm->ceil_l_d(dst, src); }); } -void TurboAssembler::Trunc_d_d(FPURegister dst, FPURegister src) { +void MacroAssembler::Trunc_d_d(FPURegister dst, FPURegister src) { RoundDouble(dst, src, mode_trunc, - [](TurboAssembler* tasm, FPURegister dst, FPURegister src) { - tasm->trunc_l_d(dst, src); + [](MacroAssembler* masm, FPURegister dst, FPURegister src) { + masm->trunc_l_d(dst, src); }); } -void TurboAssembler::Round_d_d(FPURegister dst, FPURegister src) { +void MacroAssembler::Round_d_d(FPURegister dst, FPURegister src) { RoundDouble(dst, src, mode_round, - [](TurboAssembler* tasm, FPURegister dst, FPURegister src) { - tasm->round_l_d(dst, src); + [](MacroAssembler* masm, FPURegister dst, FPURegister src) { + masm->round_l_d(dst, src); }); } template <typename RoundFunc> -void TurboAssembler::RoundFloat(FPURegister dst, FPURegister src, +void MacroAssembler::RoundFloat(FPURegister dst, FPURegister src, FPURoundingMode mode, RoundFunc round) { BlockTrampolinePoolScope block_trampoline_pool(this); Register scratch = t8; @@ -2586,35 +2586,35 @@ void TurboAssembler::RoundFloat(FPURegister dst, FPURegister src, } } -void TurboAssembler::Floor_s_s(FPURegister dst, FPURegister src) { +void MacroAssembler::Floor_s_s(FPURegister dst, FPURegister src) { RoundFloat(dst, src, mode_floor, - [](TurboAssembler* tasm, FPURegister dst, FPURegister src) { - tasm->floor_w_s(dst, src); + [](MacroAssembler* masm, FPURegister dst, FPURegister src) { + masm->floor_w_s(dst, src); }); } -void TurboAssembler::Ceil_s_s(FPURegister dst, FPURegister src) { +void MacroAssembler::Ceil_s_s(FPURegister dst, FPURegister src) { RoundFloat(dst, src, mode_ceil, - [](TurboAssembler* tasm, FPURegister dst, FPURegister src) { - tasm->ceil_w_s(dst, src); + [](MacroAssembler* masm, FPURegister dst, FPURegister src) { + masm->ceil_w_s(dst, src); }); } -void TurboAssembler::Trunc_s_s(FPURegister dst, FPURegister src) { +void MacroAssembler::Trunc_s_s(FPURegister dst, FPURegister src) { RoundFloat(dst, src, mode_trunc, - [](TurboAssembler* tasm, FPURegister dst, FPURegister src) { - tasm->trunc_w_s(dst, src); + [](MacroAssembler* masm, FPURegister dst, FPURegister src) { + masm->trunc_w_s(dst, src); }); } -void TurboAssembler::Round_s_s(FPURegister dst, FPURegister src) { +void MacroAssembler::Round_s_s(FPURegister dst, FPURegister src) { RoundFloat(dst, src, mode_round, - [](TurboAssembler* tasm, 
FPURegister dst, FPURegister src) { - tasm->round_w_s(dst, src); + [](MacroAssembler* masm, FPURegister dst, FPURegister src) { + masm->round_w_s(dst, src); }); } -void TurboAssembler::LoadLane(MSASize sz, MSARegister dst, uint8_t laneidx, +void MacroAssembler::LoadLane(MSASize sz, MSARegister dst, uint8_t laneidx, MemOperand src) { UseScratchRegisterScope temps(this); Register scratch = temps.Acquire(); @@ -2640,7 +2640,7 @@ void TurboAssembler::LoadLane(MSASize sz, MSARegister dst, uint8_t laneidx, } } -void TurboAssembler::StoreLane(MSASize sz, MSARegister src, uint8_t laneidx, +void MacroAssembler::StoreLane(MSASize sz, MSARegister src, uint8_t laneidx, MemOperand dst) { UseScratchRegisterScope temps(this); Register scratch = temps.Acquire(); @@ -2684,7 +2684,7 @@ void TurboAssembler::StoreLane(MSASize sz, MSARegister src, uint8_t laneidx, dotp_instr(dst, kSimd128ScratchReg, kSimd128RegZero); \ break; -void TurboAssembler::ExtMulLow(MSADataType type, MSARegister dst, +void MacroAssembler::ExtMulLow(MSADataType type, MSARegister dst, MSARegister src1, MSARegister src2) { switch (type) { EXT_MUL_BINOP(MSAS8, ilvr_b, dotp_s_h) @@ -2698,7 +2698,7 @@ void TurboAssembler::ExtMulLow(MSADataType type, MSARegister dst, } } -void TurboAssembler::ExtMulHigh(MSADataType type, MSARegister dst, +void MacroAssembler::ExtMulHigh(MSADataType type, MSARegister dst, MSARegister src1, MSARegister src2) { switch (type) { EXT_MUL_BINOP(MSAS8, ilvl_b, dotp_s_h) @@ -2713,7 +2713,7 @@ void TurboAssembler::ExtMulHigh(MSADataType type, MSARegister dst, } #undef EXT_MUL_BINOP -void TurboAssembler::LoadSplat(MSASize sz, MSARegister dst, MemOperand src) { +void MacroAssembler::LoadSplat(MSASize sz, MSARegister dst, MemOperand src) { UseScratchRegisterScope temps(this); Register scratch = temps.Acquire(); switch (sz) { @@ -2738,7 +2738,7 @@ void TurboAssembler::LoadSplat(MSASize sz, MSARegister dst, MemOperand src) { } } -void TurboAssembler::ExtAddPairwise(MSADataType type, MSARegister dst, +void MacroAssembler::ExtAddPairwise(MSADataType type, MSARegister dst, MSARegister src) { switch (type) { case MSAS8: @@ -2758,7 +2758,7 @@ void TurboAssembler::ExtAddPairwise(MSADataType type, MSARegister dst, } } -void TurboAssembler::MSARoundW(MSARegister dst, MSARegister src, +void MacroAssembler::MSARoundW(MSARegister dst, MSARegister src, FPURoundingMode mode) { BlockTrampolinePoolScope block_trampoline_pool(this); Register scratch = t8; @@ -2774,7 +2774,7 @@ void TurboAssembler::MSARoundW(MSARegister dst, MSARegister src, ctcmsa(MSACSR, scratch); } -void TurboAssembler::MSARoundD(MSARegister dst, MSARegister src, +void MacroAssembler::MSARoundD(MSARegister dst, MSARegister src, FPURoundingMode mode) { BlockTrampolinePoolScope block_trampoline_pool(this); Register scratch = t8; @@ -2818,7 +2818,7 @@ void MacroAssembler::Msub_d(FPURegister fd, FPURegister fr, FPURegister fs, sub_d(fd, scratch, fr); } -void TurboAssembler::CompareF(SecondaryField sizeField, FPUCondition cc, +void MacroAssembler::CompareF(SecondaryField sizeField, FPUCondition cc, FPURegister cmp1, FPURegister cmp2) { if (kArchVariant == kMips64r6) { sizeField = sizeField == D ? 
L : W; @@ -2829,12 +2829,12 @@ void TurboAssembler::CompareF(SecondaryField sizeField, FPUCondition cc, } } -void TurboAssembler::CompareIsNanF(SecondaryField sizeField, FPURegister cmp1, +void MacroAssembler::CompareIsNanF(SecondaryField sizeField, FPURegister cmp1, FPURegister cmp2) { CompareF(sizeField, UN, cmp1, cmp2); } -void TurboAssembler::BranchTrueShortF(Label* target, BranchDelaySlot bd) { +void MacroAssembler::BranchTrueShortF(Label* target, BranchDelaySlot bd) { if (kArchVariant == kMips64r6) { bc1nez(target, kDoubleCompareReg); } else { @@ -2845,7 +2845,7 @@ void TurboAssembler::BranchTrueShortF(Label* target, BranchDelaySlot bd) { } } -void TurboAssembler::BranchFalseShortF(Label* target, BranchDelaySlot bd) { +void MacroAssembler::BranchFalseShortF(Label* target, BranchDelaySlot bd) { if (kArchVariant == kMips64r6) { bc1eqz(target, kDoubleCompareReg); } else { @@ -2856,7 +2856,7 @@ void TurboAssembler::BranchFalseShortF(Label* target, BranchDelaySlot bd) { } } -void TurboAssembler::BranchTrueF(Label* target, BranchDelaySlot bd) { +void MacroAssembler::BranchTrueF(Label* target, BranchDelaySlot bd) { bool long_branch = target->is_bound() ? !is_near(target) : is_trampoline_emitted(); if (long_branch) { @@ -2869,7 +2869,7 @@ void TurboAssembler::BranchTrueF(Label* target, BranchDelaySlot bd) { } } -void TurboAssembler::BranchFalseF(Label* target, BranchDelaySlot bd) { +void MacroAssembler::BranchFalseF(Label* target, BranchDelaySlot bd) { bool long_branch = target->is_bound() ? !is_near(target) : is_trampoline_emitted(); if (long_branch) { @@ -2882,7 +2882,7 @@ void TurboAssembler::BranchFalseF(Label* target, BranchDelaySlot bd) { } } -void TurboAssembler::BranchMSA(Label* target, MSABranchDF df, +void MacroAssembler::BranchMSA(Label* target, MSABranchDF df, MSABranchCondition cond, MSARegister wt, BranchDelaySlot bd) { { @@ -2904,7 +2904,7 @@ void TurboAssembler::BranchMSA(Label* target, MSABranchDF df, } } -void TurboAssembler::BranchShortMSA(MSABranchDF df, Label* target, +void MacroAssembler::BranchShortMSA(MSABranchDF df, Label* target, MSABranchCondition cond, MSARegister wt, BranchDelaySlot bd) { if (IsEnabled(MIPS_SIMD)) { @@ -2961,7 +2961,7 @@ void TurboAssembler::BranchShortMSA(MSABranchDF df, Label* target, } } -void TurboAssembler::FmoveLow(FPURegister dst, Register src_low) { +void MacroAssembler::FmoveLow(FPURegister dst, Register src_low) { UseScratchRegisterScope temps(this); Register scratch = temps.Acquire(); DCHECK(src_low != scratch); @@ -2970,14 +2970,14 @@ void TurboAssembler::FmoveLow(FPURegister dst, Register src_low) { mthc1(scratch, dst); } -void TurboAssembler::Move(FPURegister dst, uint32_t src) { +void MacroAssembler::Move(FPURegister dst, uint32_t src) { UseScratchRegisterScope temps(this); Register scratch = temps.Acquire(); li(scratch, Operand(static_cast(src))); mtc1(scratch, dst); } -void TurboAssembler::Move(FPURegister dst, uint64_t src) { +void MacroAssembler::Move(FPURegister dst, uint64_t src) { // Handle special values first. 
if (src == base::bit_cast(0.0) && has_double_zero_reg_set_) { mov_d(dst, kDoubleRegZero); @@ -3011,7 +3011,7 @@ void TurboAssembler::Move(FPURegister dst, uint64_t src) { } } -void TurboAssembler::Movz(Register rd, Register rs, Register rt) { +void MacroAssembler::Movz(Register rd, Register rs, Register rt) { if (kArchVariant == kMips64r6) { Label done; Branch(&done, ne, rt, Operand(zero_reg)); @@ -3022,7 +3022,7 @@ void TurboAssembler::Movz(Register rd, Register rs, Register rt) { } } -void TurboAssembler::Movn(Register rd, Register rs, Register rt) { +void MacroAssembler::Movn(Register rd, Register rs, Register rt) { if (kArchVariant == kMips64r6) { Label done; Branch(&done, eq, rt, Operand(zero_reg)); @@ -3033,7 +3033,7 @@ void TurboAssembler::Movn(Register rd, Register rs, Register rt) { } } -void TurboAssembler::LoadZeroOnCondition(Register rd, Register rs, +void MacroAssembler::LoadZeroOnCondition(Register rd, Register rs, const Operand& rt, Condition cond) { BlockTrampolinePoolScope block_trampoline_pool(this); switch (cond) { @@ -3125,7 +3125,7 @@ void TurboAssembler::LoadZeroOnCondition(Register rd, Register rs, } } -void TurboAssembler::LoadZeroIfConditionNotZero(Register dest, +void MacroAssembler::LoadZeroIfConditionNotZero(Register dest, Register condition) { if (kArchVariant == kMips64r6) { seleqz(dest, dest, condition); @@ -3134,7 +3134,7 @@ void TurboAssembler::LoadZeroIfConditionNotZero(Register dest, } } -void TurboAssembler::LoadZeroIfConditionZero(Register dest, +void MacroAssembler::LoadZeroIfConditionZero(Register dest, Register condition) { if (kArchVariant == kMips64r6) { selnez(dest, dest, condition); @@ -3143,7 +3143,7 @@ void TurboAssembler::LoadZeroIfConditionZero(Register dest, } } -void TurboAssembler::LoadZeroIfFPUCondition(Register dest) { +void MacroAssembler::LoadZeroIfFPUCondition(Register dest) { if (kArchVariant == kMips64r6) { dmfc1(kScratchReg, kDoubleCompareReg); LoadZeroIfConditionNotZero(dest, kScratchReg); @@ -3152,7 +3152,7 @@ void TurboAssembler::LoadZeroIfFPUCondition(Register dest) { } } -void TurboAssembler::LoadZeroIfNotFPUCondition(Register dest) { +void MacroAssembler::LoadZeroIfNotFPUCondition(Register dest) { if (kArchVariant == kMips64r6) { dmfc1(kScratchReg, kDoubleCompareReg); LoadZeroIfConditionZero(dest, kScratchReg); @@ -3161,19 +3161,19 @@ void TurboAssembler::LoadZeroIfNotFPUCondition(Register dest) { } } -void TurboAssembler::Movt(Register rd, Register rs, uint16_t cc) { +void MacroAssembler::Movt(Register rd, Register rs, uint16_t cc) { movt(rd, rs, cc); } -void TurboAssembler::Movf(Register rd, Register rs, uint16_t cc) { +void MacroAssembler::Movf(Register rd, Register rs, uint16_t cc) { movf(rd, rs, cc); } -void TurboAssembler::Clz(Register rd, Register rs) { clz(rd, rs); } +void MacroAssembler::Clz(Register rd, Register rs) { clz(rd, rs); } -void TurboAssembler::Dclz(Register rd, Register rs) { dclz(rd, rs); } +void MacroAssembler::Dclz(Register rd, Register rs) { dclz(rd, rs); } -void TurboAssembler::Ctz(Register rd, Register rs) { +void MacroAssembler::Ctz(Register rd, Register rs) { if (kArchVariant == kMips64r6) { // We don't have an instruction to count the number of trailing zeroes. 
// Start by flipping the bits end-for-end so we can count the number of @@ -3199,7 +3199,7 @@ void TurboAssembler::Ctz(Register rd, Register rs) { } } -void TurboAssembler::Dctz(Register rd, Register rs) { +void MacroAssembler::Dctz(Register rd, Register rs) { if (kArchVariant == kMips64r6) { // We don't have an instruction to count the number of trailing zeroes. // Start by flipping the bits end-for-end so we can count the number of @@ -3225,7 +3225,7 @@ void TurboAssembler::Dctz(Register rd, Register rs) { } } -void TurboAssembler::Popcnt(Register rd, Register rs) { +void MacroAssembler::Popcnt(Register rd, Register rs) { ASM_CODE_COMMENT(this); // https://graphics.stanford.edu/~seander/bithacks.html#CountBitsSetParallel // @@ -3277,7 +3277,7 @@ void TurboAssembler::Popcnt(Register rd, Register rs) { srl(rd, rd, shift); } -void TurboAssembler::Dpopcnt(Register rd, Register rs) { +void MacroAssembler::Dpopcnt(Register rd, Register rs) { ASM_CODE_COMMENT(this); uint64_t B0 = 0x5555555555555555l; // (T)~(T)0/3 uint64_t B1 = 0x3333333333333333l; // (T)~(T)0/15*3 @@ -3307,7 +3307,7 @@ void TurboAssembler::Dpopcnt(Register rd, Register rs) { dsrl32(rd, rd, shift); } -void TurboAssembler::TryInlineTruncateDoubleToI(Register result, +void MacroAssembler::TryInlineTruncateDoubleToI(Register result, DoubleRegister double_input, Label* done) { DoubleRegister single_scratch = kScratchDoubleReg.low(); @@ -3327,7 +3327,7 @@ void TurboAssembler::TryInlineTruncateDoubleToI(Register result, Branch(done, eq, scratch, Operand(zero_reg)); } -void TurboAssembler::TruncateDoubleToI(Isolate* isolate, Zone* zone, +void MacroAssembler::TruncateDoubleToI(Isolate* isolate, Zone* zone, Register result, DoubleRegister double_input, StubCallMode stub_mode) { @@ -3365,19 +3365,19 @@ void TurboAssembler::TruncateDoubleToI(Isolate* isolate, Zone* zone, DCHECK((cond == cc_always && rs == zero_reg && rt.rm() == zero_reg) || \ (cond != cc_always && (rs != zero_reg || rt.rm() != zero_reg))) -void TurboAssembler::Branch(int32_t offset, BranchDelaySlot bdslot) { +void MacroAssembler::Branch(int32_t offset, BranchDelaySlot bdslot) { DCHECK_EQ(kArchVariant, kMips64r6 ? 
is_int26(offset) : is_int16(offset)); BranchShort(offset, bdslot); } -void TurboAssembler::Branch(int32_t offset, Condition cond, Register rs, +void MacroAssembler::Branch(int32_t offset, Condition cond, Register rs, const Operand& rt, BranchDelaySlot bdslot) { bool is_near = BranchShortCheck(offset, nullptr, cond, rs, rt, bdslot); DCHECK(is_near); USE(is_near); } -void TurboAssembler::Branch(Label* L, BranchDelaySlot bdslot) { +void MacroAssembler::Branch(Label* L, BranchDelaySlot bdslot) { if (L->is_bound()) { if (is_near_branch(L)) { BranchShort(L, bdslot); @@ -3393,7 +3393,7 @@ void TurboAssembler::Branch(Label* L, BranchDelaySlot bdslot) { } } -void TurboAssembler::Branch(Label* L, Condition cond, Register rs, +void MacroAssembler::Branch(Label* L, Condition cond, Register rs, const Operand& rt, BranchDelaySlot bdslot) { if (L->is_bound()) { if (!BranchShortCheck(0, L, cond, rs, rt, bdslot)) { @@ -3424,7 +3424,7 @@ void TurboAssembler::Branch(Label* L, Condition cond, Register rs, } } -void TurboAssembler::Branch(Label* L, Condition cond, Register rs, +void MacroAssembler::Branch(Label* L, Condition cond, Register rs, RootIndex index, BranchDelaySlot bdslot) { UseScratchRegisterScope temps(this); Register scratch = temps.Acquire(); @@ -3432,7 +3432,7 @@ void TurboAssembler::Branch(Label* L, Condition cond, Register rs, Branch(L, cond, rs, Operand(scratch), bdslot); } -void TurboAssembler::BranchShortHelper(int16_t offset, Label* L, +void MacroAssembler::BranchShortHelper(int16_t offset, Label* L, BranchDelaySlot bdslot) { DCHECK(L == nullptr || offset == 0); offset = GetOffset(offset, L, OffsetSize::kOffset16); @@ -3442,13 +3442,13 @@ void TurboAssembler::BranchShortHelper(int16_t offset, Label* L, if (bdslot == PROTECT) nop(); } -void TurboAssembler::BranchShortHelperR6(int32_t offset, Label* L) { +void MacroAssembler::BranchShortHelperR6(int32_t offset, Label* L) { DCHECK(L == nullptr || offset == 0); offset = GetOffset(offset, L, OffsetSize::kOffset26); bc(offset); } -void TurboAssembler::BranchShort(int32_t offset, BranchDelaySlot bdslot) { +void MacroAssembler::BranchShort(int32_t offset, BranchDelaySlot bdslot) { if (kArchVariant == kMips64r6 && bdslot == PROTECT) { DCHECK(is_int26(offset)); BranchShortHelperR6(offset, nullptr); @@ -3458,7 +3458,7 @@ void TurboAssembler::BranchShort(int32_t offset, BranchDelaySlot bdslot) { } } -void TurboAssembler::BranchShort(Label* L, BranchDelaySlot bdslot) { +void MacroAssembler::BranchShort(Label* L, BranchDelaySlot bdslot) { if (kArchVariant == kMips64r6 && bdslot == PROTECT) { BranchShortHelperR6(0, L); } else { @@ -3466,7 +3466,7 @@ void TurboAssembler::BranchShort(Label* L, BranchDelaySlot bdslot) { } } -int32_t TurboAssembler::GetOffset(int32_t offset, Label* L, OffsetSize bits) { +int32_t MacroAssembler::GetOffset(int32_t offset, Label* L, OffsetSize bits) { if (L) { offset = branch_offset_helper(L, bits) >> 2; } else { @@ -3475,7 +3475,7 @@ int32_t TurboAssembler::GetOffset(int32_t offset, Label* L, OffsetSize bits) { return offset; } -Register TurboAssembler::GetRtAsRegisterHelper(const Operand& rt, +Register MacroAssembler::GetRtAsRegisterHelper(const Operand& rt, Register scratch) { Register r2 = no_reg; if (rt.is_reg()) { @@ -3488,14 +3488,14 @@ Register TurboAssembler::GetRtAsRegisterHelper(const Operand& rt, return r2; } -bool TurboAssembler::CalculateOffset(Label* L, int32_t* offset, +bool MacroAssembler::CalculateOffset(Label* L, int32_t* offset, OffsetSize bits) { if (!is_near(L, bits)) return false; *offset = 
GetOffset(*offset, L, bits); return true; } -bool TurboAssembler::CalculateOffset(Label* L, int32_t* offset, OffsetSize bits, +bool MacroAssembler::CalculateOffset(Label* L, int32_t* offset, OffsetSize bits, Register* scratch, const Operand& rt) { if (!is_near(L, bits)) return false; *scratch = GetRtAsRegisterHelper(rt, *scratch); @@ -3503,7 +3503,7 @@ bool TurboAssembler::CalculateOffset(Label* L, int32_t* offset, OffsetSize bits, return true; } -bool TurboAssembler::BranchShortHelperR6(int32_t offset, Label* L, +bool MacroAssembler::BranchShortHelperR6(int32_t offset, Label* L, Condition cond, Register rs, const Operand& rt) { DCHECK(L == nullptr || offset == 0); @@ -3716,7 +3716,7 @@ bool TurboAssembler::BranchShortHelperR6(int32_t offset, Label* L, return true; } -bool TurboAssembler::BranchShortHelper(int16_t offset, Label* L, Condition cond, +bool MacroAssembler::BranchShortHelper(int16_t offset, Label* L, Condition cond, Register rs, const Operand& rt, BranchDelaySlot bdslot) { DCHECK(L == nullptr || offset == 0); @@ -3853,7 +3853,7 @@ bool TurboAssembler::BranchShortHelper(int16_t offset, Label* L, Condition cond, return true; } -bool TurboAssembler::BranchShortCheck(int32_t offset, Label* L, Condition cond, +bool MacroAssembler::BranchShortCheck(int32_t offset, Label* L, Condition cond, Register rs, const Operand& rt, BranchDelaySlot bdslot) { BRANCH_ARGS_CHECK(cond, rs, rt); @@ -3876,28 +3876,28 @@ bool TurboAssembler::BranchShortCheck(int32_t offset, Label* L, Condition cond, } } -void TurboAssembler::BranchShort(int32_t offset, Condition cond, Register rs, +void MacroAssembler::BranchShort(int32_t offset, Condition cond, Register rs, const Operand& rt, BranchDelaySlot bdslot) { BranchShortCheck(offset, nullptr, cond, rs, rt, bdslot); } -void TurboAssembler::BranchShort(Label* L, Condition cond, Register rs, +void MacroAssembler::BranchShort(Label* L, Condition cond, Register rs, const Operand& rt, BranchDelaySlot bdslot) { BranchShortCheck(0, L, cond, rs, rt, bdslot); } -void TurboAssembler::BranchAndLink(int32_t offset, BranchDelaySlot bdslot) { +void MacroAssembler::BranchAndLink(int32_t offset, BranchDelaySlot bdslot) { BranchAndLinkShort(offset, bdslot); } -void TurboAssembler::BranchAndLink(int32_t offset, Condition cond, Register rs, +void MacroAssembler::BranchAndLink(int32_t offset, Condition cond, Register rs, const Operand& rt, BranchDelaySlot bdslot) { bool is_near = BranchAndLinkShortCheck(offset, nullptr, cond, rs, rt, bdslot); DCHECK(is_near); USE(is_near); } -void TurboAssembler::BranchAndLink(Label* L, BranchDelaySlot bdslot) { +void MacroAssembler::BranchAndLink(Label* L, BranchDelaySlot bdslot) { if (L->is_bound()) { if (is_near_branch(L)) { BranchAndLinkShort(L, bdslot); @@ -3913,7 +3913,7 @@ void TurboAssembler::BranchAndLink(Label* L, BranchDelaySlot bdslot) { } } -void TurboAssembler::BranchAndLink(Label* L, Condition cond, Register rs, +void MacroAssembler::BranchAndLink(Label* L, Condition cond, Register rs, const Operand& rt, BranchDelaySlot bdslot) { if (L->is_bound()) { if (!BranchAndLinkShortCheck(0, L, cond, rs, rt, bdslot)) { @@ -3936,7 +3936,7 @@ void TurboAssembler::BranchAndLink(Label* L, Condition cond, Register rs, } } -void TurboAssembler::BranchAndLinkShortHelper(int16_t offset, Label* L, +void MacroAssembler::BranchAndLinkShortHelper(int16_t offset, Label* L, BranchDelaySlot bdslot) { DCHECK(L == nullptr || offset == 0); offset = GetOffset(offset, L, OffsetSize::kOffset16); @@ -3946,13 +3946,13 @@ void 
TurboAssembler::BranchAndLinkShortHelper(int16_t offset, Label* L, if (bdslot == PROTECT) nop(); } -void TurboAssembler::BranchAndLinkShortHelperR6(int32_t offset, Label* L) { +void MacroAssembler::BranchAndLinkShortHelperR6(int32_t offset, Label* L) { DCHECK(L == nullptr || offset == 0); offset = GetOffset(offset, L, OffsetSize::kOffset26); balc(offset); } -void TurboAssembler::BranchAndLinkShort(int32_t offset, +void MacroAssembler::BranchAndLinkShort(int32_t offset, BranchDelaySlot bdslot) { if (kArchVariant == kMips64r6 && bdslot == PROTECT) { DCHECK(is_int26(offset)); @@ -3963,7 +3963,7 @@ void TurboAssembler::BranchAndLinkShort(int32_t offset, } } -void TurboAssembler::BranchAndLinkShort(Label* L, BranchDelaySlot bdslot) { +void MacroAssembler::BranchAndLinkShort(Label* L, BranchDelaySlot bdslot) { if (kArchVariant == kMips64r6 && bdslot == PROTECT) { BranchAndLinkShortHelperR6(0, L); } else { @@ -3971,7 +3971,7 @@ void TurboAssembler::BranchAndLinkShort(Label* L, BranchDelaySlot bdslot) { } } -bool TurboAssembler::BranchAndLinkShortHelperR6(int32_t offset, Label* L, +bool MacroAssembler::BranchAndLinkShortHelperR6(int32_t offset, Label* L, Condition cond, Register rs, const Operand& rt) { DCHECK(L == nullptr || offset == 0); @@ -4113,7 +4113,7 @@ bool TurboAssembler::BranchAndLinkShortHelperR6(int32_t offset, Label* L, // Pre r6 we need to use a bgezal or bltzal, but they can't be used directly // with the slt instructions. We could use sub or add instead but we would miss // overflow cases, so we keep slt and add an intermediate third instruction. -bool TurboAssembler::BranchAndLinkShortHelper(int16_t offset, Label* L, +bool MacroAssembler::BranchAndLinkShortHelper(int16_t offset, Label* L, Condition cond, Register rs, const Operand& rt, BranchDelaySlot bdslot) { @@ -4203,7 +4203,7 @@ bool TurboAssembler::BranchAndLinkShortHelper(int16_t offset, Label* L, return true; } -bool TurboAssembler::BranchAndLinkShortCheck(int32_t offset, Label* L, +bool MacroAssembler::BranchAndLinkShortCheck(int32_t offset, Label* L, Condition cond, Register rs, const Operand& rt, BranchDelaySlot bdslot) { @@ -4227,7 +4227,7 @@ bool TurboAssembler::BranchAndLinkShortCheck(int32_t offset, Label* L, } } -void TurboAssembler::LoadFromConstantsTable(Register destination, +void MacroAssembler::LoadFromConstantsTable(Register destination, int constant_index) { ASM_CODE_COMMENT(this); DCHECK(RootsTable::IsImmortalImmovable(RootIndex::kBuiltinsConstantsTable)); @@ -4237,11 +4237,11 @@ void TurboAssembler::LoadFromConstantsTable(Register destination, FixedArray::kHeaderSize + constant_index * kPointerSize)); } -void TurboAssembler::LoadRootRelative(Register destination, int32_t offset) { +void MacroAssembler::LoadRootRelative(Register destination, int32_t offset) { Ld(destination, MemOperand(kRootRegister, offset)); } -void TurboAssembler::LoadRootRegisterOffset(Register destination, +void MacroAssembler::LoadRootRegisterOffset(Register destination, intptr_t offset) { if (offset == 0) { Move(destination, kRootRegister); @@ -4250,7 +4250,7 @@ void TurboAssembler::LoadRootRegisterOffset(Register destination, } } -MemOperand TurboAssembler::ExternalReferenceAsOperand( +MemOperand MacroAssembler::ExternalReferenceAsOperand( ExternalReference reference, Register scratch) { if (root_array_available_ && options().enable_root_relative_access) { int64_t offset = @@ -4279,7 +4279,7 @@ MemOperand TurboAssembler::ExternalReferenceAsOperand( return MemOperand(scratch, 0); } -void TurboAssembler::Jump(Register target, 
Condition cond, Register rs, +void MacroAssembler::Jump(Register target, Condition cond, Register rs, const Operand& rt, BranchDelaySlot bd) { BlockTrampolinePoolScope block_trampoline_pool(this); if (kArchVariant == kMips64r6 && bd == PROTECT) { @@ -4303,7 +4303,7 @@ void TurboAssembler::Jump(Register target, Condition cond, Register rs, } } -void TurboAssembler::Jump(intptr_t target, RelocInfo::Mode rmode, +void MacroAssembler::Jump(intptr_t target, RelocInfo::Mode rmode, Condition cond, Register rs, const Operand& rt, BranchDelaySlot bd) { Label skip; @@ -4320,13 +4320,13 @@ void TurboAssembler::Jump(intptr_t target, RelocInfo::Mode rmode, } } -void TurboAssembler::Jump(Address target, RelocInfo::Mode rmode, Condition cond, +void MacroAssembler::Jump(Address target, RelocInfo::Mode rmode, Condition cond, Register rs, const Operand& rt, BranchDelaySlot bd) { DCHECK(!RelocInfo::IsCodeTarget(rmode)); Jump(static_cast(target), rmode, cond, rs, rt, bd); } -void TurboAssembler::Jump(Handle code, RelocInfo::Mode rmode, +void MacroAssembler::Jump(Handle code, RelocInfo::Mode rmode, Condition cond, Register rs, const Operand& rt, BranchDelaySlot bd) { DCHECK(RelocInfo::IsCodeTarget(rmode)); @@ -4347,13 +4347,13 @@ void TurboAssembler::Jump(Handle code, RelocInfo::Mode rmode, bind(&skip); } -void TurboAssembler::Jump(const ExternalReference& reference) { +void MacroAssembler::Jump(const ExternalReference& reference) { li(t9, reference); Jump(t9); } // Note: To call gcc-compiled C code on mips, you must call through t9. -void TurboAssembler::Call(Register target, Condition cond, Register rs, +void MacroAssembler::Call(Register target, Condition cond, Register rs, const Operand& rt, BranchDelaySlot bd) { BlockTrampolinePoolScope block_trampoline_pool(this); if (kArchVariant == kMips64r6 && bd == PROTECT) { @@ -4392,14 +4392,14 @@ void MacroAssembler::JumpIfIsInRange(Register value, unsigned lower_limit, } } -void TurboAssembler::Call(Address target, RelocInfo::Mode rmode, Condition cond, +void MacroAssembler::Call(Address target, RelocInfo::Mode rmode, Condition cond, Register rs, const Operand& rt, BranchDelaySlot bd) { BlockTrampolinePoolScope block_trampoline_pool(this); li(t9, Operand(static_cast(target), rmode), ADDRESS_LOAD); Call(t9, cond, rs, rt, bd); } -void TurboAssembler::Call(Handle code, RelocInfo::Mode rmode, +void MacroAssembler::Call(Handle code, RelocInfo::Mode rmode, Condition cond, Register rs, const Operand& rt, BranchDelaySlot bd) { BlockTrampolinePoolScope block_trampoline_pool(this); @@ -4412,7 +4412,7 @@ void TurboAssembler::Call(Handle code, RelocInfo::Mode rmode, Call(code.address(), rmode, cond, rs, rt, bd); } -void TurboAssembler::LoadEntryFromBuiltinIndex(Register builtin_index) { +void MacroAssembler::LoadEntryFromBuiltinIndex(Register builtin_index) { ASM_CODE_COMMENT(this); static_assert(kSystemPointerSize == 8); static_assert(kSmiTagSize == 1); @@ -4424,22 +4424,22 @@ void TurboAssembler::LoadEntryFromBuiltinIndex(Register builtin_index) { Ld(builtin_index, MemOperand(builtin_index, IsolateData::builtin_entry_table_offset())); } -void TurboAssembler::LoadEntryFromBuiltin(Builtin builtin, +void MacroAssembler::LoadEntryFromBuiltin(Builtin builtin, Register destination) { Ld(destination, EntryFromBuiltinAsOperand(builtin)); } -MemOperand TurboAssembler::EntryFromBuiltinAsOperand(Builtin builtin) { +MemOperand MacroAssembler::EntryFromBuiltinAsOperand(Builtin builtin) { DCHECK(root_array_available()); return MemOperand(kRootRegister, 
IsolateData::BuiltinEntrySlotOffset(builtin)); } -void TurboAssembler::CallBuiltinByIndex(Register builtin_index) { +void MacroAssembler::CallBuiltinByIndex(Register builtin_index) { ASM_CODE_COMMENT(this); LoadEntryFromBuiltinIndex(builtin_index); Call(builtin_index); } -void TurboAssembler::CallBuiltin(Builtin builtin) { +void MacroAssembler::CallBuiltin(Builtin builtin) { ASM_CODE_COMMENT_STRING(this, CommentForOffHeapTrampoline("call", builtin)); Register temp = t9; switch (options().builtin_call_jump_mode) { @@ -4465,7 +4465,7 @@ void TurboAssembler::CallBuiltin(Builtin builtin) { } } -void TurboAssembler::TailCallBuiltin(Builtin builtin) { +void MacroAssembler::TailCallBuiltin(Builtin builtin) { ASM_CODE_COMMENT_STRING(this, CommentForOffHeapTrampoline("tail call", builtin)); Register temp = t9; @@ -4492,7 +4492,7 @@ void TurboAssembler::TailCallBuiltin(Builtin builtin) { } } -void TurboAssembler::PatchAndJump(Address target) { +void MacroAssembler::PatchAndJump(Address target) { if (kArchVariant != kMips64r6) { ASM_CODE_COMMENT(this); UseScratchRegisterScope temps(this); @@ -4512,7 +4512,7 @@ void TurboAssembler::PatchAndJump(Address target) { } } -void TurboAssembler::StoreReturnAddressAndCall(Register target) { +void MacroAssembler::StoreReturnAddressAndCall(Register target) { ASM_CODE_COMMENT(this); // This generates the final instruction sequence for calls to C functions // once an exit frame has been constructed. @@ -4554,12 +4554,12 @@ void TurboAssembler::StoreReturnAddressAndCall(Register target) { DCHECK_EQ(kNumInstructionsToJump, InstructionsGeneratedSince(&find_ra)); } -void TurboAssembler::Ret(Condition cond, Register rs, const Operand& rt, +void MacroAssembler::Ret(Condition cond, Register rs, const Operand& rt, BranchDelaySlot bd) { Jump(ra, cond, rs, rt, bd); } -void TurboAssembler::BranchLong(Label* L, BranchDelaySlot bdslot) { +void MacroAssembler::BranchLong(Label* L, BranchDelaySlot bdslot) { if (kArchVariant == kMips64r6 && bdslot == PROTECT && (!L->is_bound() || is_near_r6(L))) { BranchShortHelperR6(0, L); @@ -4583,7 +4583,7 @@ void TurboAssembler::BranchLong(Label* L, BranchDelaySlot bdslot) { } } -void TurboAssembler::BranchLong(int32_t offset, BranchDelaySlot bdslot) { +void MacroAssembler::BranchLong(int32_t offset, BranchDelaySlot bdslot) { if (kArchVariant == kMips64r6 && bdslot == PROTECT && (is_int26(offset))) { BranchShortHelperR6(offset, nullptr); } else { @@ -4602,7 +4602,7 @@ void TurboAssembler::BranchLong(int32_t offset, BranchDelaySlot bdslot) { } } -void TurboAssembler::BranchAndLinkLong(Label* L, BranchDelaySlot bdslot) { +void MacroAssembler::BranchAndLinkLong(Label* L, BranchDelaySlot bdslot) { if (kArchVariant == kMips64r6 && bdslot == PROTECT && (!L->is_bound() || is_near_r6(L))) { BranchAndLinkShortHelperR6(0, L); @@ -4622,7 +4622,7 @@ void TurboAssembler::BranchAndLinkLong(Label* L, BranchDelaySlot bdslot) { } } -void TurboAssembler::DropArguments(Register count, ArgumentsCountType type, +void MacroAssembler::DropArguments(Register count, ArgumentsCountType type, ArgumentsCountMode mode, Register scratch) { switch (type) { case kCountIsInteger: { @@ -4646,7 +4646,7 @@ void TurboAssembler::DropArguments(Register count, ArgumentsCountType type, } } -void TurboAssembler::DropArgumentsAndPushNewReceiver(Register argc, +void MacroAssembler::DropArgumentsAndPushNewReceiver(Register argc, Register receiver, ArgumentsCountType type, ArgumentsCountMode mode, @@ -4662,7 +4662,7 @@ void TurboAssembler::DropArgumentsAndPushNewReceiver(Register argc, } 
} -void TurboAssembler::DropAndRet(int drop) { +void MacroAssembler::DropAndRet(int drop) { int32_t drop_size = drop * kSystemPointerSize; DCHECK(is_int31(drop_size)); @@ -4678,7 +4678,7 @@ void TurboAssembler::DropAndRet(int drop) { } } -void TurboAssembler::DropAndRet(int drop, Condition cond, Register r1, +void MacroAssembler::DropAndRet(int drop, Condition cond, Register r1, const Operand& r2) { // Both Drop and Ret need to be conditional. Label skip; @@ -4694,7 +4694,7 @@ void TurboAssembler::DropAndRet(int drop, Condition cond, Register r1, } } -void TurboAssembler::Drop(int count, Condition cond, Register reg, +void MacroAssembler::Drop(int count, Condition cond, Register reg, const Operand& op) { if (count <= 0) { return; @@ -4725,28 +4725,28 @@ void MacroAssembler::Swap(Register reg1, Register reg2, Register scratch) { } } -void TurboAssembler::Call(Label* target) { BranchAndLink(target); } +void MacroAssembler::Call(Label* target) { BranchAndLink(target); } -void TurboAssembler::LoadAddress(Register dst, Label* target) { +void MacroAssembler::LoadAddress(Register dst, Label* target) { uint64_t address = jump_address(target); li(dst, address); } -void TurboAssembler::Push(Smi smi) { +void MacroAssembler::Push(Smi smi) { UseScratchRegisterScope temps(this); Register scratch = temps.Acquire(); li(scratch, Operand(smi)); push(scratch); } -void TurboAssembler::Push(Handle handle) { +void MacroAssembler::Push(Handle handle) { UseScratchRegisterScope temps(this); Register scratch = temps.Acquire(); li(scratch, Operand(handle)); push(scratch); } -void TurboAssembler::PushArray(Register array, Register size, Register scratch, +void MacroAssembler::PushArray(Register array, Register size, Register scratch, Register scratch2, PushArrayOrder order) { DCHECK(!AreAliased(array, size, scratch, scratch2)); Label loop, entry; @@ -4806,12 +4806,12 @@ void MacroAssembler::PopStackHandler() { Sd(a1, MemOperand(scratch)); } -void TurboAssembler::FPUCanonicalizeNaN(const DoubleRegister dst, +void MacroAssembler::FPUCanonicalizeNaN(const DoubleRegister dst, const DoubleRegister src) { sub_d(dst, src, kDoubleRegZero); } -void TurboAssembler::MovFromFloatResult(const DoubleRegister dst) { +void MacroAssembler::MovFromFloatResult(const DoubleRegister dst) { if (IsMipsSoftFloatABI) { if (kArchEndian == kLittle) { Move(dst, v0, v1); @@ -4823,7 +4823,7 @@ void TurboAssembler::MovFromFloatResult(const DoubleRegister dst) { } } -void TurboAssembler::MovFromFloatParameter(const DoubleRegister dst) { +void MacroAssembler::MovFromFloatParameter(const DoubleRegister dst) { if (IsMipsSoftFloatABI) { if (kArchEndian == kLittle) { Move(dst, a0, a1); @@ -4835,7 +4835,7 @@ void TurboAssembler::MovFromFloatParameter(const DoubleRegister dst) { } } -void TurboAssembler::MovToFloatParameter(DoubleRegister src) { +void MacroAssembler::MovToFloatParameter(DoubleRegister src) { if (!IsMipsSoftFloatABI) { Move(f12, src); } else { @@ -4847,7 +4847,7 @@ void TurboAssembler::MovToFloatParameter(DoubleRegister src) { } } -void TurboAssembler::MovToFloatResult(DoubleRegister src) { +void MacroAssembler::MovToFloatResult(DoubleRegister src) { if (!IsMipsSoftFloatABI) { Move(f0, src); } else { @@ -4859,7 +4859,7 @@ void TurboAssembler::MovToFloatResult(DoubleRegister src) { } } -void TurboAssembler::MovToFloatParameters(DoubleRegister src1, +void MacroAssembler::MovToFloatParameters(DoubleRegister src1, DoubleRegister src2) { if (!IsMipsSoftFloatABI) { const DoubleRegister fparg2 = f13; @@ -4893,10 +4893,10 @@ void 
MacroAssembler::LoadStackLimit(Register destination, StackLimitKind kind) { kind == StackLimitKind::kRealStackLimit ? ExternalReference::address_of_real_jslimit(isolate) : ExternalReference::address_of_jslimit(isolate); - DCHECK(TurboAssembler::IsAddressableThroughRootRegister(isolate, limit)); + DCHECK(MacroAssembler::IsAddressableThroughRootRegister(isolate, limit)); intptr_t offset = - TurboAssembler::RootRegisterOffsetForExternalReference(isolate, limit); + MacroAssembler::RootRegisterOffsetForExternalReference(isolate, limit); CHECK(is_int32(offset)); Ld(destination, MemOperand(kRootRegister, static_cast(offset))); } @@ -5139,7 +5139,7 @@ void MacroAssembler::GetInstanceTypeRange(Register map, Register type_reg, // ----------------------------------------------------------------------------- // Runtime calls. -void TurboAssembler::DaddOverflow(Register dst, Register left, +void MacroAssembler::DaddOverflow(Register dst, Register left, const Operand& right, Register overflow) { ASM_CODE_COMMENT(this); BlockTrampolinePoolScope block_trampoline_pool(this); @@ -5170,7 +5170,7 @@ void TurboAssembler::DaddOverflow(Register dst, Register left, } } -void TurboAssembler::DsubOverflow(Register dst, Register left, +void MacroAssembler::DsubOverflow(Register dst, Register left, const Operand& right, Register overflow) { ASM_CODE_COMMENT(this); BlockTrampolinePoolScope block_trampoline_pool(this); @@ -5201,7 +5201,7 @@ void TurboAssembler::DsubOverflow(Register dst, Register left, } } -void TurboAssembler::MulOverflow(Register dst, Register left, +void MacroAssembler::MulOverflow(Register dst, Register left, const Operand& right, Register overflow) { ASM_CODE_COMMENT(this); BlockTrampolinePoolScope block_trampoline_pool(this); @@ -5231,7 +5231,7 @@ void TurboAssembler::MulOverflow(Register dst, Register left, xor_(overflow, overflow, scratch); } -void TurboAssembler::DMulOverflow(Register dst, Register left, +void MacroAssembler::DMulOverflow(Register dst, Register left, const Operand& right, Register overflow) { ASM_CODE_COMMENT(this); BlockTrampolinePoolScope block_trampoline_pool(this); @@ -5347,10 +5347,10 @@ void MacroAssembler::EmitDecrementCounter(StatsCounter* counter, int value, // ----------------------------------------------------------------------------- // Debugging. 
-void TurboAssembler::Trap() { stop(); } -void TurboAssembler::DebugBreak() { stop(); } +void MacroAssembler::Trap() { stop(); } +void MacroAssembler::DebugBreak() { stop(); } -void TurboAssembler::Check(Condition cc, AbortReason reason, Register rs, +void MacroAssembler::Check(Condition cc, AbortReason reason, Register rs, Operand rt) { Label L; Branch(&L, cc, rs, rt); @@ -5359,7 +5359,7 @@ void TurboAssembler::Check(Condition cc, AbortReason reason, Register rs, bind(&L); } -void TurboAssembler::Abort(AbortReason reason) { +void MacroAssembler::Abort(AbortReason reason) { Label abort_start; bind(&abort_start); if (v8_flags.code_comments) { @@ -5416,7 +5416,7 @@ void TurboAssembler::Abort(AbortReason reason) { } } -void TurboAssembler::LoadMap(Register destination, Register object) { +void MacroAssembler::LoadMap(Register destination, Register object) { Ld(destination, FieldMemOperand(object, HeapObject::kMapOffset)); } @@ -5427,16 +5427,16 @@ void MacroAssembler::LoadNativeContextSlot(Register dst, int index) { Ld(dst, MemOperand(dst, Context::SlotOffset(index))); } -void TurboAssembler::StubPrologue(StackFrame::Type type) { +void MacroAssembler::StubPrologue(StackFrame::Type type) { UseScratchRegisterScope temps(this); Register scratch = temps.Acquire(); li(scratch, Operand(StackFrame::TypeToMarker(type))); PushCommonFrame(scratch); } -void TurboAssembler::Prologue() { PushStandardFrame(a1); } +void MacroAssembler::Prologue() { PushStandardFrame(a1); } -void TurboAssembler::EnterFrame(StackFrame::Type type) { +void MacroAssembler::EnterFrame(StackFrame::Type type) { ASM_CODE_COMMENT(this); BlockTrampolinePoolScope block_trampoline_pool(this); Push(ra, fp); @@ -5451,7 +5451,7 @@ void TurboAssembler::EnterFrame(StackFrame::Type type) { #endif // V8_ENABLE_WEBASSEMBLY } -void TurboAssembler::LeaveFrame(StackFrame::Type type) { +void MacroAssembler::LeaveFrame(StackFrame::Type type) { ASM_CODE_COMMENT(this); daddiu(sp, fp, 2 * kPointerSize); Ld(ra, MemOperand(fp, 1 * kPointerSize)); @@ -5568,7 +5568,7 @@ void MacroAssembler::LeaveExitFrame(Register argument_count, bool do_return, daddiu(sp, sp, 2 * kPointerSize); } -int TurboAssembler::ActivationFrameAlignment() { +int MacroAssembler::ActivationFrameAlignment() { #if V8_HOST_ARCH_MIPS || V8_HOST_ARCH_MIPS64 // Running on the real platform. Use the alignment as mandated by the local // environment. 
@@ -5584,7 +5584,7 @@ int TurboAssembler::ActivationFrameAlignment() { #endif // V8_HOST_ARCH_MIPS } -void TurboAssembler::SmiUntag(Register dst, const MemOperand& src) { +void MacroAssembler::SmiUntag(Register dst, const MemOperand& src) { if (SmiValuesAre32Bits()) { Lw(dst, MemOperand(src.rm(), SmiWordOffset(src.offset()))); } else { @@ -5594,7 +5594,7 @@ void TurboAssembler::SmiUntag(Register dst, const MemOperand& src) { } } -void TurboAssembler::JumpIfSmi(Register value, Label* smi_label, +void MacroAssembler::JumpIfSmi(Register value, Label* smi_label, BranchDelaySlot bd) { DCHECK_EQ(0, kSmiTag); UseScratchRegisterScope temps(this); @@ -5614,12 +5614,12 @@ void MacroAssembler::JumpIfNotSmi(Register value, Label* not_smi_label, #ifdef V8_ENABLE_DEBUG_CODE -void TurboAssembler::Assert(Condition cc, AbortReason reason, Register rs, +void MacroAssembler::Assert(Condition cc, AbortReason reason, Register rs, Operand rt) { if (v8_flags.debug_code) Check(cc, reason, rs, rt); } -void TurboAssembler::AssertNotSmi(Register object) { +void MacroAssembler::AssertNotSmi(Register object) { if (v8_flags.debug_code) { ASM_CODE_COMMENT(this); static_assert(kSmiTag == 0); @@ -5630,7 +5630,7 @@ void TurboAssembler::AssertNotSmi(Register object) { } } -void TurboAssembler::AssertSmi(Register object) { +void MacroAssembler::AssertSmi(Register object) { if (v8_flags.debug_code) { ASM_CODE_COMMENT(this); static_assert(kSmiTag == 0); @@ -5760,7 +5760,7 @@ void MacroAssembler::AssertUndefinedOrAllocationSite(Register object, #endif // V8_ENABLE_DEBUG_CODE -void TurboAssembler::Float32Max(FPURegister dst, FPURegister src1, +void MacroAssembler::Float32Max(FPURegister dst, FPURegister src1, FPURegister src2, Label* out_of_line) { ASM_CODE_COMMENT(this); if (src1 == src2) { @@ -5806,12 +5806,12 @@ void TurboAssembler::Float32Max(FPURegister dst, FPURegister src1, } } -void TurboAssembler::Float32MaxOutOfLine(FPURegister dst, FPURegister src1, +void MacroAssembler::Float32MaxOutOfLine(FPURegister dst, FPURegister src1, FPURegister src2) { add_s(dst, src1, src2); } -void TurboAssembler::Float32Min(FPURegister dst, FPURegister src1, +void MacroAssembler::Float32Min(FPURegister dst, FPURegister src1, FPURegister src2, Label* out_of_line) { ASM_CODE_COMMENT(this); if (src1 == src2) { @@ -5857,12 +5857,12 @@ void TurboAssembler::Float32Min(FPURegister dst, FPURegister src1, } } -void TurboAssembler::Float32MinOutOfLine(FPURegister dst, FPURegister src1, +void MacroAssembler::Float32MinOutOfLine(FPURegister dst, FPURegister src1, FPURegister src2) { add_s(dst, src1, src2); } -void TurboAssembler::Float64Max(FPURegister dst, FPURegister src1, +void MacroAssembler::Float64Max(FPURegister dst, FPURegister src1, FPURegister src2, Label* out_of_line) { ASM_CODE_COMMENT(this); if (src1 == src2) { @@ -5907,12 +5907,12 @@ void TurboAssembler::Float64Max(FPURegister dst, FPURegister src1, } } -void TurboAssembler::Float64MaxOutOfLine(FPURegister dst, FPURegister src1, +void MacroAssembler::Float64MaxOutOfLine(FPURegister dst, FPURegister src1, FPURegister src2) { add_d(dst, src1, src2); } -void TurboAssembler::Float64Min(FPURegister dst, FPURegister src1, +void MacroAssembler::Float64Min(FPURegister dst, FPURegister src1, FPURegister src2, Label* out_of_line) { ASM_CODE_COMMENT(this); if (src1 == src2) { @@ -5957,14 +5957,14 @@ void TurboAssembler::Float64Min(FPURegister dst, FPURegister src1, } } -void TurboAssembler::Float64MinOutOfLine(FPURegister dst, FPURegister src1, +void MacroAssembler::Float64MinOutOfLine(FPURegister 
dst, FPURegister src1, FPURegister src2) { add_d(dst, src1, src2); } static const int kRegisterPassedArguments = 8; -int TurboAssembler::CalculateStackPassedWords(int num_reg_arguments, +int MacroAssembler::CalculateStackPassedWords(int num_reg_arguments, int num_double_arguments) { int stack_passed_words = 0; int num_args = num_reg_arguments + num_double_arguments; @@ -5977,7 +5977,7 @@ int TurboAssembler::CalculateStackPassedWords(int num_reg_arguments, return stack_passed_words; } -void TurboAssembler::PrepareCallCFunction(int num_reg_arguments, +void MacroAssembler::PrepareCallCFunction(int num_reg_arguments, int num_double_arguments, Register scratch) { ASM_CODE_COMMENT(this); @@ -6005,12 +6005,12 @@ void TurboAssembler::PrepareCallCFunction(int num_reg_arguments, } } -void TurboAssembler::PrepareCallCFunction(int num_reg_arguments, +void MacroAssembler::PrepareCallCFunction(int num_reg_arguments, Register scratch) { PrepareCallCFunction(num_reg_arguments, 0, scratch); } -void TurboAssembler::CallCFunction(ExternalReference function, +void MacroAssembler::CallCFunction(ExternalReference function, int num_reg_arguments, int num_double_arguments) { ASM_CODE_COMMENT(this); @@ -6019,22 +6019,22 @@ void TurboAssembler::CallCFunction(ExternalReference function, CallCFunctionHelper(t9, num_reg_arguments, num_double_arguments); } -void TurboAssembler::CallCFunction(Register function, int num_reg_arguments, +void MacroAssembler::CallCFunction(Register function, int num_reg_arguments, int num_double_arguments) { ASM_CODE_COMMENT(this); CallCFunctionHelper(function, num_reg_arguments, num_double_arguments); } -void TurboAssembler::CallCFunction(ExternalReference function, +void MacroAssembler::CallCFunction(ExternalReference function, int num_arguments) { CallCFunction(function, num_arguments, 0); } -void TurboAssembler::CallCFunction(Register function, int num_arguments) { +void MacroAssembler::CallCFunction(Register function, int num_arguments) { CallCFunction(function, num_arguments, 0); } -void TurboAssembler::CallCFunctionHelper(Register function, +void MacroAssembler::CallCFunctionHelper(Register function, int num_reg_arguments, int num_double_arguments) { DCHECK_LE(num_reg_arguments + num_double_arguments, kMaxCParameters); @@ -6129,7 +6129,7 @@ void TurboAssembler::CallCFunctionHelper(Register function, #undef BRANCH_ARGS_CHECK -void TurboAssembler::CheckPageFlag(Register object, Register scratch, int mask, +void MacroAssembler::CheckPageFlag(Register object, Register scratch, int mask, Condition cc, Label* condition_met) { ASM_CODE_COMMENT(this); And(scratch, object, Operand(~kPageAlignmentMask)); @@ -6153,7 +6153,7 @@ Register GetRegisterThatIsNotOneOf(Register reg1, Register reg2, Register reg3, UNREACHABLE(); } -void TurboAssembler::ComputeCodeStartAddress(Register dst) { +void MacroAssembler::ComputeCodeStartAddress(Register dst) { // This push on ra and the pop below together ensure that we restore the // register ra, which is needed while computing the code start address. 
push(ra); @@ -6173,7 +6173,7 @@ void TurboAssembler::ComputeCodeStartAddress(Register dst) { pop(ra); // Restore ra } -void TurboAssembler::CallForDeoptimization(Builtin target, int, Label* exit, +void MacroAssembler::CallForDeoptimization(Builtin target, int, Label* exit, DeoptimizeKind kind, Label* ret, Label*) { ASM_CODE_COMMENT(this); @@ -6186,14 +6186,14 @@ void TurboAssembler::CallForDeoptimization(Builtin target, int, Label* exit, : Deoptimizer::kEagerDeoptExitSize); } -void TurboAssembler::LoadCodeEntry(Register destination, +void MacroAssembler::LoadCodeEntry(Register destination, Register code_data_container_object) { ASM_CODE_COMMENT(this); Ld(destination, FieldMemOperand(code_data_container_object, Code::kCodeEntryPointOffset)); } -void TurboAssembler::LoadCodeInstructionStreamNonBuiltin( +void MacroAssembler::LoadCodeInstructionStreamNonBuiltin( Register destination, Register code_data_container_object) { ASM_CODE_COMMENT(this); // Compute the InstructionStream object pointer from the code entry point. @@ -6203,13 +6203,13 @@ void TurboAssembler::LoadCodeInstructionStreamNonBuiltin( Operand(InstructionStream::kHeaderSize - kHeapObjectTag)); } -void TurboAssembler::CallCodeObject(Register code_data_container_object) { +void MacroAssembler::CallCodeObject(Register code_data_container_object) { ASM_CODE_COMMENT(this); LoadCodeEntry(code_data_container_object, code_data_container_object); Call(code_data_container_object); } -void TurboAssembler::JumpCodeObject(Register code_data_container_object, +void MacroAssembler::JumpCodeObject(Register code_data_container_object, JumpMode jump_mode) { ASM_CODE_COMMENT(this); DCHECK_EQ(JumpMode::kJump, jump_mode); diff --git a/src/codegen/mips64/macro-assembler-mips64.h b/src/codegen/mips64/macro-assembler-mips64.h index c997872ebb..dcf3651f34 100644 --- a/src/codegen/mips64/macro-assembler-mips64.h +++ b/src/codegen/mips64/macro-assembler-mips64.h @@ -90,9 +90,9 @@ inline MemOperand CFunctionArgumentOperand(int index) { return MemOperand(sp, offset); } -class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase { +class V8_EXPORT_PRIVATE MacroAssembler : public MacroAssemblerBase { public: - using TurboAssemblerBase::TurboAssemblerBase; + using MacroAssemblerBase::MacroAssemblerBase; // Activation support. void EnterFrame(StackFrame::Type type); @@ -913,79 +913,6 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase { // Define an exception handler and bind a label. void BindExceptionHandler(Label* label) { bind(label); } - protected: - inline Register GetRtAsRegisterHelper(const Operand& rt, Register scratch); - inline int32_t GetOffset(int32_t offset, Label* L, OffsetSize bits); - - private: - bool has_double_zero_reg_set_ = false; - - // Performs a truncating conversion of a floating point number as used by - // the JS bitwise operations. See ECMA-262 9.5: ToInt32. Goes to 'done' if it - // succeeds, otherwise falls through if result is saturated. On return - // 'result' either holds answer, or is clobbered on fall through. 
- void TryInlineTruncateDoubleToI(Register result, DoubleRegister input, - Label* done); - - void CompareF(SecondaryField sizeField, FPUCondition cc, FPURegister cmp1, - FPURegister cmp2); - - void CompareIsNanF(SecondaryField sizeField, FPURegister cmp1, - FPURegister cmp2); - - void BranchShortMSA(MSABranchDF df, Label* target, MSABranchCondition cond, - MSARegister wt, BranchDelaySlot bd = PROTECT); - - void CallCFunctionHelper(Register function, int num_reg_arguments, - int num_double_arguments); - - // TODO(mips) Reorder parameters so out parameters come last. - bool CalculateOffset(Label* L, int32_t* offset, OffsetSize bits); - bool CalculateOffset(Label* L, int32_t* offset, OffsetSize bits, - Register* scratch, const Operand& rt); - - void BranchShortHelperR6(int32_t offset, Label* L); - void BranchShortHelper(int16_t offset, Label* L, BranchDelaySlot bdslot); - bool BranchShortHelperR6(int32_t offset, Label* L, Condition cond, - Register rs, const Operand& rt); - bool BranchShortHelper(int16_t offset, Label* L, Condition cond, Register rs, - const Operand& rt, BranchDelaySlot bdslot); - bool BranchShortCheck(int32_t offset, Label* L, Condition cond, Register rs, - const Operand& rt, BranchDelaySlot bdslot); - - void BranchAndLinkShortHelperR6(int32_t offset, Label* L); - void BranchAndLinkShortHelper(int16_t offset, Label* L, - BranchDelaySlot bdslot); - void BranchAndLinkShort(int32_t offset, BranchDelaySlot bdslot = PROTECT); - void BranchAndLinkShort(Label* L, BranchDelaySlot bdslot = PROTECT); - bool BranchAndLinkShortHelperR6(int32_t offset, Label* L, Condition cond, - Register rs, const Operand& rt); - bool BranchAndLinkShortHelper(int16_t offset, Label* L, Condition cond, - Register rs, const Operand& rt, - BranchDelaySlot bdslot); - bool BranchAndLinkShortCheck(int32_t offset, Label* L, Condition cond, - Register rs, const Operand& rt, - BranchDelaySlot bdslot); - void BranchLong(Label* L, BranchDelaySlot bdslot); - void BranchAndLinkLong(Label* L, BranchDelaySlot bdslot); - - template - void RoundDouble(FPURegister dst, FPURegister src, FPURoundingMode mode, - RoundFunc round); - - template - void RoundFloat(FPURegister dst, FPURegister src, FPURoundingMode mode, - RoundFunc round); - - // Push a fixed frame, consisting of ra, fp. - void PushCommonFrame(Register marker_reg = no_reg); -}; - -// MacroAssembler implements a collection of frequently used macros. -class V8_EXPORT_PRIVATE MacroAssembler : public TurboAssembler { - public: - using TurboAssembler::TurboAssembler; - // It assumes that the arguments are located below the stack pointer. // argc is the number of arguments not including the receiver. // TODO(victorgomes): Remove this function once we stick with the reversed @@ -1269,17 +1196,83 @@ class V8_EXPORT_PRIVATE MacroAssembler : public TurboAssembler { DecodeField(reg, reg); } + protected: + inline Register GetRtAsRegisterHelper(const Operand& rt, Register scratch); + inline int32_t GetOffset(int32_t offset, Label* L, OffsetSize bits); + private: + bool has_double_zero_reg_set_ = false; + // Helper functions for generating invokes. void InvokePrologue(Register expected_parameter_count, Register actual_parameter_count, Label* done, InvokeType type); + // Performs a truncating conversion of a floating point number as used by + // the JS bitwise operations. See ECMA-262 9.5: ToInt32. Goes to 'done' if it + // succeeds, otherwise falls through if result is saturated. On return + // 'result' either holds answer, or is clobbered on fall through. 
+ void TryInlineTruncateDoubleToI(Register result, DoubleRegister input, + Label* done); + + void CompareF(SecondaryField sizeField, FPUCondition cc, FPURegister cmp1, + FPURegister cmp2); + + void CompareIsNanF(SecondaryField sizeField, FPURegister cmp1, + FPURegister cmp2); + + void BranchShortMSA(MSABranchDF df, Label* target, MSABranchCondition cond, + MSARegister wt, BranchDelaySlot bd = PROTECT); + + void CallCFunctionHelper(Register function, int num_reg_arguments, + int num_double_arguments); + + // TODO(mips) Reorder parameters so out parameters come last. + bool CalculateOffset(Label* L, int32_t* offset, OffsetSize bits); + bool CalculateOffset(Label* L, int32_t* offset, OffsetSize bits, + Register* scratch, const Operand& rt); + + void BranchShortHelperR6(int32_t offset, Label* L); + void BranchShortHelper(int16_t offset, Label* L, BranchDelaySlot bdslot); + bool BranchShortHelperR6(int32_t offset, Label* L, Condition cond, + Register rs, const Operand& rt); + bool BranchShortHelper(int16_t offset, Label* L, Condition cond, Register rs, + const Operand& rt, BranchDelaySlot bdslot); + bool BranchShortCheck(int32_t offset, Label* L, Condition cond, Register rs, + const Operand& rt, BranchDelaySlot bdslot); + + void BranchAndLinkShortHelperR6(int32_t offset, Label* L); + void BranchAndLinkShortHelper(int16_t offset, Label* L, + BranchDelaySlot bdslot); + void BranchAndLinkShort(int32_t offset, BranchDelaySlot bdslot = PROTECT); + void BranchAndLinkShort(Label* L, BranchDelaySlot bdslot = PROTECT); + bool BranchAndLinkShortHelperR6(int32_t offset, Label* L, Condition cond, + Register rs, const Operand& rt); + bool BranchAndLinkShortHelper(int16_t offset, Label* L, Condition cond, + Register rs, const Operand& rt, + BranchDelaySlot bdslot); + bool BranchAndLinkShortCheck(int32_t offset, Label* L, Condition cond, + Register rs, const Operand& rt, + BranchDelaySlot bdslot); + void BranchLong(Label* L, BranchDelaySlot bdslot); + void BranchAndLinkLong(Label* L, BranchDelaySlot bdslot); + + template + void RoundDouble(FPURegister dst, FPURegister src, FPURoundingMode mode, + RoundFunc round); + + template + void RoundFloat(FPURegister dst, FPURegister src, FPURoundingMode mode, + RoundFunc round); + + // Push a fixed frame, consisting of ra, fp. + void PushCommonFrame(Register marker_reg = no_reg); + DISALLOW_IMPLICIT_CONSTRUCTORS(MacroAssembler); }; template -void TurboAssembler::GenerateSwitchTable(Register index, size_t case_count, +void MacroAssembler::GenerateSwitchTable(Register index, size_t case_count, Func GetLabelFunction) { // Ensure that dd-ed labels following this instruction use 8 bytes aligned // addresses. 
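Note on the mips64 hunks above: they are the whole merge in miniature. One class now stands where there were two, and every rounding lambda that used to take a TurboAssembler* tasm now takes a MacroAssembler* masm. The following is a self-contained toy sketch of that dispatch pattern, not the real V8 types; all names are simplified stand-ins.

// Toy model of the RoundDouble/Floor_d_d pattern rewritten in this CL.
#include <cstdio>

class MacroAssemblerBase {};  // stand-in for the renamed TurboAssemblerBase

class MacroAssembler : public MacroAssemblerBase {
 public:
  // Stand-ins for the mode-specific FPU instructions (floor_l_d, ceil_l_d).
  void floor_l_d() { std::puts("floor_l_d"); }
  void ceil_l_d() { std::puts("ceil_l_d"); }

  // Stand-in for RoundDouble(dst, src, mode, round): shared setup/teardown
  // around a callback that injects the one mode-specific instruction. The
  // only systematic change in the CL is this callback's parameter type
  // (TurboAssembler* tasm -> MacroAssembler* masm).
  template <typename RoundFunc>
  void RoundDouble(RoundFunc round) {
    // In the real code, FCSR rounding-mode save/restore happens here.
    round(this);
  }

  void Floor_d_d() {
    RoundDouble([](MacroAssembler* masm) { masm->floor_l_d(); });
  }
  void Ceil_d_d() {
    RoundDouble([](MacroAssembler* masm) { masm->ceil_l_d(); });
  }
};

int main() {
  MacroAssembler masm;
  masm.Floor_d_d();  // prints "floor_l_d"
  masm.Ceil_d_d();   // prints "ceil_l_d"
}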
diff --git a/src/codegen/ppc/assembler-ppc.h b/src/codegen/ppc/assembler-ppc.h
index 497af98ac2..6fd11e4a0f 100644
--- a/src/codegen/ppc/assembler-ppc.h
+++ b/src/codegen/ppc/assembler-ppc.h
@@ -1570,7 +1570,7 @@ class V8_EXPORT_PRIVATE V8_NODISCARD UseScratchRegisterScope {
 
  private:
   friend class Assembler;
-  friend class TurboAssembler;
+  friend class MacroAssembler;
 
   Assembler* assembler_;
   RegList old_available_;
diff --git a/src/codegen/ppc/macro-assembler-ppc.cc b/src/codegen/ppc/macro-assembler-ppc.cc
index c45a843aed..c4fa474f73 100644
--- a/src/codegen/ppc/macro-assembler-ppc.cc
+++ b/src/codegen/ppc/macro-assembler-ppc.cc
@@ -55,7 +55,7 @@ constexpr int kStackSavedSavedFPSizeInBytes =
 
 }  // namespace
 
-int TurboAssembler::RequiredStackSizeForCallerSaved(SaveFPRegsMode fp_mode,
+int MacroAssembler::RequiredStackSizeForCallerSaved(SaveFPRegsMode fp_mode,
                                                     Register exclusion1,
                                                     Register exclusion2,
                                                     Register exclusion3) const {
@@ -72,7 +72,7 @@ int TurboAssembler::RequiredStackSizeForCallerSaved(SaveFPRegsMode fp_mode,
   return bytes;
 }
 
-int TurboAssembler::PushCallerSaved(SaveFPRegsMode fp_mode, Register scratch1,
+int MacroAssembler::PushCallerSaved(SaveFPRegsMode fp_mode, Register scratch1,
                                     Register scratch2, Register exclusion1,
                                     Register exclusion2, Register exclusion3) {
   int bytes = 0;
@@ -91,7 +91,7 @@ int TurboAssembler::PushCallerSaved(SaveFPRegsMode fp_mode, Register scratch1,
   return bytes;
 }
 
-int TurboAssembler::PopCallerSaved(SaveFPRegsMode fp_mode, Register scratch1,
+int MacroAssembler::PopCallerSaved(SaveFPRegsMode fp_mode, Register scratch1,
                                    Register scratch2, Register exclusion1,
                                    Register exclusion2, Register exclusion3) {
   int bytes = 0;
@@ -109,12 +109,12 @@ int TurboAssembler::PopCallerSaved(SaveFPRegsMode fp_mode, Register scratch1,
   return bytes;
 }
 
-void TurboAssembler::Jump(Register target) {
+void MacroAssembler::Jump(Register target) {
   mtctr(target);
   bctr();
 }
 
-void TurboAssembler::LoadFromConstantsTable(Register destination,
+void MacroAssembler::LoadFromConstantsTable(Register destination,
                                             int constant_index) {
   DCHECK(RootsTable::IsImmortalImmovable(RootIndex::kBuiltinsConstantsTable));
 
@@ -127,11 +127,11 @@ void TurboAssembler::LoadFromConstantsTable(Register destination,
           r0);
 }
 
-void TurboAssembler::LoadRootRelative(Register destination, int32_t offset) {
+void MacroAssembler::LoadRootRelative(Register destination, int32_t offset) {
   LoadU64(destination, MemOperand(kRootRegister, offset), r0);
 }
 
-void TurboAssembler::LoadRootRegisterOffset(Register destination,
+void MacroAssembler::LoadRootRegisterOffset(Register destination,
                                             intptr_t offset) {
   if (offset == 0) {
     mr(destination, kRootRegister);
@@ -140,7 +140,7 @@ void TurboAssembler::LoadRootRegisterOffset(Register destination,
   }
 }
 
-MemOperand TurboAssembler::ExternalReferenceAsOperand(
+MemOperand MacroAssembler::ExternalReferenceAsOperand(
     ExternalReference reference, Register scratch) {
   if (root_array_available_ && options().enable_root_relative_access) {
     int64_t offset =
@@ -170,7 +170,7 @@ MemOperand TurboAssembler::ExternalReferenceAsOperand(
   return MemOperand(scratch, 0);
 }
 
-void TurboAssembler::Jump(intptr_t target, RelocInfo::Mode rmode,
+void MacroAssembler::Jump(intptr_t target, RelocInfo::Mode rmode,
                           Condition cond, CRegister cr) {
   Label skip;
@@ -183,13 +183,13 @@ void TurboAssembler::Jump(intptr_t target, RelocInfo::Mode rmode,
   bind(&skip);
 }
 
-void TurboAssembler::Jump(Address target, RelocInfo::Mode rmode, Condition cond,
+void MacroAssembler::Jump(Address target, RelocInfo::Mode rmode, Condition cond,
                           CRegister cr) {
   DCHECK(!RelocInfo::IsCodeTarget(rmode));
   Jump(static_cast<intptr_t>(target), rmode, cond, cr);
 }
 
-void TurboAssembler::Jump(Handle<Code> code, RelocInfo::Mode rmode,
+void MacroAssembler::Jump(Handle<Code> code, RelocInfo::Mode rmode,
                           Condition cond, CRegister cr) {
   DCHECK(RelocInfo::IsCodeTarget(rmode));
   DCHECK_IMPLIES(options().isolate_independent_code,
@@ -204,7 +204,7 @@ void TurboAssembler::Jump(Handle<Code> code, RelocInfo::Mode rmode,
   Jump(static_cast<intptr_t>(target_index), rmode, cond, cr);
 }
 
-void TurboAssembler::Jump(const ExternalReference& reference) {
+void MacroAssembler::Jump(const ExternalReference& reference) {
   UseScratchRegisterScope temps(this);
   Register scratch = temps.Acquire();
   Move(scratch, reference);
@@ -218,7 +218,7 @@ void TurboAssembler::Jump(const ExternalReference& reference) {
   Jump(scratch);
 }
 
-void TurboAssembler::Call(Register target) {
+void MacroAssembler::Call(Register target) {
   BlockTrampolinePoolScope block_trampoline_pool(this);
   // branch via link register and set LK bit for return point
   mtctr(target);
@@ -236,7 +236,7 @@ int MacroAssembler::CallSizeNotPredictableCodeSize(Address target,
   return (2 + kMovInstructionsNoConstantPool) * kInstrSize;
 }
 
-void TurboAssembler::Call(Address target, RelocInfo::Mode rmode,
+void MacroAssembler::Call(Address target, RelocInfo::Mode rmode,
                           Condition cond) {
   BlockTrampolinePoolScope block_trampoline_pool(this);
   DCHECK(cond == al);
@@ -252,7 +252,7 @@ void TurboAssembler::Call(Address target, RelocInfo::Mode rmode,
   bctrl();
 }
 
-void TurboAssembler::Call(Handle<Code> code, RelocInfo::Mode rmode,
+void MacroAssembler::Call(Handle<Code> code, RelocInfo::Mode rmode,
                           Condition cond) {
   BlockTrampolinePoolScope block_trampoline_pool(this);
   DCHECK(RelocInfo::IsCodeTarget(rmode));
@@ -268,7 +268,7 @@ void TurboAssembler::Call(Handle<Code> code, RelocInfo::Mode rmode,
   Call(static_cast<Address>(target_index), rmode, cond);
 }
 
-void TurboAssembler::CallBuiltin(Builtin builtin, Condition cond) {
+void MacroAssembler::CallBuiltin(Builtin builtin, Condition cond) {
   ASM_CODE_COMMENT_STRING(this, CommentForOffHeapTrampoline("call", builtin));
   // Use ip directly instead of using UseScratchRegisterScope, as we do not
   // preserve scratch registers across calls.
@@ -309,7 +309,7 @@ void TurboAssembler::CallBuiltin(Builtin builtin, Condition cond) {
   }
 }
 
-void TurboAssembler::TailCallBuiltin(Builtin builtin, Condition cond,
+void MacroAssembler::TailCallBuiltin(Builtin builtin, Condition cond,
                                      CRegister cr) {
   ASM_CODE_COMMENT_STRING(this,
                           CommentForOffHeapTrampoline("tail call", builtin));
@@ -352,13 +352,13 @@ void TurboAssembler::TailCallBuiltin(Builtin builtin, Condition cond,
   }
 }
 
-void TurboAssembler::Drop(int count) {
+void MacroAssembler::Drop(int count) {
   if (count > 0) {
     AddS64(sp, sp, Operand(count * kSystemPointerSize), r0);
   }
 }
 
-void TurboAssembler::Drop(Register count, Register scratch) {
+void MacroAssembler::Drop(Register count, Register scratch) {
   ShiftLeftU64(scratch, count, Operand(kSystemPointerSizeLog2));
   add(sp, sp, scratch);
 }
@@ -376,19 +376,19 @@ Operand MacroAssembler::ClearedValue() const {
       static_cast<int32_t>(HeapObjectReference::ClearedValue(isolate()).ptr()));
 }
 
-void TurboAssembler::Call(Label* target) { b(target, SetLK); }
+void MacroAssembler::Call(Label* target) { b(target, SetLK); }
 
-void TurboAssembler::Push(Handle<HeapObject> handle) {
+void MacroAssembler::Push(Handle<HeapObject> handle) {
   mov(r0, Operand(handle));
   push(r0);
 }
 
-void TurboAssembler::Push(Smi smi) {
+void MacroAssembler::Push(Smi smi) {
   mov(r0, Operand(smi));
   push(r0);
 }
 
-void TurboAssembler::PushArray(Register array, Register size, Register scratch,
+void MacroAssembler::PushArray(Register array, Register size, Register scratch,
                                Register scratch2, PushArrayOrder order) {
   Label loop, done;
 
@@ -420,7 +420,7 @@ void TurboAssembler::PushArray(Register array, Register size, Register scratch,
   }
 }
 
-void TurboAssembler::Move(Register dst, Handle<HeapObject> value,
+void MacroAssembler::Move(Register dst, Handle<HeapObject> value,
                           RelocInfo::Mode rmode) {
   // TODO(jgruber,v8:8887): Also consider a root-relative load when generating
   // non-isolate-independent code. In many cases it might be cheaper than
@@ -438,7 +438,7 @@ void TurboAssembler::Move(Register dst, Handle<HeapObject> value,
   }
 }
 
-void TurboAssembler::Move(Register dst, ExternalReference reference) {
+void MacroAssembler::Move(Register dst, ExternalReference reference) {
   // TODO(jgruber,v8:8887): Also consider a root-relative load when generating
   // non-isolate-independent code. In many cases it might be cheaper than
   // embedding the relocatable value.
@@ -449,20 +449,20 @@ void TurboAssembler::Move(Register dst, ExternalReference reference) {
   mov(dst, Operand(reference));
 }
 
-void TurboAssembler::Move(Register dst, Register src, Condition cond) {
+void MacroAssembler::Move(Register dst, Register src, Condition cond) {
   DCHECK(cond == al);
   if (dst != src) {
     mr(dst, src);
   }
 }
 
-void TurboAssembler::Move(DoubleRegister dst, DoubleRegister src) {
+void MacroAssembler::Move(DoubleRegister dst, DoubleRegister src) {
   if (dst != src) {
     fmr(dst, src);
   }
 }
 
-void TurboAssembler::MultiPush(RegList regs, Register location) {
+void MacroAssembler::MultiPush(RegList regs, Register location) {
   int16_t num_to_push = regs.Count();
   int16_t stack_offset = num_to_push * kSystemPointerSize;
 
@@ -475,7 +475,7 @@ void TurboAssembler::MultiPush(RegList regs, Register location) {
   }
 }
 
-void TurboAssembler::MultiPop(RegList regs, Register location) {
+void MacroAssembler::MultiPop(RegList regs, Register location) {
   int16_t stack_offset = 0;
 
   for (int16_t i = 0; i < Register::kNumRegisters; i++) {
@@ -487,7 +487,7 @@ void TurboAssembler::MultiPop(RegList regs, Register location) {
   addi(location, location, Operand(stack_offset));
 }
 
-void TurboAssembler::MultiPushDoubles(DoubleRegList dregs, Register location) {
+void MacroAssembler::MultiPushDoubles(DoubleRegList dregs, Register location) {
   int16_t num_to_push = dregs.Count();
   int16_t stack_offset = num_to_push * kDoubleSize;
 
@@ -501,7 +501,7 @@ void TurboAssembler::MultiPushDoubles(DoubleRegList dregs, Register location) {
   }
 }
 
-void TurboAssembler::MultiPushV128(Simd128RegList simd_regs, Register scratch,
+void MacroAssembler::MultiPushV128(Simd128RegList simd_regs, Register scratch,
                                    Register location) {
   int16_t num_to_push = simd_regs.Count();
   int16_t stack_offset = num_to_push * kSimd128Size;
 
@@ -516,7 +516,7 @@ void TurboAssembler::MultiPushV128(Simd128RegList simd_regs, Register scratch,
   }
 }
 
-void TurboAssembler::MultiPopDoubles(DoubleRegList dregs, Register location) {
+void MacroAssembler::MultiPopDoubles(DoubleRegList dregs, Register location) {
   int16_t stack_offset = 0;
 
   for (int16_t i = 0; i < DoubleRegister::kNumRegisters; i++) {
@@ -529,7 +529,7 @@ void TurboAssembler::MultiPopDoubles(DoubleRegList dregs, Register location) {
   addi(location, location, Operand(stack_offset));
 }
 
-void TurboAssembler::MultiPopV128(Simd128RegList simd_regs, Register scratch,
+void MacroAssembler::MultiPopV128(Simd128RegList simd_regs, Register scratch,
                                   Register location) {
   int16_t stack_offset = 0;
 
@@ -543,7 +543,7 @@ void TurboAssembler::MultiPopV128(Simd128RegList simd_regs, Register scratch,
   addi(location, location, Operand(stack_offset));
 }
 
-void TurboAssembler::MultiPushF64AndV128(DoubleRegList dregs,
+void MacroAssembler::MultiPushF64AndV128(DoubleRegList dregs,
                                          Simd128RegList simd_regs,
                                          Register scratch1, Register scratch2,
                                          Register location) {
@@ -580,7 +580,7 @@ void TurboAssembler::MultiPushF64AndV128(DoubleRegList dregs,
 #endif
 }
 
-void TurboAssembler::MultiPopF64AndV128(DoubleRegList dregs,
+void MacroAssembler::MultiPopF64AndV128(DoubleRegList dregs,
                                         Simd128RegList simd_regs,
                                         Register scratch1, Register scratch2,
                                         Register location) {
@@ -611,7 +611,7 @@ void TurboAssembler::MultiPopF64AndV128(DoubleRegList dregs,
   MultiPopDoubles(dregs);
 }
 
-void TurboAssembler::LoadTaggedRoot(Register destination, RootIndex index) {
+void MacroAssembler::LoadTaggedRoot(Register destination, RootIndex index) {
   ASM_CODE_COMMENT(this);
   if (V8_STATIC_ROOTS_BOOL && RootsTable::IsReadOnly(index)) {
     mov(destination,
         Operand(ReadOnlyRootPtr(index), RelocInfo::Mode::NO_INFO));
@@ -620,7 +620,7 @@ void TurboAssembler::LoadTaggedRoot(Register destination, RootIndex index) {
   LoadRoot(destination, index);
 }
 
-void TurboAssembler::LoadRoot(Register destination, RootIndex index,
+void MacroAssembler::LoadRoot(Register destination, RootIndex index,
                               Condition cond) {
   DCHECK(cond == al);
   if (V8_STATIC_ROOTS_BOOL && RootsTable::IsReadOnly(index)) {
@@ -631,7 +631,7 @@ void TurboAssembler::LoadRoot(Register destination, RootIndex index,
          MemOperand(kRootRegister, RootRegisterOffsetForRootIndex(index)), r0);
 }
 
-void TurboAssembler::LoadTaggedPointerField(const Register& destination,
+void MacroAssembler::LoadTaggedPointerField(const Register& destination,
                                             const MemOperand& field_operand,
                                             const Register& scratch) {
   if (COMPRESS_POINTERS_BOOL) {
@@ -641,7 +641,7 @@ void TurboAssembler::LoadTaggedPointerField(const Register& destination,
   }
 }
 
-void TurboAssembler::LoadAnyTaggedField(const Register& destination,
+void MacroAssembler::LoadAnyTaggedField(const Register& destination,
                                         const MemOperand& field_operand,
                                         const Register& scratch) {
   if (COMPRESS_POINTERS_BOOL) {
@@ -651,7 +651,7 @@ void TurboAssembler::LoadAnyTaggedField(const Register& destination,
   }
 }
 
-void TurboAssembler::SmiUntag(Register dst, const MemOperand& src, RCBit rc,
+void MacroAssembler::SmiUntag(Register dst, const MemOperand& src, RCBit rc,
                               Register scratch) {
   if (SmiValuesAre31Bits()) {
     LoadU32(dst, src, scratch);
@@ -662,7 +662,7 @@ void TurboAssembler::SmiUntag(Register dst, const MemOperand& src, RCBit rc,
   SmiUntag(dst, rc);
 }
 
-void TurboAssembler::StoreTaggedField(const Register& value,
+void MacroAssembler::StoreTaggedField(const Register& value,
                                       const MemOperand& dst_field_operand,
                                       const Register& scratch) {
   if (COMPRESS_POINTERS_BOOL) {
@@ -674,21 +674,21 @@ void TurboAssembler::StoreTaggedField(const Register& value,
   }
 }
 
-void TurboAssembler::DecompressTaggedSigned(Register destination,
+void MacroAssembler::DecompressTaggedSigned(Register destination,
                                             Register src) {
   RecordComment("[ DecompressTaggedSigned");
   ZeroExtWord32(destination, src);
   RecordComment("]");
 }
 
-void TurboAssembler::DecompressTaggedSigned(Register destination,
+void MacroAssembler::DecompressTaggedSigned(Register destination,
                                             MemOperand field_operand) {
   RecordComment("[ DecompressTaggedSigned");
   LoadU32(destination, field_operand, r0);
   RecordComment("]");
 }
 
-void TurboAssembler::DecompressTaggedPointer(Register destination,
+void MacroAssembler::DecompressTaggedPointer(Register destination,
                                              Register source) {
   RecordComment("[ DecompressTaggedPointer");
   ZeroExtWord32(destination, source);
@@ -696,7 +696,7 @@ void TurboAssembler::DecompressTaggedPointer(Register destination,
   RecordComment("]");
 }
 
-void TurboAssembler::DecompressTaggedPointer(Register destination,
+void MacroAssembler::DecompressTaggedPointer(Register destination,
                                              MemOperand field_operand) {
   RecordComment("[ DecompressTaggedPointer");
   LoadU32(destination, field_operand, r0);
@@ -704,14 +704,14 @@ void TurboAssembler::DecompressTaggedPointer(Register destination,
   RecordComment("]");
 }
 
-void TurboAssembler::DecompressTaggedPointer(const Register& destination,
+void MacroAssembler::DecompressTaggedPointer(const Register& destination,
                                              Tagged_t immediate) {
   ASM_CODE_COMMENT(this);
   AddS64(destination, kPtrComprCageBaseRegister,
          Operand(immediate, RelocInfo::Mode::NO_INFO));
 }
 
-void TurboAssembler::DecompressAnyTagged(Register destination,
+void MacroAssembler::DecompressAnyTagged(Register destination,
                                          MemOperand
field_operand) { RecordComment("[ DecompressAnyTagged"); LoadU32(destination, field_operand, r0); @@ -719,7 +719,7 @@ void TurboAssembler::DecompressAnyTagged(Register destination, RecordComment("]"); } -void TurboAssembler::DecompressAnyTagged(Register destination, +void MacroAssembler::DecompressAnyTagged(Register destination, Register source) { RecordComment("[ DecompressAnyTagged"); ZeroExtWord32(destination, source); @@ -727,7 +727,7 @@ void TurboAssembler::DecompressAnyTagged(Register destination, RecordComment("]"); } -void TurboAssembler::LoadTaggedSignedField(Register destination, +void MacroAssembler::LoadTaggedSignedField(Register destination, MemOperand field_operand, Register scratch) { if (COMPRESS_POINTERS_BOOL) { @@ -776,17 +776,17 @@ void MacroAssembler::RecordWriteField(Register object, int offset, } } -void TurboAssembler::MaybeSaveRegisters(RegList registers) { +void MacroAssembler::MaybeSaveRegisters(RegList registers) { if (registers.is_empty()) return; MultiPush(registers); } -void TurboAssembler::MaybeRestoreRegisters(RegList registers) { +void MacroAssembler::MaybeRestoreRegisters(RegList registers) { if (registers.is_empty()) return; MultiPop(registers); } -void TurboAssembler::CallEphemeronKeyBarrier(Register object, +void MacroAssembler::CallEphemeronKeyBarrier(Register object, Register slot_address, SaveFPRegsMode fp_mode) { DCHECK(!AreAliased(object, slot_address)); @@ -809,7 +809,7 @@ void TurboAssembler::CallEphemeronKeyBarrier(Register object, MaybeRestoreRegisters(registers); } -void TurboAssembler::CallRecordWriteStubSaveRegisters(Register object, +void MacroAssembler::CallRecordWriteStubSaveRegisters(Register object, Register slot_address, SaveFPRegsMode fp_mode, StubCallMode mode) { @@ -832,7 +832,7 @@ void TurboAssembler::CallRecordWriteStubSaveRegisters(Register object, MaybeRestoreRegisters(registers); } -void TurboAssembler::CallRecordWriteStub(Register object, Register slot_address, +void MacroAssembler::CallRecordWriteStub(Register object, Register slot_address, SaveFPRegsMode fp_mode, StubCallMode mode) { // Use CallRecordWriteStubSaveRegisters if the object and slot registers @@ -909,7 +909,7 @@ void MacroAssembler::RecordWrite(Register object, Register slot_address, } } -void TurboAssembler::PushCommonFrame(Register marker_reg) { +void MacroAssembler::PushCommonFrame(Register marker_reg) { int fp_delta = 0; mflr(r0); if (V8_EMBEDDED_CONSTANT_POOL_BOOL) { @@ -932,7 +932,7 @@ void TurboAssembler::PushCommonFrame(Register marker_reg) { addi(fp, sp, Operand(fp_delta * kSystemPointerSize)); } -void TurboAssembler::PushStandardFrame(Register function_reg) { +void MacroAssembler::PushStandardFrame(Register function_reg) { int fp_delta = 0; mflr(r0); if (V8_EMBEDDED_CONSTANT_POOL_BOOL) { @@ -956,7 +956,7 @@ void TurboAssembler::PushStandardFrame(Register function_reg) { Push(kJavaScriptCallArgCountRegister); } -void TurboAssembler::RestoreFrameStateForTailCall() { +void MacroAssembler::RestoreFrameStateForTailCall() { if (V8_EMBEDDED_CONSTANT_POOL_BOOL) { LoadU64(kConstantPoolRegister, MemOperand(fp, StandardFrameConstants::kConstantPoolOffset)); @@ -967,61 +967,61 @@ void TurboAssembler::RestoreFrameStateForTailCall() { mtlr(r0); } -void TurboAssembler::CanonicalizeNaN(const DoubleRegister dst, +void MacroAssembler::CanonicalizeNaN(const DoubleRegister dst, const DoubleRegister src) { // Turn potential sNaN into qNaN. 
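// The subtract of zero is enough to canonicalize here: the FPU propagates
// the NaN operand of fsub but sets its quiet bit, so an sNaN payload comes
// out as a qNaN, while every ordinary value is unchanged by "src - 0.0".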
fsub(dst, src, kDoubleRegZero); } -void TurboAssembler::ConvertIntToDouble(Register src, DoubleRegister dst) { +void MacroAssembler::ConvertIntToDouble(Register src, DoubleRegister dst) { MovIntToDouble(dst, src, r0); fcfid(dst, dst); } -void TurboAssembler::ConvertUnsignedIntToDouble(Register src, +void MacroAssembler::ConvertUnsignedIntToDouble(Register src, DoubleRegister dst) { MovUnsignedIntToDouble(dst, src, r0); fcfid(dst, dst); } -void TurboAssembler::ConvertIntToFloat(Register src, DoubleRegister dst) { +void MacroAssembler::ConvertIntToFloat(Register src, DoubleRegister dst) { MovIntToDouble(dst, src, r0); fcfids(dst, dst); } -void TurboAssembler::ConvertUnsignedIntToFloat(Register src, +void MacroAssembler::ConvertUnsignedIntToFloat(Register src, DoubleRegister dst) { MovUnsignedIntToDouble(dst, src, r0); fcfids(dst, dst); } #if V8_TARGET_ARCH_PPC64 -void TurboAssembler::ConvertInt64ToDouble(Register src, +void MacroAssembler::ConvertInt64ToDouble(Register src, DoubleRegister double_dst) { MovInt64ToDouble(double_dst, src); fcfid(double_dst, double_dst); } -void TurboAssembler::ConvertUnsignedInt64ToFloat(Register src, +void MacroAssembler::ConvertUnsignedInt64ToFloat(Register src, DoubleRegister double_dst) { MovInt64ToDouble(double_dst, src); fcfidus(double_dst, double_dst); } -void TurboAssembler::ConvertUnsignedInt64ToDouble(Register src, +void MacroAssembler::ConvertUnsignedInt64ToDouble(Register src, DoubleRegister double_dst) { MovInt64ToDouble(double_dst, src); fcfidu(double_dst, double_dst); } -void TurboAssembler::ConvertInt64ToFloat(Register src, +void MacroAssembler::ConvertInt64ToFloat(Register src, DoubleRegister double_dst) { MovInt64ToDouble(double_dst, src); fcfids(double_dst, double_dst); } #endif -void TurboAssembler::ConvertDoubleToInt64(const DoubleRegister double_input, +void MacroAssembler::ConvertDoubleToInt64(const DoubleRegister double_input, #if !V8_TARGET_ARCH_PPC64 const Register dst_hi, #endif @@ -1044,7 +1044,7 @@ void TurboAssembler::ConvertDoubleToInt64(const DoubleRegister double_input, } #if V8_TARGET_ARCH_PPC64 -void TurboAssembler::ConvertDoubleToUnsignedInt64( +void MacroAssembler::ConvertDoubleToUnsignedInt64( const DoubleRegister double_input, const Register dst, const DoubleRegister double_dst, FPRoundingMode rounding_mode) { if (rounding_mode == kRoundToZero) { @@ -1060,7 +1060,7 @@ void TurboAssembler::ConvertDoubleToUnsignedInt64( #endif #if !V8_TARGET_ARCH_PPC64 -void TurboAssembler::ShiftLeftPair(Register dst_low, Register dst_high, +void MacroAssembler::ShiftLeftPair(Register dst_low, Register dst_high, Register src_low, Register src_high, Register scratch, Register shift) { DCHECK(!AreAliased(dst_low, src_high)); @@ -1085,7 +1085,7 @@ void TurboAssembler::ShiftLeftPair(Register dst_low, Register dst_high, bind(&done); } -void TurboAssembler::ShiftLeftPair(Register dst_low, Register dst_high, +void MacroAssembler::ShiftLeftPair(Register dst_low, Register dst_high, Register src_low, Register src_high, uint32_t shift) { DCHECK(!AreAliased(dst_low, src_high)); @@ -1107,7 +1107,7 @@ void TurboAssembler::ShiftLeftPair(Register dst_low, Register dst_high, } } -void TurboAssembler::ShiftRightPair(Register dst_low, Register dst_high, +void MacroAssembler::ShiftRightPair(Register dst_low, Register dst_high, Register src_low, Register src_high, Register scratch, Register shift) { DCHECK(!AreAliased(dst_low, src_high)); @@ -1132,7 +1132,7 @@ void TurboAssembler::ShiftRightPair(Register dst_low, Register dst_high, bind(&done); } -void 
TurboAssembler::ShiftRightPair(Register dst_low, Register dst_high, +void MacroAssembler::ShiftRightPair(Register dst_low, Register dst_high, Register src_low, Register src_high, uint32_t shift) { DCHECK(!AreAliased(dst_low, src_high)); @@ -1154,7 +1154,7 @@ void TurboAssembler::ShiftRightPair(Register dst_low, Register dst_high, } } -void TurboAssembler::ShiftRightAlgPair(Register dst_low, Register dst_high, +void MacroAssembler::ShiftRightAlgPair(Register dst_low, Register dst_high, Register src_low, Register src_high, Register scratch, Register shift) { DCHECK(!AreAliased(dst_low, src_high, shift)); @@ -1178,7 +1178,7 @@ void TurboAssembler::ShiftRightAlgPair(Register dst_low, Register dst_high, bind(&done); } -void TurboAssembler::ShiftRightAlgPair(Register dst_low, Register dst_high, +void MacroAssembler::ShiftRightAlgPair(Register dst_low, Register dst_high, Register src_low, Register src_high, uint32_t shift) { DCHECK(!AreAliased(dst_low, src_high)); @@ -1201,7 +1201,7 @@ void TurboAssembler::ShiftRightAlgPair(Register dst_low, Register dst_high, } #endif -void TurboAssembler::LoadConstantPoolPointerRegisterFromCodeTargetAddress( +void MacroAssembler::LoadConstantPoolPointerRegisterFromCodeTargetAddress( Register code_target_address) { // Builtins do not use the constant pool (see is_constant_pool_available). static_assert(InstructionStream::kOnHeapBodyIsContiguous); @@ -1217,19 +1217,19 @@ void TurboAssembler::LoadConstantPoolPointerRegisterFromCodeTargetAddress( add(kConstantPoolRegister, kConstantPoolRegister, r0); } -void TurboAssembler::LoadPC(Register dst) { +void MacroAssembler::LoadPC(Register dst) { b(4, SetLK); mflr(dst); } -void TurboAssembler::ComputeCodeStartAddress(Register dst) { +void MacroAssembler::ComputeCodeStartAddress(Register dst) { mflr(r0); LoadPC(dst); subi(dst, dst, Operand(pc_offset() - kInstrSize)); mtlr(r0); } -void TurboAssembler::LoadConstantPoolPointerRegister() { +void MacroAssembler::LoadConstantPoolPointerRegister() { // // Builtins do not use the constant pool (see is_constant_pool_available). static_assert(InstructionStream::kOnHeapBodyIsContiguous); @@ -1240,7 +1240,7 @@ void TurboAssembler::LoadConstantPoolPointerRegister() { ConstantPoolPosition(), delta); } -void TurboAssembler::StubPrologue(StackFrame::Type type) { +void MacroAssembler::StubPrologue(StackFrame::Type type) { { ConstantPoolUnavailableScope constant_pool_unavailable(this); mov(r11, Operand(StackFrame::TypeToMarker(type))); @@ -1252,7 +1252,7 @@ void TurboAssembler::StubPrologue(StackFrame::Type type) { } } -void TurboAssembler::Prologue() { +void MacroAssembler::Prologue() { PushStandardFrame(r4); if (V8_EMBEDDED_CONSTANT_POOL_BOOL) { // base contains prologue address @@ -1261,7 +1261,7 @@ void TurboAssembler::Prologue() { } } -void TurboAssembler::DropArguments(Register count, ArgumentsCountType type, +void MacroAssembler::DropArguments(Register count, ArgumentsCountType type, ArgumentsCountMode mode) { int receiver_bytes = (mode == kCountExcludesReceiver) ? 
kSystemPointerSize : 0; @@ -1287,7 +1287,7 @@ void TurboAssembler::DropArguments(Register count, ArgumentsCountType type, } } -void TurboAssembler::DropArgumentsAndPushNewReceiver(Register argc, +void MacroAssembler::DropArgumentsAndPushNewReceiver(Register argc, Register receiver, ArgumentsCountType type, ArgumentsCountMode mode) { @@ -1302,7 +1302,7 @@ void TurboAssembler::DropArgumentsAndPushNewReceiver(Register argc, } } -void TurboAssembler::EnterFrame(StackFrame::Type type, +void MacroAssembler::EnterFrame(StackFrame::Type type, bool load_constant_pool_pointer_reg) { if (V8_EMBEDDED_CONSTANT_POOL_BOOL && load_constant_pool_pointer_reg) { // Push type explicitly so we can leverage the constant pool. @@ -1326,7 +1326,7 @@ void TurboAssembler::EnterFrame(StackFrame::Type type, #endif // V8_ENABLE_WEBASSEMBLY } -int TurboAssembler::LeaveFrame(StackFrame::Type type, int stack_adjustment) { +int MacroAssembler::LeaveFrame(StackFrame::Type type, int stack_adjustment) { ConstantPoolUnavailableScope constant_pool_unavailable(this); // r3: preserved // r4: preserved @@ -1422,7 +1422,7 @@ void MacroAssembler::EnterExitFrame(int stack_space, StoreU64(r8, MemOperand(fp, ExitFrameConstants::kSPOffset)); } -int TurboAssembler::ActivationFrameAlignment() { +int MacroAssembler::ActivationFrameAlignment() { #if !defined(USE_SIMULATOR) // Running on the real platform. Use the alignment as mandated by the local // environment. @@ -1472,11 +1472,11 @@ void MacroAssembler::LeaveExitFrame(Register argument_count, } } -void TurboAssembler::MovFromFloatResult(const DoubleRegister dst) { +void MacroAssembler::MovFromFloatResult(const DoubleRegister dst) { Move(dst, d1); } -void TurboAssembler::MovFromFloatParameter(const DoubleRegister dst) { +void MacroAssembler::MovFromFloatParameter(const DoubleRegister dst) { Move(dst, d1); } @@ -1487,10 +1487,10 @@ void MacroAssembler::LoadStackLimit(Register destination, StackLimitKind kind) { kind == StackLimitKind::kRealStackLimit ? 
ExternalReference::address_of_real_jslimit(isolate) : ExternalReference::address_of_jslimit(isolate); - DCHECK(TurboAssembler::IsAddressableThroughRootRegister(isolate, limit)); + DCHECK(MacroAssembler::IsAddressableThroughRootRegister(isolate, limit)); intptr_t offset = - TurboAssembler::RootRegisterOffsetForExternalReference(isolate, limit); + MacroAssembler::RootRegisterOffsetForExternalReference(isolate, limit); CHECK(is_int32(offset)); LoadU64(destination, MemOperand(kRootRegister, offset), r0); } @@ -1783,7 +1783,7 @@ void MacroAssembler::CompareRoot(Register obj, RootIndex index) { CmpS64(obj, r0); } -void TurboAssembler::AddAndCheckForOverflow(Register dst, Register left, +void MacroAssembler::AddAndCheckForOverflow(Register dst, Register left, Register right, Register overflow_dst, Register scratch) { @@ -1815,7 +1815,7 @@ void TurboAssembler::AddAndCheckForOverflow(Register dst, Register left, if (!left_is_right) and_(overflow_dst, scratch, overflow_dst, SetRC); } -void TurboAssembler::AddAndCheckForOverflow(Register dst, Register left, +void MacroAssembler::AddAndCheckForOverflow(Register dst, Register left, intptr_t right, Register overflow_dst, Register scratch) { @@ -1840,7 +1840,7 @@ void TurboAssembler::AddAndCheckForOverflow(Register dst, Register left, } } -void TurboAssembler::SubAndCheckForOverflow(Register dst, Register left, +void MacroAssembler::SubAndCheckForOverflow(Register dst, Register left, Register right, Register overflow_dst, Register scratch) { @@ -1871,7 +1871,7 @@ void TurboAssembler::SubAndCheckForOverflow(Register dst, Register left, } } -void TurboAssembler::MinF64(DoubleRegister dst, DoubleRegister lhs, +void MacroAssembler::MinF64(DoubleRegister dst, DoubleRegister lhs, DoubleRegister rhs, DoubleRegister scratch) { Label check_zero, return_left, return_right, return_nan, done; fcmpu(lhs, rhs); @@ -1919,7 +1919,7 @@ void TurboAssembler::MinF64(DoubleRegister dst, DoubleRegister lhs, bind(&done); } -void TurboAssembler::MaxF64(DoubleRegister dst, DoubleRegister lhs, +void MacroAssembler::MaxF64(DoubleRegister dst, DoubleRegister lhs, DoubleRegister rhs, DoubleRegister scratch) { Label check_zero, return_left, return_right, return_nan, done; fcmpu(lhs, rhs); @@ -1965,7 +1965,7 @@ void MacroAssembler::JumpIfIsInRange(Register value, unsigned lower_limit, ble(on_in_range); } -void TurboAssembler::TruncateDoubleToI(Isolate* isolate, Zone* zone, +void MacroAssembler::TruncateDoubleToI(Isolate* isolate, Zone* zone, Register result, DoubleRegister double_input, StubCallMode stub_mode) { @@ -1998,7 +1998,7 @@ void TurboAssembler::TruncateDoubleToI(Isolate* isolate, Zone* zone, bind(&done); } -void TurboAssembler::TryInlineTruncateDoubleToI(Register result, +void MacroAssembler::TryInlineTruncateDoubleToI(Register result, DoubleRegister double_input, Label* done) { DoubleRegister double_scratch = kScratchDoubleReg; @@ -2254,7 +2254,7 @@ void MacroAssembler::EmitDecrementCounter(StatsCounter* counter, int value, } } -void TurboAssembler::Check(Condition cond, AbortReason reason, CRegister cr) { +void MacroAssembler::Check(Condition cond, AbortReason reason, CRegister cr) { Label L; b(cond, &L, cr); Abort(reason); @@ -2262,7 +2262,7 @@ void TurboAssembler::Check(Condition cond, AbortReason reason, CRegister cr) { bind(&L); } -void TurboAssembler::Abort(AbortReason reason) { +void MacroAssembler::Abort(AbortReason reason) { Label abort_start; bind(&abort_start); if (v8_flags.code_comments) { @@ -2306,7 +2306,7 @@ void TurboAssembler::Abort(AbortReason reason) { // 
will not return here } -void TurboAssembler::LoadMap(Register destination, Register object) { +void MacroAssembler::LoadMap(Register destination, Register object) { LoadTaggedPointerField(destination, FieldMemOperand(object, HeapObject::kMapOffset), r0); } @@ -2321,11 +2321,11 @@ void MacroAssembler::LoadNativeContextSlot(Register dst, int index) { } #ifdef V8_ENABLE_DEBUG_CODE -void TurboAssembler::Assert(Condition cond, AbortReason reason, CRegister cr) { +void MacroAssembler::Assert(Condition cond, AbortReason reason, CRegister cr) { if (v8_flags.debug_code) Check(cond, reason, cr); } -void TurboAssembler::AssertNotSmi(Register object) { +void MacroAssembler::AssertNotSmi(Register object) { if (v8_flags.debug_code) { static_assert(kSmiTag == 0); TestIfSmi(object, r0); @@ -2333,7 +2333,7 @@ void TurboAssembler::AssertNotSmi(Register object) { } } -void TurboAssembler::AssertSmi(Register object) { +void MacroAssembler::AssertSmi(Register object) { if (v8_flags.debug_code) { static_assert(kSmiTag == 0); TestIfSmi(object, r0); @@ -2431,7 +2431,7 @@ void MacroAssembler::AssertUndefinedOrAllocationSite(Register object, static const int kRegisterPassedArguments = 8; -int TurboAssembler::CalculateStackPassedWords(int num_reg_arguments, +int MacroAssembler::CalculateStackPassedWords(int num_reg_arguments, int num_double_arguments) { int stack_passed_words = 0; if (num_double_arguments > DoubleRegister::kNumRegisters) { @@ -2445,7 +2445,7 @@ int TurboAssembler::CalculateStackPassedWords(int num_reg_arguments, return stack_passed_words; } -void TurboAssembler::PrepareCallCFunction(int num_reg_arguments, +void MacroAssembler::PrepareCallCFunction(int num_reg_arguments, int num_double_arguments, Register scratch) { int frame_alignment = ActivationFrameAlignment(); @@ -2474,16 +2474,16 @@ void TurboAssembler::PrepareCallCFunction(int num_reg_arguments, StoreU64WithUpdate(r0, MemOperand(sp, -stack_space * kSystemPointerSize)); } -void TurboAssembler::PrepareCallCFunction(int num_reg_arguments, +void MacroAssembler::PrepareCallCFunction(int num_reg_arguments, Register scratch) { PrepareCallCFunction(num_reg_arguments, 0, scratch); } -void TurboAssembler::MovToFloatParameter(DoubleRegister src) { Move(d1, src); } +void MacroAssembler::MovToFloatParameter(DoubleRegister src) { Move(d1, src); } -void TurboAssembler::MovToFloatResult(DoubleRegister src) { Move(d1, src); } +void MacroAssembler::MovToFloatResult(DoubleRegister src) { Move(d1, src); } -void TurboAssembler::MovToFloatParameters(DoubleRegister src1, +void MacroAssembler::MovToFloatParameters(DoubleRegister src1, DoubleRegister src2) { if (src2 == d1) { DCHECK(src1 != d2); @@ -2495,7 +2495,7 @@ void TurboAssembler::MovToFloatParameters(DoubleRegister src1, } } -void TurboAssembler::CallCFunction(ExternalReference function, +void MacroAssembler::CallCFunction(ExternalReference function, int num_reg_arguments, int num_double_arguments, bool has_function_descriptor) { @@ -2504,25 +2504,25 @@ void TurboAssembler::CallCFunction(ExternalReference function, has_function_descriptor); } -void TurboAssembler::CallCFunction(Register function, int num_reg_arguments, +void MacroAssembler::CallCFunction(Register function, int num_reg_arguments, int num_double_arguments, bool has_function_descriptor) { CallCFunctionHelper(function, num_reg_arguments, num_double_arguments, has_function_descriptor); } -void TurboAssembler::CallCFunction(ExternalReference function, +void MacroAssembler::CallCFunction(ExternalReference function, int num_arguments, bool 
has_function_descriptor) { CallCFunction(function, num_arguments, 0, has_function_descriptor); } -void TurboAssembler::CallCFunction(Register function, int num_arguments, +void MacroAssembler::CallCFunction(Register function, int num_arguments, bool has_function_descriptor) { CallCFunction(function, num_arguments, 0, has_function_descriptor); } -void TurboAssembler::CallCFunctionHelper(Register function, +void MacroAssembler::CallCFunctionHelper(Register function, int num_reg_arguments, int num_double_arguments, bool has_function_descriptor) { @@ -2605,7 +2605,7 @@ void TurboAssembler::CallCFunctionHelper(Register function, } } -void TurboAssembler::CheckPageFlag( +void MacroAssembler::CheckPageFlag( Register object, Register scratch, // scratch may be same register as object int mask, Condition cc, Label* condition_met) { @@ -2625,9 +2625,9 @@ void TurboAssembler::CheckPageFlag( } } -void TurboAssembler::SetRoundingMode(FPRoundingMode RN) { mtfsfi(7, RN); } +void MacroAssembler::SetRoundingMode(FPRoundingMode RN) { mtfsfi(7, RN); } -void TurboAssembler::ResetRoundingMode() { +void MacroAssembler::ResetRoundingMode() { mtfsfi(7, kRoundToNearest); // reset (default is kRoundToNearest) } @@ -2636,15 +2636,15 @@ void TurboAssembler::ResetRoundingMode() { // New MacroAssembler Interfaces added for PPC // //////////////////////////////////////////////////////////////////////////////// -void TurboAssembler::LoadIntLiteral(Register dst, int value) { +void MacroAssembler::LoadIntLiteral(Register dst, int value) { mov(dst, Operand(value)); } -void TurboAssembler::LoadSmiLiteral(Register dst, Smi smi) { +void MacroAssembler::LoadSmiLiteral(Register dst, Smi smi) { mov(dst, Operand(smi)); } -void TurboAssembler::LoadDoubleLiteral(DoubleRegister result, +void MacroAssembler::LoadDoubleLiteral(DoubleRegister result, base::Double value, Register scratch) { if (V8_EMBEDDED_CONSTANT_POOL_BOOL && is_constant_pool_available() && !(scratch == r0 && ConstantPoolAccessIsInOverflow())) { @@ -2693,7 +2693,7 @@ void TurboAssembler::LoadDoubleLiteral(DoubleRegister result, addi(sp, sp, Operand(kDoubleSize)); } -void TurboAssembler::MovIntToDouble(DoubleRegister dst, Register src, +void MacroAssembler::MovIntToDouble(DoubleRegister dst, Register src, Register scratch) { // sign-extend src to 64-bit #if V8_TARGET_ARCH_PPC64 @@ -2718,7 +2718,7 @@ void TurboAssembler::MovIntToDouble(DoubleRegister dst, Register src, addi(sp, sp, Operand(kDoubleSize)); } -void TurboAssembler::MovUnsignedIntToDouble(DoubleRegister dst, Register src, +void MacroAssembler::MovUnsignedIntToDouble(DoubleRegister dst, Register src, Register scratch) { // zero-extend src to 64-bit #if V8_TARGET_ARCH_PPC64 @@ -2743,7 +2743,7 @@ void TurboAssembler::MovUnsignedIntToDouble(DoubleRegister dst, Register src, addi(sp, sp, Operand(kDoubleSize)); } -void TurboAssembler::MovInt64ToDouble(DoubleRegister dst, +void MacroAssembler::MovInt64ToDouble(DoubleRegister dst, #if !V8_TARGET_ARCH_PPC64 Register src_hi, #endif @@ -2768,7 +2768,7 @@ void TurboAssembler::MovInt64ToDouble(DoubleRegister dst, } #if V8_TARGET_ARCH_PPC64 -void TurboAssembler::MovInt64ComponentsToDouble(DoubleRegister dst, +void MacroAssembler::MovInt64ComponentsToDouble(DoubleRegister dst, Register src_hi, Register src_lo, Register scratch) { @@ -2788,7 +2788,7 @@ void TurboAssembler::MovInt64ComponentsToDouble(DoubleRegister dst, } #endif -void TurboAssembler::InsertDoubleLow(DoubleRegister dst, Register src, +void MacroAssembler::InsertDoubleLow(DoubleRegister dst, Register src, 
Register scratch) { #if V8_TARGET_ARCH_PPC64 if (CpuFeatures::IsSupported(PPC_8_PLUS)) { @@ -2807,7 +2807,7 @@ void TurboAssembler::InsertDoubleLow(DoubleRegister dst, Register src, addi(sp, sp, Operand(kDoubleSize)); } -void TurboAssembler::InsertDoubleHigh(DoubleRegister dst, Register src, +void MacroAssembler::InsertDoubleHigh(DoubleRegister dst, Register src, Register scratch) { #if V8_TARGET_ARCH_PPC64 if (CpuFeatures::IsSupported(PPC_8_PLUS)) { @@ -2826,7 +2826,7 @@ void TurboAssembler::InsertDoubleHigh(DoubleRegister dst, Register src, addi(sp, sp, Operand(kDoubleSize)); } -void TurboAssembler::MovDoubleLowToInt(Register dst, DoubleRegister src) { +void MacroAssembler::MovDoubleLowToInt(Register dst, DoubleRegister src) { #if V8_TARGET_ARCH_PPC64 if (CpuFeatures::IsSupported(PPC_8_PLUS)) { mffprwz(dst, src); @@ -2841,7 +2841,7 @@ void TurboAssembler::MovDoubleLowToInt(Register dst, DoubleRegister src) { addi(sp, sp, Operand(kDoubleSize)); } -void TurboAssembler::MovDoubleHighToInt(Register dst, DoubleRegister src) { +void MacroAssembler::MovDoubleHighToInt(Register dst, DoubleRegister src) { #if V8_TARGET_ARCH_PPC64 if (CpuFeatures::IsSupported(PPC_8_PLUS)) { mffprd(dst, src); @@ -2857,7 +2857,7 @@ void TurboAssembler::MovDoubleHighToInt(Register dst, DoubleRegister src) { addi(sp, sp, Operand(kDoubleSize)); } -void TurboAssembler::MovDoubleToInt64( +void MacroAssembler::MovDoubleToInt64( #if !V8_TARGET_ARCH_PPC64 Register dst_hi, #endif @@ -2881,7 +2881,7 @@ void TurboAssembler::MovDoubleToInt64( addi(sp, sp, Operand(kDoubleSize)); } -void TurboAssembler::MovIntToFloat(DoubleRegister dst, Register src, +void MacroAssembler::MovIntToFloat(DoubleRegister dst, Register src, Register scratch) { if (CpuFeatures::IsSupported(PPC_8_PLUS)) { ShiftLeftU64(scratch, src, Operand(32)); @@ -2896,7 +2896,7 @@ void TurboAssembler::MovIntToFloat(DoubleRegister dst, Register src, addi(sp, sp, Operand(kFloatSize)); } -void TurboAssembler::MovFloatToInt(Register dst, DoubleRegister src, +void MacroAssembler::MovFloatToInt(Register dst, DoubleRegister src, DoubleRegister scratch) { if (CpuFeatures::IsSupported(PPC_8_PLUS)) { xscvdpspn(scratch, src); @@ -2910,12 +2910,12 @@ void TurboAssembler::MovFloatToInt(Register dst, DoubleRegister src, addi(sp, sp, Operand(kFloatSize)); } -void TurboAssembler::AddS64(Register dst, Register src, Register value, OEBit s, +void MacroAssembler::AddS64(Register dst, Register src, Register value, OEBit s, RCBit r) { add(dst, src, value, s, r); } -void TurboAssembler::AddS64(Register dst, Register src, const Operand& value, +void MacroAssembler::AddS64(Register dst, Register src, const Operand& value, Register scratch, OEBit s, RCBit r) { if (is_int16(value.immediate()) && s == LeaveOE && r == LeaveRC) { addi(dst, src, value); @@ -2925,12 +2925,12 @@ void TurboAssembler::AddS64(Register dst, Register src, const Operand& value, } } -void TurboAssembler::SubS64(Register dst, Register src, Register value, OEBit s, +void MacroAssembler::SubS64(Register dst, Register src, Register value, OEBit s, RCBit r) { sub(dst, src, value, s, r); } -void TurboAssembler::SubS64(Register dst, Register src, const Operand& value, +void MacroAssembler::SubS64(Register dst, Register src, const Operand& value, Register scratch, OEBit s, RCBit r) { if (is_int16(value.immediate()) && s == LeaveOE && r == LeaveRC) { subi(dst, src, value); @@ -2940,31 +2940,31 @@ void TurboAssembler::SubS64(Register dst, Register src, const Operand& value, } } -void TurboAssembler::AddS32(Register dst, Register 
src, Register value, +void MacroAssembler::AddS32(Register dst, Register src, Register value, RCBit r) { AddS64(dst, src, value, LeaveOE, r); extsw(dst, dst, r); } -void TurboAssembler::AddS32(Register dst, Register src, const Operand& value, +void MacroAssembler::AddS32(Register dst, Register src, const Operand& value, Register scratch, RCBit r) { AddS64(dst, src, value, scratch, LeaveOE, r); extsw(dst, dst, r); } -void TurboAssembler::SubS32(Register dst, Register src, Register value, +void MacroAssembler::SubS32(Register dst, Register src, Register value, RCBit r) { SubS64(dst, src, value, LeaveOE, r); extsw(dst, dst, r); } -void TurboAssembler::SubS32(Register dst, Register src, const Operand& value, +void MacroAssembler::SubS32(Register dst, Register src, const Operand& value, Register scratch, RCBit r) { SubS64(dst, src, value, scratch, LeaveOE, r); extsw(dst, dst, r); } -void TurboAssembler::MulS64(Register dst, Register src, const Operand& value, +void MacroAssembler::MulS64(Register dst, Register src, const Operand& value, Register scratch, OEBit s, RCBit r) { if (is_int16(value.immediate()) && s == LeaveOE && r == LeaveRC) { mulli(dst, src, value); @@ -2974,45 +2974,45 @@ void TurboAssembler::MulS64(Register dst, Register src, const Operand& value, } } -void TurboAssembler::MulS64(Register dst, Register src, Register value, OEBit s, +void MacroAssembler::MulS64(Register dst, Register src, Register value, OEBit s, RCBit r) { mulld(dst, src, value, s, r); } -void TurboAssembler::MulS32(Register dst, Register src, const Operand& value, +void MacroAssembler::MulS32(Register dst, Register src, const Operand& value, Register scratch, OEBit s, RCBit r) { MulS64(dst, src, value, scratch, s, r); extsw(dst, dst, r); } -void TurboAssembler::MulS32(Register dst, Register src, Register value, OEBit s, +void MacroAssembler::MulS32(Register dst, Register src, Register value, OEBit s, RCBit r) { MulS64(dst, src, value, s, r); extsw(dst, dst, r); } -void TurboAssembler::DivS64(Register dst, Register src, Register value, OEBit s, +void MacroAssembler::DivS64(Register dst, Register src, Register value, OEBit s, RCBit r) { divd(dst, src, value, s, r); } -void TurboAssembler::DivU64(Register dst, Register src, Register value, OEBit s, +void MacroAssembler::DivU64(Register dst, Register src, Register value, OEBit s, RCBit r) { divdu(dst, src, value, s, r); } -void TurboAssembler::DivS32(Register dst, Register src, Register value, OEBit s, +void MacroAssembler::DivS32(Register dst, Register src, Register value, OEBit s, RCBit r) { divw(dst, src, value, s, r); extsw(dst, dst); } -void TurboAssembler::DivU32(Register dst, Register src, Register value, OEBit s, +void MacroAssembler::DivU32(Register dst, Register src, Register value, OEBit s, RCBit r) { divwu(dst, src, value, s, r); ZeroExtWord32(dst, dst); } -void TurboAssembler::ModS64(Register dst, Register src, Register value) { +void MacroAssembler::ModS64(Register dst, Register src, Register value) { if (CpuFeatures::IsSupported(PPC_9_PLUS)) { modsd(dst, src, value); } else { @@ -3025,7 +3025,7 @@ void TurboAssembler::ModS64(Register dst, Register src, Register value) { } } -void TurboAssembler::ModU64(Register dst, Register src, Register value) { +void MacroAssembler::ModU64(Register dst, Register src, Register value) { if (CpuFeatures::IsSupported(PPC_9_PLUS)) { modud(dst, src, value); } else { @@ -3038,7 +3038,7 @@ void TurboAssembler::ModU64(Register dst, Register src, Register value) { } } -void TurboAssembler::ModS32(Register dst, Register src, 
Register value) { +void MacroAssembler::ModS32(Register dst, Register src, Register value) { if (CpuFeatures::IsSupported(PPC_9_PLUS)) { modsw(dst, src, value); } else { @@ -3051,7 +3051,7 @@ void TurboAssembler::ModS32(Register dst, Register src, Register value) { } extsw(dst, dst); } -void TurboAssembler::ModU32(Register dst, Register src, Register value) { +void MacroAssembler::ModU32(Register dst, Register src, Register value) { if (CpuFeatures::IsSupported(PPC_9_PLUS)) { moduw(dst, src, value); } else { @@ -3065,7 +3065,7 @@ void TurboAssembler::ModU32(Register dst, Register src, Register value) { ZeroExtWord32(dst, dst); } -void TurboAssembler::AndU64(Register dst, Register src, const Operand& value, +void MacroAssembler::AndU64(Register dst, Register src, const Operand& value, Register scratch, RCBit r) { if (is_uint16(value.immediate()) && r == SetRC) { andi(dst, src, value); @@ -3075,12 +3075,12 @@ void TurboAssembler::AndU64(Register dst, Register src, const Operand& value, } } -void TurboAssembler::AndU64(Register dst, Register src, Register value, +void MacroAssembler::AndU64(Register dst, Register src, Register value, RCBit r) { and_(dst, src, value, r); } -void TurboAssembler::OrU64(Register dst, Register src, const Operand& value, +void MacroAssembler::OrU64(Register dst, Register src, const Operand& value, Register scratch, RCBit r) { if (is_int16(value.immediate()) && r == LeaveRC) { ori(dst, src, value); @@ -3090,12 +3090,12 @@ void TurboAssembler::OrU64(Register dst, Register src, const Operand& value, } } -void TurboAssembler::OrU64(Register dst, Register src, Register value, +void MacroAssembler::OrU64(Register dst, Register src, Register value, RCBit r) { orx(dst, src, value, r); } -void TurboAssembler::XorU64(Register dst, Register src, const Operand& value, +void MacroAssembler::XorU64(Register dst, Register src, const Operand& value, Register scratch, RCBit r) { if (is_int16(value.immediate()) && r == LeaveRC) { xori(dst, src, value); @@ -3105,112 +3105,112 @@ void TurboAssembler::XorU64(Register dst, Register src, const Operand& value, } } -void TurboAssembler::XorU64(Register dst, Register src, Register value, +void MacroAssembler::XorU64(Register dst, Register src, Register value, RCBit r) { xor_(dst, src, value, r); } -void TurboAssembler::AndU32(Register dst, Register src, const Operand& value, +void MacroAssembler::AndU32(Register dst, Register src, const Operand& value, Register scratch, RCBit r) { AndU64(dst, src, value, scratch, r); extsw(dst, dst, r); } -void TurboAssembler::AndU32(Register dst, Register src, Register value, +void MacroAssembler::AndU32(Register dst, Register src, Register value, RCBit r) { AndU64(dst, src, value, r); extsw(dst, dst, r); } -void TurboAssembler::OrU32(Register dst, Register src, const Operand& value, +void MacroAssembler::OrU32(Register dst, Register src, const Operand& value, Register scratch, RCBit r) { OrU64(dst, src, value, scratch, r); extsw(dst, dst, r); } -void TurboAssembler::OrU32(Register dst, Register src, Register value, +void MacroAssembler::OrU32(Register dst, Register src, Register value, RCBit r) { OrU64(dst, src, value, r); extsw(dst, dst, r); } -void TurboAssembler::XorU32(Register dst, Register src, const Operand& value, +void MacroAssembler::XorU32(Register dst, Register src, const Operand& value, Register scratch, RCBit r) { XorU64(dst, src, value, scratch, r); extsw(dst, dst, r); } -void TurboAssembler::XorU32(Register dst, Register src, Register value, +void MacroAssembler::XorU32(Register dst, 
Register src, Register value, RCBit r) { XorU64(dst, src, value, r); extsw(dst, dst, r); } -void TurboAssembler::ShiftLeftU64(Register dst, Register src, +void MacroAssembler::ShiftLeftU64(Register dst, Register src, const Operand& value, RCBit r) { sldi(dst, src, value, r); } -void TurboAssembler::ShiftRightU64(Register dst, Register src, +void MacroAssembler::ShiftRightU64(Register dst, Register src, const Operand& value, RCBit r) { srdi(dst, src, value, r); } -void TurboAssembler::ShiftRightS64(Register dst, Register src, +void MacroAssembler::ShiftRightS64(Register dst, Register src, const Operand& value, RCBit r) { sradi(dst, src, value.immediate(), r); } -void TurboAssembler::ShiftLeftU32(Register dst, Register src, +void MacroAssembler::ShiftLeftU32(Register dst, Register src, const Operand& value, RCBit r) { slwi(dst, src, value, r); } -void TurboAssembler::ShiftRightU32(Register dst, Register src, +void MacroAssembler::ShiftRightU32(Register dst, Register src, const Operand& value, RCBit r) { srwi(dst, src, value, r); } -void TurboAssembler::ShiftRightS32(Register dst, Register src, +void MacroAssembler::ShiftRightS32(Register dst, Register src, const Operand& value, RCBit r) { srawi(dst, src, value.immediate(), r); } -void TurboAssembler::ShiftLeftU64(Register dst, Register src, Register value, +void MacroAssembler::ShiftLeftU64(Register dst, Register src, Register value, RCBit r) { sld(dst, src, value, r); } -void TurboAssembler::ShiftRightU64(Register dst, Register src, Register value, +void MacroAssembler::ShiftRightU64(Register dst, Register src, Register value, RCBit r) { srd(dst, src, value, r); } -void TurboAssembler::ShiftRightS64(Register dst, Register src, Register value, +void MacroAssembler::ShiftRightS64(Register dst, Register src, Register value, RCBit r) { srad(dst, src, value, r); } -void TurboAssembler::ShiftLeftU32(Register dst, Register src, Register value, +void MacroAssembler::ShiftLeftU32(Register dst, Register src, Register value, RCBit r) { slw(dst, src, value, r); } -void TurboAssembler::ShiftRightU32(Register dst, Register src, Register value, +void MacroAssembler::ShiftRightU32(Register dst, Register src, Register value, RCBit r) { srw(dst, src, value, r); } -void TurboAssembler::ShiftRightS32(Register dst, Register src, Register value, +void MacroAssembler::ShiftRightS32(Register dst, Register src, Register value, RCBit r) { sraw(dst, src, value, r); } -void TurboAssembler::CmpS64(Register src1, Register src2, CRegister cr) { +void MacroAssembler::CmpS64(Register src1, Register src2, CRegister cr) { cmp(src1, src2, cr); } -void TurboAssembler::CmpS64(Register src1, const Operand& src2, +void MacroAssembler::CmpS64(Register src1, const Operand& src2, Register scratch, CRegister cr) { intptr_t value = src2.immediate(); if (is_int16(value)) { @@ -3221,7 +3221,7 @@ void TurboAssembler::CmpS64(Register src1, const Operand& src2, } } -void TurboAssembler::CmpU64(Register src1, const Operand& src2, +void MacroAssembler::CmpU64(Register src1, const Operand& src2, Register scratch, CRegister cr) { intptr_t value = src2.immediate(); if (is_uint16(value)) { @@ -3232,11 +3232,11 @@ void TurboAssembler::CmpU64(Register src1, const Operand& src2, } } -void TurboAssembler::CmpU64(Register src1, Register src2, CRegister cr) { +void MacroAssembler::CmpU64(Register src1, Register src2, CRegister cr) { cmpl(src1, src2, cr); } -void TurboAssembler::CmpS32(Register src1, const Operand& src2, +void MacroAssembler::CmpS32(Register src1, const Operand& src2, Register scratch, 
CRegister cr) { intptr_t value = src2.immediate(); if (is_int16(value)) { @@ -3247,11 +3247,11 @@ void TurboAssembler::CmpS32(Register src1, const Operand& src2, } } -void TurboAssembler::CmpS32(Register src1, Register src2, CRegister cr) { +void MacroAssembler::CmpS32(Register src1, Register src2, CRegister cr) { cmpw(src1, src2, cr); } -void TurboAssembler::CmpU32(Register src1, const Operand& src2, +void MacroAssembler::CmpU32(Register src1, const Operand& src2, Register scratch, CRegister cr) { intptr_t value = src2.immediate(); if (is_uint16(value)) { @@ -3262,55 +3262,55 @@ void TurboAssembler::CmpU32(Register src1, const Operand& src2, } } -void TurboAssembler::CmpU32(Register src1, Register src2, CRegister cr) { +void MacroAssembler::CmpU32(Register src1, Register src2, CRegister cr) { cmplw(src1, src2, cr); } -void TurboAssembler::AddF64(DoubleRegister dst, DoubleRegister lhs, +void MacroAssembler::AddF64(DoubleRegister dst, DoubleRegister lhs, DoubleRegister rhs, RCBit r) { fadd(dst, lhs, rhs, r); } -void TurboAssembler::SubF64(DoubleRegister dst, DoubleRegister lhs, +void MacroAssembler::SubF64(DoubleRegister dst, DoubleRegister lhs, DoubleRegister rhs, RCBit r) { fsub(dst, lhs, rhs, r); } -void TurboAssembler::MulF64(DoubleRegister dst, DoubleRegister lhs, +void MacroAssembler::MulF64(DoubleRegister dst, DoubleRegister lhs, DoubleRegister rhs, RCBit r) { fmul(dst, lhs, rhs, r); } -void TurboAssembler::DivF64(DoubleRegister dst, DoubleRegister lhs, +void MacroAssembler::DivF64(DoubleRegister dst, DoubleRegister lhs, DoubleRegister rhs, RCBit r) { fdiv(dst, lhs, rhs, r); } -void TurboAssembler::AddF32(DoubleRegister dst, DoubleRegister lhs, +void MacroAssembler::AddF32(DoubleRegister dst, DoubleRegister lhs, DoubleRegister rhs, RCBit r) { fadd(dst, lhs, rhs, r); frsp(dst, dst, r); } -void TurboAssembler::SubF32(DoubleRegister dst, DoubleRegister lhs, +void MacroAssembler::SubF32(DoubleRegister dst, DoubleRegister lhs, DoubleRegister rhs, RCBit r) { fsub(dst, lhs, rhs, r); frsp(dst, dst, r); } -void TurboAssembler::MulF32(DoubleRegister dst, DoubleRegister lhs, +void MacroAssembler::MulF32(DoubleRegister dst, DoubleRegister lhs, DoubleRegister rhs, RCBit r) { fmul(dst, lhs, rhs, r); frsp(dst, dst, r); } -void TurboAssembler::DivF32(DoubleRegister dst, DoubleRegister lhs, +void MacroAssembler::DivF32(DoubleRegister dst, DoubleRegister lhs, DoubleRegister rhs, RCBit r) { fdiv(dst, lhs, rhs, r); frsp(dst, dst, r); } -void TurboAssembler::CopySignF64(DoubleRegister dst, DoubleRegister lhs, +void MacroAssembler::CopySignF64(DoubleRegister dst, DoubleRegister lhs, DoubleRegister rhs, RCBit r) { fcpsgn(dst, rhs, lhs, r); } @@ -3513,7 +3513,7 @@ void MacroAssembler::AndSmiLiteral(Register dst, Register src, Smi smi, V(StoreU64WithUpdate, stdu, stdux) #define MEM_OP_WITH_ALIGN_FUNCTION(name, ri_op, rr_op) \ - void TurboAssembler::name(Register reg, const MemOperand& mem, \ + void MacroAssembler::name(Register reg, const MemOperand& mem, \ Register scratch) { \ GenerateMemoryOperationWithAlign(reg, mem, ri_op, rr_op); \ } @@ -3527,7 +3527,7 @@ MEM_OP_WITH_ALIGN_LIST(MEM_OP_WITH_ALIGN_FUNCTION) V(StoreU64, std, pstd, stdx) #define MEM_OP_WITH_ALIGN_PREFIXED_FUNCTION(name, ri_op, rip_op, rr_op) \ - void TurboAssembler::name(Register reg, const MemOperand& mem, \ + void MacroAssembler::name(Register reg, const MemOperand& mem, \ Register scratch) { \ GenerateMemoryOperationWithAlignPrefixed(reg, mem, ri_op, rip_op, rr_op); \ } @@ -3542,7 +3542,7 @@ 
MEM_OP_WITH_ALIGN_PREFIXED_LIST(MEM_OP_WITH_ALIGN_PREFIXED_FUNCTION) V(StoreF32WithUpdate, DoubleRegister, stfsu, stfsux) #define MEM_OP_FUNCTION(name, result_t, ri_op, rr_op) \ - void TurboAssembler::name(result_t reg, const MemOperand& mem, \ + void MacroAssembler::name(result_t reg, const MemOperand& mem, \ Register scratch) { \ GenerateMemoryOperation(reg, mem, ri_op, rr_op); \ } @@ -3564,7 +3564,7 @@ MEM_OP_LIST(MEM_OP_FUNCTION) V(StoreF32, DoubleRegister, stfs, pstfs, stfsx) #define MEM_OP_PREFIXED_FUNCTION(name, result_t, ri_op, rip_op, rr_op) \ - void TurboAssembler::name(result_t reg, const MemOperand& mem, \ + void MacroAssembler::name(result_t reg, const MemOperand& mem, \ Register scratch) { \ GenerateMemoryOperationPrefixed(reg, mem, ri_op, rip_op, rr_op); \ } @@ -3581,7 +3581,7 @@ MEM_OP_PREFIXED_LIST(MEM_OP_PREFIXED_FUNCTION) V(LoadSimd128Uint8, lxsibzx) #define MEM_OP_SIMD_FUNCTION(name, rr_op) \ - void TurboAssembler::name(Simd128Register reg, const MemOperand& mem, \ + void MacroAssembler::name(Simd128Register reg, const MemOperand& mem, \ Register scratch) { \ GenerateMemoryOperationRR(reg, mem, rr_op); \ } @@ -3589,7 +3589,7 @@ MEM_OP_SIMD_LIST(MEM_OP_SIMD_FUNCTION) #undef MEM_OP_SIMD_LIST #undef MEM_OP_SIMD_FUNCTION -void TurboAssembler::LoadS8(Register dst, const MemOperand& mem, +void MacroAssembler::LoadS8(Register dst, const MemOperand& mem, Register scratch) { LoadU8(dst, mem, scratch); extsb(dst, dst); @@ -3605,13 +3605,13 @@ void TurboAssembler::LoadS8(Register dst, const MemOperand& mem, #ifdef V8_TARGET_BIG_ENDIAN #define MEM_LE_OP_FUNCTION(name, op) \ - void TurboAssembler::name##LE(Register reg, const MemOperand& mem, \ + void MacroAssembler::name##LE(Register reg, const MemOperand& mem, \ Register scratch) { \ GenerateMemoryOperationRR(reg, mem, op); \ } #else #define MEM_LE_OP_FUNCTION(name, op) \ - void TurboAssembler::name##LE(Register reg, const MemOperand& mem, \ + void MacroAssembler::name##LE(Register reg, const MemOperand& mem, \ Register scratch) { \ name(reg, mem, scratch); \ } @@ -3621,7 +3621,7 @@ MEM_LE_OP_LIST(MEM_LE_OP_FUNCTION) #undef MEM_LE_OP_FUNCTION #undef MEM_LE_OP_LIST -void TurboAssembler::LoadS32LE(Register dst, const MemOperand& mem, +void MacroAssembler::LoadS32LE(Register dst, const MemOperand& mem, Register scratch) { #ifdef V8_TARGET_BIG_ENDIAN LoadU32LE(dst, mem, scratch); @@ -3631,7 +3631,7 @@ void TurboAssembler::LoadS32LE(Register dst, const MemOperand& mem, #endif } -void TurboAssembler::LoadS16LE(Register dst, const MemOperand& mem, +void MacroAssembler::LoadS16LE(Register dst, const MemOperand& mem, Register scratch) { #ifdef V8_TARGET_BIG_ENDIAN LoadU16LE(dst, mem, scratch); @@ -3641,7 +3641,7 @@ void TurboAssembler::LoadS16LE(Register dst, const MemOperand& mem, #endif } -void TurboAssembler::LoadF64LE(DoubleRegister dst, const MemOperand& mem, +void MacroAssembler::LoadF64LE(DoubleRegister dst, const MemOperand& mem, Register scratch, Register scratch2) { #ifdef V8_TARGET_BIG_ENDIAN LoadU64LE(scratch, mem, scratch2); @@ -3653,7 +3653,7 @@ void TurboAssembler::LoadF64LE(DoubleRegister dst, const MemOperand& mem, #endif } -void TurboAssembler::LoadF32LE(DoubleRegister dst, const MemOperand& mem, +void MacroAssembler::LoadF32LE(DoubleRegister dst, const MemOperand& mem, Register scratch, Register scratch2) { #ifdef V8_TARGET_BIG_ENDIAN LoadU32LE(scratch, mem, scratch2); @@ -3665,7 +3665,7 @@ void TurboAssembler::LoadF32LE(DoubleRegister dst, const MemOperand& mem, #endif } -void TurboAssembler::StoreF64LE(DoubleRegister 
dst, const MemOperand& mem, +void MacroAssembler::StoreF64LE(DoubleRegister dst, const MemOperand& mem, Register scratch, Register scratch2) { #ifdef V8_TARGET_BIG_ENDIAN StoreF64(dst, mem, scratch2); @@ -3676,7 +3676,7 @@ void TurboAssembler::StoreF64LE(DoubleRegister dst, const MemOperand& mem, #endif } -void TurboAssembler::StoreF32LE(DoubleRegister dst, const MemOperand& mem, +void MacroAssembler::StoreF32LE(DoubleRegister dst, const MemOperand& mem, Register scratch, Register scratch2) { #ifdef V8_TARGET_BIG_ENDIAN StoreF32(dst, mem, scratch2); @@ -3749,7 +3749,7 @@ void TurboAssembler::StoreF32LE(DoubleRegister dst, const MemOperand& mem, V(S128AndNot, vandc) #define EMIT_SIMD_BINOP(name, op) \ - void TurboAssembler::name(Simd128Register dst, Simd128Register src1, \ + void MacroAssembler::name(Simd128Register dst, Simd128Register src1, \ Simd128Register src2) { \ op(dst, src1, src2); \ } @@ -3772,13 +3772,13 @@ SIMD_BINOP_LIST(EMIT_SIMD_BINOP) V(I8x16ShrU, vsrb) #define EMIT_SIMD_SHIFT(name, op) \ - void TurboAssembler::name(Simd128Register dst, Simd128Register src1, \ + void MacroAssembler::name(Simd128Register dst, Simd128Register src1, \ Register src2, Simd128Register scratch) { \ mtvsrd(scratch, src2); \ vspltb(scratch, scratch, Operand(7)); \ op(dst, src1, scratch); \ } \ - void TurboAssembler::name(Simd128Register dst, Simd128Register src1, \ + void MacroAssembler::name(Simd128Register dst, Simd128Register src1, \ const Operand& src2, Register scratch1, \ Simd128Register scratch2) { \ mov(scratch1, src2); \ @@ -3815,7 +3815,7 @@ SIMD_SHIFT_LIST(EMIT_SIMD_SHIFT) V(I8x16Popcnt, vpopcntb) #define EMIT_SIMD_UNOP(name, op) \ - void TurboAssembler::name(Simd128Register dst, Simd128Register src) { \ + void MacroAssembler::name(Simd128Register dst, Simd128Register src) { \ op(dst, src); \ } SIMD_UNOP_LIST(EMIT_SIMD_UNOP) @@ -3836,7 +3836,7 @@ SIMD_UNOP_LIST(EMIT_SIMD_UNOP) V(I16x8ExtMulHighI8x16U, vmuleub, vmuloub, vmrghh) #define EMIT_SIMD_EXT_MUL(name, mul_even, mul_odd, merge) \ - void TurboAssembler::name(Simd128Register dst, Simd128Register src1, \ + void MacroAssembler::name(Simd128Register dst, Simd128Register src1, \ Simd128Register src2, Simd128Register scratch) { \ EXT_MUL(scratch, dst, mul_even, mul_odd) \ merge(dst, scratch, dst); \ @@ -3852,7 +3852,7 @@ SIMD_EXT_MUL_LIST(EMIT_SIMD_EXT_MUL) V(I8x16AllTrue, vcmpgtub) #define EMIT_SIMD_ALL_TRUE(name, op) \ - void TurboAssembler::name(Register dst, Simd128Register src, \ + void MacroAssembler::name(Register dst, Simd128Register src, \ Register scratch1, Register scratch2, \ Simd128Register scratch3) { \ constexpr uint8_t fxm = 0x2; /* field mask. 
*/ \ @@ -3875,7 +3875,7 @@ SIMD_ALL_TRUE_LIST(EMIT_SIMD_ALL_TRUE) V(I16x8BitMask, vextracthm, 0x10203040506070) #define EMIT_SIMD_BITMASK(name, op, indicies) \ - void TurboAssembler::name(Register dst, Simd128Register src, \ + void MacroAssembler::name(Register dst, Simd128Register src, \ Register scratch1, Simd128Register scratch2) { \ if (CpuFeatures::IsSupported(PPC_10_PLUS)) { \ op(dst, src); \ @@ -3898,7 +3898,7 @@ SIMD_BITMASK_LIST(EMIT_SIMD_BITMASK) V(F32x4Qfms, xvnmsubmsp) #define EMIT_SIMD_QFM(name, op) \ - void TurboAssembler::name(Simd128Register dst, Simd128Register src1, \ + void MacroAssembler::name(Simd128Register dst, Simd128Register src1, \ Simd128Register src2, Simd128Register src3, \ Simd128Register scratch) { \ Simd128Register dest = dst; \ @@ -3915,7 +3915,7 @@ SIMD_QFM_LIST(EMIT_SIMD_QFM) #undef EMIT_SIMD_QFM #undef SIMD_QFM_LIST -void TurboAssembler::I64x2ExtMulLowI32x4S(Simd128Register dst, +void MacroAssembler::I64x2ExtMulLowI32x4S(Simd128Register dst, Simd128Register src1, Simd128Register src2, Simd128Register scratch) { @@ -3925,7 +3925,7 @@ void TurboAssembler::I64x2ExtMulLowI32x4S(Simd128Register dst, vinsertd(dst, scratch, Operand(0)); } -void TurboAssembler::I64x2ExtMulHighI32x4S(Simd128Register dst, +void MacroAssembler::I64x2ExtMulHighI32x4S(Simd128Register dst, Simd128Register src1, Simd128Register src2, Simd128Register scratch) { @@ -3935,7 +3935,7 @@ void TurboAssembler::I64x2ExtMulHighI32x4S(Simd128Register dst, vor(dst, scratch, scratch); } -void TurboAssembler::I64x2ExtMulLowI32x4U(Simd128Register dst, +void MacroAssembler::I64x2ExtMulLowI32x4U(Simd128Register dst, Simd128Register src1, Simd128Register src2, Simd128Register scratch) { @@ -3945,7 +3945,7 @@ void TurboAssembler::I64x2ExtMulLowI32x4U(Simd128Register dst, vinsertd(dst, scratch, Operand(0)); } -void TurboAssembler::I64x2ExtMulHighI32x4U(Simd128Register dst, +void MacroAssembler::I64x2ExtMulHighI32x4U(Simd128Register dst, Simd128Register src1, Simd128Register src2, Simd128Register scratch) { @@ -3956,7 +3956,7 @@ void TurboAssembler::I64x2ExtMulHighI32x4U(Simd128Register dst, } #undef EXT_MUL -void TurboAssembler::LoadSimd128LE(Simd128Register dst, const MemOperand& mem, +void MacroAssembler::LoadSimd128LE(Simd128Register dst, const MemOperand& mem, Register scratch) { #ifdef V8_TARGET_BIG_ENDIAN LoadSimd128(dst, mem, scratch); @@ -3966,7 +3966,7 @@ void TurboAssembler::LoadSimd128LE(Simd128Register dst, const MemOperand& mem, #endif } -void TurboAssembler::StoreSimd128LE(Simd128Register src, const MemOperand& mem, +void MacroAssembler::StoreSimd128LE(Simd128Register src, const MemOperand& mem, Register scratch1, Simd128Register scratch2) { #ifdef V8_TARGET_BIG_ENDIAN @@ -3977,7 +3977,7 @@ void TurboAssembler::StoreSimd128LE(Simd128Register src, const MemOperand& mem, #endif } -void TurboAssembler::F64x2Splat(Simd128Register dst, DoubleRegister src, +void MacroAssembler::F64x2Splat(Simd128Register dst, DoubleRegister src, Register scratch) { constexpr int lane_width_in_bytes = 8; MovDoubleToInt64(scratch, src); @@ -3985,35 +3985,35 @@ void TurboAssembler::F64x2Splat(Simd128Register dst, DoubleRegister src, vinsertd(dst, dst, Operand(1 * lane_width_in_bytes)); } -void TurboAssembler::F32x4Splat(Simd128Register dst, DoubleRegister src, +void MacroAssembler::F32x4Splat(Simd128Register dst, DoubleRegister src, DoubleRegister scratch1, Register scratch2) { MovFloatToInt(scratch2, src, scratch1); mtvsrd(dst, scratch2); vspltw(dst, dst, Operand(1)); } -void 
TurboAssembler::I64x2Splat(Simd128Register dst, Register src) { +void MacroAssembler::I64x2Splat(Simd128Register dst, Register src) { constexpr int lane_width_in_bytes = 8; mtvsrd(dst, src); vinsertd(dst, dst, Operand(1 * lane_width_in_bytes)); } -void TurboAssembler::I32x4Splat(Simd128Register dst, Register src) { +void MacroAssembler::I32x4Splat(Simd128Register dst, Register src) { mtvsrd(dst, src); vspltw(dst, dst, Operand(1)); } -void TurboAssembler::I16x8Splat(Simd128Register dst, Register src) { +void MacroAssembler::I16x8Splat(Simd128Register dst, Register src) { mtvsrd(dst, src); vsplth(dst, dst, Operand(3)); } -void TurboAssembler::I8x16Splat(Simd128Register dst, Register src) { +void MacroAssembler::I8x16Splat(Simd128Register dst, Register src) { mtvsrd(dst, src); vspltb(dst, dst, Operand(7)); } -void TurboAssembler::F64x2ExtractLane(DoubleRegister dst, Simd128Register src, +void MacroAssembler::F64x2ExtractLane(DoubleRegister dst, Simd128Register src, uint8_t imm_lane_idx, Simd128Register scratch1, Register scratch2) { @@ -4023,7 +4023,7 @@ void TurboAssembler::F64x2ExtractLane(DoubleRegister dst, Simd128Register src, MovInt64ToDouble(dst, scratch2); } -void TurboAssembler::F32x4ExtractLane(DoubleRegister dst, Simd128Register src, +void MacroAssembler::F32x4ExtractLane(DoubleRegister dst, Simd128Register src, uint8_t imm_lane_idx, Simd128Register scratch1, Register scratch2, Register scratch3) { @@ -4033,7 +4033,7 @@ void TurboAssembler::F32x4ExtractLane(DoubleRegister dst, Simd128Register src, MovIntToFloat(dst, scratch2, scratch3); } -void TurboAssembler::I64x2ExtractLane(Register dst, Simd128Register src, +void MacroAssembler::I64x2ExtractLane(Register dst, Simd128Register src, uint8_t imm_lane_idx, Simd128Register scratch) { constexpr int lane_width_in_bytes = 8; @@ -4041,7 +4041,7 @@ void TurboAssembler::I64x2ExtractLane(Register dst, Simd128Register src, mfvsrd(dst, scratch); } -void TurboAssembler::I32x4ExtractLane(Register dst, Simd128Register src, +void MacroAssembler::I32x4ExtractLane(Register dst, Simd128Register src, uint8_t imm_lane_idx, Simd128Register scratch) { constexpr int lane_width_in_bytes = 4; @@ -4049,7 +4049,7 @@ void TurboAssembler::I32x4ExtractLane(Register dst, Simd128Register src, mfvsrd(dst, scratch); } -void TurboAssembler::I16x8ExtractLaneU(Register dst, Simd128Register src, +void MacroAssembler::I16x8ExtractLaneU(Register dst, Simd128Register src, uint8_t imm_lane_idx, Simd128Register scratch) { constexpr int lane_width_in_bytes = 2; @@ -4057,28 +4057,28 @@ void TurboAssembler::I16x8ExtractLaneU(Register dst, Simd128Register src, mfvsrd(dst, scratch); } -void TurboAssembler::I16x8ExtractLaneS(Register dst, Simd128Register src, +void MacroAssembler::I16x8ExtractLaneS(Register dst, Simd128Register src, uint8_t imm_lane_idx, Simd128Register scratch) { I16x8ExtractLaneU(dst, src, imm_lane_idx, scratch); extsh(dst, dst); } -void TurboAssembler::I8x16ExtractLaneU(Register dst, Simd128Register src, +void MacroAssembler::I8x16ExtractLaneU(Register dst, Simd128Register src, uint8_t imm_lane_idx, Simd128Register scratch) { vextractub(scratch, src, Operand(15 - imm_lane_idx)); mfvsrd(dst, scratch); } -void TurboAssembler::I8x16ExtractLaneS(Register dst, Simd128Register src, +void MacroAssembler::I8x16ExtractLaneS(Register dst, Simd128Register src, uint8_t imm_lane_idx, Simd128Register scratch) { I8x16ExtractLaneU(dst, src, imm_lane_idx, scratch); extsb(dst, dst); } -void TurboAssembler::F64x2ReplaceLane(Simd128Register dst, Simd128Register src1, +void 
MacroAssembler::F64x2ReplaceLane(Simd128Register dst, Simd128Register src1, DoubleRegister src2, uint8_t imm_lane_idx, Register scratch1, Simd128Register scratch2) { @@ -4095,7 +4095,7 @@ void TurboAssembler::F64x2ReplaceLane(Simd128Register dst, Simd128Register src1, } } -void TurboAssembler::F32x4ReplaceLane(Simd128Register dst, Simd128Register src1, +void MacroAssembler::F32x4ReplaceLane(Simd128Register dst, Simd128Register src1, DoubleRegister src2, uint8_t imm_lane_idx, Register scratch1, DoubleRegister scratch2, @@ -4113,7 +4113,7 @@ void TurboAssembler::F32x4ReplaceLane(Simd128Register dst, Simd128Register src1, } } -void TurboAssembler::I64x2ReplaceLane(Simd128Register dst, Simd128Register src1, +void MacroAssembler::I64x2ReplaceLane(Simd128Register dst, Simd128Register src1, Register src2, uint8_t imm_lane_idx, Simd128Register scratch) { constexpr int lane_width_in_bytes = 8; @@ -4128,7 +4128,7 @@ void TurboAssembler::I64x2ReplaceLane(Simd128Register dst, Simd128Register src1, } } -void TurboAssembler::I32x4ReplaceLane(Simd128Register dst, Simd128Register src1, +void MacroAssembler::I32x4ReplaceLane(Simd128Register dst, Simd128Register src1, Register src2, uint8_t imm_lane_idx, Simd128Register scratch) { constexpr int lane_width_in_bytes = 4; @@ -4143,7 +4143,7 @@ void TurboAssembler::I32x4ReplaceLane(Simd128Register dst, Simd128Register src1, } } -void TurboAssembler::I16x8ReplaceLane(Simd128Register dst, Simd128Register src1, +void MacroAssembler::I16x8ReplaceLane(Simd128Register dst, Simd128Register src1, Register src2, uint8_t imm_lane_idx, Simd128Register scratch) { constexpr int lane_width_in_bytes = 2; @@ -4154,7 +4154,7 @@ void TurboAssembler::I16x8ReplaceLane(Simd128Register dst, Simd128Register src1, vinserth(dst, scratch, Operand((7 - imm_lane_idx) * lane_width_in_bytes)); } -void TurboAssembler::I8x16ReplaceLane(Simd128Register dst, Simd128Register src1, +void MacroAssembler::I8x16ReplaceLane(Simd128Register dst, Simd128Register src1, Register src2, uint8_t imm_lane_idx, Simd128Register scratch) { if (src1 != dst) { @@ -4164,7 +4164,7 @@ void TurboAssembler::I8x16ReplaceLane(Simd128Register dst, Simd128Register src1, vinsertb(dst, scratch, Operand(15 - imm_lane_idx)); } -void TurboAssembler::I64x2Mul(Simd128Register dst, Simd128Register src1, +void MacroAssembler::I64x2Mul(Simd128Register dst, Simd128Register src1, Simd128Register src2, Register scratch1, Register scratch2, Register scratch3, Simd128Register scratch4) { @@ -4191,7 +4191,7 @@ void TurboAssembler::I64x2Mul(Simd128Register dst, Simd128Register src1, } } -void TurboAssembler::I16x8Mul(Simd128Register dst, Simd128Register src1, +void MacroAssembler::I16x8Mul(Simd128Register dst, Simd128Register src1, Simd128Register src2) { vxor(kSimd128RegZero, kSimd128RegZero, kSimd128RegZero); vmladduhm(dst, src1, src2, kSimd128RegZero); @@ -4204,7 +4204,7 @@ void TurboAssembler::I16x8Mul(Simd128Register dst, Simd128Register src1, vsel(dst, src2, result, scratch2); \ /* Use xvmindp to turn any selected SNANs to QNANs. 
*/ \ xvmindp(dst, dst, dst); -void TurboAssembler::F64x2Min(Simd128Register dst, Simd128Register src1, +void MacroAssembler::F64x2Min(Simd128Register dst, Simd128Register src1, Simd128Register src2, Simd128Register scratch1, Simd128Register scratch2) { xvmindp(scratch1, src1, src2); @@ -4212,7 +4212,7 @@ void TurboAssembler::F64x2Min(Simd128Register dst, Simd128Register src1, F64X2_MIN_MAX_NAN(scratch1) } -void TurboAssembler::F64x2Max(Simd128Register dst, Simd128Register src1, +void MacroAssembler::F64x2Max(Simd128Register dst, Simd128Register src1, Simd128Register src2, Simd128Register scratch1, Simd128Register scratch2) { xvmaxdp(scratch1, src1, src2); @@ -4221,108 +4221,108 @@ void TurboAssembler::F64x2Max(Simd128Register dst, Simd128Register src1, } #undef F64X2_MIN_MAX_NAN -void TurboAssembler::F64x2Lt(Simd128Register dst, Simd128Register src1, +void MacroAssembler::F64x2Lt(Simd128Register dst, Simd128Register src1, Simd128Register src2) { xvcmpgtdp(dst, src2, src1); } -void TurboAssembler::F64x2Le(Simd128Register dst, Simd128Register src1, +void MacroAssembler::F64x2Le(Simd128Register dst, Simd128Register src1, Simd128Register src2) { xvcmpgedp(dst, src2, src1); } -void TurboAssembler::F64x2Ne(Simd128Register dst, Simd128Register src1, +void MacroAssembler::F64x2Ne(Simd128Register dst, Simd128Register src1, Simd128Register src2, Simd128Register scratch) { xvcmpeqdp(scratch, src1, src2); vnor(dst, scratch, scratch); } -void TurboAssembler::F32x4Lt(Simd128Register dst, Simd128Register src1, +void MacroAssembler::F32x4Lt(Simd128Register dst, Simd128Register src1, Simd128Register src2) { xvcmpgtsp(dst, src2, src1); } -void TurboAssembler::F32x4Le(Simd128Register dst, Simd128Register src1, +void MacroAssembler::F32x4Le(Simd128Register dst, Simd128Register src1, Simd128Register src2) { xvcmpgesp(dst, src2, src1); } -void TurboAssembler::F32x4Ne(Simd128Register dst, Simd128Register src1, +void MacroAssembler::F32x4Ne(Simd128Register dst, Simd128Register src1, Simd128Register src2, Simd128Register scratch) { xvcmpeqsp(scratch, src1, src2); vnor(dst, scratch, scratch); } -void TurboAssembler::I64x2Ne(Simd128Register dst, Simd128Register src1, +void MacroAssembler::I64x2Ne(Simd128Register dst, Simd128Register src1, Simd128Register src2, Simd128Register scratch) { vcmpequd(scratch, src1, src2); vnor(dst, scratch, scratch); } -void TurboAssembler::I64x2GeS(Simd128Register dst, Simd128Register src1, +void MacroAssembler::I64x2GeS(Simd128Register dst, Simd128Register src1, Simd128Register src2, Simd128Register scratch) { vcmpgtsd(scratch, src2, src1); vnor(dst, scratch, scratch); } -void TurboAssembler::I32x4Ne(Simd128Register dst, Simd128Register src1, +void MacroAssembler::I32x4Ne(Simd128Register dst, Simd128Register src1, Simd128Register src2, Simd128Register scratch) { vcmpequw(scratch, src1, src2); vnor(dst, scratch, scratch); } -void TurboAssembler::I32x4GeS(Simd128Register dst, Simd128Register src1, +void MacroAssembler::I32x4GeS(Simd128Register dst, Simd128Register src1, Simd128Register src2, Simd128Register scratch) { vcmpgtsw(scratch, src2, src1); vnor(dst, scratch, scratch); } -void TurboAssembler::I32x4GeU(Simd128Register dst, Simd128Register src1, +void MacroAssembler::I32x4GeU(Simd128Register dst, Simd128Register src1, Simd128Register src2, Simd128Register scratch) { vcmpequw(scratch, src1, src2); vcmpgtuw(dst, src1, src2); vor(dst, dst, scratch); } -void TurboAssembler::I16x8Ne(Simd128Register dst, Simd128Register src1, +void MacroAssembler::I16x8Ne(Simd128Register dst, 
Simd128Register src1, Simd128Register src2, Simd128Register scratch) { vcmpequh(scratch, src1, src2); vnor(dst, scratch, scratch); } -void TurboAssembler::I16x8GeS(Simd128Register dst, Simd128Register src1, +void MacroAssembler::I16x8GeS(Simd128Register dst, Simd128Register src1, Simd128Register src2, Simd128Register scratch) { vcmpgtsh(scratch, src2, src1); vnor(dst, scratch, scratch); } -void TurboAssembler::I16x8GeU(Simd128Register dst, Simd128Register src1, +void MacroAssembler::I16x8GeU(Simd128Register dst, Simd128Register src1, Simd128Register src2, Simd128Register scratch) { vcmpequh(scratch, src1, src2); vcmpgtuh(dst, src1, src2); vor(dst, dst, scratch); } -void TurboAssembler::I8x16Ne(Simd128Register dst, Simd128Register src1, +void MacroAssembler::I8x16Ne(Simd128Register dst, Simd128Register src1, Simd128Register src2, Simd128Register scratch) { vcmpequb(scratch, src1, src2); vnor(dst, scratch, scratch); } -void TurboAssembler::I8x16GeS(Simd128Register dst, Simd128Register src1, +void MacroAssembler::I8x16GeS(Simd128Register dst, Simd128Register src1, Simd128Register src2, Simd128Register scratch) { vcmpgtsb(scratch, src2, src1); vnor(dst, scratch, scratch); } -void TurboAssembler::I8x16GeU(Simd128Register dst, Simd128Register src1, +void MacroAssembler::I8x16GeU(Simd128Register dst, Simd128Register src1, Simd128Register src2, Simd128Register scratch) { vcmpequb(scratch, src1, src2); vcmpgtub(dst, src1, src2); vor(dst, dst, scratch); } -void TurboAssembler::I64x2Abs(Simd128Register dst, Simd128Register src, +void MacroAssembler::I64x2Abs(Simd128Register dst, Simd128Register src, Simd128Register scratch) { constexpr int shift_bits = 63; xxspltib(scratch, Operand(shift_bits)); @@ -4330,7 +4330,7 @@ void TurboAssembler::I64x2Abs(Simd128Register dst, Simd128Register src, vxor(dst, src, scratch); vsubudm(dst, dst, scratch); } -void TurboAssembler::I32x4Abs(Simd128Register dst, Simd128Register src, +void MacroAssembler::I32x4Abs(Simd128Register dst, Simd128Register src, Simd128Register scratch) { constexpr int shift_bits = 31; xxspltib(scratch, Operand(shift_bits)); @@ -4338,7 +4338,7 @@ void TurboAssembler::I32x4Abs(Simd128Register dst, Simd128Register src, vxor(dst, src, scratch); vsubuwm(dst, dst, scratch); } -void TurboAssembler::I16x8Abs(Simd128Register dst, Simd128Register src, +void MacroAssembler::I16x8Abs(Simd128Register dst, Simd128Register src, Simd128Register scratch) { constexpr int shift_bits = 15; xxspltib(scratch, Operand(shift_bits)); @@ -4346,13 +4346,13 @@ void TurboAssembler::I16x8Abs(Simd128Register dst, Simd128Register src, vxor(dst, src, scratch); vsubuhm(dst, dst, scratch); } -void TurboAssembler::I16x8Neg(Simd128Register dst, Simd128Register src, +void MacroAssembler::I16x8Neg(Simd128Register dst, Simd128Register src, Simd128Register scratch) { vspltish(scratch, Operand(1)); vnor(dst, src, src); vadduhm(dst, scratch, dst); } -void TurboAssembler::I8x16Abs(Simd128Register dst, Simd128Register src, +void MacroAssembler::I8x16Abs(Simd128Register dst, Simd128Register src, Simd128Register scratch) { constexpr int shift_bits = 7; xxspltib(scratch, Operand(shift_bits)); @@ -4360,38 +4360,38 @@ void TurboAssembler::I8x16Abs(Simd128Register dst, Simd128Register src, vxor(dst, src, scratch); vsububm(dst, dst, scratch); } -void TurboAssembler::I8x16Neg(Simd128Register dst, Simd128Register src, +void MacroAssembler::I8x16Neg(Simd128Register dst, Simd128Register src, Simd128Register scratch) { xxspltib(scratch, Operand(1)); vnor(dst, src, src); vaddubm(dst, scratch, dst); } 
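The I16x8Neg and I8x16Neg sequences above negate each lane as complement-plus-one: splat an immediate 1 into the scratch register, NOT the source with vnor, then add lane-wise. A minimal scalar sketch of that arithmetic, assuming plain C++ with made-up names; it models the math, not the VSX instructions:

#include <cstdint>
#include <cstdio>

// Lane-wise negation as -x == ~x + 1, mirroring
// xxspltib(scratch, Operand(1)); vnor(dst, src, src);
// vaddubm(dst, scratch, dst) from I8x16Neg above.
void I8x16NegModel(uint8_t dst[16], const uint8_t src[16]) {
  for (int lane = 0; lane < 16; ++lane) {
    uint8_t scratch = 1;                                   // xxspltib
    uint8_t inverted = static_cast<uint8_t>(~src[lane]);   // vnor
    dst[lane] = static_cast<uint8_t>(scratch + inverted);  // vaddubm
  }
}

int main() {
  uint8_t src[16] = {0, 1, 2, 127, 128, 255};
  uint8_t dst[16];
  I8x16NegModel(dst, src);
  for (int i = 0; i < 6; ++i) printf("%u -> %u\n", src[i], dst[i]);
  return 0;  // 0->0, 1->255, 2->254, 127->129, 128->128, 255->1
}

The same shape with vspltish and vadduhm gives the 16-bit I16x8Neg variant.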
-void TurboAssembler::F64x2Pmin(Simd128Register dst, Simd128Register src1, +void MacroAssembler::F64x2Pmin(Simd128Register dst, Simd128Register src1, Simd128Register src2, Simd128Register scratch) { xvcmpgtdp(kScratchSimd128Reg, src1, src2); vsel(dst, src1, src2, kScratchSimd128Reg); } -void TurboAssembler::F64x2Pmax(Simd128Register dst, Simd128Register src1, +void MacroAssembler::F64x2Pmax(Simd128Register dst, Simd128Register src1, Simd128Register src2, Simd128Register scratch) { xvcmpgtdp(kScratchSimd128Reg, src2, src1); vsel(dst, src1, src2, kScratchSimd128Reg); } -void TurboAssembler::F32x4Pmin(Simd128Register dst, Simd128Register src1, +void MacroAssembler::F32x4Pmin(Simd128Register dst, Simd128Register src1, Simd128Register src2, Simd128Register scratch) { xvcmpgtsp(kScratchSimd128Reg, src1, src2); vsel(dst, src1, src2, kScratchSimd128Reg); } -void TurboAssembler::F32x4Pmax(Simd128Register dst, Simd128Register src1, +void MacroAssembler::F32x4Pmax(Simd128Register dst, Simd128Register src1, Simd128Register src2, Simd128Register scratch) { xvcmpgtsp(kScratchSimd128Reg, src2, src1); vsel(dst, src1, src2, kScratchSimd128Reg); } -void TurboAssembler::I32x4SConvertF32x4(Simd128Register dst, +void MacroAssembler::I32x4SConvertF32x4(Simd128Register dst, Simd128Register src, Simd128Register scratch) { // NaN to 0 @@ -4400,37 +4400,37 @@ void TurboAssembler::I32x4SConvertF32x4(Simd128Register dst, xvcvspsxws(dst, scratch); } -void TurboAssembler::I16x8SConvertI32x4(Simd128Register dst, +void MacroAssembler::I16x8SConvertI32x4(Simd128Register dst, Simd128Register src1, Simd128Register src2) { vpkswss(dst, src2, src1); } -void TurboAssembler::I16x8UConvertI32x4(Simd128Register dst, +void MacroAssembler::I16x8UConvertI32x4(Simd128Register dst, Simd128Register src1, Simd128Register src2) { vpkswus(dst, src2, src1); } -void TurboAssembler::I8x16SConvertI16x8(Simd128Register dst, +void MacroAssembler::I8x16SConvertI16x8(Simd128Register dst, Simd128Register src1, Simd128Register src2) { vpkshss(dst, src2, src1); } -void TurboAssembler::I8x16UConvertI16x8(Simd128Register dst, +void MacroAssembler::I8x16UConvertI16x8(Simd128Register dst, Simd128Register src1, Simd128Register src2) { vpkshus(dst, src2, src1); } -void TurboAssembler::F64x2ConvertLowI32x4S(Simd128Register dst, +void MacroAssembler::F64x2ConvertLowI32x4S(Simd128Register dst, Simd128Register src) { vupklsw(dst, src); xvcvsxddp(dst, dst); } -void TurboAssembler::F64x2ConvertLowI32x4U(Simd128Register dst, +void MacroAssembler::F64x2ConvertLowI32x4U(Simd128Register dst, Simd128Register src, Register scratch1, Simd128Register scratch2) { @@ -4444,7 +4444,7 @@ void TurboAssembler::F64x2ConvertLowI32x4U(Simd128Register dst, xvcvuxddp(dst, dst); } -void TurboAssembler::I64x2UConvertI32x4Low(Simd128Register dst, +void MacroAssembler::I64x2UConvertI32x4Low(Simd128Register dst, Simd128Register src, Register scratch1, Simd128Register scratch2) { @@ -4457,7 +4457,7 @@ void TurboAssembler::I64x2UConvertI32x4Low(Simd128Register dst, vand(dst, scratch2, dst); } -void TurboAssembler::I64x2UConvertI32x4High(Simd128Register dst, +void MacroAssembler::I64x2UConvertI32x4High(Simd128Register dst, Simd128Register src, Register scratch1, Simd128Register scratch2) { @@ -4470,7 +4470,7 @@ void TurboAssembler::I64x2UConvertI32x4High(Simd128Register dst, vand(dst, scratch2, dst); } -void TurboAssembler::I32x4UConvertI16x8Low(Simd128Register dst, +void MacroAssembler::I32x4UConvertI16x8Low(Simd128Register dst, Simd128Register src, Register scratch1, Simd128Register 
scratch2) { @@ -4482,7 +4482,7 @@ void TurboAssembler::I32x4UConvertI16x8Low(Simd128Register dst, vand(dst, scratch2, dst); } -void TurboAssembler::I32x4UConvertI16x8High(Simd128Register dst, +void MacroAssembler::I32x4UConvertI16x8High(Simd128Register dst, Simd128Register src, Register scratch1, Simd128Register scratch2) { @@ -4494,7 +4494,7 @@ void TurboAssembler::I32x4UConvertI16x8High(Simd128Register dst, vand(dst, scratch2, dst); } -void TurboAssembler::I16x8UConvertI8x16Low(Simd128Register dst, +void MacroAssembler::I16x8UConvertI8x16Low(Simd128Register dst, Simd128Register src, Register scratch1, Simd128Register scratch2) { @@ -4506,7 +4506,7 @@ void TurboAssembler::I16x8UConvertI8x16Low(Simd128Register dst, vand(dst, scratch2, dst); } -void TurboAssembler::I16x8UConvertI8x16High(Simd128Register dst, +void MacroAssembler::I16x8UConvertI8x16High(Simd128Register dst, Simd128Register src, Register scratch1, Simd128Register scratch2) { @@ -4518,7 +4518,7 @@ void TurboAssembler::I16x8UConvertI8x16High(Simd128Register dst, vand(dst, scratch2, dst); } -void TurboAssembler::I8x16BitMask(Register dst, Simd128Register src, +void MacroAssembler::I8x16BitMask(Register dst, Simd128Register src, Register scratch1, Register scratch2, Simd128Register scratch3) { if (CpuFeatures::IsSupported(PPC_10_PLUS)) { @@ -4532,21 +4532,21 @@ void TurboAssembler::I8x16BitMask(Register dst, Simd128Register src, } } -void TurboAssembler::I32x4DotI16x8S(Simd128Register dst, Simd128Register src1, +void MacroAssembler::I32x4DotI16x8S(Simd128Register dst, Simd128Register src1, Simd128Register src2, Simd128Register scratch) { vxor(scratch, scratch, scratch); vmsumshm(dst, src1, src2, scratch); } -void TurboAssembler::I16x8Q15MulRSatS(Simd128Register dst, Simd128Register src1, +void MacroAssembler::I16x8Q15MulRSatS(Simd128Register dst, Simd128Register src1, Simd128Register src2, Simd128Register scratch) { vxor(scratch, scratch, scratch); vmhraddshs(dst, src1, src2, scratch); } -void TurboAssembler::I8x16Swizzle(Simd128Register dst, Simd128Register src1, +void MacroAssembler::I8x16Swizzle(Simd128Register dst, Simd128Register src1, Simd128Register src2, Simd128Register scratch) { // Saturate the indices to 5 bits. 
Input indices more than 31 should @@ -4559,7 +4559,7 @@ void TurboAssembler::I8x16Swizzle(Simd128Register dst, Simd128Register src1, vperm(dst, dst, kSimd128RegZero, scratch); } -void TurboAssembler::I8x16Shuffle(Simd128Register dst, Simd128Register src1, +void MacroAssembler::I8x16Shuffle(Simd128Register dst, Simd128Register src1, Simd128Register src2, uint64_t high, uint64_t low, Register scratch1, Register scratch2, Simd128Register scratch3) { @@ -4574,25 +4574,25 @@ void TurboAssembler::I8x16Shuffle(Simd128Register dst, Simd128Register src1, mul_even(scratch2, src, scratch1); \ mul_odd(scratch1, src, scratch1); \ add(dst, scratch2, scratch1); -void TurboAssembler::I32x4ExtAddPairwiseI16x8S(Simd128Register dst, +void MacroAssembler::I32x4ExtAddPairwiseI16x8S(Simd128Register dst, Simd128Register src, Simd128Register scratch1, Simd128Register scratch2) { EXT_ADD_PAIRWISE(vspltish, vmulesh, vmulosh, vadduwm) } -void TurboAssembler::I32x4ExtAddPairwiseI16x8U(Simd128Register dst, +void MacroAssembler::I32x4ExtAddPairwiseI16x8U(Simd128Register dst, Simd128Register src, Simd128Register scratch1, Simd128Register scratch2) { EXT_ADD_PAIRWISE(vspltish, vmuleuh, vmulouh, vadduwm) } -void TurboAssembler::I16x8ExtAddPairwiseI8x16S(Simd128Register dst, +void MacroAssembler::I16x8ExtAddPairwiseI8x16S(Simd128Register dst, Simd128Register src, Simd128Register scratch1, Simd128Register scratch2) { EXT_ADD_PAIRWISE(xxspltib, vmulesb, vmulosb, vadduhm) } -void TurboAssembler::I16x8ExtAddPairwiseI8x16U(Simd128Register dst, +void MacroAssembler::I16x8ExtAddPairwiseI8x16U(Simd128Register dst, Simd128Register src, Simd128Register scratch1, Simd128Register scratch2) { @@ -4600,7 +4600,7 @@ void TurboAssembler::I16x8ExtAddPairwiseI8x16U(Simd128Register dst, } #undef EXT_ADD_PAIRWISE -void TurboAssembler::F64x2PromoteLowF32x4(Simd128Register dst, +void MacroAssembler::F64x2PromoteLowF32x4(Simd128Register dst, Simd128Register src) { constexpr int lane_number = 8; vextractd(dst, src, Operand(lane_number)); @@ -4608,7 +4608,7 @@ void TurboAssembler::F64x2PromoteLowF32x4(Simd128Register dst, xvcvspdp(dst, dst); } -void TurboAssembler::F32x4DemoteF64x2Zero(Simd128Register dst, +void MacroAssembler::F32x4DemoteF64x2Zero(Simd128Register dst, Simd128Register src, Simd128Register scratch) { constexpr int lane_number = 8; @@ -4619,7 +4619,7 @@ void TurboAssembler::F32x4DemoteF64x2Zero(Simd128Register dst, vinsertd(dst, scratch, Operand(lane_number)); } -void TurboAssembler::I32x4TruncSatF64x2SZero(Simd128Register dst, +void MacroAssembler::I32x4TruncSatF64x2SZero(Simd128Register dst, Simd128Register src, Simd128Register scratch) { constexpr int lane_number = 8; @@ -4633,7 +4633,7 @@ void TurboAssembler::I32x4TruncSatF64x2SZero(Simd128Register dst, vinsertd(dst, scratch, Operand(lane_number)); } -void TurboAssembler::I32x4TruncSatF64x2UZero(Simd128Register dst, +void MacroAssembler::I32x4TruncSatF64x2UZero(Simd128Register dst, Simd128Register src, Simd128Register scratch) { constexpr int lane_number = 8; @@ -4649,7 +4649,7 @@ void TurboAssembler::I32x4TruncSatF64x2UZero(Simd128Register dst, #else #define MAYBE_REVERSE_BYTES(reg, instr) #endif -void TurboAssembler::LoadLane64LE(Simd128Register dst, const MemOperand& mem, +void MacroAssembler::LoadLane64LE(Simd128Register dst, const MemOperand& mem, int lane, Register scratch1, Simd128Register scratch2) { constexpr int lane_width_in_bytes = 8; @@ -4657,7 +4657,7 @@ void TurboAssembler::LoadLane64LE(Simd128Register dst, const MemOperand& mem, MAYBE_REVERSE_BYTES(scratch2, 
xxbrd) vinsertd(dst, scratch2, Operand((1 - lane) * lane_width_in_bytes)); } -void TurboAssembler::LoadLane32LE(Simd128Register dst, const MemOperand& mem, +void MacroAssembler::LoadLane32LE(Simd128Register dst, const MemOperand& mem, int lane, Register scratch1, Simd128Register scratch2) { constexpr int lane_width_in_bytes = 4; @@ -4665,7 +4665,7 @@ void TurboAssembler::LoadLane32LE(Simd128Register dst, const MemOperand& mem, MAYBE_REVERSE_BYTES(scratch2, xxbrw) vinsertw(dst, scratch2, Operand((3 - lane) * lane_width_in_bytes)); } -void TurboAssembler::LoadLane16LE(Simd128Register dst, const MemOperand& mem, +void MacroAssembler::LoadLane16LE(Simd128Register dst, const MemOperand& mem, int lane, Register scratch1, Simd128Register scratch2) { constexpr int lane_width_in_bytes = 2; @@ -4673,7 +4673,7 @@ void TurboAssembler::LoadLane16LE(Simd128Register dst, const MemOperand& mem, MAYBE_REVERSE_BYTES(scratch2, xxbrh) vinserth(dst, scratch2, Operand((7 - lane) * lane_width_in_bytes)); } -void TurboAssembler::LoadLane8LE(Simd128Register dst, const MemOperand& mem, +void MacroAssembler::LoadLane8LE(Simd128Register dst, const MemOperand& mem, int lane, Register scratch1, Simd128Register scratch2) { LoadSimd128Uint8(scratch2, mem, scratch1); @@ -4681,7 +4681,7 @@ void TurboAssembler::LoadLane8LE(Simd128Register dst, const MemOperand& mem, } #undef MAYBE_REVERSE_BYTES -void TurboAssembler::V128AnyTrue(Register dst, Simd128Register src, +void MacroAssembler::V128AnyTrue(Register dst, Simd128Register src, Register scratch1, Register scratch2, Simd128Register scratch3) { constexpr uint8_t fxm = 0x2; // field mask. @@ -4695,18 +4695,18 @@ void TurboAssembler::V128AnyTrue(Register dst, Simd128Register src, isel(dst, scratch1, scratch2, bit_number); } -void TurboAssembler::S128Not(Simd128Register dst, Simd128Register src) { +void MacroAssembler::S128Not(Simd128Register dst, Simd128Register src) { vnor(dst, src, src); } -void TurboAssembler::S128Const(Simd128Register dst, uint64_t high, uint64_t low, +void MacroAssembler::S128Const(Simd128Register dst, uint64_t high, uint64_t low, Register scratch1, Register scratch2) { mov(scratch1, Operand(low)); mov(scratch2, Operand(high)); mtvsrdd(dst, scratch2, scratch1); } -void TurboAssembler::S128Select(Simd128Register dst, Simd128Register src1, +void MacroAssembler::S128Select(Simd128Register dst, Simd128Register src1, Simd128Register src2, Simd128Register mask) { vsel(dst, src2, src1, mask); } @@ -4726,7 +4726,7 @@ Register GetRegisterThatIsNotOneOf(Register reg1, Register reg2, Register reg3, UNREACHABLE(); } -void TurboAssembler::SwapP(Register src, Register dst, Register scratch) { +void MacroAssembler::SwapP(Register src, Register dst, Register scratch) { if (src == dst) return; DCHECK(!AreAliased(src, dst, scratch)); mr(scratch, src); @@ -4734,7 +4734,7 @@ void TurboAssembler::SwapP(Register src, Register dst, Register scratch) { mr(dst, scratch); } -void TurboAssembler::SwapP(Register src, MemOperand dst, Register scratch) { +void MacroAssembler::SwapP(Register src, MemOperand dst, Register scratch) { if (dst.ra() != r0 && dst.ra().is_valid()) DCHECK(!AreAliased(src, dst.ra(), scratch)); if (dst.rb() != r0 && dst.rb().is_valid()) @@ -4745,7 +4745,7 @@ void TurboAssembler::SwapP(Register src, MemOperand dst, Register scratch) { StoreU64(scratch, dst, r0); } -void TurboAssembler::SwapP(MemOperand src, MemOperand dst, Register scratch_0, +void MacroAssembler::SwapP(MemOperand src, MemOperand dst, Register scratch_0, Register scratch_1) { if (src.ra() != r0 
&& src.ra().is_valid()) DCHECK(!AreAliased(src.ra(), scratch_0, scratch_1)); @@ -4777,7 +4777,7 @@ void TurboAssembler::SwapP(MemOperand src, MemOperand dst, Register scratch_0, } } -void TurboAssembler::SwapFloat32(DoubleRegister src, DoubleRegister dst, +void MacroAssembler::SwapFloat32(DoubleRegister src, DoubleRegister dst, DoubleRegister scratch) { if (src == dst) return; DCHECK(!AreAliased(src, dst, scratch)); @@ -4786,7 +4786,7 @@ void TurboAssembler::SwapFloat32(DoubleRegister src, DoubleRegister dst, fmr(dst, scratch); } -void TurboAssembler::SwapFloat32(DoubleRegister src, MemOperand dst, +void MacroAssembler::SwapFloat32(DoubleRegister src, MemOperand dst, DoubleRegister scratch) { DCHECK(!AreAliased(src, scratch)); fmr(scratch, src); @@ -4794,7 +4794,7 @@ void TurboAssembler::SwapFloat32(DoubleRegister src, MemOperand dst, StoreF32(scratch, dst, r0); } -void TurboAssembler::SwapFloat32(MemOperand src, MemOperand dst, +void MacroAssembler::SwapFloat32(MemOperand src, MemOperand dst, DoubleRegister scratch_0, DoubleRegister scratch_1) { DCHECK(!AreAliased(scratch_0, scratch_1)); @@ -4804,7 +4804,7 @@ void TurboAssembler::SwapFloat32(MemOperand src, MemOperand dst, StoreF32(scratch_1, src, r0); } -void TurboAssembler::SwapDouble(DoubleRegister src, DoubleRegister dst, +void MacroAssembler::SwapDouble(DoubleRegister src, DoubleRegister dst, DoubleRegister scratch) { if (src == dst) return; DCHECK(!AreAliased(src, dst, scratch)); @@ -4813,7 +4813,7 @@ void TurboAssembler::SwapDouble(DoubleRegister src, DoubleRegister dst, fmr(dst, scratch); } -void TurboAssembler::SwapDouble(DoubleRegister src, MemOperand dst, +void MacroAssembler::SwapDouble(DoubleRegister src, MemOperand dst, DoubleRegister scratch) { DCHECK(!AreAliased(src, scratch)); fmr(scratch, src); @@ -4821,7 +4821,7 @@ void TurboAssembler::SwapDouble(DoubleRegister src, MemOperand dst, StoreF64(scratch, dst, r0); } -void TurboAssembler::SwapDouble(MemOperand src, MemOperand dst, +void MacroAssembler::SwapDouble(MemOperand src, MemOperand dst, DoubleRegister scratch_0, DoubleRegister scratch_1) { DCHECK(!AreAliased(scratch_0, scratch_1)); @@ -4831,7 +4831,7 @@ void TurboAssembler::SwapDouble(MemOperand src, MemOperand dst, StoreF64(scratch_1, src, r0); } -void TurboAssembler::SwapSimd128(Simd128Register src, Simd128Register dst, +void MacroAssembler::SwapSimd128(Simd128Register src, Simd128Register dst, Simd128Register scratch) { if (src == dst) return; vor(scratch, src, src); @@ -4839,7 +4839,7 @@ void TurboAssembler::SwapSimd128(Simd128Register src, Simd128Register dst, vor(dst, scratch, scratch); } -void TurboAssembler::SwapSimd128(Simd128Register src, MemOperand dst, +void MacroAssembler::SwapSimd128(Simd128Register src, MemOperand dst, Simd128Register scratch1, Register scratch2) { DCHECK(src != scratch1); LoadSimd128(scratch1, dst, scratch2); @@ -4847,7 +4847,7 @@ void TurboAssembler::SwapSimd128(Simd128Register src, MemOperand dst, vor(src, scratch1, scratch1); } -void TurboAssembler::SwapSimd128(MemOperand src, MemOperand dst, +void MacroAssembler::SwapSimd128(MemOperand src, MemOperand dst, Simd128Register scratch1, Simd128Register scratch2, Register scratch3) { LoadSimd128(scratch1, src, scratch3); @@ -4857,7 +4857,7 @@ void TurboAssembler::SwapSimd128(MemOperand src, MemOperand dst, StoreSimd128(scratch2, src, scratch3); } -void TurboAssembler::ByteReverseU16(Register dst, Register val, +void MacroAssembler::ByteReverseU16(Register dst, Register val, Register scratch) { if (CpuFeatures::IsSupported(PPC_10_PLUS)) { 
brh(dst, val); @@ -4870,7 +4870,7 @@ void TurboAssembler::ByteReverseU16(Register dst, Register val, ZeroExtHalfWord(dst, dst); } -void TurboAssembler::ByteReverseU32(Register dst, Register val, +void MacroAssembler::ByteReverseU32(Register dst, Register val, Register scratch) { if (CpuFeatures::IsSupported(PPC_10_PLUS)) { brw(dst, val); @@ -4883,7 +4883,7 @@ void TurboAssembler::ByteReverseU32(Register dst, Register val, ZeroExtWord32(dst, scratch); } -void TurboAssembler::ByteReverseU64(Register dst, Register val, Register) { +void MacroAssembler::ByteReverseU64(Register dst, Register val, Register) { if (CpuFeatures::IsSupported(PPC_10_PLUS)) { brd(dst, val); return; @@ -4894,17 +4894,17 @@ void TurboAssembler::ByteReverseU64(Register dst, Register val, Register) { addi(sp, sp, Operand(kSystemPointerSize)); } -void TurboAssembler::JumpIfEqual(Register x, int32_t y, Label* dest) { +void MacroAssembler::JumpIfEqual(Register x, int32_t y, Label* dest) { CmpS64(x, Operand(y), r0); beq(dest); } -void TurboAssembler::JumpIfLessThan(Register x, int32_t y, Label* dest) { +void MacroAssembler::JumpIfLessThan(Register x, int32_t y, Label* dest) { CmpS64(x, Operand(y), r0); blt(dest); } -void TurboAssembler::LoadEntryFromBuiltinIndex(Register builtin_index) { +void MacroAssembler::LoadEntryFromBuiltinIndex(Register builtin_index) { static_assert(kSystemPointerSize == 8); static_assert(kSmiTagSize == 1); static_assert(kSmiTag == 0); @@ -4923,31 +4923,31 @@ void TurboAssembler::LoadEntryFromBuiltinIndex(Register builtin_index) { LoadU64(builtin_index, MemOperand(kRootRegister, builtin_index)); } -void TurboAssembler::CallBuiltinByIndex(Register builtin_index) { +void MacroAssembler::CallBuiltinByIndex(Register builtin_index) { LoadEntryFromBuiltinIndex(builtin_index); Call(builtin_index); } -void TurboAssembler::LoadEntryFromBuiltin(Builtin builtin, +void MacroAssembler::LoadEntryFromBuiltin(Builtin builtin, Register destination) { ASM_CODE_COMMENT(this); LoadU64(destination, EntryFromBuiltinAsOperand(builtin)); } -MemOperand TurboAssembler::EntryFromBuiltinAsOperand(Builtin builtin) { +MemOperand MacroAssembler::EntryFromBuiltinAsOperand(Builtin builtin) { ASM_CODE_COMMENT(this); DCHECK(root_array_available()); return MemOperand(kRootRegister, IsolateData::BuiltinEntrySlotOffset(builtin)); } -void TurboAssembler::LoadCodeEntry(Register destination, Register code_object) { +void MacroAssembler::LoadCodeEntry(Register destination, Register code_object) { ASM_CODE_COMMENT(this); LoadU64(destination, FieldMemOperand(code_object, Code::kCodeEntryPointOffset), r0); } -void TurboAssembler::LoadCodeInstructionStreamNonBuiltin(Register destination, +void MacroAssembler::LoadCodeInstructionStreamNonBuiltin(Register destination, Register code_object) { ASM_CODE_COMMENT(this); // Compute the InstructionStream object pointer from the code entry point. 
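EntryFromBuiltinAsOperand above resolves a builtin to MemOperand(kRootRegister, IsolateData::BuiltinEntrySlotOffset(builtin)), so LoadEntryFromBuiltin and CallBuiltinByIndex amount to an indexed load from a flat entry table followed by an indirect call. A self-contained sketch of that shape, with a hypothetical FakeIsolateData standing in for the real IsolateData layout:

#include <cstdint>
#include <cstdio>

using BuiltinEntry = void (*)();

struct FakeIsolateData {   // stand-in for IsolateData
  uint64_t unrelated[4];   // fields before the entry table
  BuiltinEntry builtin_entry_table[2];
};

void HelloBuiltin() { puts("builtin 0"); }
void WorldBuiltin() { puts("builtin 1"); }

// Models CallBuiltinByIndex: index into the table reached from the
// "root register" base, load the entry, call indirectly.
void CallBuiltinByIndexModel(const FakeIsolateData* root, int index) {
  BuiltinEntry entry = root->builtin_entry_table[index];  // LoadU64(...)
  entry();                                                // Call(builtin_index)
}

int main() {
  FakeIsolateData data{{}, {HelloBuiltin, WorldBuiltin}};
  CallBuiltinByIndexModel(&data, 1);  // prints "builtin 1"
  return 0;
}

The real code first turns a Smi-tagged index into the slot offset; the static_asserts in LoadEntryFromBuiltinIndex pin down the pointer size and Smi tagging that computation relies on.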
@@ -4957,20 +4957,20 @@ void TurboAssembler::LoadCodeInstructionStreamNonBuiltin(Register destination, Operand(InstructionStream::kHeaderSize - kHeapObjectTag)); } -void TurboAssembler::CallCodeObject(Register code_object) { +void MacroAssembler::CallCodeObject(Register code_object) { ASM_CODE_COMMENT(this); LoadCodeEntry(code_object, code_object); Call(code_object); } -void TurboAssembler::JumpCodeObject(Register code_object, JumpMode jump_mode) { +void MacroAssembler::JumpCodeObject(Register code_object, JumpMode jump_mode) { ASM_CODE_COMMENT(this); DCHECK_EQ(JumpMode::kJump, jump_mode); LoadCodeEntry(code_object, code_object); Jump(code_object); } -void TurboAssembler::StoreReturnAddressAndCall(Register target) { +void MacroAssembler::StoreReturnAddressAndCall(Register target) { // This generates the final instruction sequence for calls to C functions // once an exit frame has been constructed. // @@ -5004,7 +5004,7 @@ void TurboAssembler::StoreReturnAddressAndCall(Register target) { SizeOfCodeGeneratedSince(&start_call)); } -void TurboAssembler::CallForDeoptimization(Builtin target, int, Label* exit, +void MacroAssembler::CallForDeoptimization(Builtin target, int, Label* exit, DeoptimizeKind kind, Label* ret, Label*) { BlockTrampolinePoolScope block_trampoline_pool(this); @@ -5017,30 +5017,30 @@ void TurboAssembler::CallForDeoptimization(Builtin target, int, Label* exit, : Deoptimizer::kEagerDeoptExitSize); } -void TurboAssembler::ZeroExtByte(Register dst, Register src) { +void MacroAssembler::ZeroExtByte(Register dst, Register src) { clrldi(dst, src, Operand(56)); } -void TurboAssembler::ZeroExtHalfWord(Register dst, Register src) { +void MacroAssembler::ZeroExtHalfWord(Register dst, Register src) { clrldi(dst, src, Operand(48)); } -void TurboAssembler::ZeroExtWord32(Register dst, Register src) { +void MacroAssembler::ZeroExtWord32(Register dst, Register src) { clrldi(dst, src, Operand(32)); } -void TurboAssembler::Trap() { stop(); } -void TurboAssembler::DebugBreak() { stop(); } +void MacroAssembler::Trap() { stop(); } +void MacroAssembler::DebugBreak() { stop(); } -void TurboAssembler::Popcnt32(Register dst, Register src) { popcntw(dst, src); } +void MacroAssembler::Popcnt32(Register dst, Register src) { popcntw(dst, src); } -void TurboAssembler::Popcnt64(Register dst, Register src) { popcntd(dst, src); } +void MacroAssembler::Popcnt64(Register dst, Register src) { popcntd(dst, src); } -void TurboAssembler::CountLeadingZerosU32(Register dst, Register src, RCBit r) { +void MacroAssembler::CountLeadingZerosU32(Register dst, Register src, RCBit r) { cntlzw(dst, src, r); } -void TurboAssembler::CountLeadingZerosU64(Register dst, Register src, RCBit r) { +void MacroAssembler::CountLeadingZerosU64(Register dst, Register src, RCBit r) { cntlzd(dst, src, r); } @@ -5057,7 +5057,7 @@ void TurboAssembler::CountLeadingZerosU64(Register dst, Register src, RCBit r) { addi(dst, dst, Operand(1)); /* dst++ */ \ bdnz(&loop); \ bind(&done); -void TurboAssembler::CountTrailingZerosU32(Register dst, Register src, +void MacroAssembler::CountTrailingZerosU32(Register dst, Register src, Register scratch1, Register scratch2, RCBit r) { if (CpuFeatures::IsSupported(PPC_9_PLUS)) { @@ -5067,7 +5067,7 @@ void TurboAssembler::CountTrailingZerosU32(Register dst, Register src, } } -void TurboAssembler::CountTrailingZerosU64(Register dst, Register src, +void MacroAssembler::CountTrailingZerosU64(Register dst, Register src, Register scratch1, Register scratch2, RCBit r) { if (CpuFeatures::IsSupported(PPC_9_PLUS)) { 
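CountTrailingZerosU32/U64 above take a single-instruction path when CpuFeatures::IsSupported(PPC_9_PLUS) holds and otherwise expand the COUNT_TRAILING_ZEROES_SLOW macro, whose visible tail bumps dst once per bdnz iteration. A scalar sketch of that split, assuming GCC/Clang's __builtin_ctz for the fast path and reading the slow path as a shift-and-count loop (the macro's elided head may differ):

#include <cstdint>
#include <cstdio>

int CountTrailingZerosU32Model(uint32_t src, bool has_fast_ctz) {
  if (src == 0) return 32;
  if (has_fast_ctz) {
    return __builtin_ctz(src);  // models the Power9+ cnttzw path
  }
  // Slow path: shift until the lowest set bit falls out, counting
  // iterations (the addi/bdnz loop in the macro above).
  int dst = 0;
  while ((src & 1) == 0) {
    src >>= 1;
    dst++;  // addi(dst, dst, Operand(1)) /* dst++ */
  }
  return dst;
}

int main() {
  printf("%d %d\n", CountTrailingZerosU32Model(0x50, true),
         CountTrailingZerosU32Model(0x50, false));  // 4 4
  return 0;
}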
@@ -5078,14 +5078,14 @@ void TurboAssembler::CountTrailingZerosU64(Register dst, Register src, } #undef COUNT_TRAILING_ZEROES_SLOW -void TurboAssembler::ClearByteU64(Register dst, int byte_idx) { +void MacroAssembler::ClearByteU64(Register dst, int byte_idx) { CHECK(0 <= byte_idx && byte_idx <= 7); int shift = byte_idx*8; rldicl(dst, dst, shift, 8); rldicl(dst, dst, 64-shift, 0); } -void TurboAssembler::ReverseBitsU64(Register dst, Register src, +void MacroAssembler::ReverseBitsU64(Register dst, Register src, Register scratch1, Register scratch2) { ByteReverseU64(dst, src); for (int i = 0; i < 8; i++) { @@ -5093,7 +5093,7 @@ void TurboAssembler::ReverseBitsU64(Register dst, Register src, } } -void TurboAssembler::ReverseBitsU32(Register dst, Register src, +void MacroAssembler::ReverseBitsU32(Register dst, Register src, Register scratch1, Register scratch2) { ByteReverseU32(dst, src, scratch1); for (int i = 4; i < 8; i++) { @@ -5102,7 +5102,7 @@ void TurboAssembler::ReverseBitsU32(Register dst, Register src, } // byte_idx=7 refers to least significant byte -void TurboAssembler::ReverseBitsInSingleByteU64(Register dst, Register src, +void MacroAssembler::ReverseBitsInSingleByteU64(Register dst, Register src, Register scratch1, Register scratch2, int byte_idx) { diff --git a/src/codegen/ppc/macro-assembler-ppc.h b/src/codegen/ppc/macro-assembler-ppc.h index f85f4f3c57..08a075a477 100644 --- a/src/codegen/ppc/macro-assembler-ppc.h +++ b/src/codegen/ppc/macro-assembler-ppc.h @@ -47,9 +47,9 @@ Register GetRegisterThatIsNotOneOf(Register reg1, Register reg2 = no_reg, #define ClearRightImm clrrwi #endif -class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase { +class V8_EXPORT_PRIVATE MacroAssembler : public MacroAssemblerBase { public: - using TurboAssemblerBase::TurboAssemblerBase; + using MacroAssemblerBase::MacroAssemblerBase; void CallBuiltin(Builtin builtin, Condition cond = al); void TailCallBuiltin(Builtin builtin, Condition cond = al, @@ -1438,21 +1438,6 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase { void S128Select(Simd128Register dst, Simd128Register src1, Simd128Register src2, Simd128Register mask); - private: - static const int kSmiShift = kSmiTagSize + kSmiShiftSize; - - int CalculateStackPassedWords(int num_reg_arguments, - int num_double_arguments); - void CallCFunctionHelper(Register function, int num_reg_arguments, - int num_double_arguments, - bool has_function_descriptor); -}; - -// MacroAssembler implements a collection of frequently used acros. -class V8_EXPORT_PRIVATE MacroAssembler : public TurboAssembler { - public: - using TurboAssembler::TurboAssembler; - // It assumes that the arguments are located below the stack pointer. // argc is the number of arguments not including the receiver. // TODO(victorgomes): Remove this function once we stick with the reversed @@ -1745,6 +1730,12 @@ class V8_EXPORT_PRIVATE MacroAssembler : public TurboAssembler { private: static const int kSmiShift = kSmiTagSize + kSmiShiftSize; + int CalculateStackPassedWords(int num_reg_arguments, + int num_double_arguments); + void CallCFunctionHelper(Register function, int num_reg_arguments, + int num_double_arguments, + bool has_function_descriptor); + // Helper functions for generating invokes. 
void InvokePrologue(Register expected_parameter_count, Register actual_parameter_count, Label* done, diff --git a/src/codegen/riscv/macro-assembler-riscv.cc b/src/codegen/riscv/macro-assembler-riscv.cc index ef69c4adba..1bd47ed66a 100644 --- a/src/codegen/riscv/macro-assembler-riscv.cc +++ b/src/codegen/riscv/macro-assembler-riscv.cc @@ -41,7 +41,7 @@ static inline bool IsZero(const Operand& rt) { } } -int TurboAssembler::RequiredStackSizeForCallerSaved(SaveFPRegsMode fp_mode, +int MacroAssembler::RequiredStackSizeForCallerSaved(SaveFPRegsMode fp_mode, Register exclusion1, Register exclusion2, Register exclusion3) const { @@ -58,7 +58,7 @@ int TurboAssembler::RequiredStackSizeForCallerSaved(SaveFPRegsMode fp_mode, return bytes; } -int TurboAssembler::PushCallerSaved(SaveFPRegsMode fp_mode, Register exclusion1, +int MacroAssembler::PushCallerSaved(SaveFPRegsMode fp_mode, Register exclusion1, Register exclusion2, Register exclusion3) { int bytes = 0; @@ -75,7 +75,7 @@ int TurboAssembler::PushCallerSaved(SaveFPRegsMode fp_mode, Register exclusion1, return bytes; } -int TurboAssembler::PopCallerSaved(SaveFPRegsMode fp_mode, Register exclusion1, +int MacroAssembler::PopCallerSaved(SaveFPRegsMode fp_mode, Register exclusion1, Register exclusion2, Register exclusion3) { int bytes = 0; if (fp_mode == SaveFPRegsMode::kSave) { @@ -246,12 +246,12 @@ void MacroAssembler::OptimizeCodeOrTailCallOptimizedCodeSlot( temps.Acquire()); } -void TurboAssembler::LoadRoot(Register destination, RootIndex index) { +void MacroAssembler::LoadRoot(Register destination, RootIndex index) { LoadWord(destination, MemOperand(kRootRegister, RootRegisterOffsetForRootIndex(index))); } -void TurboAssembler::LoadRoot(Register destination, RootIndex index, +void MacroAssembler::LoadRoot(Register destination, RootIndex index, Condition cond, Register src1, const Operand& src2) { Label skip; @@ -261,7 +261,7 @@ void TurboAssembler::LoadRoot(Register destination, RootIndex index, bind(&skip); } -void TurboAssembler::PushCommonFrame(Register marker_reg) { +void MacroAssembler::PushCommonFrame(Register marker_reg) { if (marker_reg.is_valid()) { Push(ra, fp, marker_reg); AddWord(fp, sp, Operand(kSystemPointerSize)); @@ -271,7 +271,7 @@ void TurboAssembler::PushCommonFrame(Register marker_reg) { } } -void TurboAssembler::PushStandardFrame(Register function_reg) { +void MacroAssembler::PushStandardFrame(Register function_reg) { int offset = -StandardFrameConstants::kContextOffset; if (function_reg.is_valid()) { Push(ra, fp, cp, function_reg, kJavaScriptCallArgCountRegister); @@ -328,17 +328,17 @@ void MacroAssembler::RecordWriteField(Register object, int offset, bind(&done); } -void TurboAssembler::MaybeSaveRegisters(RegList registers) { +void MacroAssembler::MaybeSaveRegisters(RegList registers) { if (registers.is_empty()) return; MultiPush(registers); } -void TurboAssembler::MaybeRestoreRegisters(RegList registers) { +void MacroAssembler::MaybeRestoreRegisters(RegList registers) { if (registers.is_empty()) return; MultiPop(registers); } -void TurboAssembler::CallEphemeronKeyBarrier(Register object, +void MacroAssembler::CallEphemeronKeyBarrier(Register object, Register slot_address, SaveFPRegsMode fp_mode) { DCHECK(!AreAliased(object, slot_address)); @@ -361,7 +361,7 @@ void TurboAssembler::CallEphemeronKeyBarrier(Register object, MaybeRestoreRegisters(registers); } -void TurboAssembler::CallRecordWriteStubSaveRegisters(Register object, +void MacroAssembler::CallRecordWriteStubSaveRegisters(Register object, Register slot_address, 
SaveFPRegsMode fp_mode, StubCallMode mode) { @@ -384,7 +384,7 @@ void TurboAssembler::CallRecordWriteStubSaveRegisters(Register object, MaybeRestoreRegisters(registers); } -void TurboAssembler::CallRecordWriteStub(Register object, Register slot_address, +void MacroAssembler::CallRecordWriteStub(Register object, Register slot_address, SaveFPRegsMode fp_mode, StubCallMode mode) { // Use CallRecordWriteStubSaveRegisters if the object and slot registers @@ -469,7 +469,7 @@ void MacroAssembler::RecordWrite(Register object, Operand offset, // --------------------------------------------------------------------------- // Instruction macros. #if V8_TARGET_ARCH_RISCV64 -void TurboAssembler::Add32(Register rd, Register rs, const Operand& rt) { +void MacroAssembler::Add32(Register rd, Register rs, const Operand& rt) { if (rt.is_reg()) { if (v8_flags.riscv_c_extension && (rd.code() == rs.code()) && ((rd.code() & 0b11000) == 0b01000) && @@ -499,7 +499,7 @@ void TurboAssembler::Add32(Register rd, Register rs, const Operand& rt) { } } -void TurboAssembler::Sub32(Register rd, Register rs, const Operand& rt) { +void MacroAssembler::Sub32(Register rd, Register rs, const Operand& rt) { if (rt.is_reg()) { if (v8_flags.riscv_c_extension && (rd.code() == rs.code()) && ((rd.code() & 0b11000) == 0b01000) && @@ -541,15 +541,15 @@ void TurboAssembler::Sub32(Register rd, Register rs, const Operand& rt) { } } -void TurboAssembler::AddWord(Register rd, Register rs, const Operand& rt) { +void MacroAssembler::AddWord(Register rd, Register rs, const Operand& rt) { Add64(rd, rs, rt); } -void TurboAssembler::SubWord(Register rd, Register rs, const Operand& rt) { +void MacroAssembler::SubWord(Register rd, Register rs, const Operand& rt) { Sub64(rd, rs, rt); } -void TurboAssembler::Sub64(Register rd, Register rs, const Operand& rt) { +void MacroAssembler::Sub64(Register rd, Register rs, const Operand& rt) { if (rt.is_reg()) { if (v8_flags.riscv_c_extension && (rd.code() == rs.code()) && ((rd.code() & 0b11000) == 0b01000) && @@ -598,7 +598,7 @@ void TurboAssembler::Sub64(Register rd, Register rs, const Operand& rt) { } } -void TurboAssembler::Add64(Register rd, Register rs, const Operand& rt) { +void MacroAssembler::Add64(Register rd, Register rs, const Operand& rt) { if (rt.is_reg()) { if (v8_flags.riscv_c_extension && (rd.code() == rs.code()) && (rt.rm() != zero_reg) && (rs != zero_reg)) { @@ -638,7 +638,7 @@ void TurboAssembler::Add64(Register rd, Register rs, const Operand& rt) { } } -void TurboAssembler::Mul32(Register rd, Register rs, const Operand& rt) { +void MacroAssembler::Mul32(Register rd, Register rs, const Operand& rt) { if (rt.is_reg()) { mulw(rd, rs, rt.rm()); } else { @@ -650,7 +650,7 @@ void TurboAssembler::Mul32(Register rd, Register rs, const Operand& rt) { } } -void TurboAssembler::Mulh32(Register rd, Register rs, const Operand& rt) { +void MacroAssembler::Mulh32(Register rd, Register rs, const Operand& rt) { if (rt.is_reg()) { mul(rd, rs, rt.rm()); } else { @@ -663,7 +663,7 @@ void TurboAssembler::Mulh32(Register rd, Register rs, const Operand& rt) { srai(rd, rd, 32); } -void TurboAssembler::Mulhu32(Register rd, Register rs, const Operand& rt, +void MacroAssembler::Mulhu32(Register rd, Register rs, const Operand& rt, Register rsz, Register rtz) { slli(rsz, rs, 32); if (rt.is_reg()) { @@ -675,7 +675,7 @@ void TurboAssembler::Mulhu32(Register rd, Register rs, const Operand& rt, srai(rd, rd, 32); } -void TurboAssembler::Mul64(Register rd, Register rs, const Operand& rt) { +void 
MacroAssembler::Mul64(Register rd, Register rs, const Operand& rt) { if (rt.is_reg()) { mul(rd, rs, rt.rm()); } else { @@ -687,7 +687,7 @@ void TurboAssembler::Mul64(Register rd, Register rs, const Operand& rt) { } } -void TurboAssembler::Mulh64(Register rd, Register rs, const Operand& rt) { +void MacroAssembler::Mulh64(Register rd, Register rs, const Operand& rt) { if (rt.is_reg()) { mulh(rd, rs, rt.rm()); } else { @@ -699,7 +699,7 @@ void TurboAssembler::Mulh64(Register rd, Register rs, const Operand& rt) { } } -void TurboAssembler::Mulhu64(Register rd, Register rs, const Operand& rt) { +void MacroAssembler::Mulhu64(Register rd, Register rs, const Operand& rt) { if (rt.is_reg()) { mulhu(rd, rs, rt.rm()); } else { @@ -711,7 +711,7 @@ void TurboAssembler::Mulhu64(Register rd, Register rs, const Operand& rt) { } } -void TurboAssembler::Div32(Register res, Register rs, const Operand& rt) { +void MacroAssembler::Div32(Register res, Register rs, const Operand& rt) { if (rt.is_reg()) { divw(res, rs, rt.rm()); } else { @@ -723,7 +723,7 @@ void TurboAssembler::Div32(Register res, Register rs, const Operand& rt) { } } -void TurboAssembler::Mod32(Register rd, Register rs, const Operand& rt) { +void MacroAssembler::Mod32(Register rd, Register rs, const Operand& rt) { if (rt.is_reg()) { remw(rd, rs, rt.rm()); } else { @@ -735,7 +735,7 @@ void TurboAssembler::Mod32(Register rd, Register rs, const Operand& rt) { } } -void TurboAssembler::Modu32(Register rd, Register rs, const Operand& rt) { +void MacroAssembler::Modu32(Register rd, Register rs, const Operand& rt) { if (rt.is_reg()) { remuw(rd, rs, rt.rm()); } else { @@ -747,7 +747,7 @@ void TurboAssembler::Modu32(Register rd, Register rs, const Operand& rt) { } } -void TurboAssembler::Div64(Register rd, Register rs, const Operand& rt) { +void MacroAssembler::Div64(Register rd, Register rs, const Operand& rt) { if (rt.is_reg()) { div(rd, rs, rt.rm()); } else { @@ -759,7 +759,7 @@ void TurboAssembler::Div64(Register rd, Register rs, const Operand& rt) { } } -void TurboAssembler::Divu32(Register res, Register rs, const Operand& rt) { +void MacroAssembler::Divu32(Register res, Register rs, const Operand& rt) { if (rt.is_reg()) { divuw(res, rs, rt.rm()); } else { @@ -771,7 +771,7 @@ void TurboAssembler::Divu32(Register res, Register rs, const Operand& rt) { } } -void TurboAssembler::Divu64(Register res, Register rs, const Operand& rt) { +void MacroAssembler::Divu64(Register res, Register rs, const Operand& rt) { if (rt.is_reg()) { divu(res, rs, rt.rm()); } else { @@ -783,7 +783,7 @@ void TurboAssembler::Divu64(Register res, Register rs, const Operand& rt) { } } -void TurboAssembler::Mod64(Register rd, Register rs, const Operand& rt) { +void MacroAssembler::Mod64(Register rd, Register rs, const Operand& rt) { if (rt.is_reg()) { rem(rd, rs, rt.rm()); } else { @@ -795,7 +795,7 @@ void TurboAssembler::Mod64(Register rd, Register rs, const Operand& rt) { } } -void TurboAssembler::Modu64(Register rd, Register rs, const Operand& rt) { +void MacroAssembler::Modu64(Register rd, Register rs, const Operand& rt) { if (rt.is_reg()) { remu(rd, rs, rt.rm()); } else { @@ -807,11 +807,11 @@ void TurboAssembler::Modu64(Register rd, Register rs, const Operand& rt) { } } #elif V8_TARGET_ARCH_RISCV32 -void TurboAssembler::AddWord(Register rd, Register rs, const Operand& rt) { +void MacroAssembler::AddWord(Register rd, Register rs, const Operand& rt) { Add32(rd, rs, rt); } -void TurboAssembler::Add32(Register rd, Register rs, const Operand& rt) { +void 
MacroAssembler::Add32(Register rd, Register rs, const Operand& rt) { if (rt.is_reg()) { if (v8_flags.riscv_c_extension && (rd.code() == rs.code()) && (rt.rm() != zero_reg) && (rs != zero_reg)) { @@ -851,11 +851,11 @@ void TurboAssembler::Add32(Register rd, Register rs, const Operand& rt) { } } -void TurboAssembler::SubWord(Register rd, Register rs, const Operand& rt) { +void MacroAssembler::SubWord(Register rd, Register rs, const Operand& rt) { Sub32(rd, rs, rt); } -void TurboAssembler::Sub32(Register rd, Register rs, const Operand& rt) { +void MacroAssembler::Sub32(Register rd, Register rs, const Operand& rt) { if (rt.is_reg()) { if (v8_flags.riscv_c_extension && (rd.code() == rs.code()) && ((rd.code() & 0b11000) == 0b01000) && @@ -905,11 +905,11 @@ void TurboAssembler::Sub32(Register rd, Register rs, const Operand& rt) { } } -void TurboAssembler::Mul32(Register rd, Register rs, const Operand& rt) { +void MacroAssembler::Mul32(Register rd, Register rs, const Operand& rt) { Mul(rd, rs, rt); } -void TurboAssembler::Mul(Register rd, Register rs, const Operand& rt) { +void MacroAssembler::Mul(Register rd, Register rs, const Operand& rt) { if (rt.is_reg()) { mul(rd, rs, rt.rm()); } else { @@ -921,7 +921,7 @@ void TurboAssembler::Mul(Register rd, Register rs, const Operand& rt) { } } -void TurboAssembler::Mulh(Register rd, Register rs, const Operand& rt) { +void MacroAssembler::Mulh(Register rd, Register rs, const Operand& rt) { if (rt.is_reg()) { mulh(rd, rs, rt.rm()); } else { @@ -933,7 +933,7 @@ void TurboAssembler::Mulh(Register rd, Register rs, const Operand& rt) { } } -void TurboAssembler::Mulhu(Register rd, Register rs, const Operand& rt, +void MacroAssembler::Mulhu(Register rd, Register rs, const Operand& rt, Register rsz, Register rtz) { if (rt.is_reg()) { mulhu(rd, rs, rt.rm()); @@ -946,7 +946,7 @@ void TurboAssembler::Mulhu(Register rd, Register rs, const Operand& rt, } } -void TurboAssembler::Div(Register res, Register rs, const Operand& rt) { +void MacroAssembler::Div(Register res, Register rs, const Operand& rt) { if (rt.is_reg()) { div(res, rs, rt.rm()); } else { @@ -958,7 +958,7 @@ void TurboAssembler::Div(Register res, Register rs, const Operand& rt) { } } -void TurboAssembler::Mod(Register rd, Register rs, const Operand& rt) { +void MacroAssembler::Mod(Register rd, Register rs, const Operand& rt) { if (rt.is_reg()) { rem(rd, rs, rt.rm()); } else { @@ -970,7 +970,7 @@ void TurboAssembler::Mod(Register rd, Register rs, const Operand& rt) { } } -void TurboAssembler::Modu(Register rd, Register rs, const Operand& rt) { +void MacroAssembler::Modu(Register rd, Register rs, const Operand& rt) { if (rt.is_reg()) { remu(rd, rs, rt.rm()); } else { @@ -982,7 +982,7 @@ void TurboAssembler::Modu(Register rd, Register rs, const Operand& rt) { } } -void TurboAssembler::Divu(Register res, Register rs, const Operand& rt) { +void MacroAssembler::Divu(Register res, Register rs, const Operand& rt) { if (rt.is_reg()) { divu(res, rs, rt.rm()); } else { @@ -996,7 +996,7 @@ void TurboAssembler::Divu(Register res, Register rs, const Operand& rt) { #endif -void TurboAssembler::And(Register rd, Register rs, const Operand& rt) { +void MacroAssembler::And(Register rd, Register rs, const Operand& rt) { if (rt.is_reg()) { if (v8_flags.riscv_c_extension && (rd.code() == rs.code()) && ((rd.code() & 0b11000) == 0b01000) && @@ -1022,7 +1022,7 @@ void TurboAssembler::And(Register rd, Register rs, const Operand& rt) { } } -void TurboAssembler::Or(Register rd, Register rs, const Operand& rt) { +void 
MacroAssembler::Or(Register rd, Register rs, const Operand& rt) { if (rt.is_reg()) { if (v8_flags.riscv_c_extension && (rd.code() == rs.code()) && ((rd.code() & 0b11000) == 0b01000) && @@ -1044,7 +1044,7 @@ void TurboAssembler::Or(Register rd, Register rs, const Operand& rt) { } } -void TurboAssembler::Xor(Register rd, Register rs, const Operand& rt) { +void MacroAssembler::Xor(Register rd, Register rs, const Operand& rt) { if (rt.is_reg()) { if (v8_flags.riscv_c_extension && (rd.code() == rs.code()) && ((rd.code() & 0b11000) == 0b01000) && @@ -1066,7 +1066,7 @@ void TurboAssembler::Xor(Register rd, Register rs, const Operand& rt) { } } -void TurboAssembler::Nor(Register rd, Register rs, const Operand& rt) { +void MacroAssembler::Nor(Register rd, Register rs, const Operand& rt) { if (rt.is_reg()) { or_(rd, rs, rt.rm()); not_(rd, rd); @@ -1076,12 +1076,12 @@ void TurboAssembler::Nor(Register rd, Register rs, const Operand& rt) { } } -void TurboAssembler::Neg(Register rs, const Operand& rt) { +void MacroAssembler::Neg(Register rs, const Operand& rt) { DCHECK(rt.is_reg()); neg(rs, rt.rm()); } -void TurboAssembler::Seqz(Register rd, const Operand& rt) { +void MacroAssembler::Seqz(Register rd, const Operand& rt) { if (rt.is_reg()) { seqz(rd, rt.rm()); } else { @@ -1089,7 +1089,7 @@ void TurboAssembler::Seqz(Register rd, const Operand& rt) { } } -void TurboAssembler::Snez(Register rd, const Operand& rt) { +void MacroAssembler::Snez(Register rd, const Operand& rt) { if (rt.is_reg()) { snez(rd, rt.rm()); } else { @@ -1097,7 +1097,7 @@ void TurboAssembler::Snez(Register rd, const Operand& rt) { } } -void TurboAssembler::Seq(Register rd, Register rs, const Operand& rt) { +void MacroAssembler::Seq(Register rd, Register rs, const Operand& rt) { if (rs == zero_reg) { Seqz(rd, rt); } else if (IsZero(rt)) { @@ -1108,7 +1108,7 @@ void TurboAssembler::Seq(Register rd, Register rs, const Operand& rt) { } } -void TurboAssembler::Sne(Register rd, Register rs, const Operand& rt) { +void MacroAssembler::Sne(Register rd, Register rs, const Operand& rt) { if (rs == zero_reg) { Snez(rd, rt); } else if (IsZero(rt)) { @@ -1119,7 +1119,7 @@ void TurboAssembler::Sne(Register rd, Register rs, const Operand& rt) { } } -void TurboAssembler::Slt(Register rd, Register rs, const Operand& rt) { +void MacroAssembler::Slt(Register rd, Register rs, const Operand& rt) { if (rt.is_reg()) { slt(rd, rs, rt.rm()); } else { @@ -1136,7 +1136,7 @@ void TurboAssembler::Slt(Register rd, Register rs, const Operand& rt) { } } -void TurboAssembler::Sltu(Register rd, Register rs, const Operand& rt) { +void MacroAssembler::Sltu(Register rd, Register rs, const Operand& rt) { if (rt.is_reg()) { sltu(rd, rs, rt.rm()); } else { @@ -1153,7 +1153,7 @@ void TurboAssembler::Sltu(Register rd, Register rs, const Operand& rt) { } } -void TurboAssembler::Sle(Register rd, Register rs, const Operand& rt) { +void MacroAssembler::Sle(Register rd, Register rs, const Operand& rt) { if (rt.is_reg()) { slt(rd, rt.rm(), rs); } else { @@ -1167,7 +1167,7 @@ void TurboAssembler::Sle(Register rd, Register rs, const Operand& rt) { xori(rd, rd, 1); } -void TurboAssembler::Sleu(Register rd, Register rs, const Operand& rt) { +void MacroAssembler::Sleu(Register rd, Register rs, const Operand& rt) { if (rt.is_reg()) { sltu(rd, rt.rm(), rs); } else { @@ -1181,17 +1181,17 @@ void TurboAssembler::Sleu(Register rd, Register rs, const Operand& rt) { xori(rd, rd, 1); } -void TurboAssembler::Sge(Register rd, Register rs, const Operand& rt) { +void MacroAssembler::Sge(Register rd, 
Register rs, const Operand& rt) { Slt(rd, rs, rt); xori(rd, rd, 1); } -void TurboAssembler::Sgeu(Register rd, Register rs, const Operand& rt) { +void MacroAssembler::Sgeu(Register rd, Register rs, const Operand& rt) { Sltu(rd, rs, rt); xori(rd, rd, 1); } -void TurboAssembler::Sgt(Register rd, Register rs, const Operand& rt) { +void MacroAssembler::Sgt(Register rd, Register rs, const Operand& rt) { if (rt.is_reg()) { slt(rd, rt.rm(), rs); } else { @@ -1204,7 +1204,7 @@ void TurboAssembler::Sgt(Register rd, Register rs, const Operand& rt) { } } -void TurboAssembler::Sgtu(Register rd, Register rs, const Operand& rt) { +void MacroAssembler::Sgtu(Register rd, Register rs, const Operand& rt) { if (rt.is_reg()) { sltu(rd, rt.rm(), rs); } else { @@ -1218,7 +1218,7 @@ void TurboAssembler::Sgtu(Register rd, Register rs, const Operand& rt) { } #if V8_TARGET_ARCH_RISCV64 -void TurboAssembler::Sll32(Register rd, Register rs, const Operand& rt) { +void MacroAssembler::Sll32(Register rd, Register rs, const Operand& rt) { if (rt.is_reg()) { sllw(rd, rs, rt.rm()); } else { @@ -1227,7 +1227,7 @@ void TurboAssembler::Sll32(Register rd, Register rs, const Operand& rt) { } } -void TurboAssembler::Sra32(Register rd, Register rs, const Operand& rt) { +void MacroAssembler::Sra32(Register rd, Register rs, const Operand& rt) { if (rt.is_reg()) { sraw(rd, rs, rt.rm()); } else { @@ -1236,7 +1236,7 @@ void TurboAssembler::Sra32(Register rd, Register rs, const Operand& rt) { } } -void TurboAssembler::Srl32(Register rd, Register rs, const Operand& rt) { +void MacroAssembler::Srl32(Register rd, Register rs, const Operand& rt) { if (rt.is_reg()) { srlw(rd, rs, rt.rm()); } else { @@ -1245,11 +1245,11 @@ void TurboAssembler::Srl32(Register rd, Register rs, const Operand& rt) { } } -void TurboAssembler::SraWord(Register rd, Register rs, const Operand& rt) { +void MacroAssembler::SraWord(Register rd, Register rs, const Operand& rt) { Sra64(rd, rs, rt); } -void TurboAssembler::Sra64(Register rd, Register rs, const Operand& rt) { +void MacroAssembler::Sra64(Register rd, Register rs, const Operand& rt) { if (rt.is_reg()) { sra(rd, rs, rt.rm()); } else if (v8_flags.riscv_c_extension && (rd.code() == rs.code()) && @@ -1262,11 +1262,11 @@ void TurboAssembler::Sra64(Register rd, Register rs, const Operand& rt) { } } -void TurboAssembler::SrlWord(Register rd, Register rs, const Operand& rt) { +void MacroAssembler::SrlWord(Register rd, Register rs, const Operand& rt) { Srl64(rd, rs, rt); } -void TurboAssembler::Srl64(Register rd, Register rs, const Operand& rt) { +void MacroAssembler::Srl64(Register rd, Register rs, const Operand& rt) { if (rt.is_reg()) { srl(rd, rs, rt.rm()); } else if (v8_flags.riscv_c_extension && (rd.code() == rs.code()) && @@ -1279,11 +1279,11 @@ void TurboAssembler::Srl64(Register rd, Register rs, const Operand& rt) { } } -void TurboAssembler::SllWord(Register rd, Register rs, const Operand& rt) { +void MacroAssembler::SllWord(Register rd, Register rs, const Operand& rt) { Sll64(rd, rs, rt); } -void TurboAssembler::Sll64(Register rd, Register rs, const Operand& rt) { +void MacroAssembler::Sll64(Register rd, Register rs, const Operand& rt) { if (rt.is_reg()) { sll(rd, rs, rt.rm()); } else { @@ -1297,7 +1297,7 @@ void TurboAssembler::Sll64(Register rd, Register rs, const Operand& rt) { } } -void TurboAssembler::Ror(Register rd, Register rs, const Operand& rt) { +void MacroAssembler::Ror(Register rd, Register rs, const Operand& rt) { UseScratchRegisterScope temps(this); Register scratch = temps.Acquire(); 
BlockTrampolinePoolScope block_trampoline_pool(this); @@ -1322,7 +1322,7 @@ void TurboAssembler::Ror(Register rd, Register rs, const Operand& rt) { } } -void TurboAssembler::Dror(Register rd, Register rs, const Operand& rt) { +void MacroAssembler::Dror(Register rd, Register rs, const Operand& rt) { UseScratchRegisterScope temps(this); Register scratch = temps.Acquire(); BlockTrampolinePoolScope block_trampoline_pool(this); @@ -1345,11 +1345,11 @@ void TurboAssembler::Dror(Register rd, Register rs, const Operand& rt) { } } #elif V8_TARGET_ARCH_RISCV32 -void TurboAssembler::SllWord(Register rd, Register rs, const Operand& rt) { +void MacroAssembler::SllWord(Register rd, Register rs, const Operand& rt) { Sll32(rd, rs, rt); } -void TurboAssembler::Sll32(Register rd, Register rs, const Operand& rt) { +void MacroAssembler::Sll32(Register rd, Register rs, const Operand& rt) { if (rt.is_reg()) { sll(rd, rs, rt.rm()); } else { @@ -1358,11 +1358,11 @@ void TurboAssembler::Sll32(Register rd, Register rs, const Operand& rt) { } } -void TurboAssembler::SraWord(Register rd, Register rs, const Operand& rt) { +void MacroAssembler::SraWord(Register rd, Register rs, const Operand& rt) { Sra32(rd, rs, rt); } -void TurboAssembler::Sra32(Register rd, Register rs, const Operand& rt) { +void MacroAssembler::Sra32(Register rd, Register rs, const Operand& rt) { if (rt.is_reg()) { sra(rd, rs, rt.rm()); } else { @@ -1371,11 +1371,11 @@ void TurboAssembler::Sra32(Register rd, Register rs, const Operand& rt) { } } -void TurboAssembler::SrlWord(Register rd, Register rs, const Operand& rt) { +void MacroAssembler::SrlWord(Register rd, Register rs, const Operand& rt) { Srl32(rd, rs, rt); } -void TurboAssembler::Srl32(Register rd, Register rs, const Operand& rt) { +void MacroAssembler::Srl32(Register rd, Register rs, const Operand& rt) { if (rt.is_reg()) { srl(rd, rs, rt.rm()); } else { @@ -1384,7 +1384,7 @@ void TurboAssembler::Srl32(Register rd, Register rs, const Operand& rt) { } } -void TurboAssembler::Ror(Register rd, Register rs, const Operand& rt) { +void MacroAssembler::Ror(Register rd, Register rs, const Operand& rt) { UseScratchRegisterScope temps(this); Register scratch = temps.Acquire(); BlockTrampolinePoolScope block_trampoline_pool(this); @@ -1408,7 +1408,7 @@ void TurboAssembler::Ror(Register rd, Register rs, const Operand& rt) { } #endif -void TurboAssembler::Li(Register rd, intptr_t imm) { +void MacroAssembler::Li(Register rd, intptr_t imm) { if (v8_flags.riscv_c_extension && (rd != zero_reg) && is_int6(imm)) { c_li(rd, imm); } else { @@ -1416,7 +1416,7 @@ void TurboAssembler::Li(Register rd, intptr_t imm) { } } -void TurboAssembler::Mv(Register rd, const Operand& rt) { +void MacroAssembler::Mv(Register rd, const Operand& rt) { if (v8_flags.riscv_c_extension && (rd != zero_reg) && (rt.rm() != zero_reg)) { c_mv(rd, rt.rm()); } else { @@ -1424,7 +1424,7 @@ void TurboAssembler::Mv(Register rd, const Operand& rt) { } } -void TurboAssembler::CalcScaledAddress(Register rd, Register rt, Register rs, +void MacroAssembler::CalcScaledAddress(Register rd, Register rt, Register rs, uint8_t sa) { DCHECK(sa >= 1 && sa <= 31); UseScratchRegisterScope temps(this); @@ -1437,7 +1437,7 @@ void TurboAssembler::CalcScaledAddress(Register rd, Register rt, Register rs, // ------------Pseudo-instructions------------- // Change endianness #if V8_TARGET_ARCH_RISCV64 -void TurboAssembler::ByteSwap(Register rd, Register rs, int operand_size, +void MacroAssembler::ByteSwap(Register rd, Register rs, int operand_size, Register scratch) 
{ DCHECK_NE(scratch, rs); DCHECK_NE(scratch, rd); @@ -1495,7 +1495,7 @@ void TurboAssembler::ByteSwap(Register rd, Register rs, int operand_size, } #elif V8_TARGET_ARCH_RISCV32 -void TurboAssembler::ByteSwap(Register rd, Register rs, int operand_size, +void MacroAssembler::ByteSwap(Register rd, Register rs, int operand_size, Register scratch) { DCHECK_NE(scratch, rs); DCHECK_NE(scratch, rd); @@ -1522,7 +1522,7 @@ void TurboAssembler::ByteSwap(Register rd, Register rs, int operand_size, #endif template <int NBYTES, bool LOAD_SIGNED> -void TurboAssembler::LoadNBytes(Register rd, const MemOperand& rs, +void MacroAssembler::LoadNBytes(Register rd, const MemOperand& rs, Register scratch) { DCHECK(rd != rs.rm() && rd != scratch); DCHECK_LE(NBYTES, 8); @@ -1544,7 +1544,7 @@ void TurboAssembler::LoadNBytes(Register rd, const MemOperand& rs, } template <int NBYTES, bool LOAD_SIGNED> -void TurboAssembler::LoadNBytesOverwritingBaseReg(const MemOperand& rs, +void MacroAssembler::LoadNBytesOverwritingBaseReg(const MemOperand& rs, Register scratch0, Register scratch1) { // This function loads nbytes from memory specified by rs and into rs.rm() @@ -1573,7 +1573,7 @@ void TurboAssembler::LoadNBytesOverwritingBaseReg(const MemOperand& rs, } template <int NBYTES, bool IS_SIGNED> -void TurboAssembler::UnalignedLoadHelper(Register rd, const MemOperand& rs) { +void MacroAssembler::UnalignedLoadHelper(Register rd, const MemOperand& rs) { BlockTrampolinePoolScope block_trampoline_pool(this); UseScratchRegisterScope temps(this); @@ -1604,7 +1604,7 @@ void TurboAssembler::UnalignedLoadHelper(Register rd, const MemOperand& rs) { #if V8_TARGET_ARCH_RISCV64 template <int NBYTES> -void TurboAssembler::UnalignedFLoadHelper(FPURegister frd, const MemOperand& rs, +void MacroAssembler::UnalignedFLoadHelper(FPURegister frd, const MemOperand& rs, Register scratch_base) { DCHECK(NBYTES == 4 || NBYTES == 8); DCHECK_NE(scratch_base, rs.rm()); @@ -1629,7 +1629,7 @@ void TurboAssembler::UnalignedFLoadHelper(FPURegister frd, const MemOperand& rs, } #elif V8_TARGET_ARCH_RISCV32 template <int NBYTES> -void TurboAssembler::UnalignedFLoadHelper(FPURegister frd, const MemOperand& rs, +void MacroAssembler::UnalignedFLoadHelper(FPURegister frd, const MemOperand& rs, Register scratch_base) { DCHECK_EQ(NBYTES, 4); DCHECK_NE(scratch_base, rs.rm()); @@ -1650,7 +1650,7 @@ void TurboAssembler::UnalignedFLoadHelper(FPURegister frd, const MemOperand& rs, fmv_w_x(frd, scratch); } -void TurboAssembler::UnalignedDoubleHelper(FPURegister frd, +void MacroAssembler::UnalignedDoubleHelper(FPURegister frd, const MemOperand& rs, Register scratch_base) { DCHECK_NE(scratch_base, rs.rm()); @@ -1679,7 +1679,7 @@ void TurboAssembler::UnalignedDoubleHelper(FPURegister frd, #endif template <int NBYTES> -void TurboAssembler::UnalignedStoreHelper(Register rd, const MemOperand& rs, +void MacroAssembler::UnalignedStoreHelper(Register rd, const MemOperand& rs, Register scratch_other) { DCHECK(scratch_other != rs.rm()); DCHECK_LE(NBYTES, 8); @@ -1718,7 +1718,7 @@ void TurboAssembler::UnalignedStoreHelper(Register rd, const MemOperand& rs, #if V8_TARGET_ARCH_RISCV64 template <int NBYTES> -void TurboAssembler::UnalignedFStoreHelper(FPURegister frd, +void MacroAssembler::UnalignedFStoreHelper(FPURegister frd, const MemOperand& rs, Register scratch) { DCHECK(NBYTES == 8 || NBYTES == 4); @@ -1732,7 +1732,7 @@ void TurboAssembler::UnalignedFStoreHelper(FPURegister frd, } #elif V8_TARGET_ARCH_RISCV32 template <int NBYTES> -void TurboAssembler::UnalignedFStoreHelper(FPURegister frd, +void MacroAssembler::UnalignedFStoreHelper(FPURegister frd, const MemOperand& rs, Register scratch) { DCHECK_EQ(NBYTES, 4); @@ -1740,7 +1740,7 @@
void TurboAssembler::UnalignedFStoreHelper(FPURegister frd, fmv_x_w(scratch, frd); UnalignedStoreHelper<NBYTES>(scratch, rs); } -void TurboAssembler::UnalignedDStoreHelper(FPURegister frd, +void MacroAssembler::UnalignedDStoreHelper(FPURegister frd, const MemOperand& rs, Register scratch) { DCHECK_NE(scratch, rs.rm()); @@ -1757,7 +1757,7 @@ void TurboAssembler::UnalignedDStoreHelper(FPURegister frd, #endif template <typename Reg_T, typename Func> -void TurboAssembler::AlignedLoadHelper(Reg_T target, const MemOperand& rs, +void MacroAssembler::AlignedLoadHelper(Reg_T target, const MemOperand& rs, Func generator) { MemOperand source = rs; UseScratchRegisterScope temps(this); @@ -1771,7 +1771,7 @@ void TurboAssembler::AlignedLoadHelper(Reg_T target, const MemOperand& rs, } template <typename Reg_T, typename Func> -void TurboAssembler::AlignedStoreHelper(Reg_T value, const MemOperand& rs, +void MacroAssembler::AlignedStoreHelper(Reg_T value, const MemOperand& rs, Func generator) { MemOperand source = rs; UseScratchRegisterScope temps(this); @@ -1787,32 +1787,32 @@ void TurboAssembler::AlignedStoreHelper(Reg_T value, const MemOperand& rs, generator(value, source); } -void TurboAssembler::Ulw(Register rd, const MemOperand& rs) { +void MacroAssembler::Ulw(Register rd, const MemOperand& rs) { UnalignedLoadHelper<4, true>(rd, rs); } #if V8_TARGET_ARCH_RISCV64 -void TurboAssembler::Ulwu(Register rd, const MemOperand& rs) { +void MacroAssembler::Ulwu(Register rd, const MemOperand& rs) { UnalignedLoadHelper<4, false>(rd, rs); } #endif -void TurboAssembler::Usw(Register rd, const MemOperand& rs) { +void MacroAssembler::Usw(Register rd, const MemOperand& rs) { UnalignedStoreHelper<4>(rd, rs); } -void TurboAssembler::Ulh(Register rd, const MemOperand& rs) { +void MacroAssembler::Ulh(Register rd, const MemOperand& rs) { UnalignedLoadHelper<2, true>(rd, rs); } -void TurboAssembler::Ulhu(Register rd, const MemOperand& rs) { +void MacroAssembler::Ulhu(Register rd, const MemOperand& rs) { UnalignedLoadHelper<2, false>(rd, rs); } -void TurboAssembler::Ush(Register rd, const MemOperand& rs) { +void MacroAssembler::Ush(Register rd, const MemOperand& rs) { UnalignedStoreHelper<2>(rd, rs); } -void TurboAssembler::Uld(Register rd, const MemOperand& rs) { +void MacroAssembler::Uld(Register rd, const MemOperand& rs) { UnalignedLoadHelper<8, true>(rd, rs); } #if V8_TARGET_ARCH_RISCV64 @@ -1838,23 +1838,23 @@ void MacroAssembler::StoreWordPair(Register rd, const MemOperand& rs) { } #endif -void TurboAssembler::Usd(Register rd, const MemOperand& rs) { +void MacroAssembler::Usd(Register rd, const MemOperand& rs) { UnalignedStoreHelper<8>(rd, rs); } -void TurboAssembler::ULoadFloat(FPURegister fd, const MemOperand& rs, +void MacroAssembler::ULoadFloat(FPURegister fd, const MemOperand& rs, Register scratch) { DCHECK_NE(scratch, rs.rm()); UnalignedFLoadHelper<4>(fd, rs, scratch); } -void TurboAssembler::UStoreFloat(FPURegister fd, const MemOperand& rs, +void MacroAssembler::UStoreFloat(FPURegister fd, const MemOperand& rs, Register scratch) { DCHECK_NE(scratch, rs.rm()); UnalignedFStoreHelper<4>(fd, rs, scratch); } -void TurboAssembler::ULoadDouble(FPURegister fd, const MemOperand& rs, +void MacroAssembler::ULoadDouble(FPURegister fd, const MemOperand& rs, Register scratch) { DCHECK_NE(scratch, rs.rm()); #if V8_TARGET_ARCH_RISCV64 @@ -1864,7 +1864,7 @@ void TurboAssembler::ULoadDouble(FPURegister fd, const MemOperand& rs, #endif } -void TurboAssembler::UStoreDouble(FPURegister fd, const MemOperand& rs, +void MacroAssembler::UStoreDouble(FPURegister fd, const MemOperand& rs, Register scratch) {
DCHECK_NE(scratch, rs.rm()); #if V8_TARGET_ARCH_RISCV64 @@ -1874,49 +1874,49 @@ void TurboAssembler::UStoreDouble(FPURegister fd, const MemOperand& rs, #endif } -void TurboAssembler::Lb(Register rd, const MemOperand& rs) { +void MacroAssembler::Lb(Register rd, const MemOperand& rs) { auto fn = [this](Register target, const MemOperand& source) { this->lb(target, source.rm(), source.offset()); }; AlignedLoadHelper(rd, rs, fn); } -void TurboAssembler::Lbu(Register rd, const MemOperand& rs) { +void MacroAssembler::Lbu(Register rd, const MemOperand& rs) { auto fn = [this](Register target, const MemOperand& source) { this->lbu(target, source.rm(), source.offset()); }; AlignedLoadHelper(rd, rs, fn); } -void TurboAssembler::Sb(Register rd, const MemOperand& rs) { +void MacroAssembler::Sb(Register rd, const MemOperand& rs) { auto fn = [this](Register value, const MemOperand& source) { this->sb(value, source.rm(), source.offset()); }; AlignedStoreHelper(rd, rs, fn); } -void TurboAssembler::Lh(Register rd, const MemOperand& rs) { +void MacroAssembler::Lh(Register rd, const MemOperand& rs) { auto fn = [this](Register target, const MemOperand& source) { this->lh(target, source.rm(), source.offset()); }; AlignedLoadHelper(rd, rs, fn); } -void TurboAssembler::Lhu(Register rd, const MemOperand& rs) { +void MacroAssembler::Lhu(Register rd, const MemOperand& rs) { auto fn = [this](Register target, const MemOperand& source) { this->lhu(target, source.rm(), source.offset()); }; AlignedLoadHelper(rd, rs, fn); } -void TurboAssembler::Sh(Register rd, const MemOperand& rs) { +void MacroAssembler::Sh(Register rd, const MemOperand& rs) { auto fn = [this](Register value, const MemOperand& source) { this->sh(value, source.rm(), source.offset()); }; AlignedStoreHelper(rd, rs, fn); } -void TurboAssembler::Lw(Register rd, const MemOperand& rs) { +void MacroAssembler::Lw(Register rd, const MemOperand& rs) { auto fn = [this](Register target, const MemOperand& source) { if (v8_flags.riscv_c_extension && ((target.code() & 0b11000) == 0b01000) && ((source.rm().code() & 0b11000) == 0b01000) && @@ -1934,14 +1934,14 @@ void TurboAssembler::Lw(Register rd, const MemOperand& rs) { } #if V8_TARGET_ARCH_RISCV64 -void TurboAssembler::Lwu(Register rd, const MemOperand& rs) { +void MacroAssembler::Lwu(Register rd, const MemOperand& rs) { auto fn = [this](Register target, const MemOperand& source) { this->lwu(target, source.rm(), source.offset()); }; AlignedLoadHelper(rd, rs, fn); } #endif -void TurboAssembler::Sw(Register rd, const MemOperand& rs) { +void MacroAssembler::Sw(Register rd, const MemOperand& rs) { auto fn = [this](Register value, const MemOperand& source) { if (v8_flags.riscv_c_extension && ((value.code() & 0b11000) == 0b01000) && ((source.rm().code() & 0b11000) == 0b01000) && @@ -1958,7 +1958,7 @@ void TurboAssembler::Sw(Register rd, const MemOperand& rs) { } #if V8_TARGET_ARCH_RISCV64 -void TurboAssembler::Ld(Register rd, const MemOperand& rs) { +void MacroAssembler::Ld(Register rd, const MemOperand& rs) { auto fn = [this](Register target, const MemOperand& source) { if (v8_flags.riscv_c_extension && ((target.code() & 0b11000) == 0b01000) && ((source.rm().code() & 0b11000) == 0b01000) && @@ -1975,7 +1975,7 @@ void TurboAssembler::Ld(Register rd, const MemOperand& rs) { AlignedLoadHelper(rd, rs, fn); } -void TurboAssembler::Sd(Register rd, const MemOperand& rs) { +void MacroAssembler::Sd(Register rd, const MemOperand& rs) { auto fn = [this](Register value, const MemOperand& source) { if (v8_flags.riscv_c_extension && 
((value.code() & 0b11000) == 0b01000) && ((source.rm().code() & 0b11000) == 0b01000) && @@ -1991,21 +1991,21 @@ void TurboAssembler::Sd(Register rd, const MemOperand& rs) { AlignedStoreHelper(rd, rs, fn); } #endif -void TurboAssembler::LoadFloat(FPURegister fd, const MemOperand& src) { +void MacroAssembler::LoadFloat(FPURegister fd, const MemOperand& src) { auto fn = [this](FPURegister target, const MemOperand& source) { this->flw(target, source.rm(), source.offset()); }; AlignedLoadHelper(fd, src, fn); } -void TurboAssembler::StoreFloat(FPURegister fs, const MemOperand& src) { +void MacroAssembler::StoreFloat(FPURegister fs, const MemOperand& src) { auto fn = [this](FPURegister value, const MemOperand& source) { this->fsw(value, source.rm(), source.offset()); }; AlignedStoreHelper(fs, src, fn); } -void TurboAssembler::LoadDouble(FPURegister fd, const MemOperand& src) { +void MacroAssembler::LoadDouble(FPURegister fd, const MemOperand& src) { auto fn = [this](FPURegister target, const MemOperand& source) { if (v8_flags.riscv_c_extension && ((target.code() & 0b11000) == 0b01000) && ((source.rm().code() & 0b11000) == 0b01000) && @@ -2021,7 +2021,7 @@ void TurboAssembler::LoadDouble(FPURegister fd, const MemOperand& src) { AlignedLoadHelper(fd, src, fn); } -void TurboAssembler::StoreDouble(FPURegister fs, const MemOperand& src) { +void MacroAssembler::StoreDouble(FPURegister fs, const MemOperand& src) { auto fn = [this](FPURegister value, const MemOperand& source) { if (v8_flags.riscv_c_extension && ((value.code() & 0b11000) == 0b01000) && ((source.rm().code() & 0b11000) == 0b01000) && @@ -2037,7 +2037,7 @@ void TurboAssembler::StoreDouble(FPURegister fs, const MemOperand& src) { AlignedStoreHelper(fs, src, fn); } -void TurboAssembler::Ll(Register rd, const MemOperand& rs) { +void MacroAssembler::Ll(Register rd, const MemOperand& rs) { bool is_one_instruction = rs.offset() == 0; if (is_one_instruction) { lr_w(false, false, rd, rs.rm()); @@ -2050,7 +2050,7 @@ void TurboAssembler::Ll(Register rd, const MemOperand& rs) { } #if V8_TARGET_ARCH_RISCV64 -void TurboAssembler::Lld(Register rd, const MemOperand& rs) { +void MacroAssembler::Lld(Register rd, const MemOperand& rs) { bool is_one_instruction = rs.offset() == 0; if (is_one_instruction) { lr_d(false, false, rd, rs.rm()); @@ -2062,7 +2062,7 @@ void TurboAssembler::Lld(Register rd, const MemOperand& rs) { } } #endif -void TurboAssembler::Sc(Register rd, const MemOperand& rs) { +void MacroAssembler::Sc(Register rd, const MemOperand& rs) { bool is_one_instruction = rs.offset() == 0; if (is_one_instruction) { sc_w(false, false, rd, rs.rm(), rd); @@ -2074,7 +2074,7 @@ void TurboAssembler::Sc(Register rd, const MemOperand& rs) { } } #if V8_TARGET_ARCH_RISCV64 -void TurboAssembler::Scd(Register rd, const MemOperand& rs) { +void MacroAssembler::Scd(Register rd, const MemOperand& rs) { bool is_one_instruction = rs.offset() == 0; if (is_one_instruction) { sc_d(false, false, rd, rs.rm(), rd); @@ -2086,7 +2086,7 @@ void TurboAssembler::Scd(Register rd, const MemOperand& rs) { } } #endif -void TurboAssembler::li(Register dst, Handle<HeapObject> value, +void MacroAssembler::li(Register dst, Handle<HeapObject> value, RelocInfo::Mode rmode) { // TODO(jgruber,v8:8887): Also consider a root-relative load when generating // non-isolate-independent code.
In many cases it might be cheaper than @@ -2104,7 +2104,7 @@ void TurboAssembler::li(Register dst, Handle value, } } -void TurboAssembler::li(Register dst, ExternalReference value, LiFlags mode) { +void MacroAssembler::li(Register dst, ExternalReference value, LiFlags mode) { // TODO(jgruber,v8:8887): Also consider a root-relative load when generating // non-isolate-independent code. In many cases it might be cheaper than // embedding the relocatable value. @@ -2124,7 +2124,7 @@ static inline int InstrCountForLiLower32Bit(int64_t value) { return 2; } -int TurboAssembler::InstrCountForLi64Bit(int64_t value) { +int MacroAssembler::InstrCountForLi64Bit(int64_t value) { if (is_int32(value + 0x800)) { return InstrCountForLiLower32Bit(value); } else { @@ -2134,14 +2134,14 @@ int TurboAssembler::InstrCountForLi64Bit(int64_t value) { return INT_MAX; } -void TurboAssembler::li_optimized(Register rd, Operand j, LiFlags mode) { +void MacroAssembler::li_optimized(Register rd, Operand j, LiFlags mode) { DCHECK(!j.is_reg()); DCHECK(!MustUseReg(j.rmode())); DCHECK(mode == OPTIMIZE_SIZE); Li(rd, j.immediate()); } -void TurboAssembler::li(Register rd, Operand j, LiFlags mode) { +void MacroAssembler::li(Register rd, Operand j, LiFlags mode) { DCHECK(!j.is_reg()); BlockTrampolinePoolScope block_trampoline_pool(this); if (!MustUseReg(j.rmode()) && mode == OPTIMIZE_SIZE) { @@ -2188,7 +2188,7 @@ static RegList t_regs = {t0, t1, t2, t3, t4, t5, t6}; static RegList a_regs = {a0, a1, a2, a3, a4, a5, a6, a7}; static RegList s_regs = {s1, s2, s3, s4, s5, s6, s7, s8, s9, s10, s11}; -void TurboAssembler::MultiPush(RegList regs) { +void MacroAssembler::MultiPush(RegList regs) { int16_t num_to_push = regs.Count(); int16_t stack_offset = num_to_push * kSystemPointerSize; @@ -2232,7 +2232,7 @@ void TurboAssembler::MultiPush(RegList regs) { #undef S_REGS } -void TurboAssembler::MultiPop(RegList regs) { +void MacroAssembler::MultiPop(RegList regs) { int16_t stack_offset = 0; #define TEST_AND_POP_REG(reg) \ @@ -2273,7 +2273,7 @@ void TurboAssembler::MultiPop(RegList regs) { #undef A_REGS } -void TurboAssembler::MultiPushFPU(DoubleRegList regs) { +void MacroAssembler::MultiPushFPU(DoubleRegList regs) { int16_t num_to_push = regs.Count(); int16_t stack_offset = num_to_push * kDoubleSize; @@ -2286,7 +2286,7 @@ void TurboAssembler::MultiPushFPU(DoubleRegList regs) { } } -void TurboAssembler::MultiPopFPU(DoubleRegList regs) { +void MacroAssembler::MultiPopFPU(DoubleRegList regs) { int16_t stack_offset = 0; for (int16_t i = 0; i < kNumRegisters; i++) { @@ -2299,7 +2299,7 @@ void TurboAssembler::MultiPopFPU(DoubleRegList regs) { } #if V8_TARGET_ARCH_RISCV32 -void TurboAssembler::AddPair(Register dst_low, Register dst_high, +void MacroAssembler::AddPair(Register dst_low, Register dst_high, Register left_low, Register left_high, Register right_low, Register right_high, Register scratch1, Register scratch2) { @@ -2317,7 +2317,7 @@ void TurboAssembler::AddPair(Register dst_low, Register dst_high, Move(dst_low, scratch1); } -void TurboAssembler::SubPair(Register dst_low, Register dst_high, +void MacroAssembler::SubPair(Register dst_low, Register dst_high, Register left_low, Register left_high, Register right_low, Register right_high, Register scratch1, Register scratch2) { @@ -2335,27 +2335,27 @@ void TurboAssembler::SubPair(Register dst_low, Register dst_high, Move(dst_low, scratch1); } -void TurboAssembler::AndPair(Register dst_low, Register dst_high, +void MacroAssembler::AndPair(Register dst_low, Register dst_high, Register 
left_low, Register left_high, Register right_low, Register right_high) { And(dst_low, left_low, right_low); And(dst_high, left_high, right_high); } -void TurboAssembler::OrPair(Register dst_low, Register dst_high, +void MacroAssembler::OrPair(Register dst_low, Register dst_high, Register left_low, Register left_high, Register right_low, Register right_high) { Or(dst_low, left_low, right_low); Or(dst_high, left_high, right_high); } -void TurboAssembler::XorPair(Register dst_low, Register dst_high, +void MacroAssembler::XorPair(Register dst_low, Register dst_high, Register left_low, Register left_high, Register right_low, Register right_high) { Xor(dst_low, left_low, right_low); Xor(dst_high, left_high, right_high); } -void TurboAssembler::MulPair(Register dst_low, Register dst_high, +void MacroAssembler::MulPair(Register dst_low, Register dst_high, Register left_low, Register left_high, Register right_low, Register right_high, Register scratch1, Register scratch2) { @@ -2381,7 +2381,7 @@ void TurboAssembler::MulPair(Register dst_low, Register dst_high, Add32(dst_high, scratch2, scratch3); } -void TurboAssembler::ShlPair(Register dst_low, Register dst_high, +void MacroAssembler::ShlPair(Register dst_low, Register dst_high, Register src_low, Register src_high, Register shift, Register scratch1, Register scratch2) { @@ -2426,7 +2426,7 @@ void TurboAssembler::ShlPair(Register dst_low, Register dst_high, bind(&done); } -void TurboAssembler::ShlPair(Register dst_low, Register dst_high, +void MacroAssembler::ShlPair(Register dst_low, Register dst_high, Register src_low, Register src_high, int32_t shift, Register scratch1, Register scratch2) { DCHECK_GE(63, shift); @@ -2451,7 +2451,7 @@ void TurboAssembler::ShlPair(Register dst_low, Register dst_high, } } -void TurboAssembler::ShrPair(Register dst_low, Register dst_high, +void MacroAssembler::ShrPair(Register dst_low, Register dst_high, Register src_low, Register src_high, Register shift, Register scratch1, Register scratch2) { @@ -2496,7 +2496,7 @@ void TurboAssembler::ShrPair(Register dst_low, Register dst_high, bind(&done); } -void TurboAssembler::ShrPair(Register dst_low, Register dst_high, +void MacroAssembler::ShrPair(Register dst_low, Register dst_high, Register src_low, Register src_high, int32_t shift, Register scratch1, Register scratch2) { DCHECK_GE(63, shift); @@ -2521,7 +2521,7 @@ void TurboAssembler::ShrPair(Register dst_low, Register dst_high, } } -void TurboAssembler::SarPair(Register dst_low, Register dst_high, +void MacroAssembler::SarPair(Register dst_low, Register dst_high, Register src_low, Register src_high, Register shift, Register scratch1, Register scratch2) { @@ -2564,7 +2564,7 @@ void TurboAssembler::SarPair(Register dst_low, Register dst_high, bind(&done); } -void TurboAssembler::SarPair(Register dst_low, Register dst_high, +void MacroAssembler::SarPair(Register dst_low, Register dst_high, Register src_low, Register src_high, int32_t shift, Register scratch1, Register scratch2) { DCHECK_GE(63, shift); @@ -2589,7 +2589,7 @@ void TurboAssembler::SarPair(Register dst_low, Register dst_high, } #endif -void TurboAssembler::ExtractBits(Register rt, Register rs, uint16_t pos, +void MacroAssembler::ExtractBits(Register rt, Register rs, uint16_t pos, uint16_t size, bool sign_extend) { #if V8_TARGET_ARCH_RISCV64 DCHECK(pos < 64 && 0 < size && size <= 64 && 0 < pos + size && @@ -2615,7 +2615,7 @@ void TurboAssembler::ExtractBits(Register rt, Register rs, uint16_t pos, #endif } -void TurboAssembler::InsertBits(Register dest, 
Register source, Register pos, +void MacroAssembler::InsertBits(Register dest, Register source, Register pos, int size) { #if V8_TARGET_ARCH_RISCV64 DCHECK_LT(size, 64); @@ -2641,42 +2641,42 @@ void TurboAssembler::InsertBits(Register dest, Register source, Register pos, or_(dest, dest, source_); } -void TurboAssembler::Neg_s(FPURegister fd, FPURegister fs) { fneg_s(fd, fs); } +void MacroAssembler::Neg_s(FPURegister fd, FPURegister fs) { fneg_s(fd, fs); } -void TurboAssembler::Neg_d(FPURegister fd, FPURegister fs) { fneg_d(fd, fs); } +void MacroAssembler::Neg_d(FPURegister fd, FPURegister fs) { fneg_d(fd, fs); } -void TurboAssembler::Cvt_d_uw(FPURegister fd, Register rs) { +void MacroAssembler::Cvt_d_uw(FPURegister fd, Register rs) { // Convert rs to a FP value in fd. fcvt_d_wu(fd, rs); } -void TurboAssembler::Cvt_d_w(FPURegister fd, Register rs) { +void MacroAssembler::Cvt_d_w(FPURegister fd, Register rs) { // Convert rs to a FP value in fd. fcvt_d_w(fd, rs); } #if V8_TARGET_ARCH_RISCV64 -void TurboAssembler::Cvt_d_ul(FPURegister fd, Register rs) { +void MacroAssembler::Cvt_d_ul(FPURegister fd, Register rs) { // Convert rs to a FP value in fd. fcvt_d_lu(fd, rs); } #endif -void TurboAssembler::Cvt_s_uw(FPURegister fd, Register rs) { +void MacroAssembler::Cvt_s_uw(FPURegister fd, Register rs) { // Convert rs to a FP value in fd. fcvt_s_wu(fd, rs); } -void TurboAssembler::Cvt_s_w(FPURegister fd, Register rs) { +void MacroAssembler::Cvt_s_w(FPURegister fd, Register rs) { // Convert rs to a FP value in fd. fcvt_s_w(fd, rs); } #if V8_TARGET_ARCH_RISCV64 -void TurboAssembler::Cvt_s_ul(FPURegister fd, Register rs) { +void MacroAssembler::Cvt_s_ul(FPURegister fd, Register rs) { // Convert rs to a FP value in fd. fcvt_s_lu(fd, rs); } #endif template <typename CvtFunc> -void TurboAssembler::RoundFloatingPointToInteger(Register rd, FPURegister fs, +void MacroAssembler::RoundFloatingPointToInteger(Register rd, FPURegister fs, Register result, CvtFunc fcvt_generator) { // Save csr_fflags to scratch & clear exception flags @@ -2705,7 +2705,7 @@ void TurboAssembler::RoundFloatingPointToInteger(Register rd, FPURegister fs, } } -void TurboAssembler::Clear_if_nan_d(Register rd, FPURegister fs) { +void MacroAssembler::Clear_if_nan_d(Register rd, FPURegister fs) { Label no_nan; feq_d(kScratchReg, fs, fs); bnez(kScratchReg, &no_nan); @@ -2713,7 +2713,7 @@ void TurboAssembler::Clear_if_nan_d(Register rd, FPURegister fs) { bind(&no_nan); } -void TurboAssembler::Clear_if_nan_s(Register rd, FPURegister fs) { +void MacroAssembler::Clear_if_nan_s(Register rd, FPURegister fs) { Label no_nan; feq_s(kScratchReg, fs, fs); bnez(kScratchReg, &no_nan); @@ -2721,101 +2721,101 @@ void TurboAssembler::Clear_if_nan_s(Register rd, FPURegister fs) { bind(&no_nan); } -void TurboAssembler::Trunc_uw_d(Register rd, FPURegister fs, Register result) { +void MacroAssembler::Trunc_uw_d(Register rd, FPURegister fs, Register result) { RoundFloatingPointToInteger( - rd, fs, result, [](TurboAssembler* tasm, Register dst, FPURegister src) { - tasm->fcvt_wu_d(dst, src, RTZ); + rd, fs, result, [](MacroAssembler* masm, Register dst, FPURegister src) { + masm->fcvt_wu_d(dst, src, RTZ); }); } -void TurboAssembler::Trunc_w_d(Register rd, FPURegister fs, Register result) { +void MacroAssembler::Trunc_w_d(Register rd, FPURegister fs, Register result) { RoundFloatingPointToInteger( - rd, fs, result, [](TurboAssembler* tasm, Register dst, FPURegister src) { - tasm->fcvt_w_d(dst, src, RTZ); + rd, fs, result, [](MacroAssembler* masm, Register dst, FPURegister src) {
masm->fcvt_w_d(dst, src, RTZ); }); } -void TurboAssembler::Trunc_uw_s(Register rd, FPURegister fs, Register result) { +void MacroAssembler::Trunc_uw_s(Register rd, FPURegister fs, Register result) { RoundFloatingPointToInteger( - rd, fs, result, [](TurboAssembler* tasm, Register dst, FPURegister src) { - tasm->fcvt_wu_s(dst, src, RTZ); + rd, fs, result, [](MacroAssembler* masm, Register dst, FPURegister src) { + masm->fcvt_wu_s(dst, src, RTZ); }); } -void TurboAssembler::Trunc_w_s(Register rd, FPURegister fs, Register result) { +void MacroAssembler::Trunc_w_s(Register rd, FPURegister fs, Register result) { RoundFloatingPointToInteger( - rd, fs, result, [](TurboAssembler* tasm, Register dst, FPURegister src) { - tasm->fcvt_w_s(dst, src, RTZ); + rd, fs, result, [](MacroAssembler* masm, Register dst, FPURegister src) { + masm->fcvt_w_s(dst, src, RTZ); }); } #if V8_TARGET_ARCH_RISCV64 -void TurboAssembler::Trunc_ul_d(Register rd, FPURegister fs, Register result) { +void MacroAssembler::Trunc_ul_d(Register rd, FPURegister fs, Register result) { RoundFloatingPointToInteger( - rd, fs, result, [](TurboAssembler* tasm, Register dst, FPURegister src) { - tasm->fcvt_lu_d(dst, src, RTZ); + rd, fs, result, [](MacroAssembler* masm, Register dst, FPURegister src) { + masm->fcvt_lu_d(dst, src, RTZ); }); } -void TurboAssembler::Trunc_l_d(Register rd, FPURegister fs, Register result) { +void MacroAssembler::Trunc_l_d(Register rd, FPURegister fs, Register result) { RoundFloatingPointToInteger( - rd, fs, result, [](TurboAssembler* tasm, Register dst, FPURegister src) { - tasm->fcvt_l_d(dst, src, RTZ); + rd, fs, result, [](MacroAssembler* masm, Register dst, FPURegister src) { + masm->fcvt_l_d(dst, src, RTZ); }); } -void TurboAssembler::Trunc_ul_s(Register rd, FPURegister fs, Register result) { +void MacroAssembler::Trunc_ul_s(Register rd, FPURegister fs, Register result) { RoundFloatingPointToInteger( - rd, fs, result, [](TurboAssembler* tasm, Register dst, FPURegister src) { - tasm->fcvt_lu_s(dst, src, RTZ); + rd, fs, result, [](MacroAssembler* masm, Register dst, FPURegister src) { + masm->fcvt_lu_s(dst, src, RTZ); }); } -void TurboAssembler::Trunc_l_s(Register rd, FPURegister fs, Register result) { +void MacroAssembler::Trunc_l_s(Register rd, FPURegister fs, Register result) { RoundFloatingPointToInteger( - rd, fs, result, [](TurboAssembler* tasm, Register dst, FPURegister src) { - tasm->fcvt_l_s(dst, src, RTZ); + rd, fs, result, [](MacroAssembler* masm, Register dst, FPURegister src) { + masm->fcvt_l_s(dst, src, RTZ); }); } #endif -void TurboAssembler::Round_w_s(Register rd, FPURegister fs, Register result) { +void MacroAssembler::Round_w_s(Register rd, FPURegister fs, Register result) { RoundFloatingPointToInteger( - rd, fs, result, [](TurboAssembler* tasm, Register dst, FPURegister src) { - tasm->fcvt_w_s(dst, src, RNE); + rd, fs, result, [](MacroAssembler* masm, Register dst, FPURegister src) { + masm->fcvt_w_s(dst, src, RNE); }); } -void TurboAssembler::Round_w_d(Register rd, FPURegister fs, Register result) { +void MacroAssembler::Round_w_d(Register rd, FPURegister fs, Register result) { RoundFloatingPointToInteger( - rd, fs, result, [](TurboAssembler* tasm, Register dst, FPURegister src) { - tasm->fcvt_w_d(dst, src, RNE); + rd, fs, result, [](MacroAssembler* masm, Register dst, FPURegister src) { + masm->fcvt_w_d(dst, src, RNE); }); } -void TurboAssembler::Ceil_w_s(Register rd, FPURegister fs, Register result) { +void MacroAssembler::Ceil_w_s(Register rd, FPURegister fs, Register result) { 
RoundFloatingPointToInteger( - rd, fs, result, [](TurboAssembler* tasm, Register dst, FPURegister src) { - tasm->fcvt_w_s(dst, src, RUP); + rd, fs, result, [](MacroAssembler* masm, Register dst, FPURegister src) { + masm->fcvt_w_s(dst, src, RUP); }); } -void TurboAssembler::Ceil_w_d(Register rd, FPURegister fs, Register result) { +void MacroAssembler::Ceil_w_d(Register rd, FPURegister fs, Register result) { RoundFloatingPointToInteger( - rd, fs, result, [](TurboAssembler* tasm, Register dst, FPURegister src) { - tasm->fcvt_w_d(dst, src, RUP); + rd, fs, result, [](MacroAssembler* masm, Register dst, FPURegister src) { + masm->fcvt_w_d(dst, src, RUP); }); } -void TurboAssembler::Floor_w_s(Register rd, FPURegister fs, Register result) { +void MacroAssembler::Floor_w_s(Register rd, FPURegister fs, Register result) { RoundFloatingPointToInteger( - rd, fs, result, [](TurboAssembler* tasm, Register dst, FPURegister src) { - tasm->fcvt_w_s(dst, src, RDN); + rd, fs, result, [](MacroAssembler* masm, Register dst, FPURegister src) { + masm->fcvt_w_s(dst, src, RDN); }); } -void TurboAssembler::Floor_w_d(Register rd, FPURegister fs, Register result) { +void MacroAssembler::Floor_w_d(Register rd, FPURegister fs, Register result) { RoundFloatingPointToInteger( - rd, fs, result, [](TurboAssembler* tasm, Register dst, FPURegister src) { - tasm->fcvt_w_d(dst, src, RDN); + rd, fs, result, [](MacroAssembler* masm, Register dst, FPURegister src) { + masm->fcvt_w_d(dst, src, RDN); }); } @@ -2826,7 +2826,7 @@ void TurboAssembler::Floor_w_d(Register rd, FPURegister fs, Register result) { // handling is needed by NaN, +/-Infinity, +/-0 #if V8_TARGET_ARCH_RISCV64 template <typename F> -void TurboAssembler::RoundHelper(FPURegister dst, FPURegister src, +void MacroAssembler::RoundHelper(FPURegister dst, FPURegister src, FPURegister fpu_scratch, FPURoundingMode frm) { BlockTrampolinePoolScope block_trampoline_pool(this); UseScratchRegisterScope temps(this); @@ -2945,7 +2945,7 @@ void TurboAssembler::RoundHelper(FPURegister dst, FPURegister src, // rounded result; this differs from behavior of RISCV fcvt instructions (which // round out-of-range values to the nearest max or min value), therefore special // handling is needed by NaN, +/-Infinity, +/-0 -void TurboAssembler::RoundFloat(FPURegister dst, FPURegister src, +void MacroAssembler::RoundFloat(FPURegister dst, FPURegister src, FPURegister fpu_scratch, FPURoundingMode frm) { BlockTrampolinePoolScope block_trampoline_pool(this); UseScratchRegisterScope temps(this); @@ -3038,7 +3038,7 @@ void TurboAssembler::RoundFloat(FPURegister dst, FPURegister src, // round out-of-range values to the nearest max or min value), therefore special // handling is needed by NaN, +/-Infinity, +/-0 template <typename F> -void TurboAssembler::RoundHelper(VRegister dst, VRegister src, Register scratch, +void MacroAssembler::RoundHelper(VRegister dst, VRegister src, Register scratch, VRegister v_scratch, FPURoundingMode frm) { VU.set(scratch, std::is_same<F, float>::value ?
E32 : E64, m1); // if src is NaN/+-Infinity/+-Zero or if the exponent is larger than # of bits @@ -3092,69 +3092,69 @@ void TurboAssembler::RoundHelper(VRegister dst, VRegister src, Register scratch, } } -void TurboAssembler::Ceil_f(VRegister vdst, VRegister vsrc, Register scratch, +void MacroAssembler::Ceil_f(VRegister vdst, VRegister vsrc, Register scratch, VRegister v_scratch) { RoundHelper<float>(vdst, vsrc, scratch, v_scratch, RUP); } -void TurboAssembler::Ceil_d(VRegister vdst, VRegister vsrc, Register scratch, +void MacroAssembler::Ceil_d(VRegister vdst, VRegister vsrc, Register scratch, VRegister v_scratch) { RoundHelper<double>(vdst, vsrc, scratch, v_scratch, RUP); } -void TurboAssembler::Floor_f(VRegister vdst, VRegister vsrc, Register scratch, +void MacroAssembler::Floor_f(VRegister vdst, VRegister vsrc, Register scratch, VRegister v_scratch) { RoundHelper<float>(vdst, vsrc, scratch, v_scratch, RDN); } -void TurboAssembler::Floor_d(VRegister vdst, VRegister vsrc, Register scratch, +void MacroAssembler::Floor_d(VRegister vdst, VRegister vsrc, Register scratch, VRegister v_scratch) { RoundHelper<double>(vdst, vsrc, scratch, v_scratch, RDN); } -void TurboAssembler::Trunc_d(VRegister vdst, VRegister vsrc, Register scratch, +void MacroAssembler::Trunc_d(VRegister vdst, VRegister vsrc, Register scratch, VRegister v_scratch) { RoundHelper<double>(vdst, vsrc, scratch, v_scratch, RTZ); } -void TurboAssembler::Trunc_f(VRegister vdst, VRegister vsrc, Register scratch, +void MacroAssembler::Trunc_f(VRegister vdst, VRegister vsrc, Register scratch, VRegister v_scratch) { RoundHelper<float>(vdst, vsrc, scratch, v_scratch, RTZ); } -void TurboAssembler::Round_f(VRegister vdst, VRegister vsrc, Register scratch, +void MacroAssembler::Round_f(VRegister vdst, VRegister vsrc, Register scratch, VRegister v_scratch) { RoundHelper<float>(vdst, vsrc, scratch, v_scratch, RNE); } -void TurboAssembler::Round_d(VRegister vdst, VRegister vsrc, Register scratch, +void MacroAssembler::Round_d(VRegister vdst, VRegister vsrc, Register scratch, VRegister v_scratch) { RoundHelper<double>(vdst, vsrc, scratch, v_scratch, RNE); } #if V8_TARGET_ARCH_RISCV64 -void TurboAssembler::Floor_d_d(FPURegister dst, FPURegister src, +void MacroAssembler::Floor_d_d(FPURegister dst, FPURegister src, FPURegister fpu_scratch) { RoundHelper<double>(dst, src, fpu_scratch, RDN); } -void TurboAssembler::Ceil_d_d(FPURegister dst, FPURegister src, +void MacroAssembler::Ceil_d_d(FPURegister dst, FPURegister src, FPURegister fpu_scratch) { RoundHelper<double>(dst, src, fpu_scratch, RUP); } -void TurboAssembler::Trunc_d_d(FPURegister dst, FPURegister src, +void MacroAssembler::Trunc_d_d(FPURegister dst, FPURegister src, FPURegister fpu_scratch) { RoundHelper<double>(dst, src, fpu_scratch, RTZ); } -void TurboAssembler::Round_d_d(FPURegister dst, FPURegister src, +void MacroAssembler::Round_d_d(FPURegister dst, FPURegister src, FPURegister fpu_scratch) { RoundHelper<double>(dst, src, fpu_scratch, RNE); } #endif -void TurboAssembler::Floor_s_s(FPURegister dst, FPURegister src, +void MacroAssembler::Floor_s_s(FPURegister dst, FPURegister src, FPURegister fpu_scratch) { #if V8_TARGET_ARCH_RISCV64 RoundHelper<float>(dst, src, fpu_scratch, RDN); @@ -3163,7 +3163,7 @@ void TurboAssembler::Floor_s_s(FPURegister dst, FPURegister src, #endif } -void TurboAssembler::Ceil_s_s(FPURegister dst, FPURegister src, +void MacroAssembler::Ceil_s_s(FPURegister dst, FPURegister src, FPURegister fpu_scratch) { #if V8_TARGET_ARCH_RISCV64 RoundHelper<float>(dst, src, fpu_scratch, RUP); @@ -3172,7 +3172,7 @@ void TurboAssembler::Ceil_s_s(FPURegister dst, FPURegister src,
#endif } -void TurboAssembler::Trunc_s_s(FPURegister dst, FPURegister src, +void MacroAssembler::Trunc_s_s(FPURegister dst, FPURegister src, FPURegister fpu_scratch) { #if V8_TARGET_ARCH_RISCV64 RoundHelper<float>(dst, src, fpu_scratch, RTZ); @@ -3181,7 +3181,7 @@ void TurboAssembler::Trunc_s_s(FPURegister dst, FPURegister src, #endif } -void TurboAssembler::Round_s_s(FPURegister dst, FPURegister src, +void MacroAssembler::Round_s_s(FPURegister dst, FPURegister src, FPURegister fpu_scratch) { #if V8_TARGET_ARCH_RISCV64 RoundHelper<float>(dst, src, fpu_scratch, RNE); @@ -3210,7 +3210,7 @@ void MacroAssembler::Msub_d(FPURegister fd, FPURegister fr, FPURegister fs, fmsub_d(fd, fs, ft, fr); } -void TurboAssembler::CompareF32(Register rd, FPUCondition cc, FPURegister cmp1, +void MacroAssembler::CompareF32(Register rd, FPUCondition cc, FPURegister cmp1, FPURegister cmp2) { switch (cc) { case EQ: @@ -3237,7 +3237,7 @@ void TurboAssembler::CompareF32(Register rd, FPUCondition cc, FPURegister cmp1, } } -void TurboAssembler::CompareF64(Register rd, FPUCondition cc, FPURegister cmp1, +void MacroAssembler::CompareF64(Register rd, FPUCondition cc, FPURegister cmp1, FPURegister cmp2) { switch (cc) { case EQ: @@ -3264,7 +3264,7 @@ void TurboAssembler::CompareF64(Register rd, FPUCondition cc, FPURegister cmp1, } } -void TurboAssembler::CompareIsNotNanF32(Register rd, FPURegister cmp1, +void MacroAssembler::CompareIsNotNanF32(Register rd, FPURegister cmp1, FPURegister cmp2) { UseScratchRegisterScope temps(this); BlockTrampolinePoolScope block_trampoline_pool(this); @@ -3275,7 +3275,7 @@ void TurboAssembler::CompareIsNotNanF32(Register rd, FPURegister cmp1, And(rd, rd, scratch); // rd <- !isNan(cmp1) && !isNan(cmp2) } -void TurboAssembler::CompareIsNotNanF64(Register rd, FPURegister cmp1, +void MacroAssembler::CompareIsNotNanF64(Register rd, FPURegister cmp1, FPURegister cmp2) { UseScratchRegisterScope temps(this); BlockTrampolinePoolScope block_trampoline_pool(this); @@ -3286,27 +3286,27 @@ void TurboAssembler::CompareIsNotNanF64(Register rd, FPURegister cmp1, And(rd, rd, scratch); // rd <- !isNan(cmp1) && !isNan(cmp2) } -void TurboAssembler::CompareIsNanF32(Register rd, FPURegister cmp1, +void MacroAssembler::CompareIsNanF32(Register rd, FPURegister cmp1, FPURegister cmp2) { CompareIsNotNanF32(rd, cmp1, cmp2); // rd <- !isNan(cmp1) && !isNan(cmp2) Xor(rd, rd, 1); // rd <- isNan(cmp1) || isNan(cmp2) } -void TurboAssembler::CompareIsNanF64(Register rd, FPURegister cmp1, +void MacroAssembler::CompareIsNanF64(Register rd, FPURegister cmp1, FPURegister cmp2) { CompareIsNotNanF64(rd, cmp1, cmp2); // rd <- !isNan(cmp1) && !isNan(cmp2) Xor(rd, rd, 1); // rd <- isNan(cmp1) || isNan(cmp2) } -void TurboAssembler::BranchTrueShortF(Register rs, Label* target) { +void MacroAssembler::BranchTrueShortF(Register rs, Label* target) { Branch(target, not_equal, rs, Operand(zero_reg)); } -void TurboAssembler::BranchFalseShortF(Register rs, Label* target) { +void MacroAssembler::BranchFalseShortF(Register rs, Label* target) { Branch(target, equal, rs, Operand(zero_reg)); } -void TurboAssembler::BranchTrueF(Register rs, Label* target) { +void MacroAssembler::BranchTrueF(Register rs, Label* target) { bool long_branch = target->is_bound() ?
!is_near(target) : is_trampoline_emitted(); if (long_branch) { @@ -3319,7 +3319,7 @@ void TurboAssembler::BranchTrueF(Register rs, Label* target) { } } -void TurboAssembler::BranchFalseF(Register rs, Label* target) { +void MacroAssembler::BranchFalseF(Register rs, Label* target) { bool long_branch = target->is_bound() ? !is_near(target) : is_trampoline_emitted(); if (long_branch) { @@ -3332,7 +3332,7 @@ void TurboAssembler::BranchFalseF(Register rs, Label* target) { } } -void TurboAssembler::InsertHighWordF64(FPURegister dst, Register src_high) { +void MacroAssembler::InsertHighWordF64(FPURegister dst, Register src_high) { #if V8_TARGET_ARCH_RISCV64 UseScratchRegisterScope temps(this); Register scratch = temps.Acquire(); @@ -3357,7 +3357,7 @@ void TurboAssembler::InsertHighWordF64(FPURegister dst, Register src_high) { #endif } -void TurboAssembler::InsertLowWordF64(FPURegister dst, Register src_low) { +void MacroAssembler::InsertLowWordF64(FPURegister dst, Register src_low) { #if V8_TARGET_ARCH_RISCV64 UseScratchRegisterScope temps(this); Register scratch = temps.Acquire(); @@ -3382,7 +3382,7 @@ void TurboAssembler::InsertLowWordF64(FPURegister dst, Register src_low) { #endif } -void TurboAssembler::LoadFPRImmediate(FPURegister dst, uint32_t src) { +void MacroAssembler::LoadFPRImmediate(FPURegister dst, uint32_t src) { ASM_CODE_COMMENT(this); // Handle special values first. if (src == base::bit_cast<uint32_t>(0.0f) && has_single_zero_reg_set_) { @@ -3408,7 +3408,7 @@ void TurboAssembler::LoadFPRImmediate(FPURegister dst, uint32_t src) { } } -void TurboAssembler::LoadFPRImmediate(FPURegister dst, uint64_t src) { +void MacroAssembler::LoadFPRImmediate(FPURegister dst, uint64_t src) { ASM_CODE_COMMENT(this); // Handle special values first. if (src == base::bit_cast<uint64_t>(0.0) && has_double_zero_reg_set_) { @@ -3459,7 +3459,7 @@ void TurboAssembler::LoadFPRImmediate(FPURegister dst, uint64_t src) { } } -void TurboAssembler::CompareI(Register rd, Register rs, const Operand& rt, +void MacroAssembler::CompareI(Register rd, Register rs, const Operand& rt, Condition cond) { switch (cond) { case eq: @@ -3504,7 +3504,7 @@ void TurboAssembler::CompareI(Register rd, Register rs, const Operand& rt, } // dest <- (condition != 0 ? zero : dest) -void TurboAssembler::LoadZeroIfConditionNotZero(Register dest, +void MacroAssembler::LoadZeroIfConditionNotZero(Register dest, Register condition) { UseScratchRegisterScope temps(this); Register scratch = temps.Acquire(); @@ -3515,7 +3515,7 @@ void TurboAssembler::LoadZeroIfConditionNotZero(Register dest, } // dest <- (condition == 0 ? 0 : dest) -void TurboAssembler::LoadZeroIfConditionZero(Register dest, +void MacroAssembler::LoadZeroIfConditionZero(Register dest, Register condition) { UseScratchRegisterScope temps(this); Register scratch = temps.Acquire(); @@ -3525,7 +3525,7 @@ void TurboAssembler::LoadZeroIfConditionZero(Register dest, and_(dest, dest, scratch); } -void TurboAssembler::Clz32(Register rd, Register xx) { +void MacroAssembler::Clz32(Register rd, Register xx) { // 32 bit unsigned in lower word: count number of leading zeros. // int n = 32; // unsigned y; @@ -3602,7 +3602,7 @@ void TurboAssembler::Clz32(Register rd, Register xx) { } #if V8_TARGET_ARCH_RISCV64 -void TurboAssembler::Clz64(Register rd, Register xx) { +void MacroAssembler::Clz64(Register rd, Register xx) { // 64 bit: count number of leading zeros.
// int n = 64; // unsigned y; @@ -3656,7 +3656,7 @@ void TurboAssembler::Clz64(Register rd, Register xx) { bind(&L5); } #endif -void TurboAssembler::Ctz32(Register rd, Register rs) { +void MacroAssembler::Ctz32(Register rd, Register rs) { // Convert trailing zeroes to trailing ones, and bits to their left // to zeroes. @@ -3680,7 +3680,7 @@ void TurboAssembler::Ctz32(Register rd, Register rs) { } } #if V8_TARGET_ARCH_RISCV64 -void TurboAssembler::Ctz64(Register rd, Register rs) { +void MacroAssembler::Ctz64(Register rd, Register rs) { // Convert trailing zeroes to trailing ones, and bits to their left // to zeroes. BlockTrampolinePoolScope block_trampoline_pool(this); @@ -3703,7 +3703,7 @@ void TurboAssembler::Ctz64(Register rd, Register rs) { } } #endif -void TurboAssembler::Popcnt32(Register rd, Register rs, Register scratch) { +void MacroAssembler::Popcnt32(Register rd, Register rs, Register scratch) { DCHECK_NE(scratch, rs); DCHECK_NE(scratch, rd); // https://graphics.stanford.edu/~seander/bithacks.html#CountBitsSetParallel @@ -3754,7 +3754,7 @@ void TurboAssembler::Popcnt32(Register rd, Register rs, Register scratch) { } #if V8_TARGET_ARCH_RISCV64 -void TurboAssembler::Popcnt64(Register rd, Register rs, Register scratch) { +void MacroAssembler::Popcnt64(Register rd, Register rs, Register scratch) { DCHECK_NE(scratch, rs); DCHECK_NE(scratch, rd); // uint64_t B0 = 0x5555555555555555l; // (T)~(T)0/3 @@ -3790,7 +3790,7 @@ void TurboAssembler::Popcnt64(Register rd, Register rs, Register scratch) { srli(rd, rd, 32 + shift); } #endif -void TurboAssembler::TryInlineTruncateDoubleToI(Register result, +void MacroAssembler::TryInlineTruncateDoubleToI(Register result, DoubleRegister double_input, Label* done) { UseScratchRegisterScope temps(this); @@ -3801,7 +3801,7 @@ void TurboAssembler::TryInlineTruncateDoubleToI(Register result, Branch(done, eq, scratch, Operand(1)); } -void TurboAssembler::TruncateDoubleToI(Isolate* isolate, Zone* zone, +void MacroAssembler::TruncateDoubleToI(Isolate* isolate, Zone* zone, Register result, DoubleRegister double_input, StubCallMode stub_mode) { @@ -3837,19 +3837,19 @@ void TurboAssembler::TruncateDoubleToI(Isolate* isolate, Zone* zone, DCHECK((cond == cc_always && rs == zero_reg && rt.rm() == zero_reg) || \ (cond != cc_always && (rs != zero_reg || rt.rm() != zero_reg))) -void TurboAssembler::Branch(int32_t offset) { +void MacroAssembler::Branch(int32_t offset) { DCHECK(is_int21(offset)); BranchShort(offset); } -void TurboAssembler::Branch(int32_t offset, Condition cond, Register rs, +void MacroAssembler::Branch(int32_t offset, Condition cond, Register rs, const Operand& rt, Label::Distance near_jump) { bool is_near = BranchShortCheck(offset, nullptr, cond, rs, rt); DCHECK(is_near); USE(is_near); } -void TurboAssembler::Branch(Label* L) { +void MacroAssembler::Branch(Label* L) { if (L->is_bound()) { if (is_near(L)) { BranchShort(L); @@ -3865,7 +3865,7 @@ void TurboAssembler::Branch(Label* L) { } } -void TurboAssembler::Branch(Label* L, Condition cond, Register rs, +void MacroAssembler::Branch(Label* L, Condition cond, Register rs, const Operand& rt, Label::Distance near_jump) { if (L->is_bound()) { if (!BranchShortCheck(0, L, cond, rs, rt)) { @@ -3898,7 +3898,7 @@ void TurboAssembler::Branch(Label* L, Condition cond, Register rs, } } -void TurboAssembler::Branch(Label* L, Condition cond, Register rs, +void MacroAssembler::Branch(Label* L, Condition cond, Register rs, RootIndex index) { UseScratchRegisterScope temps(this); Register scratch = temps.Acquire(); @@ 
-3906,20 +3906,20 @@ void TurboAssembler::Branch(Label* L, Condition cond, Register rs, Branch(L, cond, rs, Operand(scratch)); } -void TurboAssembler::BranchShortHelper(int32_t offset, Label* L) { +void MacroAssembler::BranchShortHelper(int32_t offset, Label* L) { DCHECK(L == nullptr || offset == 0); offset = GetOffset(offset, L, OffsetSize::kOffset21); j(offset); } -void TurboAssembler::BranchShort(int32_t offset) { +void MacroAssembler::BranchShort(int32_t offset) { DCHECK(is_int21(offset)); BranchShortHelper(offset, nullptr); } -void TurboAssembler::BranchShort(Label* L) { BranchShortHelper(0, L); } +void MacroAssembler::BranchShort(Label* L) { BranchShortHelper(0, L); } -int32_t TurboAssembler::GetOffset(int32_t offset, Label* L, OffsetSize bits) { +int32_t MacroAssembler::GetOffset(int32_t offset, Label* L, OffsetSize bits) { if (L) { offset = branch_offset_helper(L, bits); } else { @@ -3928,7 +3928,7 @@ int32_t TurboAssembler::GetOffset(int32_t offset, Label* L, OffsetSize bits) { return offset; } -Register TurboAssembler::GetRtAsRegisterHelper(const Operand& rt, +Register MacroAssembler::GetRtAsRegisterHelper(const Operand& rt, Register scratch) { Register r2 = no_reg; if (rt.is_reg()) { @@ -3941,14 +3941,14 @@ Register TurboAssembler::GetRtAsRegisterHelper(const Operand& rt, return r2; } -bool TurboAssembler::CalculateOffset(Label* L, int32_t* offset, +bool MacroAssembler::CalculateOffset(Label* L, int32_t* offset, OffsetSize bits) { if (!is_near(L, bits)) return false; *offset = GetOffset(*offset, L, bits); return true; } -bool TurboAssembler::CalculateOffset(Label* L, int32_t* offset, OffsetSize bits, +bool MacroAssembler::CalculateOffset(Label* L, int32_t* offset, OffsetSize bits, Register* scratch, const Operand& rt) { if (!is_near(L, bits)) return false; *scratch = GetRtAsRegisterHelper(rt, *scratch); @@ -3956,7 +3956,7 @@ bool TurboAssembler::CalculateOffset(Label* L, int32_t* offset, OffsetSize bits, return true; } -bool TurboAssembler::BranchShortHelper(int32_t offset, Label* L, Condition cond, +bool MacroAssembler::BranchShortHelper(int32_t offset, Label* L, Condition cond, Register rs, const Operand& rt) { DCHECK(L == nullptr || offset == 0); UseScratchRegisterScope temps(this); @@ -4084,7 +4084,7 @@ bool TurboAssembler::BranchShortHelper(int32_t offset, Label* L, Condition cond, return true; } -bool TurboAssembler::BranchShortCheck(int32_t offset, Label* L, Condition cond, +bool MacroAssembler::BranchShortCheck(int32_t offset, Label* L, Condition cond, Register rs, const Operand& rt) { BRANCH_ARGS_CHECK(cond, rs, rt); @@ -4097,28 +4097,28 @@ bool TurboAssembler::BranchShortCheck(int32_t offset, Label* L, Condition cond, } } -void TurboAssembler::BranchShort(int32_t offset, Condition cond, Register rs, +void MacroAssembler::BranchShort(int32_t offset, Condition cond, Register rs, const Operand& rt) { BranchShortCheck(offset, nullptr, cond, rs, rt); } -void TurboAssembler::BranchShort(Label* L, Condition cond, Register rs, +void MacroAssembler::BranchShort(Label* L, Condition cond, Register rs, const Operand& rt) { BranchShortCheck(0, L, cond, rs, rt); } -void TurboAssembler::BranchAndLink(int32_t offset) { +void MacroAssembler::BranchAndLink(int32_t offset) { BranchAndLinkShort(offset); } -void TurboAssembler::BranchAndLink(int32_t offset, Condition cond, Register rs, +void MacroAssembler::BranchAndLink(int32_t offset, Condition cond, Register rs, const Operand& rt) { bool is_near = BranchAndLinkShortCheck(offset, nullptr, cond, rs, rt); DCHECK(is_near); USE(is_near); } 
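
The Branch/BranchShort/BranchLong split above is driven by encodable offset ranges: a RISC-V conditional branch carries a signed 13-bit byte offset and jal a signed 21-bit one (hence OffsetSize::kOffset21 and the is_int21 DCHECKs), so the helpers try a single branch instruction first and fall back to a longer, trampoline-safe sequence only when the label may be out of range. The following is a minimal standalone C++ sketch of that range check only; is_int13/is_int21 mirror V8's range predicates, while ChooseBranchSequence and the quoted instruction shapes are illustrative assumptions, not code from this patch.

#include <cstdint>
#include <cstdio>

// Stand-ins for V8's is_int13/is_int21 signed-range predicates (assumed names).
constexpr bool is_int13(int64_t v) {
  return v >= -(int64_t{1} << 12) && v < (int64_t{1} << 12);
}
constexpr bool is_int21(int64_t v) {
  return v >= -(int64_t{1} << 20) && v < (int64_t{1} << 20);
}

// Illustrative chooser: which branch shape fits a pc-relative byte offset.
const char* ChooseBranchSequence(int64_t offset) {
  if (is_int13(offset)) return "short: one conditional branch to offset";
  if (is_int21(offset)) return "medium: inverted branch over an unconditional j";
  return "long: inverted branch over an auipc+jalr pair";
}

int main() {
  const int64_t offsets[] = {100, int64_t{1} << 14, int64_t{1} << 22};
  for (int64_t off : offsets) {
    std::printf("%10lld -> %s\n", static_cast<long long>(off),
                ChooseBranchSequence(off));
  }
  return 0;
}
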
-void TurboAssembler::BranchAndLink(Label* L) { +void MacroAssembler::BranchAndLink(Label* L) { if (L->is_bound()) { if (is_near(L)) { BranchAndLinkShort(L); @@ -4134,7 +4134,7 @@ void TurboAssembler::BranchAndLink(Label* L) { } } -void TurboAssembler::BranchAndLink(Label* L, Condition cond, Register rs, +void MacroAssembler::BranchAndLink(Label* L, Condition cond, Register rs, const Operand& rt) { if (L->is_bound()) { if (!BranchAndLinkShortCheck(0, L, cond, rs, rt)) { @@ -4157,25 +4157,25 @@ void TurboAssembler::BranchAndLink(Label* L, Condition cond, Register rs, } } -void TurboAssembler::BranchAndLinkShortHelper(int32_t offset, Label* L) { +void MacroAssembler::BranchAndLinkShortHelper(int32_t offset, Label* L) { DCHECK(L == nullptr || offset == 0); offset = GetOffset(offset, L, OffsetSize::kOffset21); jal(offset); } -void TurboAssembler::BranchAndLinkShort(int32_t offset) { +void MacroAssembler::BranchAndLinkShort(int32_t offset) { DCHECK(is_int21(offset)); BranchAndLinkShortHelper(offset, nullptr); } -void TurboAssembler::BranchAndLinkShort(Label* L) { +void MacroAssembler::BranchAndLinkShort(Label* L) { BranchAndLinkShortHelper(0, L); } // Pre r6 we need to use a bgezal or bltzal, but they can't be used directly // with the slt instructions. We could use sub or add instead but we would miss // overflow cases, so we keep slt and add an intermediate third instruction. -bool TurboAssembler::BranchAndLinkShortHelper(int32_t offset, Label* L, +bool MacroAssembler::BranchAndLinkShortHelper(int32_t offset, Label* L, Condition cond, Register rs, const Operand& rt) { DCHECK(L == nullptr || offset == 0); @@ -4198,7 +4198,7 @@ bool TurboAssembler::BranchAndLinkShortHelper(int32_t offset, Label* L, return true; } -bool TurboAssembler::BranchAndLinkShortCheck(int32_t offset, Label* L, +bool MacroAssembler::BranchAndLinkShortCheck(int32_t offset, Label* L, Condition cond, Register rs, const Operand& rt) { BRANCH_ARGS_CHECK(cond, rs, rt); @@ -4212,7 +4212,7 @@ bool TurboAssembler::BranchAndLinkShortCheck(int32_t offset, Label* L, } } -void TurboAssembler::LoadFromConstantsTable(Register destination, +void MacroAssembler::LoadFromConstantsTable(Register destination, int constant_index) { DCHECK(RootsTable::IsImmortalImmovable(RootIndex::kBuiltinsConstantsTable)); LoadRoot(destination, RootIndex::kBuiltinsConstantsTable); @@ -4221,11 +4221,11 @@ void TurboAssembler::LoadFromConstantsTable(Register destination, constant_index))); } -void TurboAssembler::LoadRootRelative(Register destination, int32_t offset) { +void MacroAssembler::LoadRootRelative(Register destination, int32_t offset) { LoadWord(destination, MemOperand(kRootRegister, offset)); } -void TurboAssembler::LoadRootRegisterOffset(Register destination, +void MacroAssembler::LoadRootRegisterOffset(Register destination, intptr_t offset) { if (offset == 0) { Move(destination, kRootRegister); @@ -4234,7 +4234,7 @@ void TurboAssembler::LoadRootRegisterOffset(Register destination, } } -void TurboAssembler::Jump(Register target, Condition cond, Register rs, +void MacroAssembler::Jump(Register target, Condition cond, Register rs, const Operand& rt) { BlockTrampolinePoolScope block_trampoline_pool(this); if (cond == cc_always) { @@ -4247,7 +4247,7 @@ void TurboAssembler::Jump(Register target, Condition cond, Register rs, } } -void TurboAssembler::Jump(intptr_t target, RelocInfo::Mode rmode, +void MacroAssembler::Jump(intptr_t target, RelocInfo::Mode rmode, Condition cond, Register rs, const Operand& rt) { Label skip; if (cond != cc_always) { @@ 
@@ -4262,13 +4262,13 @@ void TurboAssembler::Jump(intptr_t target, RelocInfo::Mode rmode,
   }
 }
 
-void TurboAssembler::Jump(Address target, RelocInfo::Mode rmode, Condition cond,
+void MacroAssembler::Jump(Address target, RelocInfo::Mode rmode, Condition cond,
                           Register rs, const Operand& rt) {
   DCHECK(!RelocInfo::IsCodeTarget(rmode));
   Jump(static_cast<intptr_t>(target), rmode, cond, rs, rt);
 }
 
-void TurboAssembler::Jump(Handle<Code> code, RelocInfo::Mode rmode,
+void MacroAssembler::Jump(Handle<Code> code, RelocInfo::Mode rmode,
                           Condition cond, Register rs, const Operand& rt) {
   DCHECK(RelocInfo::IsCodeTarget(rmode));
   DCHECK_IMPLIES(options().isolate_independent_code,
@@ -4301,13 +4301,13 @@ void TurboAssembler::Jump(Handle<Code> code, RelocInfo::Mode rmode,
   Jump(static_cast<intptr_t>(target_index), rmode, cond, rs, rt);
 }
 
-void TurboAssembler::Jump(const ExternalReference& reference) {
+void MacroAssembler::Jump(const ExternalReference& reference) {
   li(t6, reference);
   Jump(t6);
 }
 
 // Note: To call gcc-compiled C code on riscv64, you must call through t6.
-void TurboAssembler::Call(Register target, Condition cond, Register rs,
+void MacroAssembler::Call(Register target, Condition cond, Register rs,
                           const Operand& rt) {
   BlockTrampolinePoolScope block_trampoline_pool(this);
   if (cond == cc_always) {
@@ -4334,13 +4334,13 @@ void MacroAssembler::JumpIfIsInRange(Register value, unsigned lower_limit,
   }
 }
 
-void TurboAssembler::Call(Address target, RelocInfo::Mode rmode, Condition cond,
+void MacroAssembler::Call(Address target, RelocInfo::Mode rmode, Condition cond,
                           Register rs, const Operand& rt) {
   li(t6, Operand(static_cast<intptr_t>(target), rmode), ADDRESS_LOAD);
   Call(t6, cond, rs, rt);
 }
 
-void TurboAssembler::Call(Handle<Code> code, RelocInfo::Mode rmode,
+void MacroAssembler::Call(Handle<Code> code, RelocInfo::Mode rmode,
                           Condition cond, Register rs, const Operand& rt) {
   BlockTrampolinePoolScope block_trampoline_pool(this);
   DCHECK(RelocInfo::IsCodeTarget(rmode));
@@ -4374,7 +4374,7 @@ void TurboAssembler::Call(Handle<Code> code, RelocInfo::Mode rmode,
   // Call(static_cast<Address>(target_index), rmode, cond, rs, rt);
 }
 
-void TurboAssembler::LoadEntryFromBuiltinIndex(Register builtin) {
+void MacroAssembler::LoadEntryFromBuiltinIndex(Register builtin) {
 #if V8_TARGET_ARCH_RISCV64
   static_assert(kSystemPointerSize == 8);
 #elif V8_TARGET_ARCH_RISCV32
@@ -4390,12 +4390,12 @@ void TurboAssembler::LoadEntryFromBuiltinIndex(Register builtin) {
            MemOperand(builtin, IsolateData::builtin_entry_table_offset()));
 }
 
-void TurboAssembler::CallBuiltinByIndex(Register builtin) {
+void MacroAssembler::CallBuiltinByIndex(Register builtin) {
   LoadEntryFromBuiltinIndex(builtin);
   Call(builtin);
 }
 
-void TurboAssembler::CallBuiltin(Builtin builtin) {
+void MacroAssembler::CallBuiltin(Builtin builtin) {
   ASM_CODE_COMMENT_STRING(this, CommentForOffHeapTrampoline("call", builtin));
   switch (options().builtin_call_jump_mode) {
     case BuiltinCallJumpMode::kAbsolute: {
@@ -4428,7 +4428,7 @@ void TurboAssembler::CallBuiltin(Builtin builtin) {
   }
 }
 
-void TurboAssembler::TailCallBuiltin(Builtin builtin) {
+void MacroAssembler::TailCallBuiltin(Builtin builtin) {
   ASM_CODE_COMMENT_STRING(this,
                           CommentForOffHeapTrampoline("tail call", builtin));
   switch (options().builtin_call_jump_mode) {
@@ -4462,18 +4462,18 @@ void TurboAssembler::TailCallBuiltin(Builtin builtin) {
   }
 }
 
-void TurboAssembler::LoadEntryFromBuiltin(Builtin builtin,
+void MacroAssembler::LoadEntryFromBuiltin(Builtin builtin,
                                           Register destination) {
   LoadWord(destination, EntryFromBuiltinAsOperand(builtin));
 }
 
-MemOperand TurboAssembler::EntryFromBuiltinAsOperand(Builtin builtin) {
+MemOperand MacroAssembler::EntryFromBuiltinAsOperand(Builtin builtin) {
   DCHECK(root_array_available());
   return MemOperand(kRootRegister,
                     IsolateData::BuiltinEntrySlotOffset(builtin));
 }
 
-void TurboAssembler::PatchAndJump(Address target) {
+void MacroAssembler::PatchAndJump(Address target) {
   BlockTrampolinePoolScope block_trampoline_pool(this);
   UseScratchRegisterScope temps(this);
   Register scratch = temps.Acquire();
@@ -4490,7 +4490,7 @@ void TurboAssembler::PatchAndJump(Address target) {
   pc_ += sizeof(uintptr_t);
 }
 
-void TurboAssembler::StoreReturnAddressAndCall(Register target) {
+void MacroAssembler::StoreReturnAddressAndCall(Register target) {
   // This generates the final instruction sequence for calls to C functions
   // once an exit frame has been constructed.
   //
@@ -4528,14 +4528,14 @@ void TurboAssembler::StoreReturnAddressAndCall(Register target) {
   DCHECK_EQ(kNumInstructionsToJump, InstructionsGeneratedSince(&find_ra));
 }
 
-void TurboAssembler::Ret(Condition cond, Register rs, const Operand& rt) {
+void MacroAssembler::Ret(Condition cond, Register rs, const Operand& rt) {
   Jump(ra, cond, rs, rt);
   if (cond == al) {
     ForceConstantPoolEmissionWithoutJump();
   }
 }
 
-void TurboAssembler::BranchLong(Label* L) {
+void MacroAssembler::BranchLong(Label* L) {
   // Generate position independent long branch.
   BlockTrampolinePoolScope block_trampoline_pool(this);
   int32_t imm;
@@ -4544,7 +4544,7 @@ void TurboAssembler::BranchLong(Label* L) {
   EmitConstPoolWithJumpIfNeeded();
 }
 
-void TurboAssembler::BranchAndLinkLong(Label* L) {
+void MacroAssembler::BranchAndLinkLong(Label* L) {
   // Generate position independent long branch and link.
   BlockTrampolinePoolScope block_trampoline_pool(this);
   int32_t imm;
@@ -4552,12 +4552,12 @@ void TurboAssembler::BranchAndLinkLong(Label* L) {
   GenPCRelativeJumpAndLink(t6, imm);
 }
 
-void TurboAssembler::DropAndRet(int drop) {
+void MacroAssembler::DropAndRet(int drop) {
   AddWord(sp, sp, drop * kSystemPointerSize);
   Ret();
 }
 
-void TurboAssembler::DropAndRet(int drop, Condition cond, Register r1,
+void MacroAssembler::DropAndRet(int drop, Condition cond, Register r1,
                                 const Operand& r2) {
   // Both Drop and Ret need to be conditional.
   Label skip;
@@ -4573,7 +4573,7 @@ void TurboAssembler::DropAndRet(int drop, Condition cond, Register r1,
   }
 }
 
-void TurboAssembler::Drop(int count, Condition cond, Register reg,
+void MacroAssembler::Drop(int count, Condition cond, Register reg,
                           const Operand& op) {
   if (count <= 0) {
     return;
@@ -4604,9 +4604,9 @@ void MacroAssembler::Swap(Register reg1, Register reg2, Register scratch) {
   }
 }
 
-void TurboAssembler::Call(Label* target) { BranchAndLink(target); }
+void MacroAssembler::Call(Label* target) { BranchAndLink(target); }
 
-void TurboAssembler::LoadAddress(Register dst, Label* target,
+void MacroAssembler::LoadAddress(Register dst, Label* target,
                                  RelocInfo::Mode rmode) {
   int32_t offset;
   if (CalculateOffset(target, &offset, OffsetSize::kOffset32)) {
@@ -4622,14 +4622,14 @@ void TurboAssembler::LoadAddress(Register dst, Label* target,
   }
 }
 
-void TurboAssembler::Push(Smi smi) {
+void MacroAssembler::Push(Smi smi) {
   UseScratchRegisterScope temps(this);
   Register scratch = temps.Acquire();
   li(scratch, Operand(smi));
   push(scratch);
 }
 
-void TurboAssembler::PushArray(Register array, Register size,
+void MacroAssembler::PushArray(Register array, Register size,
                                PushArrayOrder order) {
   UseScratchRegisterScope temps(this);
   Register scratch = temps.Acquire();
@@ -4658,7 +4658,7 @@ void TurboAssembler::PushArray(Register array, Register size,
   }
 }
 
-void TurboAssembler::Push(Handle<HeapObject> handle) {
+void MacroAssembler::Push(Handle<HeapObject> handle) {
   UseScratchRegisterScope temps(this);
   Register scratch = temps.Acquire();
   li(scratch, Operand(handle));
@@ -4701,7 +4701,7 @@ void MacroAssembler::PopStackHandler() {
   StoreWord(a1, MemOperand(scratch));
 }
 
-void TurboAssembler::FPUCanonicalizeNaN(const DoubleRegister dst,
+void MacroAssembler::FPUCanonicalizeNaN(const DoubleRegister dst,
                                         const DoubleRegister src) {
   // Subtracting 0.0 preserves all inputs except for signalling NaNs, which
   // become quiet NaNs. We use fsub rather than fadd because fsub preserves -0.0
@@ -4712,19 +4712,19 @@ void TurboAssembler::FPUCanonicalizeNaN(const DoubleRegister dst,
   fsub_d(dst, src, kDoubleRegZero);
 }
 
-void TurboAssembler::MovFromFloatResult(const DoubleRegister dst) {
+void MacroAssembler::MovFromFloatResult(const DoubleRegister dst) {
   Move(dst, fa0);  // Reg fa0 is FP return value.
 }
 
-void TurboAssembler::MovFromFloatParameter(const DoubleRegister dst) {
+void MacroAssembler::MovFromFloatParameter(const DoubleRegister dst) {
   Move(dst, fa0);  // Reg fa0 is FP first argument value.
 }
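// Why FPUCanonicalizeNaN above uses fsub rather than fadd is visible in
// portable IEEE-754 arithmetic; a self-contained check (standard C++, not
// V8-specific):

#include <cmath>
#include <cstdio>

int main() {
  // Subtracting +0.0 keeps the sign of a negative zero; adding it does not.
  std::printf("%d\n", std::signbit(-0.0 - 0.0));  // prints 1: -0.0 survives
  std::printf("%d\n", std::signbit(-0.0 + 0.0));  // prints 0: sign is lost
  return 0;
}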
 
-void TurboAssembler::MovToFloatParameter(DoubleRegister src) { Move(fa0, src); }
+void MacroAssembler::MovToFloatParameter(DoubleRegister src) { Move(fa0, src); }
 
-void TurboAssembler::MovToFloatResult(DoubleRegister src) { Move(fa0, src); }
+void MacroAssembler::MovToFloatResult(DoubleRegister src) { Move(fa0, src); }
 
-void TurboAssembler::MovToFloatParameters(DoubleRegister src1,
+void MacroAssembler::MovToFloatParameters(DoubleRegister src1,
                                           DoubleRegister src2) {
   const DoubleRegister fparg2 = fa1;
   if (src2 == fa0) {
@@ -4747,10 +4747,10 @@ void MacroAssembler::LoadStackLimit(Register destination, StackLimitKind kind) {
       kind == StackLimitKind::kRealStackLimit
           ? ExternalReference::address_of_real_jslimit(isolate)
          : ExternalReference::address_of_jslimit(isolate);
-  DCHECK(TurboAssembler::IsAddressableThroughRootRegister(isolate, limit));
+  DCHECK(MacroAssembler::IsAddressableThroughRootRegister(isolate, limit));
 
   intptr_t offset =
-      TurboAssembler::RootRegisterOffsetForExternalReference(isolate, limit);
+      MacroAssembler::RootRegisterOffsetForExternalReference(isolate, limit);
   CHECK(is_int32(offset));
   LoadWord(destination,
            MemOperand(kRootRegister, static_cast<int32_t>(offset)));
@@ -4992,7 +4992,7 @@ void MacroAssembler::GetInstanceTypeRange(Register map, Register type_reg,
 }
 
 //------------------------------------------------------------------------------
 // Wasm
-void TurboAssembler::WasmRvvEq(VRegister dst, VRegister lhs, VRegister rhs,
+void MacroAssembler::WasmRvvEq(VRegister dst, VRegister lhs, VRegister rhs,
                                VSew sew, Vlmul lmul) {
   VU.set(kScratchReg, sew, lmul);
   vmseq_vv(v0, lhs, rhs);
@@ -5001,7 +5001,7 @@ void TurboAssembler::WasmRvvEq(VRegister dst, VRegister lhs, VRegister rhs,
   vmerge_vx(dst, kScratchReg, dst);
 }
 
-void TurboAssembler::WasmRvvNe(VRegister dst, VRegister lhs, VRegister rhs,
+void MacroAssembler::WasmRvvNe(VRegister dst, VRegister lhs, VRegister rhs,
                                VSew sew, Vlmul lmul) {
   VU.set(kScratchReg, sew, lmul);
   vmsne_vv(v0, lhs, rhs);
@@ -5010,7 +5010,7 @@ void TurboAssembler::WasmRvvNe(VRegister dst, VRegister lhs, VRegister rhs,
   vmerge_vx(dst, kScratchReg, dst);
 }
 
-void TurboAssembler::WasmRvvGeS(VRegister dst, VRegister lhs, VRegister rhs,
+void MacroAssembler::WasmRvvGeS(VRegister dst, VRegister lhs, VRegister rhs,
                                 VSew sew, Vlmul lmul) {
   VU.set(kScratchReg, sew, lmul);
   vmsle_vv(v0, rhs, lhs);
@@ -5019,7 +5019,7 @@ void TurboAssembler::WasmRvvGeS(VRegister dst, VRegister lhs, VRegister rhs,
   vmerge_vx(dst, kScratchReg, dst);
 }
 
-void TurboAssembler::WasmRvvGeU(VRegister dst, VRegister lhs, VRegister rhs,
+void MacroAssembler::WasmRvvGeU(VRegister dst, VRegister lhs, VRegister rhs,
                                 VSew sew, Vlmul lmul) {
   VU.set(kScratchReg, sew, lmul);
   vmsleu_vv(v0, rhs, lhs);
@@ -5028,7 +5028,7 @@ void TurboAssembler::WasmRvvGeU(VRegister dst, VRegister lhs, VRegister rhs,
   vmerge_vx(dst, kScratchReg, dst);
 }
 
-void TurboAssembler::WasmRvvGtS(VRegister dst, VRegister lhs, VRegister rhs,
+void MacroAssembler::WasmRvvGtS(VRegister dst, VRegister lhs, VRegister rhs,
                                 VSew sew, Vlmul lmul) {
   VU.set(kScratchReg, sew, lmul);
   vmslt_vv(v0, rhs, lhs);
@@ -5037,7 +5037,7 @@ void TurboAssembler::WasmRvvGtS(VRegister dst, VRegister lhs, VRegister rhs,
   vmerge_vx(dst, kScratchReg, dst);
 }
 
-void TurboAssembler::WasmRvvGtU(VRegister dst, VRegister lhs, VRegister rhs,
+void MacroAssembler::WasmRvvGtU(VRegister dst, VRegister lhs, VRegister rhs,
                                 VSew sew, Vlmul lmul) {
   VU.set(kScratchReg, sew, lmul);
   vmsltu_vv(v0, rhs, lhs);
@@ -5046,7 +5046,7 @@ void TurboAssembler::WasmRvvGtU(VRegister dst, VRegister lhs, VRegister rhs,
   vmerge_vx(dst, kScratchReg, dst);
 }
 
-void TurboAssembler::WasmRvvS128const(VRegister dst, const uint8_t imms[16]) {
+void MacroAssembler::WasmRvvS128const(VRegister dst, const uint8_t imms[16]) {
   uint64_t imm1 = *(reinterpret_cast<const uint64_t*>(imms));
   uint64_t imm2 = *((reinterpret_cast<const uint64_t*>(imms)) + 1);
   VU.set(kScratchReg, VSew::E64, Vlmul::m1);
@@ -5059,7 +5059,7 @@ void TurboAssembler::WasmRvvS128const(VRegister dst, const uint8_t imms[16]) {
   vmerge_vx(dst, kScratchReg, dst);
 }
 
-void TurboAssembler::LoadLane(int ts, VRegister dst, uint8_t laneidx,
+void MacroAssembler::LoadLane(int ts, VRegister dst, uint8_t laneidx,
                               MemOperand src) {
   if (ts == 8) {
     Lbu(kScratchReg2, src);
@@ -5091,7 +5091,7 @@ void TurboAssembler::LoadLane(int ts, VRegister dst, uint8_t laneidx,
   }
 }
 
-void TurboAssembler::StoreLane(int sz, VRegister src, uint8_t laneidx,
+void MacroAssembler::StoreLane(int sz, VRegister src, uint8_t laneidx,
                                MemOperand dst) {
   if (sz == 8) {
     VU.set(kScratchReg, E8, m1);
@@ -5119,7 +5119,7 @@ void TurboAssembler::StoreLane(int sz, VRegister src, uint8_t laneidx,
 
 // -----------------------------------------------------------------------------
 // Runtime calls.
 #if V8_TARGET_ARCH_RISCV64
-void TurboAssembler::AddOverflow64(Register dst, Register left,
+void MacroAssembler::AddOverflow64(Register dst, Register left,
                                    const Operand& right, Register overflow) {
   UseScratchRegisterScope temps(this);
   BlockTrampolinePoolScope block_trampoline_pool(this);
@@ -5149,7 +5149,7 @@ void TurboAssembler::AddOverflow64(Register dst, Register left,
   }
 }
 
-void TurboAssembler::SubOverflow64(Register dst, Register left,
+void MacroAssembler::SubOverflow64(Register dst, Register left,
                                    const Operand& right, Register overflow) {
   UseScratchRegisterScope temps(this);
   BlockTrampolinePoolScope block_trampoline_pool(this);
@@ -5181,7 +5181,7 @@ void TurboAssembler::SubOverflow64(Register dst, Register left,
   }
 }
 
-void TurboAssembler::MulOverflow32(Register dst, Register left,
+void MacroAssembler::MulOverflow32(Register dst, Register left,
                                    const Operand& right, Register overflow) {
   ASM_CODE_COMMENT(this);
   UseScratchRegisterScope temps(this);
@@ -5207,7 +5207,7 @@ void TurboAssembler::MulOverflow32(Register dst, Register left,
   xor_(overflow, overflow, dst);
 }
 
-void TurboAssembler::MulOverflow64(Register dst, Register left,
+void MacroAssembler::MulOverflow64(Register dst, Register left,
                                    const Operand& right, Register overflow) {
   ASM_CODE_COMMENT(this);
   UseScratchRegisterScope temps(this);
@@ -5238,7 +5238,7 @@ void TurboAssembler::MulOverflow64(Register dst, Register left,
 }
 #elif V8_TARGET_ARCH_RISCV32
-void TurboAssembler::AddOverflow(Register dst, Register left,
+void MacroAssembler::AddOverflow(Register dst, Register left,
                                  const Operand& right, Register overflow) {
   UseScratchRegisterScope temps(this);
   BlockTrampolinePoolScope block_trampoline_pool(this);
@@ -5268,7 +5268,7 @@ void TurboAssembler::AddOverflow(Register dst, Register left,
   }
 }
 
-void TurboAssembler::SubOverflow(Register dst, Register left,
+void MacroAssembler::SubOverflow(Register dst, Register left,
                                  const Operand& right, Register overflow) {
   UseScratchRegisterScope temps(this);
   BlockTrampolinePoolScope block_trampoline_pool(this);
@@ -5300,7 +5300,7 @@ void TurboAssembler::SubOverflow(Register dst, Register left,
   }
 }
 
-void TurboAssembler::MulOverflow32(Register dst, Register left,
+void MacroAssembler::MulOverflow32(Register dst, Register left,
                                    const Operand& right, Register overflow) {
   ASM_CODE_COMMENT(this);
   UseScratchRegisterScope temps(this);
@@ -5422,15 +5422,15 @@ void MacroAssembler::EmitDecrementCounter(StatsCounter* counter, int value,
 
 // -----------------------------------------------------------------------------
 // Debugging.
 
-void TurboAssembler::Trap() { stop(); }
-void TurboAssembler::DebugBreak() { stop(); }
+void MacroAssembler::Trap() { stop(); }
+void MacroAssembler::DebugBreak() { stop(); }
 
-void TurboAssembler::Assert(Condition cc, AbortReason reason, Register rs,
+void MacroAssembler::Assert(Condition cc, AbortReason reason, Register rs,
                             Operand rt) {
   if (v8_flags.debug_code) Check(cc, reason, rs, rt);
 }
 
-void TurboAssembler::Check(Condition cc, AbortReason reason, Register rs,
+void MacroAssembler::Check(Condition cc, AbortReason reason, Register rs,
                            Operand rt) {
   Label L;
   BranchShort(&L, cc, rs, rt);
@@ -5439,7 +5439,7 @@ void TurboAssembler::Check(Condition cc, AbortReason reason, Register rs,
   bind(&L);
 }
 
-void TurboAssembler::Abort(AbortReason reason) {
+void MacroAssembler::Abort(AbortReason reason) {
   Label abort_start;
   bind(&abort_start);
   if (v8_flags.code_comments) {
@@ -5496,7 +5496,7 @@ void TurboAssembler::Abort(AbortReason reason) {
   }
 }
 
-void TurboAssembler::LoadMap(Register destination, Register object) {
+void MacroAssembler::LoadMap(Register destination, Register object) {
   ASM_CODE_COMMENT(this);
   LoadTaggedPointerField(destination,
                          FieldMemOperand(object, HeapObject::kMapOffset));
@@ -5511,7 +5511,7 @@ void MacroAssembler::LoadNativeContextSlot(Register dst, int index) {
   LoadTaggedPointerField(dst, MemOperand(dst, Context::SlotOffset(index)));
 }
 
-void TurboAssembler::StubPrologue(StackFrame::Type type) {
+void MacroAssembler::StubPrologue(StackFrame::Type type) {
   ASM_CODE_COMMENT(this);
   UseScratchRegisterScope temps(this);
   Register scratch = temps.Acquire();
@@ -5519,9 +5519,9 @@ void TurboAssembler::StubPrologue(StackFrame::Type type) {
   PushCommonFrame(scratch);
 }
 
-void TurboAssembler::Prologue() { PushStandardFrame(a1); }
+void MacroAssembler::Prologue() { PushStandardFrame(a1); }
 
-void TurboAssembler::EnterFrame(StackFrame::Type type) {
+void MacroAssembler::EnterFrame(StackFrame::Type type) {
   ASM_CODE_COMMENT(this);
   UseScratchRegisterScope temps(this);
   Register scratch = temps.Acquire();
@@ -5538,7 +5538,7 @@ void TurboAssembler::EnterFrame(StackFrame::Type type) {
 #endif  // V8_ENABLE_WEBASSEMBLY
 }
 
-void TurboAssembler::LeaveFrame(StackFrame::Type type) {
+void MacroAssembler::LeaveFrame(StackFrame::Type type) {
   ASM_CODE_COMMENT(this);
   addi(sp, fp, 2 * kSystemPointerSize);
   LoadWord(ra, MemOperand(fp, 1 * kSystemPointerSize));
@@ -5661,7 +5661,7 @@ void MacroAssembler::LeaveExitFrame(Register argument_count, bool do_return,
   }
 }
 
-int TurboAssembler::ActivationFrameAlignment() {
+int MacroAssembler::ActivationFrameAlignment() {
 #if V8_HOST_ARCH_RISCV32 || V8_HOST_ARCH_RISCV64
   // Running on the real platform. Use the alignment as mandated by the local
   // environment.
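// Check() above is the always-on guard: it branches past Abort() when `cc`
// holds for rs/rt, while Assert() emits the same code only under
// v8_flags.debug_code. A hedged call-site sketch (V8-internal API; the
// AbortReason enumerator is illustrative, not taken from this patch):
//
//   masm.Assert(ne, AbortReason::kOperandIsASmi, scratch, Operand(zero_reg));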
@@ -5699,7 +5699,7 @@ void MacroAssembler::AssertStackIsAligned() {
   }
 }
 
-void TurboAssembler::SmiUntag(Register dst, const MemOperand& src) {
+void MacroAssembler::SmiUntag(Register dst, const MemOperand& src) {
   ASM_CODE_COMMENT(this);
   if (SmiValuesAre32Bits()) {
     Lw(dst, MemOperand(src.rm(), SmiWordOffset(src.offset())));
@@ -5714,7 +5714,7 @@ void TurboAssembler::SmiUntag(Register dst, const MemOperand& src) {
   }
 }
 
-void TurboAssembler::SmiToInt32(Register smi) {
+void MacroAssembler::SmiToInt32(Register smi) {
   ASM_CODE_COMMENT(this);
   if (v8_flags.enable_slow_asserts) {
     AssertSmi(smi);
@@ -5723,7 +5723,7 @@ void TurboAssembler::SmiToInt32(Register smi) {
   SmiUntag(smi);
 }
 
-void TurboAssembler::JumpIfSmi(Register value, Label* smi_label) {
+void MacroAssembler::JumpIfSmi(Register value, Label* smi_label) {
   ASM_CODE_COMMENT(this);
   DCHECK_EQ(0, kSmiTag);
   UseScratchRegisterScope temps(this);
@@ -5754,7 +5754,7 @@ void MacroAssembler::JumpIfNotSmi(Register value, Label* not_smi_label) {
   Branch(not_smi_label, ne, scratch, Operand(zero_reg));
 }
 
-void TurboAssembler::AssertNotSmi(Register object, AbortReason reason) {
+void MacroAssembler::AssertNotSmi(Register object, AbortReason reason) {
   if (v8_flags.debug_code) {
     ASM_CODE_COMMENT(this);
     static_assert(kSmiTag == 0);
@@ -5764,7 +5764,7 @@ void TurboAssembler::AssertNotSmi(Register object, AbortReason reason) {
   }
 }
 
-void TurboAssembler::AssertSmi(Register object, AbortReason reason) {
+void MacroAssembler::AssertSmi(Register object, AbortReason reason) {
   if (v8_flags.debug_code) {
     ASM_CODE_COMMENT(this);
     static_assert(kSmiTag == 0);
@@ -5877,7 +5877,7 @@ void MacroAssembler::AssertUndefinedOrAllocationSite(Register object,
   }
 }
 
 template <typename F>
-void TurboAssembler::FloatMinMaxHelper(FPURegister dst, FPURegister src1,
+void MacroAssembler::FloatMinMaxHelper(FPURegister dst, FPURegister src1,
                                        FPURegister src2, MaxMinKind kind) {
   DCHECK((std::is_same<F, float>::value) || (std::is_same<F, double>::value));
@@ -5932,25 +5932,25 @@ void TurboAssembler::FloatMinMaxHelper(FPURegister dst, FPURegister src1,
   bind(&done);
 }
 
-void TurboAssembler::Float32Max(FPURegister dst, FPURegister src1,
+void MacroAssembler::Float32Max(FPURegister dst, FPURegister src1,
                                 FPURegister src2) {
   ASM_CODE_COMMENT(this);
   FloatMinMaxHelper<float>(dst, src1, src2, MaxMinKind::kMax);
 }
 
-void TurboAssembler::Float32Min(FPURegister dst, FPURegister src1,
+void MacroAssembler::Float32Min(FPURegister dst, FPURegister src1,
                                 FPURegister src2) {
   ASM_CODE_COMMENT(this);
   FloatMinMaxHelper<float>(dst, src1, src2, MaxMinKind::kMin);
 }
 
-void TurboAssembler::Float64Max(FPURegister dst, FPURegister src1,
+void MacroAssembler::Float64Max(FPURegister dst, FPURegister src1,
                                 FPURegister src2) {
   ASM_CODE_COMMENT(this);
   FloatMinMaxHelper<double>(dst, src1, src2, MaxMinKind::kMax);
 }
 
-void TurboAssembler::Float64Min(FPURegister dst, FPURegister src1,
+void MacroAssembler::Float64Min(FPURegister dst, FPURegister src1,
                                 FPURegister src2) {
   ASM_CODE_COMMENT(this);
   FloatMinMaxHelper<double>(dst, src1, src2, MaxMinKind::kMin);
@@ -5958,7 +5958,7 @@ void TurboAssembler::Float64Min(FPURegister dst, FPURegister src1,
 
 static const int kRegisterPassedArguments = 8;
 
-int TurboAssembler::CalculateStackPassedDWords(int num_gp_arguments,
+int MacroAssembler::CalculateStackPassedDWords(int num_gp_arguments,
                                                int num_fp_arguments) {
   int stack_passed_dwords = 0;
@@ -5974,7 +5974,7 @@ int TurboAssembler::CalculateStackPassedDWords(int num_gp_arguments,
   return stack_passed_dwords;
 }
 
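// Given kRegisterPassedArguments == 8 above, arguments past the first eight
// GP and the first eight FP values presumably spill to the stack, so the
// elided body of CalculateStackPassedDWords should behave like:
//
//   CalculateStackPassedDWords(10, 9);  // == (10 - 8) + (9 - 8) == 3 dwords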
-void TurboAssembler::PrepareCallCFunction(int num_reg_arguments,
+void MacroAssembler::PrepareCallCFunction(int num_reg_arguments,
                                           int num_double_arguments,
                                           Register scratch) {
   ASM_CODE_COMMENT(this);
@@ -5999,12 +5999,12 @@ void TurboAssembler::PrepareCallCFunction(int num_reg_arguments,
   }
 }
 
-void TurboAssembler::PrepareCallCFunction(int num_reg_arguments,
+void MacroAssembler::PrepareCallCFunction(int num_reg_arguments,
                                           Register scratch) {
   PrepareCallCFunction(num_reg_arguments, 0, scratch);
 }
 
-void TurboAssembler::CallCFunction(ExternalReference function,
+void MacroAssembler::CallCFunction(ExternalReference function,
                                    int num_reg_arguments,
                                    int num_double_arguments) {
   BlockTrampolinePoolScope block_trampoline_pool(this);
@@ -6012,21 +6012,21 @@ void TurboAssembler::CallCFunction(ExternalReference function,
   CallCFunctionHelper(t6, num_reg_arguments, num_double_arguments);
 }
 
-void TurboAssembler::CallCFunction(Register function, int num_reg_arguments,
+void MacroAssembler::CallCFunction(Register function, int num_reg_arguments,
                                    int num_double_arguments) {
   CallCFunctionHelper(function, num_reg_arguments, num_double_arguments);
 }
 
-void TurboAssembler::CallCFunction(ExternalReference function,
+void MacroAssembler::CallCFunction(ExternalReference function,
                                    int num_arguments) {
   CallCFunction(function, num_arguments, 0);
 }
 
-void TurboAssembler::CallCFunction(Register function, int num_arguments) {
+void MacroAssembler::CallCFunction(Register function, int num_arguments) {
   CallCFunction(function, num_arguments, 0);
 }
 
-void TurboAssembler::CallCFunctionHelper(Register function,
+void MacroAssembler::CallCFunctionHelper(Register function,
                                          int num_reg_arguments,
                                          int num_double_arguments) {
   DCHECK_LE(num_reg_arguments + num_double_arguments, kMaxCParameters);
@@ -6114,7 +6114,7 @@ void TurboAssembler::CallCFunctionHelper(Register function,
 
 #undef BRANCH_ARGS_CHECK
 
-void TurboAssembler::CheckPageFlag(Register object, Register scratch, int mask,
+void MacroAssembler::CheckPageFlag(Register object, Register scratch, int mask,
                                    Condition cc, Label* condition_met) {
   And(scratch, object, Operand(~kPageAlignmentMask));
   LoadWord(scratch, MemOperand(scratch, BasicMemoryChunk::kFlagsOffset));
@@ -6137,7 +6137,7 @@ Register GetRegisterThatIsNotOneOf(Register reg1, Register reg2, Register reg3,
   UNREACHABLE();
 }
 
-void TurboAssembler::ComputeCodeStartAddress(Register dst) {
+void MacroAssembler::ComputeCodeStartAddress(Register dst) {
   auto pc = -pc_offset();
   auipc(dst, 0);
   if (pc != 0) {
@@ -6145,7 +6145,7 @@ void TurboAssembler::ComputeCodeStartAddress(Register dst) {
   }
 }
 
-void TurboAssembler::CallForDeoptimization(Builtin target, int, Label* exit,
+void MacroAssembler::CallForDeoptimization(Builtin target, int, Label* exit,
                                            DeoptimizeKind kind, Label* ret,
                                            Label*) {
   ASM_CODE_COMMENT(this);
@@ -6158,12 +6158,12 @@ void TurboAssembler::CallForDeoptimization(Builtin target, int, Label* exit,
                                             : Deoptimizer::kEagerDeoptExitSize);
 }
 
-void TurboAssembler::LoadCodeEntry(Register destination, Register code) {
+void MacroAssembler::LoadCodeEntry(Register destination, Register code) {
   ASM_CODE_COMMENT(this);
   LoadWord(destination, FieldMemOperand(code, Code::kCodeEntryPointOffset));
 }
 
-void TurboAssembler::LoadCodeInstructionStreamNonBuiltin(Register destination,
+void MacroAssembler::LoadCodeInstructionStreamNonBuiltin(Register destination,
                                                          Register code) {
   ASM_CODE_COMMENT(this);
   // Compute the InstructionStream object pointer from the code entry point.
@@ -6172,13 +6172,13 @@ void TurboAssembler::LoadCodeInstructionStreamNonBuiltin(Register destination,
           Operand(InstructionStream::kHeaderSize - kHeapObjectTag));
 }
 
-void TurboAssembler::CallCodeObject(Register code) {
+void MacroAssembler::CallCodeObject(Register code) {
   ASM_CODE_COMMENT(this);
   LoadCodeEntry(code, code);
   Call(code);
 }
 
-void TurboAssembler::JumpCodeObject(Register code, JumpMode jump_mode) {
+void MacroAssembler::JumpCodeObject(Register code, JumpMode jump_mode) {
   ASM_CODE_COMMENT(this);
   DCHECK_EQ(JumpMode::kJump, jump_mode);
   LoadCodeEntry(code, code);
@@ -6186,7 +6186,7 @@ void TurboAssembler::JumpCodeObject(Register code, JumpMode jump_mode) {
 }
 
 #if V8_TARGET_ARCH_RISCV64
-void TurboAssembler::LoadTaggedPointerField(const Register& destination,
+void MacroAssembler::LoadTaggedPointerField(const Register& destination,
                                             const MemOperand& field_operand) {
   if (COMPRESS_POINTERS_BOOL) {
     DecompressTaggedPointer(destination, field_operand);
@@ -6195,7 +6195,7 @@ void TurboAssembler::LoadTaggedPointerField(const Register& destination,
   }
 }
 
-void TurboAssembler::LoadAnyTaggedField(const Register& destination,
+void MacroAssembler::LoadAnyTaggedField(const Register& destination,
                                         const MemOperand& field_operand) {
   if (COMPRESS_POINTERS_BOOL) {
     DecompressAnyTagged(destination, field_operand);
@@ -6204,7 +6204,7 @@ void TurboAssembler::LoadAnyTaggedField(const Register& destination,
   }
 }
 
-void TurboAssembler::LoadTaggedSignedField(const Register& destination,
+void MacroAssembler::LoadTaggedSignedField(const Register& destination,
                                            const MemOperand& field_operand) {
   if (COMPRESS_POINTERS_BOOL) {
     DecompressTaggedSigned(destination, field_operand);
@@ -6213,11 +6213,11 @@ void TurboAssembler::LoadTaggedSignedField(const Register& destination,
   }
 }
 
-void TurboAssembler::SmiUntagField(Register dst, const MemOperand& src) {
+void MacroAssembler::SmiUntagField(Register dst, const MemOperand& src) {
   SmiUntag(dst, src);
 }
 
-void TurboAssembler::StoreTaggedField(const Register& value,
+void MacroAssembler::StoreTaggedField(const Register& value,
                                       const MemOperand& dst_field_operand) {
   if (COMPRESS_POINTERS_BOOL) {
     Sw(value, dst_field_operand);
@@ -6226,7 +6226,7 @@ void TurboAssembler::StoreTaggedField(const Register& value,
   }
 }
 
-void TurboAssembler::DecompressTaggedSigned(const Register& destination,
+void MacroAssembler::DecompressTaggedSigned(const Register& destination,
                                             const MemOperand& field_operand) {
   ASM_CODE_COMMENT(this);
   Lwu(destination, field_operand);
@@ -6237,21 +6237,21 @@ void TurboAssembler::DecompressTaggedSigned(const Register& destination,
   }
 }
 
-void TurboAssembler::DecompressTaggedPointer(const Register& destination,
+void MacroAssembler::DecompressTaggedPointer(const Register& destination,
                                              const MemOperand& field_operand) {
   ASM_CODE_COMMENT(this);
   Lwu(destination, field_operand);
   AddWord(destination, kPtrComprCageBaseRegister, destination);
 }
 
-void TurboAssembler::DecompressTaggedPointer(const Register& destination,
+void MacroAssembler::DecompressTaggedPointer(const Register& destination,
                                              const Register& source) {
   ASM_CODE_COMMENT(this);
   And(destination, source, Operand(0xFFFFFFFF));
   AddWord(destination, kPtrComprCageBaseRegister, Operand(destination));
 }
 
-void TurboAssembler::DecompressAnyTagged(const Register& destination,
+void MacroAssembler::DecompressAnyTagged(const Register& destination,
                                          const MemOperand& field_operand) {
   ASM_CODE_COMMENT(this);
   Lwu(destination, field_operand);
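// All riscv64 decompression paths above reduce to "zero-extend the 32-bit
// tagged value, then add the pointer-compression cage base". The same
// arithmetic in self-contained C++ (names illustrative, not V8 API):

#include <cstdint>

uint64_t DecompressTagged(uint64_t cage_base, uint32_t compressed) {
  // Lwu / And(0xFFFFFFFF) performs the zero-extension; AddWord adds the base.
  return cage_base + compressed;
}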
diff --git a/src/codegen/riscv/macro-assembler-riscv.h b/src/codegen/riscv/macro-assembler-riscv.h
index 22858331fa..1a15e133f8 100644
--- a/src/codegen/riscv/macro-assembler-riscv.h
+++ b/src/codegen/riscv/macro-assembler-riscv.h
@@ -90,9 +90,9 @@ inline MemOperand CFunctionArgumentOperand(int index) {
   return MemOperand(sp, offset);
 }
 
-class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
+class V8_EXPORT_PRIVATE MacroAssembler : public MacroAssemblerBase {
  public:
-  using TurboAssemblerBase::TurboAssemblerBase;
+  using MacroAssemblerBase::MacroAssemblerBase;
 
   // Activation support.
   void EnterFrame(StackFrame::Type type);
@@ -1174,71 +1174,6 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
   void LoadLane(int sz, VRegister dst, uint8_t laneidx, MemOperand src);
   void StoreLane(int sz, VRegister src, uint8_t laneidx, MemOperand dst);
 
- protected:
-  inline Register GetRtAsRegisterHelper(const Operand& rt, Register scratch);
-  inline int32_t GetOffset(int32_t offset, Label* L, OffsetSize bits);
-
- private:
-  bool has_double_zero_reg_set_ = false;
-  bool has_single_zero_reg_set_ = false;
-
-  // Performs a truncating conversion of a floating point number as used by
-  // the JS bitwise operations. See ECMA-262 9.5: ToInt32. Goes to 'done' if it
-  // succeeds, otherwise falls through if result is saturated. On return
-  // 'result' either holds answer, or is clobbered on fall through.
-  void TryInlineTruncateDoubleToI(Register result, DoubleRegister input,
-                                  Label* done);
-
-  void CallCFunctionHelper(Register function, int num_reg_arguments,
-                           int num_double_arguments);
-
-  // TODO(RISCV) Reorder parameters so out parameters come last.
-  bool CalculateOffset(Label* L, int32_t* offset, OffsetSize bits);
-  bool CalculateOffset(Label* L, int32_t* offset, OffsetSize bits,
-                       Register* scratch, const Operand& rt);
-
-  void BranchShortHelper(int32_t offset, Label* L);
-  bool BranchShortHelper(int32_t offset, Label* L, Condition cond, Register rs,
-                         const Operand& rt);
-  bool BranchShortCheck(int32_t offset, Label* L, Condition cond, Register rs,
-                        const Operand& rt);
-
-  void BranchAndLinkShortHelper(int32_t offset, Label* L);
-  void BranchAndLinkShort(int32_t offset);
-  void BranchAndLinkShort(Label* L);
-  bool BranchAndLinkShortHelper(int32_t offset, Label* L, Condition cond,
-                                Register rs, const Operand& rt);
-  bool BranchAndLinkShortCheck(int32_t offset, Label* L, Condition cond,
-                               Register rs, const Operand& rt);
-  void BranchAndLinkLong(Label* L);
-#if V8_TARGET_ARCH_RISCV64
-  template <typename F>
-  void RoundHelper(FPURegister dst, FPURegister src, FPURegister fpu_scratch,
-                   FPURoundingMode mode);
-#elif V8_TARGET_ARCH_RISCV32
-  void RoundDouble(FPURegister dst, FPURegister src, FPURegister fpu_scratch,
-                   FPURoundingMode mode);
-
-  void RoundFloat(FPURegister dst, FPURegister src, FPURegister fpu_scratch,
-                  FPURoundingMode mode);
-#endif
-  template <typename F>
-  void RoundHelper(VRegister dst, VRegister src, Register scratch,
-                   VRegister v_scratch, FPURoundingMode frm);
-
-  template <typename TruncFunc>
-  void RoundFloatingPointToInteger(Register rd, FPURegister fs, Register result,
-                                   TruncFunc trunc);
-
-  // Push a fixed frame, consisting of ra, fp.
-  void PushCommonFrame(Register marker_reg = no_reg);
-};
-
-// MacroAssembler implements a collection of frequently used macros.
-class V8_EXPORT_PRIVATE MacroAssembler : public TurboAssembler {
- public:
-  using TurboAssembler::TurboAssembler;
-
   // It assumes that the arguments are located below the stack pointer.
   // argc is the number of arguments not including the receiver.
   // TODO(victorgomes): Remove this function once we stick with the reversed
// TODO(victorgomes): Remove this function once we stick with the reversed @@ -1521,7 +1456,65 @@ class V8_EXPORT_PRIVATE MacroAssembler : public TurboAssembler { DecodeField(reg, reg); } + protected: + inline Register GetRtAsRegisterHelper(const Operand& rt, Register scratch); + inline int32_t GetOffset(int32_t offset, Label* L, OffsetSize bits); + private: + bool has_double_zero_reg_set_ = false; + bool has_single_zero_reg_set_ = false; + + // Performs a truncating conversion of a floating point number as used by + // the JS bitwise operations. See ECMA-262 9.5: ToInt32. Goes to 'done' if it + // succeeds, otherwise falls through if result is saturated. On return + // 'result' either holds answer, or is clobbered on fall through. + void TryInlineTruncateDoubleToI(Register result, DoubleRegister input, + Label* done); + + void CallCFunctionHelper(Register function, int num_reg_arguments, + int num_double_arguments); + + // TODO(RISCV) Reorder parameters so out parameters come last. + bool CalculateOffset(Label* L, int32_t* offset, OffsetSize bits); + bool CalculateOffset(Label* L, int32_t* offset, OffsetSize bits, + Register* scratch, const Operand& rt); + + void BranchShortHelper(int32_t offset, Label* L); + bool BranchShortHelper(int32_t offset, Label* L, Condition cond, Register rs, + const Operand& rt); + bool BranchShortCheck(int32_t offset, Label* L, Condition cond, Register rs, + const Operand& rt); + + void BranchAndLinkShortHelper(int32_t offset, Label* L); + void BranchAndLinkShort(int32_t offset); + void BranchAndLinkShort(Label* L); + bool BranchAndLinkShortHelper(int32_t offset, Label* L, Condition cond, + Register rs, const Operand& rt); + bool BranchAndLinkShortCheck(int32_t offset, Label* L, Condition cond, + Register rs, const Operand& rt); + void BranchAndLinkLong(Label* L); +#if V8_TARGET_ARCH_RISCV64 + template + void RoundHelper(FPURegister dst, FPURegister src, FPURegister fpu_scratch, + FPURoundingMode mode); +#elif V8_TARGET_ARCH_RISCV32 + void RoundDouble(FPURegister dst, FPURegister src, FPURegister fpu_scratch, + FPURoundingMode mode); + + void RoundFloat(FPURegister dst, FPURegister src, FPURegister fpu_scratch, + FPURoundingMode mode); +#endif + template + void RoundHelper(VRegister dst, VRegister src, Register scratch, + VRegister v_scratch, FPURoundingMode frm); + + template + void RoundFloatingPointToInteger(Register rd, FPURegister fs, Register result, + TruncFunc trunc); + + // Push a fixed frame, consisting of ra, fp. + void PushCommonFrame(Register marker_reg = no_reg); + // Helper functions for generating invokes. void InvokePrologue(Register expected_parameter_count, Register actual_parameter_count, Label* done, @@ -1538,7 +1531,7 @@ class V8_EXPORT_PRIVATE MacroAssembler : public TurboAssembler { }; template -void TurboAssembler::GenerateSwitchTable(Register index, size_t case_count, +void MacroAssembler::GenerateSwitchTable(Register index, size_t case_count, Func GetLabelFunction) { // Ensure that dd-ed labels following this instruction use 8 bytes aligned // addresses. 
diff --git a/src/codegen/s390/assembler-s390.h b/src/codegen/s390/assembler-s390.h
index 511e8c2489..9b0c1cedf2 100644
--- a/src/codegen/s390/assembler-s390.h
+++ b/src/codegen/s390/assembler-s390.h
@@ -1494,7 +1494,7 @@ class V8_EXPORT_PRIVATE V8_NODISCARD UseScratchRegisterScope {
 
  private:
   friend class Assembler;
-  friend class TurboAssembler;
+  friend class MacroAssembler;
 
   Assembler* assembler_;
   RegList old_available_;
diff --git a/src/codegen/s390/macro-assembler-s390.cc b/src/codegen/s390/macro-assembler-s390.cc
index 817ab84aed..32e925b814 100644
--- a/src/codegen/s390/macro-assembler-s390.cc
+++ b/src/codegen/s390/macro-assembler-s390.cc
@@ -55,7 +55,7 @@ constexpr int kStackSavedSavedFPSizeInBytes =
 
 }  // namespace
 
-void TurboAssembler::DoubleMax(DoubleRegister result_reg,
+void MacroAssembler::DoubleMax(DoubleRegister result_reg,
                                DoubleRegister left_reg,
                                DoubleRegister right_reg) {
   if (CpuFeatures::IsSupported(VECTOR_ENHANCE_FACILITY_1)) {
@@ -101,7 +101,7 @@ void TurboAssembler::DoubleMax(DoubleRegister result_reg,
   bind(&done);
 }
 
-void TurboAssembler::DoubleMin(DoubleRegister result_reg,
+void MacroAssembler::DoubleMin(DoubleRegister result_reg,
                                DoubleRegister left_reg,
                                DoubleRegister right_reg) {
   if (CpuFeatures::IsSupported(VECTOR_ENHANCE_FACILITY_1)) {
@@ -152,7 +152,7 @@ void TurboAssembler::DoubleMin(DoubleRegister result_reg,
   bind(&done);
 }
 
-void TurboAssembler::FloatMax(DoubleRegister result_reg,
+void MacroAssembler::FloatMax(DoubleRegister result_reg,
                               DoubleRegister left_reg,
                               DoubleRegister right_reg) {
   if (CpuFeatures::IsSupported(VECTOR_ENHANCE_FACILITY_1)) {
@@ -197,7 +197,7 @@ void TurboAssembler::FloatMax(DoubleRegister result_reg,
   bind(&done);
 }
 
-void TurboAssembler::FloatMin(DoubleRegister result_reg,
+void MacroAssembler::FloatMin(DoubleRegister result_reg,
                               DoubleRegister left_reg,
                               DoubleRegister right_reg) {
   if (CpuFeatures::IsSupported(VECTOR_ENHANCE_FACILITY_1)) {
@@ -249,39 +249,39 @@ void TurboAssembler::FloatMin(DoubleRegister result_reg,
   bind(&done);
 }
 
-void TurboAssembler::CeilF32(DoubleRegister dst, DoubleRegister src) {
+void MacroAssembler::CeilF32(DoubleRegister dst, DoubleRegister src) {
   fiebra(ROUND_TOWARD_POS_INF, dst, src);
 }
 
-void TurboAssembler::CeilF64(DoubleRegister dst, DoubleRegister src) {
+void MacroAssembler::CeilF64(DoubleRegister dst, DoubleRegister src) {
   fidbra(ROUND_TOWARD_POS_INF, dst, src);
 }
 
-void TurboAssembler::FloorF32(DoubleRegister dst, DoubleRegister src) {
+void MacroAssembler::FloorF32(DoubleRegister dst, DoubleRegister src) {
   fiebra(ROUND_TOWARD_NEG_INF, dst, src);
 }
 
-void TurboAssembler::FloorF64(DoubleRegister dst, DoubleRegister src) {
+void MacroAssembler::FloorF64(DoubleRegister dst, DoubleRegister src) {
   fidbra(ROUND_TOWARD_NEG_INF, dst, src);
 }
 
-void TurboAssembler::TruncF32(DoubleRegister dst, DoubleRegister src) {
+void MacroAssembler::TruncF32(DoubleRegister dst, DoubleRegister src) {
  fiebra(ROUND_TOWARD_0, dst, src);
 }
 
-void TurboAssembler::TruncF64(DoubleRegister dst, DoubleRegister src) {
+void MacroAssembler::TruncF64(DoubleRegister dst, DoubleRegister src) {
   fidbra(ROUND_TOWARD_0, dst, src);
 }
 
-void TurboAssembler::NearestIntF32(DoubleRegister dst, DoubleRegister src) {
+void MacroAssembler::NearestIntF32(DoubleRegister dst, DoubleRegister src) {
   fiebra(ROUND_TO_NEAREST_TO_EVEN, dst, src);
 }
 
-void TurboAssembler::NearestIntF64(DoubleRegister dst, DoubleRegister src) {
+void MacroAssembler::NearestIntF64(DoubleRegister dst, DoubleRegister src) {
   fidbra(ROUND_TO_NEAREST_TO_EVEN, dst, src);
 }
 
-int TurboAssembler::RequiredStackSizeForCallerSaved(SaveFPRegsMode fp_mode,
+int MacroAssembler::RequiredStackSizeForCallerSaved(SaveFPRegsMode fp_mode,
                                                     Register exclusion1,
                                                     Register exclusion2,
                                                     Register exclusion3) const {
@@ -298,7 +298,7 @@ int TurboAssembler::RequiredStackSizeForCallerSaved(SaveFPRegsMode fp_mode,
   return bytes;
 }
 
-int TurboAssembler::PushCallerSaved(SaveFPRegsMode fp_mode, Register scratch,
+int MacroAssembler::PushCallerSaved(SaveFPRegsMode fp_mode, Register scratch,
                                     Register exclusion1, Register exclusion2,
                                     Register exclusion3) {
   int bytes = 0;
@@ -316,7 +316,7 @@ int TurboAssembler::PushCallerSaved(SaveFPRegsMode fp_mode, Register scratch,
   return bytes;
 }
 
-int TurboAssembler::PopCallerSaved(SaveFPRegsMode fp_mode, Register scratch,
+int MacroAssembler::PopCallerSaved(SaveFPRegsMode fp_mode, Register scratch,
                                    Register exclusion1, Register exclusion2,
                                    Register exclusion3) {
   int bytes = 0;
@@ -333,7 +333,7 @@ int TurboAssembler::PopCallerSaved(SaveFPRegsMode fp_mode, Register scratch,
   return bytes;
 }
 
-void TurboAssembler::LoadFromConstantsTable(Register destination,
+void MacroAssembler::LoadFromConstantsTable(Register destination,
                                             int constant_index) {
   DCHECK(RootsTable::IsImmortalImmovable(RootIndex::kBuiltinsConstantsTable));
@@ -350,11 +350,11 @@ void TurboAssembler::LoadFromConstantsTable(Register destination,
               r1);
 }
 
-void TurboAssembler::LoadRootRelative(Register destination, int32_t offset) {
+void MacroAssembler::LoadRootRelative(Register destination, int32_t offset) {
   LoadU64(destination, MemOperand(kRootRegister, offset));
 }
 
-void TurboAssembler::LoadRootRegisterOffset(Register destination,
+void MacroAssembler::LoadRootRegisterOffset(Register destination,
                                             intptr_t offset) {
   if (offset == 0) {
     mov(destination, kRootRegister);
@@ -366,7 +366,7 @@ void TurboAssembler::LoadRootRegisterOffset(Register destination,
   }
 }
 
-MemOperand TurboAssembler::ExternalReferenceAsOperand(
+MemOperand MacroAssembler::ExternalReferenceAsOperand(
     ExternalReference reference, Register scratch) {
   if (root_array_available_ && options().enable_root_relative_access) {
     int64_t offset =
@@ -396,9 +396,9 @@ MemOperand TurboAssembler::ExternalReferenceAsOperand(
   return MemOperand(scratch, 0);
 }
 
-void TurboAssembler::Jump(Register target, Condition cond) { b(cond, target); }
+void MacroAssembler::Jump(Register target, Condition cond) { b(cond, target); }
 
-void TurboAssembler::Jump(intptr_t target, RelocInfo::Mode rmode,
+void MacroAssembler::Jump(intptr_t target, RelocInfo::Mode rmode,
                           Condition cond) {
   Label skip;
 
@@ -410,13 +410,13 @@ void TurboAssembler::Jump(intptr_t target, RelocInfo::Mode rmode,
   bind(&skip);
 }
 
-void TurboAssembler::Jump(Address target, RelocInfo::Mode rmode,
+void MacroAssembler::Jump(Address target, RelocInfo::Mode rmode,
                           Condition cond) {
   DCHECK(!RelocInfo::IsCodeTarget(rmode));
   Jump(static_cast<intptr_t>(target), rmode, cond);
 }
 
-void TurboAssembler::Jump(Handle<Code> code, RelocInfo::Mode rmode,
+void MacroAssembler::Jump(Handle<Code> code, RelocInfo::Mode rmode,
                           Condition cond) {
   DCHECK(RelocInfo::IsCodeTarget(rmode));
   DCHECK_IMPLIES(options().isolate_independent_code,
@@ -431,14 +431,14 @@ void TurboAssembler::Jump(Handle<Code> code, RelocInfo::Mode rmode,
   jump(code, RelocInfo::RELATIVE_CODE_TARGET, cond);
 }
 
-void TurboAssembler::Jump(const ExternalReference& reference) {
+void MacroAssembler::Jump(const ExternalReference& reference) {
   UseScratchRegisterScope temps(this);
   Register scratch = temps.Acquire();
   Move(scratch, reference);
   Jump(scratch);
 }
 
-void TurboAssembler::Call(Register target) {
+void MacroAssembler::Call(Register target) {
   // Branch to target via indirect branch
   basr(r14, target);
 }
@@ -461,7 +461,7 @@ int MacroAssembler::CallSizeNotPredictableCodeSize(Address target,
   return size;
 }
 
-void TurboAssembler::Call(Address target, RelocInfo::Mode rmode,
+void MacroAssembler::Call(Address target, RelocInfo::Mode rmode,
                           Condition cond) {
   DCHECK(cond == al);
 
@@ -469,7 +469,7 @@ void TurboAssembler::Call(Address target, RelocInfo::Mode rmode,
   basr(r14, ip);
 }
 
-void TurboAssembler::Call(Handle<Code> code, RelocInfo::Mode rmode,
+void MacroAssembler::Call(Handle<Code> code, RelocInfo::Mode rmode,
                           Condition cond) {
   DCHECK(RelocInfo::IsCodeTarget(rmode) && cond == al);
 
@@ -485,7 +485,7 @@ void TurboAssembler::Call(Handle<Code> code, RelocInfo::Mode rmode,
   call(code, rmode);
 }
 
-void TurboAssembler::CallBuiltin(Builtin builtin, Condition cond) {
+void MacroAssembler::CallBuiltin(Builtin builtin, Condition cond) {
   ASM_CODE_COMMENT_STRING(this, CommentForOffHeapTrampoline("call", builtin));
   // Use ip directly instead of using UseScratchRegisterScope, as we do not
   // preserve scratch registers across calls.
@@ -509,7 +509,7 @@ void TurboAssembler::CallBuiltin(Builtin builtin, Condition cond) {
   }
 }
 
-void TurboAssembler::TailCallBuiltin(Builtin builtin, Condition cond) {
+void MacroAssembler::TailCallBuiltin(Builtin builtin, Condition cond) {
   ASM_CODE_COMMENT_STRING(this,
                           CommentForOffHeapTrampoline("tail call", builtin));
   // Use ip directly instead of using UseScratchRegisterScope, as we do not
@@ -539,7 +539,7 @@ void TurboAssembler::TailCallBuiltin(Builtin builtin, Condition cond) {
   }
 }
 
-void TurboAssembler::Drop(int count) {
+void MacroAssembler::Drop(int count) {
   if (count > 0) {
     int total = count * kSystemPointerSize;
     if (is_uint12(total)) {
@@ -552,7 +552,7 @@ void TurboAssembler::Drop(int count) {
   }
 }
 
-void TurboAssembler::Drop(Register count, Register scratch) {
+void MacroAssembler::Drop(Register count, Register scratch) {
   ShiftLeftU64(scratch, count, Operand(kSystemPointerSizeLog2));
   AddS64(sp, sp, scratch);
 }
@@ -568,19 +568,19 @@ Operand MacroAssembler::ClearedValue() const {
       static_cast<int32_t>(HeapObjectReference::ClearedValue(isolate()).ptr()));
 }
 
-void TurboAssembler::Call(Label* target) { b(r14, target); }
+void MacroAssembler::Call(Label* target) { b(r14, target); }
 
-void TurboAssembler::Push(Handle<HeapObject> handle) {
+void MacroAssembler::Push(Handle<HeapObject> handle) {
   mov(r0, Operand(handle));
   push(r0);
 }
 
-void TurboAssembler::Push(Smi smi) {
+void MacroAssembler::Push(Smi smi) {
   mov(r0, Operand(smi));
   push(r0);
 }
 
-void TurboAssembler::Move(Register dst, Handle<HeapObject> value,
+void MacroAssembler::Move(Register dst, Handle<HeapObject> value,
                           RelocInfo::Mode rmode) {
   // TODO(jgruber,v8:8887): Also consider a root-relative load when generating
   // non-isolate-independent code. In many cases it might be cheaper than
@@ -598,7 +598,7 @@ void TurboAssembler::Move(Register dst, Handle<HeapObject> value,
   }
 }
 
-void TurboAssembler::Move(Register dst, ExternalReference reference) {
+void MacroAssembler::Move(Register dst, ExternalReference reference) {
   // TODO(jgruber,v8:8887): Also consider a root-relative load when generating
   // non-isolate-independent code. In many cases it might be cheaper than
   // embedding the relocatable value.
@@ -609,7 +609,7 @@ void TurboAssembler::Move(Register dst, ExternalReference reference) {
   mov(dst, Operand(reference));
 }
 
-void TurboAssembler::Move(Register dst, Register src, Condition cond) {
+void MacroAssembler::Move(Register dst, Register src, Condition cond) {
   if (dst != src) {
     if (cond == al) {
       mov(dst, src);
@@ -619,38 +619,38 @@ void TurboAssembler::Move(Register dst, Register src, Condition cond) {
   }
 }
 
-void TurboAssembler::Move(DoubleRegister dst, DoubleRegister src) {
+void MacroAssembler::Move(DoubleRegister dst, DoubleRegister src) {
   if (dst != src) {
     ldr(dst, src);
   }
 }
 
-void TurboAssembler::Move(Register dst, const MemOperand& src) {
+void MacroAssembler::Move(Register dst, const MemOperand& src) {
   LoadU64(dst, src);
 }
 
 // Wrapper around Assembler::mvc (SS-a format)
-void TurboAssembler::MoveChar(const MemOperand& opnd1, const MemOperand& opnd2,
+void MacroAssembler::MoveChar(const MemOperand& opnd1, const MemOperand& opnd2,
                               const Operand& length) {
   mvc(opnd1, opnd2, Operand(static_cast<intptr_t>(length.immediate() - 1)));
 }
 
 // Wrapper around Assembler::clc (SS-a format)
-void TurboAssembler::CompareLogicalChar(const MemOperand& opnd1,
+void MacroAssembler::CompareLogicalChar(const MemOperand& opnd1,
                                         const MemOperand& opnd2,
                                         const Operand& length) {
   clc(opnd1, opnd2, Operand(static_cast<intptr_t>(length.immediate() - 1)));
 }
 
 // Wrapper around Assembler::xc (SS-a format)
-void TurboAssembler::ExclusiveOrChar(const MemOperand& opnd1,
+void MacroAssembler::ExclusiveOrChar(const MemOperand& opnd1,
                                      const MemOperand& opnd2,
                                      const Operand& length) {
   xc(opnd1, opnd2, Operand(static_cast<intptr_t>(length.immediate() - 1)));
 }
 
 // Wrapper around Assembler::risbg(n) (RIE-f)
-void TurboAssembler::RotateInsertSelectBits(Register dst, Register src,
+void MacroAssembler::RotateInsertSelectBits(Register dst, Register src,
                                             const Operand& startBit,
                                             const Operand& endBit,
                                             const Operand& shiftAmt,
@@ -663,7 +663,7 @@ void TurboAssembler::RotateInsertSelectBits(Register dst, Register src,
   risbg(dst, src, startBit, endBit, shiftAmt);
 }
 
-void TurboAssembler::BranchRelativeOnIdxHighP(Register dst, Register inc,
+void MacroAssembler::BranchRelativeOnIdxHighP(Register dst, Register inc,
                                               Label* L) {
 #if V8_TARGET_ARCH_S390X
   brxhg(dst, inc, L);
@@ -672,7 +672,7 @@ void TurboAssembler::BranchRelativeOnIdxHighP(Register dst, Register inc,
 #endif  // V8_TARGET_ARCH_S390X
 }
 
-void TurboAssembler::PushArray(Register array, Register size, Register scratch,
+void MacroAssembler::PushArray(Register array, Register size, Register scratch,
                                Register scratch2, PushArrayOrder order) {
   Label loop, done;
 
@@ -703,7 +703,7 @@ void TurboAssembler::PushArray(Register array, Register size, Register scratch,
   }
 }
 
-void TurboAssembler::MultiPush(RegList regs, Register location) {
+void MacroAssembler::MultiPush(RegList regs, Register location) {
   int16_t num_to_push = regs.Count();
   int16_t stack_offset = num_to_push * kSystemPointerSize;
 
@@ -716,7 +716,7 @@ void TurboAssembler::MultiPush(RegList regs, Register location) {
   }
 }
 
-void TurboAssembler::MultiPop(RegList regs, Register location) {
+void MacroAssembler::MultiPop(RegList regs, Register location) {
   int16_t stack_offset = 0;
 
   for (int16_t i = 0; i < Register::kNumRegisters; i++) {
@@ -728,7 +728,7 @@ void TurboAssembler::MultiPop(RegList regs, Register location) {
   AddS64(location, location, Operand(stack_offset));
 }
 
-void TurboAssembler::MultiPushDoubles(DoubleRegList dregs, Register location) {
+void MacroAssembler::MultiPushDoubles(DoubleRegList dregs, Register location) {
   int16_t num_to_push = dregs.Count();
   int16_t stack_offset = num_to_push * kDoubleSize;
 
@@ -742,7 +742,7 @@ void TurboAssembler::MultiPushDoubles(DoubleRegList dregs, Register location) {
   }
 }
 
-void TurboAssembler::MultiPushV128(DoubleRegList dregs, Register scratch,
+void MacroAssembler::MultiPushV128(DoubleRegList dregs, Register scratch,
                                    Register location) {
   int16_t num_to_push = dregs.Count();
   int16_t stack_offset = num_to_push * kSimd128Size;
 
@@ -757,7 +757,7 @@ void TurboAssembler::MultiPushV128(DoubleRegList dregs, Register scratch,
   }
 }
 
-void TurboAssembler::MultiPopDoubles(DoubleRegList dregs, Register location) {
+void MacroAssembler::MultiPopDoubles(DoubleRegList dregs, Register location) {
   int16_t stack_offset = 0;
 
   for (int16_t i = 0; i < DoubleRegister::kNumRegisters; i++) {
@@ -770,7 +770,7 @@ void TurboAssembler::MultiPopDoubles(DoubleRegList dregs, Register location) {
   AddS64(location, location, Operand(stack_offset));
 }
 
-void TurboAssembler::MultiPopV128(DoubleRegList dregs, Register scratch,
+void MacroAssembler::MultiPopV128(DoubleRegList dregs, Register scratch,
                                   Register location) {
   int16_t stack_offset = 0;
 
@@ -784,7 +784,7 @@ void TurboAssembler::MultiPopV128(DoubleRegList dregs, Register scratch,
   AddS64(location, location, Operand(stack_offset));
 }
 
-void TurboAssembler::MultiPushF64OrV128(DoubleRegList dregs, Register scratch,
+void MacroAssembler::MultiPushF64OrV128(DoubleRegList dregs, Register scratch,
                                         Register location) {
 #if V8_ENABLE_WEBASSEMBLY
   bool generating_bultins =
@@ -818,7 +818,7 @@ void TurboAssembler::MultiPushF64OrV128(DoubleRegList dregs, Register scratch,
 #endif
 }
 
-void TurboAssembler::MultiPopF64OrV128(DoubleRegList dregs, Register scratch,
+void MacroAssembler::MultiPopF64OrV128(DoubleRegList dregs, Register scratch,
                                        Register location) {
 #if V8_ENABLE_WEBASSEMBLY
   bool generating_bultins =
@@ -850,7 +850,7 @@ void TurboAssembler::MultiPopF64OrV128(DoubleRegList dregs, Register scratch,
 #endif
 }
 
-void TurboAssembler::LoadTaggedRoot(Register destination, RootIndex index) {
+void MacroAssembler::LoadTaggedRoot(Register destination, RootIndex index) {
   ASM_CODE_COMMENT(this);
   if (V8_STATIC_ROOTS_BOOL && RootsTable::IsReadOnly(index)) {
     mov(destination, Operand(ReadOnlyRootPtr(index), RelocInfo::Mode::NO_INFO));
@@ -859,7 +859,7 @@ void TurboAssembler::LoadTaggedRoot(Register destination, RootIndex index) {
   LoadRoot(destination, index);
 }
 
-void TurboAssembler::LoadRoot(Register destination, RootIndex index,
+void MacroAssembler::LoadRoot(Register destination, RootIndex index,
                               Condition) {
   if (V8_STATIC_ROOTS_BOOL && RootsTable::IsReadOnly(index)) {
     DecompressTaggedPointer(destination, ReadOnlyRootPtr(index));
@@ -869,7 +869,7 @@ void TurboAssembler::LoadRoot(Register destination, RootIndex index,
           MemOperand(kRootRegister, RootRegisterOffsetForRootIndex(index)), r0);
 }
 
-void TurboAssembler::LoadTaggedPointerField(const Register& destination,
+void MacroAssembler::LoadTaggedPointerField(const Register& destination,
                                             const MemOperand& field_operand,
                                             const Register& scratch) {
   if (COMPRESS_POINTERS_BOOL) {
@@ -879,7 +879,7 @@ void TurboAssembler::LoadTaggedPointerField(const Register& destination,
   }
 }
 
-void TurboAssembler::LoadAnyTaggedField(const Register& destination,
+void MacroAssembler::LoadAnyTaggedField(const Register& destination,
                                         const MemOperand& field_operand,
                                         const Register& scratch) {
   if (COMPRESS_POINTERS_BOOL) {
@@ -889,7 +889,7 @@ void TurboAssembler::LoadAnyTaggedField(const Register& destination,
   }
 }
 
-void TurboAssembler::SmiUntag(Register dst, const MemOperand& src) {
+void MacroAssembler::SmiUntag(Register dst, const MemOperand& src) {
   if (SmiValuesAre31Bits()) {
     LoadS32(dst, src);
   } else {
@@ -898,11 +898,11 @@ void TurboAssembler::SmiUntag(Register dst, const MemOperand& src) {
   SmiUntag(dst);
 }
 
-void TurboAssembler::SmiUntagField(Register dst, const MemOperand& src) {
+void MacroAssembler::SmiUntagField(Register dst, const MemOperand& src) {
   SmiUntag(dst, src);
 }
 
-void TurboAssembler::StoreTaggedField(const Register& value,
+void MacroAssembler::StoreTaggedField(const Register& value,
                                       const MemOperand& dst_field_operand,
                                       const Register& scratch) {
   if (COMPRESS_POINTERS_BOOL) {
@@ -914,21 +914,21 @@ void TurboAssembler::StoreTaggedField(const Register& value,
   }
 }
 
-void TurboAssembler::DecompressTaggedSigned(Register destination,
+void MacroAssembler::DecompressTaggedSigned(Register destination,
                                             Register src) {
   RecordComment("[ DecompressTaggedSigned");
   llgfr(destination, src);
   RecordComment("]");
 }
 
-void TurboAssembler::DecompressTaggedSigned(Register destination,
+void MacroAssembler::DecompressTaggedSigned(Register destination,
                                             MemOperand field_operand) {
   RecordComment("[ DecompressTaggedSigned");
   llgf(destination, field_operand);
   RecordComment("]");
 }
 
-void TurboAssembler::DecompressTaggedPointer(Register destination,
+void MacroAssembler::DecompressTaggedPointer(Register destination,
                                              Register source) {
   RecordComment("[ DecompressTaggedPointer");
   llgfr(destination, source);
@@ -936,7 +936,7 @@ void TurboAssembler::DecompressTaggedPointer(Register destination,
   RecordComment("]");
 }
 
-void TurboAssembler::DecompressTaggedPointer(Register destination,
+void MacroAssembler::DecompressTaggedPointer(Register destination,
                                              MemOperand field_operand) {
   RecordComment("[ DecompressTaggedPointer");
   llgf(destination, field_operand);
@@ -944,14 +944,14 @@ void TurboAssembler::DecompressTaggedPointer(Register destination,
   RecordComment("]");
 }
 
-void TurboAssembler::DecompressTaggedPointer(const Register& destination,
+void MacroAssembler::DecompressTaggedPointer(const Register& destination,
                                              Tagged_t immediate) {
   ASM_CODE_COMMENT(this);
   mov(destination, Operand(immediate, RelocInfo::NO_INFO));
   agr(destination, kRootRegister);
 }
 
-void TurboAssembler::DecompressAnyTagged(Register destination,
+void MacroAssembler::DecompressAnyTagged(Register destination,
                                          MemOperand field_operand) {
   RecordComment("[ DecompressAnyTagged");
   llgf(destination, field_operand);
@@ -959,7 +959,7 @@ void TurboAssembler::DecompressAnyTagged(Register destination,
   RecordComment("]");
 }
 
-void TurboAssembler::DecompressAnyTagged(Register destination,
+void MacroAssembler::DecompressAnyTagged(Register destination,
                                          Register source) {
   RecordComment("[ DecompressAnyTagged");
   llgfr(destination, source);
@@ -967,7 +967,7 @@ void TurboAssembler::DecompressAnyTagged(Register destination,
   RecordComment("]");
 }
 
-void TurboAssembler::LoadTaggedSignedField(Register destination,
+void MacroAssembler::LoadTaggedSignedField(Register destination,
                                            MemOperand field_operand) {
   if (COMPRESS_POINTERS_BOOL) {
     DecompressTaggedSigned(destination, field_operand);
@@ -1015,17 +1015,17 @@ void MacroAssembler::RecordWriteField(Register object, int offset,
   }
 }
 
-void TurboAssembler::MaybeSaveRegisters(RegList registers) {
+void MacroAssembler::MaybeSaveRegisters(RegList registers) {
   if (registers.is_empty()) return;
   MultiPush(registers);
 }
 
-void TurboAssembler::MaybeRestoreRegisters(RegList registers) {
+void MacroAssembler::MaybeRestoreRegisters(RegList registers) {
   if (registers.is_empty()) return;
   MultiPop(registers);
 }
 
-void TurboAssembler::CallEphemeronKeyBarrier(Register object,
+void MacroAssembler::CallEphemeronKeyBarrier(Register object,
                                              Register slot_address,
                                              SaveFPRegsMode fp_mode) {
   DCHECK(!AreAliased(object, slot_address));
@@ -1048,7 +1048,7 @@ void TurboAssembler::CallEphemeronKeyBarrier(Register object,
   MaybeRestoreRegisters(registers);
 }
 
-void TurboAssembler::CallRecordWriteStubSaveRegisters(Register object,
+void MacroAssembler::CallRecordWriteStubSaveRegisters(Register object,
                                                       Register slot_address,
                                                       SaveFPRegsMode fp_mode,
                                                       StubCallMode mode) {
@@ -1071,7 +1071,7 @@ void TurboAssembler::CallRecordWriteStubSaveRegisters(Register object,
   MaybeRestoreRegisters(registers);
 }
 
-void TurboAssembler::CallRecordWriteStub(Register object, Register slot_address,
+void MacroAssembler::CallRecordWriteStub(Register object, Register slot_address,
                                          SaveFPRegsMode fp_mode,
                                          StubCallMode mode) {
   // Use CallRecordWriteStubSaveRegisters if the object and slot registers
@@ -1144,7 +1144,7 @@ void MacroAssembler::RecordWrite(Register object, Register slot_address,
   }
 }
 
-void TurboAssembler::PushCommonFrame(Register marker_reg) {
+void MacroAssembler::PushCommonFrame(Register marker_reg) {
   ASM_CODE_COMMENT(this);
   int fp_delta = 0;
   CleanseP(r14);
@@ -1158,7 +1158,7 @@ void TurboAssembler::PushCommonFrame(Register marker_reg) {
   la(fp, MemOperand(sp, fp_delta * kSystemPointerSize));
 }
 
-void TurboAssembler::PopCommonFrame(Register marker_reg) {
+void MacroAssembler::PopCommonFrame(Register marker_reg) {
   if (marker_reg.is_valid()) {
     Pop(r14, fp, marker_reg);
   } else {
@@ -1166,7 +1166,7 @@ void TurboAssembler::PopCommonFrame(Register marker_reg) {
   }
 }
 
-void TurboAssembler::PushStandardFrame(Register function_reg) {
+void MacroAssembler::PushStandardFrame(Register function_reg) {
   int fp_delta = 0;
   CleanseP(r14);
   if (function_reg.is_valid()) {
@@ -1180,7 +1180,7 @@ void TurboAssembler::PushStandardFrame(Register function_reg) {
   Push(kJavaScriptCallArgCountRegister);
 }
 
-void TurboAssembler::RestoreFrameStateForTailCall() {
+void MacroAssembler::RestoreFrameStateForTailCall() {
   // if (V8_EMBEDDED_CONSTANT_POOL_BOOL) {
   //   LoadU64(kConstantPoolRegister,
   //           MemOperand(fp, StandardFrameConstants::kConstantPoolOffset));
@@ -1191,7 +1191,7 @@ void TurboAssembler::RestoreFrameStateForTailCall() {
   LoadU64(fp, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
 }
 
-void TurboAssembler::CanonicalizeNaN(const DoubleRegister dst,
+void MacroAssembler::CanonicalizeNaN(const DoubleRegister dst,
                                      const DoubleRegister src) {
   // Turn potential sNaN into qNaN
   if (dst != src) ldr(dst, src);
@@ -1199,11 +1199,11 @@ void TurboAssembler::CanonicalizeNaN(const DoubleRegister dst,
   sdbr(dst, kDoubleRegZero);
 }
 
-void TurboAssembler::ConvertIntToDouble(DoubleRegister dst, Register src) {
+void MacroAssembler::ConvertIntToDouble(DoubleRegister dst, Register src) {
   cdfbr(dst, src);
 }
 
-void TurboAssembler::ConvertUnsignedIntToDouble(DoubleRegister dst,
+void MacroAssembler::ConvertUnsignedIntToDouble(DoubleRegister dst,
                                                 Register src) {
   if (CpuFeatures::IsSupported(FLOATING_POINT_EXT)) {
     cdlfbr(Condition(5), Condition(0), dst, src);
@@ -1215,36 +1215,36 @@ void TurboAssembler::ConvertUnsignedIntToDouble(DoubleRegister dst,
   }
 }
 
-void TurboAssembler::ConvertIntToFloat(DoubleRegister dst, Register src) {
+void MacroAssembler::ConvertIntToFloat(DoubleRegister dst, Register src) {
   cefbra(Condition(4), dst, src);
 }
 
-void TurboAssembler::ConvertUnsignedIntToFloat(DoubleRegister dst,
MacroAssembler::ConvertUnsignedIntToFloat(DoubleRegister dst, Register src) { celfbr(Condition(4), Condition(0), dst, src); } -void TurboAssembler::ConvertInt64ToFloat(DoubleRegister double_dst, +void MacroAssembler::ConvertInt64ToFloat(DoubleRegister double_dst, Register src) { cegbr(double_dst, src); } -void TurboAssembler::ConvertInt64ToDouble(DoubleRegister double_dst, +void MacroAssembler::ConvertInt64ToDouble(DoubleRegister double_dst, Register src) { cdgbr(double_dst, src); } -void TurboAssembler::ConvertUnsignedInt64ToFloat(DoubleRegister double_dst, +void MacroAssembler::ConvertUnsignedInt64ToFloat(DoubleRegister double_dst, Register src) { celgbr(Condition(0), Condition(0), double_dst, src); } -void TurboAssembler::ConvertUnsignedInt64ToDouble(DoubleRegister double_dst, +void MacroAssembler::ConvertUnsignedInt64ToDouble(DoubleRegister double_dst, Register src) { cdlgbr(Condition(0), Condition(0), double_dst, src); } -void TurboAssembler::ConvertFloat32ToInt64(const Register dst, +void MacroAssembler::ConvertFloat32ToInt64(const Register dst, const DoubleRegister double_input, FPRoundingMode rounding_mode) { Condition m = Condition(0); @@ -1266,7 +1266,7 @@ void TurboAssembler::ConvertFloat32ToInt64(const Register dst, cgebr(m, dst, double_input); } -void TurboAssembler::ConvertDoubleToInt64(const Register dst, +void MacroAssembler::ConvertDoubleToInt64(const Register dst, const DoubleRegister double_input, FPRoundingMode rounding_mode) { Condition m = Condition(0); @@ -1288,7 +1288,7 @@ void TurboAssembler::ConvertDoubleToInt64(const Register dst, cgdbr(m, dst, double_input); } -void TurboAssembler::ConvertDoubleToInt32(const Register dst, +void MacroAssembler::ConvertDoubleToInt32(const Register dst, const DoubleRegister double_input, FPRoundingMode rounding_mode) { Condition m = Condition(0); @@ -1314,7 +1314,7 @@ void TurboAssembler::ConvertDoubleToInt32(const Register dst, cfdbr(m, dst, double_input); } -void TurboAssembler::ConvertFloat32ToInt32(const Register result, +void MacroAssembler::ConvertFloat32ToInt32(const Register result, const DoubleRegister double_input, FPRoundingMode rounding_mode) { Condition m = Condition(0); @@ -1340,7 +1340,7 @@ void TurboAssembler::ConvertFloat32ToInt32(const Register result, cfebr(m, result, double_input); } -void TurboAssembler::ConvertFloat32ToUnsignedInt32( +void MacroAssembler::ConvertFloat32ToUnsignedInt32( const Register result, const DoubleRegister double_input, FPRoundingMode rounding_mode) { Condition m = Condition(0); @@ -1365,7 +1365,7 @@ void TurboAssembler::ConvertFloat32ToUnsignedInt32( clfebr(m, Condition(0), result, double_input); } -void TurboAssembler::ConvertFloat32ToUnsignedInt64( +void MacroAssembler::ConvertFloat32ToUnsignedInt64( const Register result, const DoubleRegister double_input, FPRoundingMode rounding_mode) { Condition m = Condition(0); @@ -1387,7 +1387,7 @@ void TurboAssembler::ConvertFloat32ToUnsignedInt64( clgebr(m, Condition(0), result, double_input); } -void TurboAssembler::ConvertDoubleToUnsignedInt64( +void MacroAssembler::ConvertDoubleToUnsignedInt64( const Register dst, const DoubleRegister double_input, FPRoundingMode rounding_mode) { Condition m = Condition(0); @@ -1409,7 +1409,7 @@ void TurboAssembler::ConvertDoubleToUnsignedInt64( clgdbr(m, Condition(0), dst, double_input); } -void TurboAssembler::ConvertDoubleToUnsignedInt32( +void MacroAssembler::ConvertDoubleToUnsignedInt32( const Register dst, const DoubleRegister double_input, FPRoundingMode rounding_mode) { Condition m = Condition(0); 
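// NOTE (illustrative sketch, not part of this CL): each Convert*To* helper
// above maps an FPRoundingMode onto the M3 mask of a z/Arch convert-to-fixed
// instruction (cgebr, cgdbr, clfebr, ...), which is why they all start from
// "Condition m = Condition(0)" and then pick a mask. Ignoring the condition
// code the hardware also sets, the net effect is comparable to:
//
//   #include <cfenv>
//   #include <cmath>
//   #include <cstdint>
//   int64_t ConvertDoubleToInt64(double input, int fe_mode /* ~ M3 mask */) {
//     std::fesetround(fe_mode);                            // e.g. FE_TOWARDZERO
//     return static_cast<int64_t>(std::nearbyint(input));  // cgdbr
//   }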
@@ -1434,15 +1434,15 @@ void TurboAssembler::ConvertDoubleToUnsignedInt32( clfdbr(m, Condition(0), dst, double_input); } -void TurboAssembler::MovDoubleToInt64(Register dst, DoubleRegister src) { +void MacroAssembler::MovDoubleToInt64(Register dst, DoubleRegister src) { lgdr(dst, src); } -void TurboAssembler::MovInt64ToDouble(DoubleRegister dst, Register src) { +void MacroAssembler::MovInt64ToDouble(DoubleRegister dst, Register src) { ldgr(dst, src); } -void TurboAssembler::StubPrologue(StackFrame::Type type, Register base, +void MacroAssembler::StubPrologue(StackFrame::Type type, Register base, int prologue_offset) { { ConstantPoolUnavailableScope constant_pool_unavailable(this); @@ -1451,12 +1451,12 @@ void TurboAssembler::StubPrologue(StackFrame::Type type, Register base, } } -void TurboAssembler::Prologue(Register base, int prologue_offset) { +void MacroAssembler::Prologue(Register base, int prologue_offset) { DCHECK(base != no_reg); PushStandardFrame(r3); } -void TurboAssembler::DropArguments(Register count, ArgumentsCountType type, +void MacroAssembler::DropArguments(Register count, ArgumentsCountType type, ArgumentsCountMode mode) { int receiver_bytes = (mode == kCountExcludesReceiver) ? kSystemPointerSize : 0; @@ -1482,7 +1482,7 @@ void TurboAssembler::DropArguments(Register count, ArgumentsCountType type, } } -void TurboAssembler::DropArgumentsAndPushNewReceiver(Register argc, +void MacroAssembler::DropArgumentsAndPushNewReceiver(Register argc, Register receiver, ArgumentsCountType type, ArgumentsCountMode mode) { @@ -1497,7 +1497,7 @@ void TurboAssembler::DropArgumentsAndPushNewReceiver(Register argc, } } -void TurboAssembler::EnterFrame(StackFrame::Type type, +void MacroAssembler::EnterFrame(StackFrame::Type type, bool load_constant_pool_pointer_reg) { ASM_CODE_COMMENT(this); // We create a stack frame with: @@ -1518,7 +1518,7 @@ void TurboAssembler::EnterFrame(StackFrame::Type type, #endif // V8_ENABLE_WEBASSEMBLY } -int TurboAssembler::LeaveFrame(StackFrame::Type type, int stack_adjustment) { +int MacroAssembler::LeaveFrame(StackFrame::Type type, int stack_adjustment) { ASM_CODE_COMMENT(this); // Drop the execution stack down to the frame pointer and restore // the caller frame pointer, return address and constant pool pointer. @@ -1594,7 +1594,7 @@ void MacroAssembler::EnterExitFrame(int stack_space, // Allocate and align the frame preparing for calling the runtime // function. - const int frame_alignment = TurboAssembler::ActivationFrameAlignment(); + const int frame_alignment = MacroAssembler::ActivationFrameAlignment(); if (frame_alignment > 0) { DCHECK_EQ(frame_alignment, 8); ClearRightImm(sp, sp, Operand(3)); // equivalent to &= -8 @@ -1608,7 +1608,7 @@ void MacroAssembler::EnterExitFrame(int stack_space, StoreU64(r1, MemOperand(fp, ExitFrameConstants::kSPOffset)); } -int TurboAssembler::ActivationFrameAlignment() { +int MacroAssembler::ActivationFrameAlignment() { #if !defined(USE_SIMULATOR) // Running on the real platform. Use the alignment as mandated by the local // environment. 
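// NOTE (illustrative sketch, not part of this CL): DropArguments above turns
// an argument count into a byte adjustment of sp, optionally reserving one
// extra slot for the receiver. For the plain integer-count case, assuming
// 64-bit system pointers, the size computation is:
//
//   constexpr int kSystemPointerSize = 8;
//   enum ArgumentsCountMode { kCountIncludesReceiver, kCountExcludesReceiver };
//   int64_t BytesToDrop(int64_t count, ArgumentsCountMode mode) {
//     int receiver_bytes =
//         (mode == kCountExcludesReceiver) ? kSystemPointerSize : 0;
//     return count * kSystemPointerSize + receiver_bytes;
//   }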
@@ -1655,11 +1655,11 @@ void MacroAssembler::LeaveExitFrame(Register argument_count, } } -void TurboAssembler::MovFromFloatResult(const DoubleRegister dst) { +void MacroAssembler::MovFromFloatResult(const DoubleRegister dst) { Move(dst, d0); } -void TurboAssembler::MovFromFloatParameter(const DoubleRegister dst) { +void MacroAssembler::MovFromFloatParameter(const DoubleRegister dst) { Move(dst, d0); } @@ -1670,10 +1670,10 @@ MemOperand MacroAssembler::StackLimitAsMemOperand(StackLimitKind kind) { kind == StackLimitKind::kRealStackLimit ? ExternalReference::address_of_real_jslimit(isolate) : ExternalReference::address_of_jslimit(isolate); - DCHECK(TurboAssembler::IsAddressableThroughRootRegister(isolate, limit)); + DCHECK(MacroAssembler::IsAddressableThroughRootRegister(isolate, limit)); intptr_t offset = - TurboAssembler::RootRegisterOffsetForExternalReference(isolate, limit); + MacroAssembler::RootRegisterOffsetForExternalReference(isolate, limit); CHECK(is_int32(offset)); return MemOperand(kRootRegister, offset); } @@ -1977,7 +1977,7 @@ void MacroAssembler::JumpIfIsInRange(Register value, unsigned lower_limit, ble(on_in_range); } -void TurboAssembler::TruncateDoubleToI(Isolate* isolate, Zone* zone, +void MacroAssembler::TruncateDoubleToI(Isolate* isolate, Zone* zone, Register result, DoubleRegister double_input, StubCallMode stub_mode) { @@ -2009,7 +2009,7 @@ void TurboAssembler::TruncateDoubleToI(Isolate* isolate, Zone* zone, bind(&done); } -void TurboAssembler::TryInlineTruncateDoubleToI(Register result, +void MacroAssembler::TryInlineTruncateDoubleToI(Register result, DoubleRegister double_input, Label* done) { ConvertDoubleToInt64(result, double_input); @@ -2245,7 +2245,7 @@ void MacroAssembler::EmitDecrementCounter(StatsCounter* counter, int value, } } -void TurboAssembler::Check(Condition cond, AbortReason reason, CRegister cr) { +void MacroAssembler::Check(Condition cond, AbortReason reason, CRegister cr) { Label L; b(cond, &L); Abort(reason); @@ -2253,7 +2253,7 @@ void TurboAssembler::Check(Condition cond, AbortReason reason, CRegister cr) { bind(&L); } -void TurboAssembler::Abort(AbortReason reason) { +void MacroAssembler::Abort(AbortReason reason) { Label abort_start; bind(&abort_start); if (v8_flags.code_comments) { @@ -2300,7 +2300,7 @@ void TurboAssembler::Abort(AbortReason reason) { // will not return here } -void TurboAssembler::LoadMap(Register destination, Register object) { +void MacroAssembler::LoadMap(Register destination, Register object) { LoadTaggedPointerField(destination, FieldMemOperand(object, HeapObject::kMapOffset)); } @@ -2314,15 +2314,15 @@ void MacroAssembler::LoadNativeContextSlot(Register dst, int index) { } #ifdef V8_ENABLE_DEBUG_CODE -void TurboAssembler::Assert(Condition cond, AbortReason reason, CRegister cr) { +void MacroAssembler::Assert(Condition cond, AbortReason reason, CRegister cr) { if (v8_flags.debug_code) Check(cond, reason, cr); } -void TurboAssembler::AssertUnreachable(AbortReason reason) { +void MacroAssembler::AssertUnreachable(AbortReason reason) { if (v8_flags.debug_code) Abort(reason); } -void TurboAssembler::AssertNotSmi(Register object) { +void MacroAssembler::AssertNotSmi(Register object) { if (v8_flags.debug_code) { static_assert(kSmiTag == 0); TestIfSmi(object); @@ -2330,7 +2330,7 @@ void TurboAssembler::AssertNotSmi(Register object) { } } -void TurboAssembler::AssertSmi(Register object) { +void MacroAssembler::AssertSmi(Register object) { if (v8_flags.debug_code) { static_assert(kSmiTag == 0); TestIfSmi(object); @@ -2426,7 
+2426,7 @@ void MacroAssembler::AssertUndefinedOrAllocationSite(Register object, static const int kRegisterPassedArguments = 5; -int TurboAssembler::CalculateStackPassedWords(int num_reg_arguments, +int MacroAssembler::CalculateStackPassedWords(int num_reg_arguments, int num_double_arguments) { int stack_passed_words = 0; if (num_double_arguments > DoubleRegister::kNumRegisters) { @@ -2440,7 +2440,7 @@ int TurboAssembler::CalculateStackPassedWords(int num_reg_arguments, return stack_passed_words; } -void TurboAssembler::PrepareCallCFunction(int num_reg_arguments, +void MacroAssembler::PrepareCallCFunction(int num_reg_arguments, int num_double_arguments, Register scratch) { int frame_alignment = ActivationFrameAlignment(); @@ -2463,16 +2463,16 @@ void TurboAssembler::PrepareCallCFunction(int num_reg_arguments, lay(sp, MemOperand(sp, (-stack_space) * kSystemPointerSize)); } -void TurboAssembler::PrepareCallCFunction(int num_reg_arguments, +void MacroAssembler::PrepareCallCFunction(int num_reg_arguments, Register scratch) { PrepareCallCFunction(num_reg_arguments, 0, scratch); } -void TurboAssembler::MovToFloatParameter(DoubleRegister src) { Move(d0, src); } +void MacroAssembler::MovToFloatParameter(DoubleRegister src) { Move(d0, src); } -void TurboAssembler::MovToFloatResult(DoubleRegister src) { Move(d0, src); } +void MacroAssembler::MovToFloatResult(DoubleRegister src) { Move(d0, src); } -void TurboAssembler::MovToFloatParameters(DoubleRegister src1, +void MacroAssembler::MovToFloatParameters(DoubleRegister src1, DoubleRegister src2) { if (src2 == d0) { DCHECK(src1 != d2); @@ -2484,28 +2484,28 @@ void TurboAssembler::MovToFloatParameters(DoubleRegister src1, } } -void TurboAssembler::CallCFunction(ExternalReference function, +void MacroAssembler::CallCFunction(ExternalReference function, int num_reg_arguments, int num_double_arguments) { Move(ip, function); CallCFunctionHelper(ip, num_reg_arguments, num_double_arguments); } -void TurboAssembler::CallCFunction(Register function, int num_reg_arguments, +void MacroAssembler::CallCFunction(Register function, int num_reg_arguments, int num_double_arguments) { CallCFunctionHelper(function, num_reg_arguments, num_double_arguments); } -void TurboAssembler::CallCFunction(ExternalReference function, +void MacroAssembler::CallCFunction(ExternalReference function, int num_arguments) { CallCFunction(function, num_arguments, 0); } -void TurboAssembler::CallCFunction(Register function, int num_arguments) { +void MacroAssembler::CallCFunction(Register function, int num_arguments) { CallCFunction(function, num_arguments, 0); } -void TurboAssembler::CallCFunctionHelper(Register function, +void MacroAssembler::CallCFunctionHelper(Register function, int num_reg_arguments, int num_double_arguments) { DCHECK_LE(num_reg_arguments + num_double_arguments, kMaxCParameters); @@ -2570,7 +2570,7 @@ void TurboAssembler::CallCFunctionHelper(Register function, } } -void TurboAssembler::CheckPageFlag( +void MacroAssembler::CheckPageFlag( Register object, Register scratch, // scratch may be same register as object int mask, Condition cc, Label* condition_met) { @@ -2630,9 +2630,9 @@ Register GetRegisterThatIsNotOneOf(Register reg1, Register reg2, Register reg3, UNREACHABLE(); } -void TurboAssembler::mov(Register dst, Register src) { lgr(dst, src); } +void MacroAssembler::mov(Register dst, Register src) { lgr(dst, src); } -void TurboAssembler::mov(Register dst, const Operand& src) { +void MacroAssembler::mov(Register dst, const Operand& src) { int64_t value = 0; if 
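// NOTE (illustrative sketch, not part of this CL): CalculateStackPassedWords
// above encodes the s390 C calling convention: the first five integer
// arguments travel in registers (kRegisterPassedArguments == 5), doubles use
// the FP register file, and only the overflow spills to stack words:
//
//   constexpr int kRegisterPassedArguments = 5;
//   constexpr int kNumDoubleRegisters = 16;  // assumed FP register count
//   int StackPassedWords(int num_reg_arguments, int num_double_arguments) {
//     int words = 0;
//     if (num_double_arguments > kNumDoubleRegisters)
//       words += 2 * (num_double_arguments - kNumDoubleRegisters);
//     if (num_reg_arguments > kRegisterPassedArguments)
//       words += num_reg_arguments - kRegisterPassedArguments;
//     return words;
//   }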
(src.is_heap_number_request()) { @@ -2677,7 +2677,7 @@ void TurboAssembler::mov(Register dst, const Operand& src) { iilf(dst, Operand(lo_32)); } -void TurboAssembler::MulS32(Register dst, const MemOperand& src1) { +void MacroAssembler::MulS32(Register dst, const MemOperand& src1) { if (is_uint12(src1.offset())) { ms(dst, src1); } else if (is_int20(src1.offset())) { @@ -2687,9 +2687,9 @@ void TurboAssembler::MulS32(Register dst, const MemOperand& src1) { } } -void TurboAssembler::MulS32(Register dst, Register src1) { msr(dst, src1); } +void MacroAssembler::MulS32(Register dst, Register src1) { msr(dst, src1); } -void TurboAssembler::MulS32(Register dst, const Operand& src1) { +void MacroAssembler::MulS32(Register dst, const Operand& src1) { msfi(dst, src1); } @@ -2700,19 +2700,19 @@ void TurboAssembler::MulS32(Register dst, const Operand& src1) { srlg(dst, dst, Operand(32)); \ } -void TurboAssembler::MulHighS32(Register dst, Register src1, +void MacroAssembler::MulHighS32(Register dst, Register src1, const MemOperand& src2) { Generate_MulHigh32(msgf); } -void TurboAssembler::MulHighS32(Register dst, Register src1, Register src2) { +void MacroAssembler::MulHighS32(Register dst, Register src1, Register src2) { if (dst == src2) { std::swap(src1, src2); } Generate_MulHigh32(msgfr); } -void TurboAssembler::MulHighS32(Register dst, Register src1, +void MacroAssembler::MulHighS32(Register dst, Register src1, const Operand& src2) { Generate_MulHigh32(msgfi); } @@ -2726,16 +2726,16 @@ void TurboAssembler::MulHighS32(Register dst, Register src1, LoadU32(dst, r0); \ } -void TurboAssembler::MulHighU32(Register dst, Register src1, +void MacroAssembler::MulHighU32(Register dst, Register src1, const MemOperand& src2) { Generate_MulHighU32(ml); } -void TurboAssembler::MulHighU32(Register dst, Register src1, Register src2) { +void MacroAssembler::MulHighU32(Register dst, Register src1, Register src2) { Generate_MulHighU32(mlr); } -void TurboAssembler::MulHighU32(Register dst, Register src1, +void MacroAssembler::MulHighU32(Register dst, Register src1, const Operand& src2) { USE(dst); USE(src1); @@ -2752,7 +2752,7 @@ void TurboAssembler::MulHighU32(Register dst, Register src1, cgfr(dst, dst); \ } -void TurboAssembler::Mul32WithOverflowIfCCUnequal(Register dst, Register src1, +void MacroAssembler::Mul32WithOverflowIfCCUnequal(Register dst, Register src1, const MemOperand& src2) { Register result = dst; if (src2.rx() == dst || src2.rb() == dst) dst = r0; @@ -2760,7 +2760,7 @@ void TurboAssembler::Mul32WithOverflowIfCCUnequal(Register dst, Register src1, if (result != dst) llgfr(result, dst); } -void TurboAssembler::Mul32WithOverflowIfCCUnequal(Register dst, Register src1, +void MacroAssembler::Mul32WithOverflowIfCCUnequal(Register dst, Register src1, Register src2) { if (dst == src2) { std::swap(src1, src2); @@ -2768,7 +2768,7 @@ void TurboAssembler::Mul32WithOverflowIfCCUnequal(Register dst, Register src1, Generate_Mul32WithOverflowIfCCUnequal(msgfr); } -void TurboAssembler::Mul32WithOverflowIfCCUnequal(Register dst, Register src1, +void MacroAssembler::Mul32WithOverflowIfCCUnequal(Register dst, Register src1, const Operand& src2) { Generate_Mul32WithOverflowIfCCUnequal(msgfi); } @@ -2782,12 +2782,12 @@ void TurboAssembler::Mul32WithOverflowIfCCUnequal(Register dst, Register src1, LoadU32(dst, r1); \ } -void TurboAssembler::DivS32(Register dst, Register src1, +void MacroAssembler::DivS32(Register dst, Register src1, const MemOperand& src2) { Generate_Div32(dsgf); } -void TurboAssembler::DivS32(Register dst, 
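// NOTE (illustrative sketch, not part of this CL): the Generate_MulHigh32 /
// Generate_MulHighU32 macros above widen to 64 bits, multiply, and keep only
// the upper half of the product (srlg by 32, or the even register of the
// mlr pair). The same computation in portable C++:
//
//   #include <cstdint>
//   int32_t MulHighS32(int32_t a, int32_t b) {    // lgfr + msgfr + srlg
//     int64_t product = static_cast<int64_t>(a) * b;
//     return static_cast<int32_t>(product >> 32);
//   }
//   uint32_t MulHighU32(uint32_t a, uint32_t b) { // mlr: high word in r0
//     uint64_t product = static_cast<uint64_t>(a) * b;
//     return static_cast<uint32_t>(product >> 32);
//   }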
Register src1, Register src2) { +void MacroAssembler::DivS32(Register dst, Register src1, Register src2) { Generate_Div32(dsgfr); } @@ -2801,12 +2801,12 @@ void TurboAssembler::DivS32(Register dst, Register src1, Register src2) { LoadU32(dst, r1); \ } -void TurboAssembler::DivU32(Register dst, Register src1, +void MacroAssembler::DivU32(Register dst, Register src1, const MemOperand& src2) { Generate_DivU32(dl); } -void TurboAssembler::DivU32(Register dst, Register src1, Register src2) { +void MacroAssembler::DivU32(Register dst, Register src1, Register src2) { Generate_DivU32(dlr); } @@ -2819,12 +2819,12 @@ void TurboAssembler::DivU32(Register dst, Register src1, Register src2) { lgr(dst, r1); \ } -void TurboAssembler::DivS64(Register dst, Register src1, +void MacroAssembler::DivS64(Register dst, Register src1, const MemOperand& src2) { Generate_Div64(dsg); } -void TurboAssembler::DivS64(Register dst, Register src1, Register src2) { +void MacroAssembler::DivS64(Register dst, Register src1, Register src2) { Generate_Div64(dsgr); } @@ -2838,12 +2838,12 @@ void TurboAssembler::DivS64(Register dst, Register src1, Register src2) { lgr(dst, r1); \ } -void TurboAssembler::DivU64(Register dst, Register src1, +void MacroAssembler::DivU64(Register dst, Register src1, const MemOperand& src2) { Generate_DivU64(dlg); } -void TurboAssembler::DivU64(Register dst, Register src1, Register src2) { +void MacroAssembler::DivU64(Register dst, Register src1, Register src2) { Generate_DivU64(dlgr); } @@ -2856,12 +2856,12 @@ void TurboAssembler::DivU64(Register dst, Register src1, Register src2) { LoadU32(dst, r0); \ } -void TurboAssembler::ModS32(Register dst, Register src1, +void MacroAssembler::ModS32(Register dst, Register src1, const MemOperand& src2) { Generate_Mod32(dsgf); } -void TurboAssembler::ModS32(Register dst, Register src1, Register src2) { +void MacroAssembler::ModS32(Register dst, Register src1, Register src2) { Generate_Mod32(dsgfr); } @@ -2875,12 +2875,12 @@ void TurboAssembler::ModS32(Register dst, Register src1, Register src2) { LoadU32(dst, r0); \ } -void TurboAssembler::ModU32(Register dst, Register src1, +void MacroAssembler::ModU32(Register dst, Register src1, const MemOperand& src2) { Generate_ModU32(dl); } -void TurboAssembler::ModU32(Register dst, Register src1, Register src2) { +void MacroAssembler::ModU32(Register dst, Register src1, Register src2) { Generate_ModU32(dlr); } @@ -2893,12 +2893,12 @@ void TurboAssembler::ModU32(Register dst, Register src1, Register src2) { lgr(dst, r0); \ } -void TurboAssembler::ModS64(Register dst, Register src1, +void MacroAssembler::ModS64(Register dst, Register src1, const MemOperand& src2) { Generate_Mod64(dsg); } -void TurboAssembler::ModS64(Register dst, Register src1, Register src2) { +void MacroAssembler::ModS64(Register dst, Register src1, Register src2) { Generate_Mod64(dsgr); } @@ -2912,54 +2912,54 @@ void TurboAssembler::ModS64(Register dst, Register src1, Register src2) { lgr(dst, r0); \ } -void TurboAssembler::ModU64(Register dst, Register src1, +void MacroAssembler::ModU64(Register dst, Register src1, const MemOperand& src2) { Generate_ModU64(dlg); } -void TurboAssembler::ModU64(Register dst, Register src1, Register src2) { +void MacroAssembler::ModU64(Register dst, Register src1, Register src2) { Generate_ModU64(dlgr); } #undef Generate_ModU64 -void TurboAssembler::MulS64(Register dst, const Operand& opnd) { +void MacroAssembler::MulS64(Register dst, const Operand& opnd) { msgfi(dst, opnd); } -void TurboAssembler::MulS64(Register dst, 
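// NOTE (illustrative sketch, not part of this CL): the Generate_Div*/
// Generate_Mod* macros above all use the same s390 idiom: the divide
// instructions (dsgfr, dlr, dsgr, dlgr, ...) write into the even/odd pair
// r0:r1, remainder in r0 and quotient in r1; Div* then copies r1 into dst
// while Mod* copies r0. In C++ terms both results come from one division:
//
//   #include <cstdint>
//   struct DivMod { int32_t quotient; int32_t remainder; };
//   DivMod DivModS32(int32_t dividend, int32_t divisor) {
//     return {dividend / divisor, dividend % divisor};  // r1, r0
//   }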
Register src) { msgr(dst, src); } +void MacroAssembler::MulS64(Register dst, Register src) { msgr(dst, src); } -void TurboAssembler::MulS64(Register dst, const MemOperand& opnd) { +void MacroAssembler::MulS64(Register dst, const MemOperand& opnd) { msg(dst, opnd); } -void TurboAssembler::MulHighS64(Register dst, Register src1, Register src2) { +void MacroAssembler::MulHighS64(Register dst, Register src1, Register src2) { mgrk(r0, src1, src2); lgr(dst, r0); } -void TurboAssembler::MulHighS64(Register dst, Register src1, +void MacroAssembler::MulHighS64(Register dst, Register src1, const MemOperand& src2) { // TODO(v8): implement this. UNIMPLEMENTED(); } -void TurboAssembler::MulHighU64(Register dst, Register src1, Register src2) { +void MacroAssembler::MulHighU64(Register dst, Register src1, Register src2) { lgr(r1, src1); mlgr(r0, src2); lgr(dst, r0); } -void TurboAssembler::MulHighU64(Register dst, Register src1, +void MacroAssembler::MulHighU64(Register dst, Register src1, const MemOperand& src2) { // TODO(v8): implement this. UNIMPLEMENTED(); } -void TurboAssembler::Sqrt(DoubleRegister result, DoubleRegister input) { +void MacroAssembler::Sqrt(DoubleRegister result, DoubleRegister input) { sqdbr(result, input); } -void TurboAssembler::Sqrt(DoubleRegister result, const MemOperand& input) { +void MacroAssembler::Sqrt(DoubleRegister result, const MemOperand& input) { if (is_uint12(input.offset())) { sqdb(result, input); } else { @@ -2972,7 +2972,7 @@ void TurboAssembler::Sqrt(DoubleRegister result, const MemOperand& input) { //---------------------------------------------------------------------------- // Add 32-bit (Register dst = Register dst + Immediate opnd) -void TurboAssembler::AddS32(Register dst, const Operand& opnd) { +void MacroAssembler::AddS32(Register dst, const Operand& opnd) { if (is_int16(opnd.immediate())) ahi(dst, opnd); else @@ -2980,19 +2980,19 @@ void TurboAssembler::AddS32(Register dst, const Operand& opnd) { } // Add Pointer Size (Register dst = Register dst + Immediate opnd) -void TurboAssembler::AddS64(Register dst, const Operand& opnd) { +void MacroAssembler::AddS64(Register dst, const Operand& opnd) { if (is_int16(opnd.immediate())) aghi(dst, opnd); else agfi(dst, opnd); } -void TurboAssembler::AddS32(Register dst, Register src, int32_t opnd) { +void MacroAssembler::AddS32(Register dst, Register src, int32_t opnd) { AddS32(dst, src, Operand(opnd)); } // Add 32-bit (Register dst = Register src + Immediate opnd) -void TurboAssembler::AddS32(Register dst, Register src, const Operand& opnd) { +void MacroAssembler::AddS32(Register dst, Register src, const Operand& opnd) { if (dst != src) { if (CpuFeatures::IsSupported(DISTINCT_OPS) && is_int16(opnd.immediate())) { ahik(dst, src, opnd); @@ -3003,12 +3003,12 @@ void TurboAssembler::AddS32(Register dst, Register src, const Operand& opnd) { AddS32(dst, opnd); } -void TurboAssembler::AddS64(Register dst, Register src, int32_t opnd) { +void MacroAssembler::AddS64(Register dst, Register src, int32_t opnd) { AddS64(dst, src, Operand(opnd)); } // Add Pointer Size (Register dst = Register src + Immediate opnd) -void TurboAssembler::AddS64(Register dst, Register src, const Operand& opnd) { +void MacroAssembler::AddS64(Register dst, Register src, const Operand& opnd) { if (dst != src) { if (CpuFeatures::IsSupported(DISTINCT_OPS) && is_int16(opnd.immediate())) { aghik(dst, src, opnd); @@ -3020,13 +3020,13 @@ void TurboAssembler::AddS64(Register dst, Register src, const Operand& opnd) { } // Add 32-bit (Register dst = Register dst 
+ Register src) -void TurboAssembler::AddS32(Register dst, Register src) { ar(dst, src); } +void MacroAssembler::AddS32(Register dst, Register src) { ar(dst, src); } // Add Pointer Size (Register dst = Register dst + Register src) -void TurboAssembler::AddS64(Register dst, Register src) { agr(dst, src); } +void MacroAssembler::AddS64(Register dst, Register src) { agr(dst, src); } // Add 32-bit (Register dst = Register src1 + Register src2) -void TurboAssembler::AddS32(Register dst, Register src1, Register src2) { +void MacroAssembler::AddS32(Register dst, Register src1, Register src2) { if (dst != src1 && dst != src2) { // We prefer to generate AR/AGR, over the non clobbering ARK/AGRK // as AR is a smaller instruction @@ -3043,7 +3043,7 @@ void TurboAssembler::AddS32(Register dst, Register src1, Register src2) { } // Add Pointer Size (Register dst = Register src1 + Register src2) -void TurboAssembler::AddS64(Register dst, Register src1, Register src2) { +void MacroAssembler::AddS64(Register dst, Register src1, Register src2) { if (dst != src1 && dst != src2) { // We prefer to generate AR/AGR, over the non clobbering ARK/AGRK // as AR is a smaller instruction @@ -3060,7 +3060,7 @@ void TurboAssembler::AddS64(Register dst, Register src1, Register src2) { } // Add 32-bit (Register-Memory) -void TurboAssembler::AddS32(Register dst, const MemOperand& opnd) { +void MacroAssembler::AddS32(Register dst, const MemOperand& opnd) { DCHECK(is_int20(opnd.offset())); if (is_uint12(opnd.offset())) a(dst, opnd); @@ -3069,13 +3069,13 @@ void TurboAssembler::AddS32(Register dst, const MemOperand& opnd) { } // Add Pointer Size (Register-Memory) -void TurboAssembler::AddS64(Register dst, const MemOperand& opnd) { +void MacroAssembler::AddS64(Register dst, const MemOperand& opnd) { DCHECK(is_int20(opnd.offset())); ag(dst, opnd); } // Add 32-bit (Memory - Immediate) -void TurboAssembler::AddS32(const MemOperand& opnd, const Operand& imm) { +void MacroAssembler::AddS32(const MemOperand& opnd, const Operand& imm) { DCHECK(is_int8(imm.immediate())); DCHECK(is_int20(opnd.offset())); DCHECK(CpuFeatures::IsSupported(GENERAL_INSTR_EXT)); @@ -3083,7 +3083,7 @@ void TurboAssembler::AddS32(const MemOperand& opnd, const Operand& imm) { } // Add Pointer-sized (Memory - Immediate) -void TurboAssembler::AddS64(const MemOperand& opnd, const Operand& imm) { +void MacroAssembler::AddS64(const MemOperand& opnd, const Operand& imm) { DCHECK(is_int8(imm.immediate())); DCHECK(is_int20(opnd.offset())); DCHECK(CpuFeatures::IsSupported(GENERAL_INSTR_EXT)); @@ -3095,7 +3095,7 @@ void TurboAssembler::AddS64(const MemOperand& opnd, const Operand& imm) { //---------------------------------------------------------------------------- // Add Logical 32-bit (Register dst = Register src1 + Register src2) -void TurboAssembler::AddU32(Register dst, Register src1, Register src2) { +void MacroAssembler::AddU32(Register dst, Register src1, Register src2) { if (dst != src2 && dst != src1) { lr(dst, src1); alr(dst, src2); @@ -3111,16 +3111,16 @@ void TurboAssembler::AddU32(Register dst, Register src1, Register src2) { } // Add Logical 32-bit (Register dst = Register dst + Immediate opnd) -void TurboAssembler::AddU32(Register dst, const Operand& imm) { +void MacroAssembler::AddU32(Register dst, const Operand& imm) { alfi(dst, imm); } // Add Logical Pointer Size (Register dst = Register dst + Immediate opnd) -void TurboAssembler::AddU64(Register dst, const Operand& imm) { +void MacroAssembler::AddU64(Register dst, const Operand& imm) { algfi(dst, imm); 
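// NOTE (illustrative sketch, not part of this CL): the three-operand
// AddS32/AddS64 overloads above choose between the short two-operand form
// (ar/agr) and the z196 distinct-operands form (ark/agrk): when dst aliases
// a source the two-operand encoding is enough and smaller, otherwise
// ark/agrk avoids a register move, with lr + ar as the pre-DISTINCT_OPS
// fallback. Pseudocode over hypothetical emitters:
//
//   void EmitAdd32(Reg dst, Reg src1, Reg src2) {
//     if (dst == src1)           { ar(dst, src2); }
//     else if (dst == src2)      { ar(dst, src1); }       // + commutes
//     else if (has_distinct_ops) { ark(dst, src1, src2); }
//     else                       { lr(dst, src1); ar(dst, src2); }
//   }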
} -void TurboAssembler::AddU64(Register dst, Register src1, Register src2) { +void MacroAssembler::AddU64(Register dst, Register src1, Register src2) { if (dst != src2 && dst != src1) { if (CpuFeatures::IsSupported(DISTINCT_OPS)) { algrk(dst, src1, src2); @@ -3140,7 +3140,7 @@ void TurboAssembler::AddU64(Register dst, Register src1, Register src2) { } // Add Logical 32-bit (Register-Memory) -void TurboAssembler::AddU32(Register dst, const MemOperand& opnd) { +void MacroAssembler::AddU32(Register dst, const MemOperand& opnd) { DCHECK(is_int20(opnd.offset())); if (is_uint12(opnd.offset())) al_z(dst, opnd); @@ -3149,7 +3149,7 @@ void TurboAssembler::AddU32(Register dst, const MemOperand& opnd) { } // Add Logical Pointer Size (Register-Memory) -void TurboAssembler::AddU64(Register dst, const MemOperand& opnd) { +void MacroAssembler::AddU64(Register dst, const MemOperand& opnd) { DCHECK(is_int20(opnd.offset())); alg(dst, opnd); } @@ -3159,7 +3159,7 @@ void TurboAssembler::AddU64(Register dst, const MemOperand& opnd) { //---------------------------------------------------------------------------- // Subtract Logical 32-bit (Register dst = Register src1 - Register src2) -void TurboAssembler::SubU32(Register dst, Register src1, Register src2) { +void MacroAssembler::SubU32(Register dst, Register src1, Register src2) { if (dst != src2 && dst != src1) { lr(dst, src1); slr(dst, src2); @@ -3176,41 +3176,41 @@ void TurboAssembler::SubU32(Register dst, Register src1, Register src2) { } // Subtract 32-bit (Register dst = Register dst - Immediate opnd) -void TurboAssembler::SubS32(Register dst, const Operand& imm) { +void MacroAssembler::SubS32(Register dst, const Operand& imm) { AddS32(dst, Operand(-(imm.immediate()))); } // Subtract Pointer Size (Register dst = Register dst - Immediate opnd) -void TurboAssembler::SubS64(Register dst, const Operand& imm) { +void MacroAssembler::SubS64(Register dst, const Operand& imm) { AddS64(dst, Operand(-(imm.immediate()))); } -void TurboAssembler::SubS32(Register dst, Register src, int32_t imm) { +void MacroAssembler::SubS32(Register dst, Register src, int32_t imm) { SubS32(dst, src, Operand(imm)); } // Subtract 32-bit (Register dst = Register src - Immediate opnd) -void TurboAssembler::SubS32(Register dst, Register src, const Operand& imm) { +void MacroAssembler::SubS32(Register dst, Register src, const Operand& imm) { AddS32(dst, src, Operand(-(imm.immediate()))); } -void TurboAssembler::SubS64(Register dst, Register src, int32_t imm) { +void MacroAssembler::SubS64(Register dst, Register src, int32_t imm) { SubS64(dst, src, Operand(imm)); } // Subtract Pointer Sized (Register dst = Register src - Immediate opnd) -void TurboAssembler::SubS64(Register dst, Register src, const Operand& imm) { +void MacroAssembler::SubS64(Register dst, Register src, const Operand& imm) { AddS64(dst, src, Operand(-(imm.immediate()))); } // Subtract 32-bit (Register dst = Register dst - Register src) -void TurboAssembler::SubS32(Register dst, Register src) { sr(dst, src); } +void MacroAssembler::SubS32(Register dst, Register src) { sr(dst, src); } // Subtract Pointer Size (Register dst = Register dst - Register src) -void TurboAssembler::SubS64(Register dst, Register src) { sgr(dst, src); } +void MacroAssembler::SubS64(Register dst, Register src) { sgr(dst, src); } // Subtract 32-bit (Register = Register - Register) -void TurboAssembler::SubS32(Register dst, Register src1, Register src2) { +void MacroAssembler::SubS32(Register dst, Register src1, Register src2) { // Use non-clobbering 
version if possible if (CpuFeatures::IsSupported(DISTINCT_OPS)) { srk(dst, src1, src2); @@ -3230,7 +3230,7 @@ void TurboAssembler::SubS32(Register dst, Register src1, Register src2) { } // Subtract Pointer Sized (Register = Register - Register) -void TurboAssembler::SubS64(Register dst, Register src1, Register src2) { +void MacroAssembler::SubS64(Register dst, Register src1, Register src2) { // Use non-clobbering version if possible if (CpuFeatures::IsSupported(DISTINCT_OPS)) { sgrk(dst, src1, src2); @@ -3250,7 +3250,7 @@ void TurboAssembler::SubS64(Register dst, Register src1, Register src2) { } // Subtract 32-bit (Register-Memory) -void TurboAssembler::SubS32(Register dst, const MemOperand& opnd) { +void MacroAssembler::SubS32(Register dst, const MemOperand& opnd) { DCHECK(is_int20(opnd.offset())); if (is_uint12(opnd.offset())) s(dst, opnd); @@ -3259,7 +3259,7 @@ void TurboAssembler::SubS32(Register dst, const MemOperand& opnd) { } // Subtract Pointer Sized (Register - Memory) -void TurboAssembler::SubS64(Register dst, const MemOperand& opnd) { +void MacroAssembler::SubS64(Register dst, const MemOperand& opnd) { #if V8_TARGET_ARCH_S390X sg(dst, opnd); #else @@ -3267,24 +3267,24 @@ void TurboAssembler::SubS64(Register dst, const MemOperand& opnd) { #endif } -void TurboAssembler::MovIntToFloat(DoubleRegister dst, Register src) { +void MacroAssembler::MovIntToFloat(DoubleRegister dst, Register src) { sllg(r0, src, Operand(32)); ldgr(dst, r0); } -void TurboAssembler::MovFloatToInt(Register dst, DoubleRegister src) { +void MacroAssembler::MovFloatToInt(Register dst, DoubleRegister src) { lgdr(dst, src); srlg(dst, dst, Operand(32)); } // Load And Subtract 32-bit (similar to laa/lan/lao/lax) -void TurboAssembler::LoadAndSub32(Register dst, Register src, +void MacroAssembler::LoadAndSub32(Register dst, Register src, const MemOperand& opnd) { lcr(dst, src); laa(dst, dst, opnd); } -void TurboAssembler::LoadAndSub64(Register dst, Register src, +void MacroAssembler::LoadAndSub64(Register dst, Register src, const MemOperand& opnd) { lcgr(dst, src); laag(dst, dst, opnd); @@ -3295,7 +3295,7 @@ void TurboAssembler::LoadAndSub64(Register dst, Register src, //---------------------------------------------------------------------------- // Subtract Logical 32-bit (Register - Memory) -void TurboAssembler::SubU32(Register dst, const MemOperand& opnd) { +void MacroAssembler::SubU32(Register dst, const MemOperand& opnd) { DCHECK(is_int20(opnd.offset())); if (is_uint12(opnd.offset())) sl(dst, opnd); @@ -3304,7 +3304,7 @@ void TurboAssembler::SubU32(Register dst, const MemOperand& opnd) { } // Subtract Logical Pointer Sized (Register - Memory) -void TurboAssembler::SubU64(Register dst, const MemOperand& opnd) { +void MacroAssembler::SubU64(Register dst, const MemOperand& opnd) { DCHECK(is_int20(opnd.offset())); #if V8_TARGET_ARCH_S390X slgf(dst, opnd); @@ -3318,13 +3318,13 @@ void TurboAssembler::SubU64(Register dst, const MemOperand& opnd) { //---------------------------------------------------------------------------- // AND 32-bit - dst = dst & src -void TurboAssembler::And(Register dst, Register src) { nr(dst, src); } +void MacroAssembler::And(Register dst, Register src) { nr(dst, src); } // AND Pointer Size - dst = dst & src -void TurboAssembler::AndP(Register dst, Register src) { ngr(dst, src); } +void MacroAssembler::AndP(Register dst, Register src) { ngr(dst, src); } // Non-clobbering AND 32-bit - dst = src1 & src2 -void TurboAssembler::And(Register dst, Register src1, Register src2) { +void 
MacroAssembler::And(Register dst, Register src1, Register src2) { if (dst != src1 && dst != src2) { // We prefer to generate NR/NGR, over the non clobbering NRK/NGRK // as NR is a smaller instruction @@ -3341,7 +3341,7 @@ void TurboAssembler::And(Register dst, Register src1, Register src2) { } // Non-clobbering AND pointer size - dst = src1 & src2 -void TurboAssembler::AndP(Register dst, Register src1, Register src2) { +void MacroAssembler::AndP(Register dst, Register src1, Register src2) { if (dst != src1 && dst != src2) { // We prefer to generate NR/NGR, over the non clobbering NRK/NGRK // as NR is a smaller instruction @@ -3358,7 +3358,7 @@ void TurboAssembler::AndP(Register dst, Register src1, Register src2) { } // AND 32-bit (Reg - Mem) -void TurboAssembler::And(Register dst, const MemOperand& opnd) { +void MacroAssembler::And(Register dst, const MemOperand& opnd) { DCHECK(is_int20(opnd.offset())); if (is_uint12(opnd.offset())) n(dst, opnd); @@ -3367,7 +3367,7 @@ void TurboAssembler::And(Register dst, const MemOperand& opnd) { } // AND Pointer Size (Reg - Mem) -void TurboAssembler::AndP(Register dst, const MemOperand& opnd) { +void MacroAssembler::AndP(Register dst, const MemOperand& opnd) { DCHECK(is_int20(opnd.offset())); #if V8_TARGET_ARCH_S390X ng(dst, opnd); @@ -3377,10 +3377,10 @@ void TurboAssembler::AndP(Register dst, const MemOperand& opnd) { } // AND 32-bit - dst = dst & imm -void TurboAssembler::And(Register dst, const Operand& opnd) { nilf(dst, opnd); } +void MacroAssembler::And(Register dst, const Operand& opnd) { nilf(dst, opnd); } // AND Pointer Size - dst = dst & imm -void TurboAssembler::AndP(Register dst, const Operand& opnd) { +void MacroAssembler::AndP(Register dst, const Operand& opnd) { #if V8_TARGET_ARCH_S390X intptr_t value = opnd.immediate(); if (value >> 32 != -1) { @@ -3394,13 +3394,13 @@ void TurboAssembler::AndP(Register dst, const Operand& opnd) { } // AND 32-bit - dst = src & imm -void TurboAssembler::And(Register dst, Register src, const Operand& opnd) { +void MacroAssembler::And(Register dst, Register src, const Operand& opnd) { if (dst != src) lr(dst, src); nilf(dst, opnd); } // AND Pointer Size - dst = src & imm -void TurboAssembler::AndP(Register dst, Register src, const Operand& opnd) { +void MacroAssembler::AndP(Register dst, Register src, const Operand& opnd) { // Try to exploit RISBG first intptr_t value = opnd.immediate(); if (CpuFeatures::IsSupported(GENERAL_INSTR_EXT)) { @@ -3441,13 +3441,13 @@ void TurboAssembler::AndP(Register dst, Register src, const Operand& opnd) { } // OR 32-bit - dst = dst | src -void TurboAssembler::Or(Register dst, Register src) { or_z(dst, src); } +void MacroAssembler::Or(Register dst, Register src) { or_z(dst, src); } // OR Pointer Size - dst = dst | src -void TurboAssembler::OrP(Register dst, Register src) { ogr(dst, src); } +void MacroAssembler::OrP(Register dst, Register src) { ogr(dst, src); } // Non-clobbering OR 32-bit - dst = src1 | src2 -void TurboAssembler::Or(Register dst, Register src1, Register src2) { +void MacroAssembler::Or(Register dst, Register src1, Register src2) { if (dst != src1 && dst != src2) { // We prefer to generate OR/OGR, over the non clobbering ORK/OGRK // as OR is a smaller instruction @@ -3464,7 +3464,7 @@ void TurboAssembler::Or(Register dst, Register src1, Register src2) { } // Non-clobbering OR pointer size - dst = src1 | src2 -void TurboAssembler::OrP(Register dst, Register src1, Register src2) { +void MacroAssembler::OrP(Register dst, Register src1, Register src2) { if (dst != src1 
&& dst != src2) { // We prefer to generate OR/OGR, over the non clobbering ORK/OGRK // as OR is a smaller instruction @@ -3481,7 +3481,7 @@ void TurboAssembler::OrP(Register dst, Register src1, Register src2) { } // OR 32-bit (Reg - Mem) -void TurboAssembler::Or(Register dst, const MemOperand& opnd) { +void MacroAssembler::Or(Register dst, const MemOperand& opnd) { DCHECK(is_int20(opnd.offset())); if (is_uint12(opnd.offset())) o(dst, opnd); @@ -3490,7 +3490,7 @@ void TurboAssembler::Or(Register dst, const MemOperand& opnd) { } // OR Pointer Size (Reg - Mem) -void TurboAssembler::OrP(Register dst, const MemOperand& opnd) { +void MacroAssembler::OrP(Register dst, const MemOperand& opnd) { DCHECK(is_int20(opnd.offset())); #if V8_TARGET_ARCH_S390X og(dst, opnd); @@ -3500,10 +3500,10 @@ void TurboAssembler::OrP(Register dst, const MemOperand& opnd) { } // OR 32-bit - dst = dst | imm -void TurboAssembler::Or(Register dst, const Operand& opnd) { oilf(dst, opnd); } +void MacroAssembler::Or(Register dst, const Operand& opnd) { oilf(dst, opnd); } // OR Pointer Size - dst = dst | imm -void TurboAssembler::OrP(Register dst, const Operand& opnd) { +void MacroAssembler::OrP(Register dst, const Operand& opnd) { #if V8_TARGET_ARCH_S390X intptr_t value = opnd.immediate(); if (value >> 32 != 0) { @@ -3517,25 +3517,25 @@ void TurboAssembler::OrP(Register dst, const Operand& opnd) { } // OR 32-bit - dst = src | imm -void TurboAssembler::Or(Register dst, Register src, const Operand& opnd) { +void MacroAssembler::Or(Register dst, Register src, const Operand& opnd) { if (dst != src) lr(dst, src); oilf(dst, opnd); } // OR Pointer Size - dst = src | imm -void TurboAssembler::OrP(Register dst, Register src, const Operand& opnd) { +void MacroAssembler::OrP(Register dst, Register src, const Operand& opnd) { if (dst != src) mov(dst, src); OrP(dst, opnd); } // XOR 32-bit - dst = dst ^ src -void TurboAssembler::Xor(Register dst, Register src) { xr(dst, src); } +void MacroAssembler::Xor(Register dst, Register src) { xr(dst, src); } // XOR Pointer Size - dst = dst ^ src -void TurboAssembler::XorP(Register dst, Register src) { xgr(dst, src); } +void MacroAssembler::XorP(Register dst, Register src) { xgr(dst, src); } // Non-clobbering XOR 32-bit - dst = src1 ^ src2 -void TurboAssembler::Xor(Register dst, Register src1, Register src2) { +void MacroAssembler::Xor(Register dst, Register src1, Register src2) { if (dst != src1 && dst != src2) { // We prefer to generate XR/XGR, over the non clobbering XRK/XGRK // as XR is a smaller instruction @@ -3552,7 +3552,7 @@ void TurboAssembler::Xor(Register dst, Register src1, Register src2) { } // Non-clobbering XOR pointer size - dst = src1 ^ src2 -void TurboAssembler::XorP(Register dst, Register src1, Register src2) { +void MacroAssembler::XorP(Register dst, Register src1, Register src2) { if (dst != src1 && dst != src2) { // We prefer to generate XR/XGR, over the non clobbering XRK/XGRK // as XR is a smaller instruction @@ -3569,7 +3569,7 @@ void TurboAssembler::XorP(Register dst, Register src1, Register src2) { } // XOR 32-bit (Reg - Mem) -void TurboAssembler::Xor(Register dst, const MemOperand& opnd) { +void MacroAssembler::Xor(Register dst, const MemOperand& opnd) { DCHECK(is_int20(opnd.offset())); if (is_uint12(opnd.offset())) x(dst, opnd); @@ -3578,7 +3578,7 @@ void TurboAssembler::Xor(Register dst, const MemOperand& opnd) { } // XOR Pointer Size (Reg - Mem) -void TurboAssembler::XorP(Register dst, const MemOperand& opnd) { +void MacroAssembler::XorP(Register dst, const MemOperand& 
opnd) { DCHECK(is_int20(opnd.offset())); #if V8_TARGET_ARCH_S390X xg(dst, opnd); @@ -3588,10 +3588,10 @@ void TurboAssembler::XorP(Register dst, const MemOperand& opnd) { } // XOR 32-bit - dst = dst ^ imm -void TurboAssembler::Xor(Register dst, const Operand& opnd) { xilf(dst, opnd); } +void MacroAssembler::Xor(Register dst, const Operand& opnd) { xilf(dst, opnd); } // XOR Pointer Size - dst = dst ^ imm -void TurboAssembler::XorP(Register dst, const Operand& opnd) { +void MacroAssembler::XorP(Register dst, const Operand& opnd) { #if V8_TARGET_ARCH_S390X intptr_t value = opnd.immediate(); xihf(dst, Operand(value >> 32)); @@ -3602,29 +3602,29 @@ void TurboAssembler::XorP(Register dst, const Operand& opnd) { } // XOR 32-bit - dst = src ^ imm -void TurboAssembler::Xor(Register dst, Register src, const Operand& opnd) { +void MacroAssembler::Xor(Register dst, Register src, const Operand& opnd) { if (dst != src) lr(dst, src); xilf(dst, opnd); } // XOR Pointer Size - dst = src ^ imm -void TurboAssembler::XorP(Register dst, Register src, const Operand& opnd) { +void MacroAssembler::XorP(Register dst, Register src, const Operand& opnd) { if (dst != src) mov(dst, src); XorP(dst, opnd); } -void TurboAssembler::Not32(Register dst, Register src) { +void MacroAssembler::Not32(Register dst, Register src) { if (src != no_reg && src != dst) lr(dst, src); xilf(dst, Operand(0xFFFFFFFF)); } -void TurboAssembler::Not64(Register dst, Register src) { +void MacroAssembler::Not64(Register dst, Register src) { if (src != no_reg && src != dst) lgr(dst, src); xihf(dst, Operand(0xFFFFFFFF)); xilf(dst, Operand(0xFFFFFFFF)); } -void TurboAssembler::NotP(Register dst, Register src) { +void MacroAssembler::NotP(Register dst, Register src) { #if V8_TARGET_ARCH_S390X Not64(dst, src); #else @@ -3632,7 +3632,7 @@ void TurboAssembler::NotP(Register dst, Register src) { #endif } -void TurboAssembler::LoadPositiveP(Register result, Register input) { +void MacroAssembler::LoadPositiveP(Register result, Register input) { #if V8_TARGET_ARCH_S390X lpgr(result, input); #else @@ -3640,7 +3640,7 @@ void TurboAssembler::LoadPositiveP(Register result, Register input) { #endif } -void TurboAssembler::LoadPositive32(Register result, Register input) { +void MacroAssembler::LoadPositive32(Register result, Register input) { lpr(result, input); lgfr(result, result); } @@ -3650,14 +3650,14 @@ void TurboAssembler::LoadPositive32(Register result, Register input) { //----------------------------------------------------------------------------- // Compare 32-bit Register vs Register -void TurboAssembler::CmpS32(Register src1, Register src2) { cr_z(src1, src2); } +void MacroAssembler::CmpS32(Register src1, Register src2) { cr_z(src1, src2); } // Compare Pointer Sized Register vs Register -void TurboAssembler::CmpS64(Register src1, Register src2) { cgr(src1, src2); } +void MacroAssembler::CmpS64(Register src1, Register src2) { cgr(src1, src2); } // Compare 32-bit Register vs Immediate // This helper will set up proper relocation entries if required. -void TurboAssembler::CmpS32(Register dst, const Operand& opnd) { +void MacroAssembler::CmpS32(Register dst, const Operand& opnd) { if (opnd.rmode() == RelocInfo::NO_INFO) { intptr_t value = opnd.immediate(); if (is_int16(value)) @@ -3673,7 +3673,7 @@ void TurboAssembler::CmpS32(Register dst, const Operand& opnd) {
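// NOTE (illustrative sketch, not part of this CL): Not32/Not64 above
// synthesize bitwise NOT as XOR with all-ones, since z/Architecture has no
// single register-NOT instruction; the 64-bit form needs two 32-bit
// immediates (xihf for the high half, xilf for the low half):
//
//   #include <cstdint>
//   uint32_t Not32(uint32_t x) { return x ^ 0xFFFFFFFFu; }            // xilf
//   uint64_t Not64(uint64_t x) { return x ^ 0xFFFFFFFFFFFFFFFFull; }  // xihf + xilf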
// Compare Pointer Sized Register vs Immediate // This helper will set up proper relocation entries if required. -void TurboAssembler::CmpS64(Register dst, const Operand& opnd) { +void MacroAssembler::CmpS64(Register dst, const Operand& opnd) { if (opnd.rmode() == RelocInfo::NO_INFO) { cgfi(dst, opnd); } else { @@ -3683,7 +3683,7 @@ void TurboAssembler::CmpS64(Register dst, const Operand& opnd) { } // Compare 32-bit Register vs Memory -void TurboAssembler::CmpS32(Register dst, const MemOperand& opnd) { +void MacroAssembler::CmpS32(Register dst, const MemOperand& opnd) { // make sure offset is within 20 bit range DCHECK(is_int20(opnd.offset())); if (is_uint12(opnd.offset())) @@ -3693,14 +3693,14 @@ void TurboAssembler::CmpS32(Register dst, const MemOperand& opnd) { } // Compare Pointer Size Register vs Memory -void TurboAssembler::CmpS64(Register dst, const MemOperand& opnd) { +void MacroAssembler::CmpS64(Register dst, const MemOperand& opnd) { // make sure offset is within 20 bit range DCHECK(is_int20(opnd.offset())); cg(dst, opnd); } // Using cs or csy based on the offset -void TurboAssembler::CmpAndSwap(Register old_val, Register new_val, +void MacroAssembler::CmpAndSwap(Register old_val, Register new_val, const MemOperand& opnd) { if (is_uint12(opnd.offset())) { cs(old_val, new_val, opnd); @@ -3709,7 +3709,7 @@ void TurboAssembler::CmpAndSwap(Register old_val, Register new_val, } } -void TurboAssembler::CmpAndSwap64(Register old_val, Register new_val, +void MacroAssembler::CmpAndSwap64(Register old_val, Register new_val, const MemOperand& opnd) { DCHECK(is_int20(opnd.offset())); csg(old_val, new_val, opnd); @@ -3720,10 +3720,10 @@ void TurboAssembler::CmpAndSwap64(Register old_val, Register new_val, //----------------------------------------------------------------------------- // Compare Logical 32-bit Register vs Register -void TurboAssembler::CmpU32(Register dst, Register src) { clr(dst, src); } +void MacroAssembler::CmpU32(Register dst, Register src) { clr(dst, src); } // Compare Logical Pointer Sized Register vs Register -void TurboAssembler::CmpU64(Register dst, Register src) { +void MacroAssembler::CmpU64(Register dst, Register src) { #ifdef V8_TARGET_ARCH_S390X clgr(dst, src); #else @@ -3732,12 +3732,12 @@ void TurboAssembler::CmpU64(Register dst, Register src) { } // Compare Logical 32-bit Register vs Immediate -void TurboAssembler::CmpU32(Register dst, const Operand& opnd) { +void MacroAssembler::CmpU32(Register dst, const Operand& opnd) { clfi(dst, opnd); } // Compare Logical Pointer Sized Register vs Immediate -void TurboAssembler::CmpU64(Register dst, const Operand& opnd) { +void MacroAssembler::CmpU64(Register dst, const Operand& opnd) { #if V8_TARGET_ARCH_S390X DCHECK_EQ(static_cast<uint32_t>(opnd.immediate() >> 32), 0); clgfi(dst, opnd); @@ -3747,7 +3747,7 @@ void TurboAssembler::CmpU64(Register dst, const Operand& opnd) { } // Compare Logical 32-bit Register vs Memory -void TurboAssembler::CmpU32(Register dst, const MemOperand& opnd) { +void MacroAssembler::CmpU32(Register dst, const MemOperand& opnd) { // make sure offset is within 20 bit range DCHECK(is_int20(opnd.offset())); if (is_uint12(opnd.offset())) @@ -3757,7 +3757,7 @@ void TurboAssembler::CmpU32(Register dst, const MemOperand& opnd) { } // Compare Logical Pointer Sized Register vs Memory -void TurboAssembler::CmpU64(Register dst, const MemOperand& opnd) { +void MacroAssembler::CmpU64(Register dst, const MemOperand& opnd) { // make sure offset is within 20 bit range DCHECK(is_int20(opnd.offset())); #if V8_TARGET_ARCH_S390X @@ -3767,7 +3767,7 @@ void TurboAssembler::CmpU64(Register dst, const MemOperand& opnd) { #endif }
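// NOTE (illustrative sketch, not part of this CL): CmpAndSwap/CmpAndSwap64
// above wrap the cs/csg compare-and-swap instructions: memory is replaced by
// new_val only if it still holds old_val, and old_val receives the value
// actually found. That is exactly a strong C++11 compare-exchange:
//
//   #include <atomic>
//   #include <cstdint>
//   bool CmpAndSwap64(std::atomic<int64_t>& mem, int64_t& old_val,
//                     int64_t new_val) {
//     return mem.compare_exchange_strong(old_val, new_val);  // csg
//   }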
-void TurboAssembler::Branch(Condition c, const Operand& opnd) { +void MacroAssembler::Branch(Condition c, const Operand& opnd) { intptr_t value = opnd.immediate(); if (is_int16(value)) brc(c, opnd); @@ -3776,7 +3776,7 @@ void TurboAssembler::Branch(Condition c, const Operand& opnd) { } // Branch On Count. Decrement R1, and branch if R1 != 0. -void TurboAssembler::BranchOnCount(Register r1, Label* l) { +void MacroAssembler::BranchOnCount(Register r1, Label* l) { int32_t offset = branch_offset(l); if (is_int16(offset)) { #if V8_TARGET_ARCH_S390X @@ -3790,7 +3790,7 @@ void TurboAssembler::BranchOnCount(Register r1, Label* l) { } } -void TurboAssembler::LoadSmiLiteral(Register dst, Smi smi) { +void MacroAssembler::LoadSmiLiteral(Register dst, Smi smi) { intptr_t value = static_cast<intptr_t>(smi.ptr()); #if defined(V8_COMPRESS_POINTERS) || defined(V8_31BIT_SMIS_ON_64BIT_ARCH) llilf(dst, Operand(value)); @@ -3801,7 +3801,7 @@ void TurboAssembler::LoadSmiLiteral(Register dst, Smi smi) { #endif } -void TurboAssembler::CmpSmiLiteral(Register src1, Smi smi, Register scratch) { +void MacroAssembler::CmpSmiLiteral(Register src1, Smi smi, Register scratch) { #if defined(V8_COMPRESS_POINTERS) || defined(V8_31BIT_SMIS_ON_64BIT_ARCH) // CFI takes 32-bit immediate. cfi(src1, Operand(smi)); @@ -3815,7 +3815,7 @@ void TurboAssembler::CmpSmiLiteral(Register src1, Smi smi, Register scratch) { #endif } -void TurboAssembler::LoadU64(Register dst, const MemOperand& mem, +void MacroAssembler::LoadU64(Register dst, const MemOperand& mem, Register scratch) { int offset = mem.offset(); @@ -3830,7 +3830,7 @@ void TurboAssembler::LoadU64(Register dst, const MemOperand& mem, } // Store a "pointer" sized value to the memory location -void TurboAssembler::StoreU64(Register src, const MemOperand& mem, +void MacroAssembler::StoreU64(Register src, const MemOperand& mem, Register scratch) { if (!is_int20(mem.offset())) { DCHECK(scratch != no_reg); @@ -3843,7 +3843,7 @@ void TurboAssembler::StoreU64(Register src, const MemOperand& mem, } // Store a "pointer" sized constant to the memory location -void TurboAssembler::StoreU64(const MemOperand& mem, const Operand& opnd, +void MacroAssembler::StoreU64(const MemOperand& mem, const Operand& opnd, Register scratch) { // Relocations not supported DCHECK_EQ(opnd.rmode(), RelocInfo::NO_INFO); @@ -3858,7 +3858,7 @@ void TurboAssembler::StoreU64(const MemOperand& mem, const Operand& opnd, } } -void TurboAssembler::LoadMultipleP(Register dst1, Register dst2, +void MacroAssembler::LoadMultipleP(Register dst1, Register dst2, const MemOperand& mem) { #if V8_TARGET_ARCH_S390X DCHECK(is_int20(mem.offset())); @@ -3873,7 +3873,7 @@ void TurboAssembler::LoadMultipleP(Register dst1, Register dst2, #endif } -void TurboAssembler::StoreMultipleP(Register src1, Register src2, +void MacroAssembler::StoreMultipleP(Register src1, Register src2, const MemOperand& mem) { #if V8_TARGET_ARCH_S390X DCHECK(is_int20(mem.offset())); @@ -3888,7 +3888,7 @@ void TurboAssembler::StoreMultipleP(Register src1, Register src2, #endif } -void TurboAssembler::LoadMultipleW(Register dst1, Register dst2, +void MacroAssembler::LoadMultipleW(Register dst1, Register dst2, const MemOperand& mem) { if (is_uint12(mem.offset())) { lm(dst1, dst2, mem); @@ -3898,7 +3898,7 @@ void TurboAssembler::LoadMultipleW(Register dst1, Register dst2, } } -void TurboAssembler::StoreMultipleW(Register src1, Register src2, +void MacroAssembler::StoreMultipleW(Register src1, Register src2, const MemOperand& mem) { if (is_uint12(mem.offset())) { stm(src1, src2,
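// NOTE (illustrative sketch, not part of this CL): the LoadU64/StoreU64
// helpers above share one addressing pattern: a signed 20-bit displacement
// fits the RXY instruction format directly, while anything larger is first
// materialized into the scratch register and used as an index. In outline
// (hypothetical emitters):
//
//   void StoreU64(Reg src, Mem mem, Reg scratch) {
//     if (is_int20(mem.offset())) {
//       stg(src, mem);
//     } else {
//       mov(scratch, mem.offset());            // needs a valid scratch reg
//       stg(src, Mem(mem.base(), scratch));    // base + index addressing
//     }
//   }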
mem); @@ -3909,7 +3909,7 @@ void TurboAssembler::StoreMultipleW(Register src1, Register src2, } // Load 32-bits and sign extend if necessary. -void TurboAssembler::LoadS32(Register dst, Register src) { +void MacroAssembler::LoadS32(Register dst, Register src) { #if V8_TARGET_ARCH_S390X lgfr(dst, src); #else @@ -3918,8 +3918,8 @@ void TurboAssembler::LoadS32(Register dst, Register src) { } // Load 32-bits and sign extend if necessary. -void TurboAssembler::LoadS32(Register dst, const MemOperand& mem, - Register scratch) { +void MacroAssembler::LoadS32(Register dst, const MemOperand& mem, + Register scratch) { int offset = mem.offset(); if (!is_int20(offset)) { @@ -3944,7 +3944,7 @@ void TurboAssembler::LoadS32(Register dst, const MemOperand& mem, } // Load 32-bits and zero extend if necessary. -void TurboAssembler::LoadU32(Register dst, Register src) { +void MacroAssembler::LoadU32(Register dst, Register src) { #if V8_TARGET_ARCH_S390X llgfr(dst, src); #else @@ -3954,8 +3954,8 @@ void TurboAssembler::LoadU32(Register dst, Register src) { // Variable length depending on whether offset fits into immediate field // MemOperand of RX or RXY format -void TurboAssembler::LoadU32(Register dst, const MemOperand& mem, - Register scratch) { +void MacroAssembler::LoadU32(Register dst, const MemOperand& mem, + Register scratch) { Register base = mem.rb(); int offset = mem.offset(); @@ -3995,7 +3995,7 @@ void TurboAssembler::LoadU32(Register dst, const MemOperand& mem, #endif } -void TurboAssembler::LoadU16(Register dst, const MemOperand& mem) { +void MacroAssembler::LoadU16(Register dst, const MemOperand& mem) { // TODO(s390x): Add scratch reg #if V8_TARGET_ARCH_S390X llgh(dst, mem); @@ -4004,7 +4004,7 @@ void TurboAssembler::LoadU16(Register dst, const MemOperand& mem) { #endif } -void TurboAssembler::LoadU16(Register dst, Register src) { +void MacroAssembler::LoadU16(Register dst, Register src) { #if V8_TARGET_ARCH_S390X llghr(dst, src); #else @@ -4012,7 +4012,7 @@ void TurboAssembler::LoadU16(Register dst, Register src) { #endif } -void TurboAssembler::LoadS8(Register dst, const MemOperand& mem) { +void MacroAssembler::LoadS8(Register dst, const MemOperand& mem) { // TODO(s390x): Add scratch reg #if V8_TARGET_ARCH_S390X lgb(dst, mem); @@ -4021,7 +4021,7 @@ void TurboAssembler::LoadS8(Register dst, const MemOperand& mem) { #endif } -void TurboAssembler::LoadS8(Register dst, Register src) { +void MacroAssembler::LoadS8(Register dst, Register src) { #if V8_TARGET_ARCH_S390X lgbr(dst, src); #else @@ -4029,7 +4029,7 @@ void TurboAssembler::LoadS8(Register dst, Register src) { #endif } -void TurboAssembler::LoadU8(Register dst, const MemOperand& mem) { +void MacroAssembler::LoadU8(Register dst, const MemOperand& mem) { // TODO(s390x): Add scratch reg #if V8_TARGET_ARCH_S390X llgc(dst, mem); @@ -4038,7 +4038,7 @@ void TurboAssembler::LoadU8(Register dst, const MemOperand& mem) { #endif } -void TurboAssembler::LoadU8(Register dst, Register src) { +void MacroAssembler::LoadU8(Register dst, Register src) { #if V8_TARGET_ARCH_S390X llgcr(dst, src); #else @@ -4047,34 +4047,34 @@ void TurboAssembler::LoadU8(Register dst, Register src) { } #ifdef V8_TARGET_BIG_ENDIAN -void TurboAssembler::LoadU64LE(Register dst, const MemOperand& mem, +void MacroAssembler::LoadU64LE(Register dst, const MemOperand& mem, Register scratch) { lrvg(dst, mem); } -void TurboAssembler::LoadS32LE(Register dst, const MemOperand& opnd, +void MacroAssembler::LoadS32LE(Register dst, const MemOperand& opnd, Register scratch) { lrv(dst, opnd); 
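// NOTE (illustrative sketch, not part of this CL): the LoadS*/LoadU* family
// above bakes the extension into the name: lgfr/lgbr sign-extend while
// llgfr/llghr/llgcr zero-extend. In C++ the distinction is just the
// signedness of the narrow type being widened:
//
//   #include <cstdint>
//   int64_t  LoadS32(int32_t v)  { return v; }   // lgfr
//   uint64_t LoadU32(uint32_t v) { return v; }   // llgfr
//   int64_t  LoadS8(int8_t v)    { return v; }   // lgbr
//   uint64_t LoadU8(uint8_t v)   { return v; }   // llgcr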
LoadS32(dst, dst); } -void TurboAssembler::LoadU32LE(Register dst, const MemOperand& opnd, +void MacroAssembler::LoadU32LE(Register dst, const MemOperand& opnd, Register scratch) { lrv(dst, opnd); LoadU32(dst, dst); } -void TurboAssembler::LoadU16LE(Register dst, const MemOperand& opnd) { +void MacroAssembler::LoadU16LE(Register dst, const MemOperand& opnd) { lrvh(dst, opnd); LoadU16(dst, dst); } -void TurboAssembler::LoadS16LE(Register dst, const MemOperand& opnd) { +void MacroAssembler::LoadS16LE(Register dst, const MemOperand& opnd) { lrvh(dst, opnd); LoadS16(dst, dst); } -void TurboAssembler::LoadV128LE(DoubleRegister dst, const MemOperand& opnd, +void MacroAssembler::LoadV128LE(DoubleRegister dst, const MemOperand& opnd, Register scratch0, Register scratch1) { bool use_vlbr = CpuFeatures::IsSupported(VECTOR_ENHANCE_FACILITY_2) && is_uint12(opnd.offset()); @@ -4088,20 +4088,20 @@ void TurboAssembler::LoadV128LE(DoubleRegister dst, const MemOperand& opnd, } } -void TurboAssembler::LoadF64LE(DoubleRegister dst, const MemOperand& opnd, +void MacroAssembler::LoadF64LE(DoubleRegister dst, const MemOperand& opnd, Register scratch) { lrvg(scratch, opnd); ldgr(dst, scratch); } -void TurboAssembler::LoadF32LE(DoubleRegister dst, const MemOperand& opnd, +void MacroAssembler::LoadF32LE(DoubleRegister dst, const MemOperand& opnd, Register scratch) { lrv(scratch, opnd); ShiftLeftU64(scratch, scratch, Operand(32)); ldgr(dst, scratch); } -void TurboAssembler::StoreU64LE(Register src, const MemOperand& mem, +void MacroAssembler::StoreU64LE(Register src, const MemOperand& mem, Register scratch) { if (!is_int20(mem.offset())) { DCHECK(scratch != no_reg); @@ -4113,7 +4113,7 @@ void TurboAssembler::StoreU64LE(Register src, const MemOperand& mem, } } -void TurboAssembler::StoreU32LE(Register src, const MemOperand& mem, +void MacroAssembler::StoreU32LE(Register src, const MemOperand& mem, Register scratch) { if (!is_int20(mem.offset())) { DCHECK(scratch != no_reg); @@ -4125,7 +4125,7 @@ void TurboAssembler::StoreU32LE(Register src, const MemOperand& mem, } } -void TurboAssembler::StoreU16LE(Register src, const MemOperand& mem, +void MacroAssembler::StoreU16LE(Register src, const MemOperand& mem, Register scratch) { if (!is_int20(mem.offset())) { DCHECK(scratch != no_reg); @@ -4137,14 +4137,14 @@ void TurboAssembler::StoreU16LE(Register src, const MemOperand& mem, } } -void TurboAssembler::StoreF64LE(DoubleRegister src, const MemOperand& opnd, +void MacroAssembler::StoreF64LE(DoubleRegister src, const MemOperand& opnd, Register scratch) { DCHECK(is_uint12(opnd.offset())); lgdr(scratch, src); strvg(scratch, opnd); } -void TurboAssembler::StoreF32LE(DoubleRegister src, const MemOperand& opnd, +void MacroAssembler::StoreF32LE(DoubleRegister src, const MemOperand& opnd, Register scratch) { DCHECK(is_uint12(opnd.offset())); lgdr(scratch, src); @@ -4152,7 +4152,7 @@ void TurboAssembler::StoreF32LE(DoubleRegister src, const MemOperand& opnd, strv(scratch, opnd); } -void TurboAssembler::StoreV128LE(Simd128Register src, const MemOperand& mem, +void MacroAssembler::StoreV128LE(Simd128Register src, const MemOperand& mem, Register scratch1, Register scratch2) { bool use_vstbr = CpuFeatures::IsSupported(VECTOR_ENHANCE_FACILITY_2) && is_uint12(mem.offset()); @@ -4168,73 +4168,73 @@ void TurboAssembler::StoreV128LE(Simd128Register src, const MemOperand& mem, } #else -void TurboAssembler::LoadU64LE(Register dst, const MemOperand& mem, +void MacroAssembler::LoadU64LE(Register dst, const MemOperand& mem, Register scratch) { 
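// NOTE (illustrative sketch, not part of this CL): on big-endian s390 the
// *LE accessors above use byte-reversing memory instructions (lrv, lrvg,
// strv, strvg) and, for floating point, bounce the reversed bits through a
// GPR with ldgr/lgdr. A portable model of LoadF64LE:
//
//   #include <cstdint>
//   #include <cstring>
//   double LoadF64LE(const unsigned char* p) {
//     uint64_t raw = 0;
//     for (int i = 7; i >= 0; i--) raw = (raw << 8) | p[i];  // lrvg
//     double d;
//     std::memcpy(&d, &raw, sizeof d);                       // ldgr
//     return d;
//   }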
LoadU64(dst, mem, scratch); } -void TurboAssembler::LoadS32LE(Register dst, const MemOperand& opnd, +void MacroAssembler::LoadS32LE(Register dst, const MemOperand& opnd, Register scratch) { LoadS32(dst, opnd, scratch); } -void TurboAssembler::LoadU32LE(Register dst, const MemOperand& opnd, +void MacroAssembler::LoadU32LE(Register dst, const MemOperand& opnd, Register scratch) { LoadU32(dst, opnd, scratch); } -void TurboAssembler::LoadU16LE(Register dst, const MemOperand& opnd) { +void MacroAssembler::LoadU16LE(Register dst, const MemOperand& opnd) { LoadU16(dst, opnd); } -void TurboAssembler::LoadS16LE(Register dst, const MemOperand& opnd) { +void MacroAssembler::LoadS16LE(Register dst, const MemOperand& opnd) { LoadS16(dst, opnd); } -void TurboAssembler::LoadV128LE(DoubleRegister dst, const MemOperand& opnd, +void MacroAssembler::LoadV128LE(DoubleRegister dst, const MemOperand& opnd, Register scratch0, Register scratch1) { USE(scratch1); LoadV128(dst, opnd, scratch0); } -void TurboAssembler::LoadF64LE(DoubleRegister dst, const MemOperand& opnd, +void MacroAssembler::LoadF64LE(DoubleRegister dst, const MemOperand& opnd, Register scratch) { USE(scratch); LoadF64(dst, opnd); } -void TurboAssembler::LoadF32LE(DoubleRegister dst, const MemOperand& opnd, +void MacroAssembler::LoadF32LE(DoubleRegister dst, const MemOperand& opnd, Register scratch) { USE(scratch); LoadF32(dst, opnd); } -void TurboAssembler::StoreU64LE(Register src, const MemOperand& mem, +void MacroAssembler::StoreU64LE(Register src, const MemOperand& mem, Register scratch) { StoreU64(src, mem, scratch); } -void TurboAssembler::StoreU32LE(Register src, const MemOperand& mem, +void MacroAssembler::StoreU32LE(Register src, const MemOperand& mem, Register scratch) { StoreU32(src, mem, scratch); } -void TurboAssembler::StoreU16LE(Register src, const MemOperand& mem, +void MacroAssembler::StoreU16LE(Register src, const MemOperand& mem, Register scratch) { StoreU16(src, mem, scratch); } -void TurboAssembler::StoreF64LE(DoubleRegister src, const MemOperand& opnd, +void MacroAssembler::StoreF64LE(DoubleRegister src, const MemOperand& opnd, Register scratch) { StoreF64(src, opnd); } -void TurboAssembler::StoreF32LE(DoubleRegister src, const MemOperand& opnd, +void MacroAssembler::StoreF32LE(DoubleRegister src, const MemOperand& opnd, Register scratch) { StoreF32(src, opnd); } -void TurboAssembler::StoreV128LE(Simd128Register src, const MemOperand& mem, +void MacroAssembler::StoreV128LE(Simd128Register src, const MemOperand& mem, Register scratch1, Register scratch2) { StoreV128(src, mem, scratch1); } @@ -4242,12 +4242,12 @@ void TurboAssembler::StoreV128LE(Simd128Register src, const MemOperand& mem, #endif // Load And Test (Reg <- Reg) -void TurboAssembler::LoadAndTest32(Register dst, Register src) { +void MacroAssembler::LoadAndTest32(Register dst, Register src) { ltr(dst, src); } // Load And Test Pointer Sized (Reg <- Reg) -void TurboAssembler::LoadAndTestP(Register dst, Register src) { +void MacroAssembler::LoadAndTestP(Register dst, Register src) { #if V8_TARGET_ARCH_S390X ltgr(dst, src); #else @@ -4256,12 +4256,12 @@ void TurboAssembler::LoadAndTestP(Register dst, Register src) { } // Load And Test 32-bit (Reg <- Mem) -void TurboAssembler::LoadAndTest32(Register dst, const MemOperand& mem) { +void MacroAssembler::LoadAndTest32(Register dst, const MemOperand& mem) { lt_z(dst, mem); } // Load And Test Pointer Sized (Reg <- Mem) -void TurboAssembler::LoadAndTestP(Register dst, const MemOperand& mem) { +void 
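// Load-and-test (lt/ltg below) sets the condition code from the value it loads, so a zero or sign check folds into the load itself; hypothetically, LoadAndTestP(r1, MemOperand(r2, 0)); beq(&is_zero); saves the separate CmpS64 against zero that a plain LoadU64 would need.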
MacroAssembler::LoadAndTestP(Register dst, const MemOperand& mem) { #if V8_TARGET_ARCH_S390X ltg(dst, mem); #else @@ -4270,7 +4270,7 @@ void TurboAssembler::LoadAndTestP(Register dst, const MemOperand& mem) { } // Load On Condition Pointer Sized (Reg <- Reg) -void TurboAssembler::LoadOnConditionP(Condition cond, Register dst, +void MacroAssembler::LoadOnConditionP(Condition cond, Register dst, Register src) { #if V8_TARGET_ARCH_S390X locgr(cond, dst, src); @@ -4280,7 +4280,7 @@ void TurboAssembler::LoadOnConditionP(Condition cond, Register dst, } // Load Double Precision (64-bit) Floating Point number from memory -void TurboAssembler::LoadF64(DoubleRegister dst, const MemOperand& mem) { +void MacroAssembler::LoadF64(DoubleRegister dst, const MemOperand& mem) { // for 32bit and 64bit we all use 64bit floating point regs if (is_uint12(mem.offset())) { ld(dst, mem); @@ -4290,7 +4290,7 @@ void TurboAssembler::LoadF64(DoubleRegister dst, const MemOperand& mem) { } // Load Single Precision (32-bit) Floating Point number from memory -void TurboAssembler::LoadF32(DoubleRegister dst, const MemOperand& mem) { +void MacroAssembler::LoadF32(DoubleRegister dst, const MemOperand& mem) { if (is_uint12(mem.offset())) { le_z(dst, mem); } else { @@ -4299,7 +4299,7 @@ void TurboAssembler::LoadF32(DoubleRegister dst, const MemOperand& mem) { } } -void TurboAssembler::LoadV128(Simd128Register dst, const MemOperand& mem, +void MacroAssembler::LoadV128(Simd128Register dst, const MemOperand& mem, Register scratch) { DCHECK(scratch != r0); if (is_uint12(mem.offset())) { @@ -4312,7 +4312,7 @@ void TurboAssembler::LoadV128(Simd128Register dst, const MemOperand& mem, } // Store Double Precision (64-bit) Floating Point number to memory -void TurboAssembler::StoreF64(DoubleRegister dst, const MemOperand& mem) { +void MacroAssembler::StoreF64(DoubleRegister dst, const MemOperand& mem) { if (is_uint12(mem.offset())) { std(dst, mem); } else { @@ -4321,7 +4321,7 @@ void TurboAssembler::StoreF64(DoubleRegister dst, const MemOperand& mem) { } // Store Single Precision (32-bit) Floating Point number to memory -void TurboAssembler::StoreF32(DoubleRegister src, const MemOperand& mem) { +void MacroAssembler::StoreF32(DoubleRegister src, const MemOperand& mem) { if (is_uint12(mem.offset())) { ste(src, mem); } else { @@ -4329,7 +4329,7 @@ void TurboAssembler::StoreF32(DoubleRegister src, const MemOperand& mem) { } } -void TurboAssembler::StoreV128(Simd128Register src, const MemOperand& mem, +void MacroAssembler::StoreV128(Simd128Register src, const MemOperand& mem, Register scratch) { DCHECK(scratch != r0); if (is_uint12(mem.offset())) { @@ -4341,7 +4341,7 @@ void TurboAssembler::StoreV128(Simd128Register src, const MemOperand& mem, } } -void TurboAssembler::AddF32(DoubleRegister dst, DoubleRegister lhs, +void MacroAssembler::AddF32(DoubleRegister dst, DoubleRegister lhs, DoubleRegister rhs) { if (dst == lhs) { aebr(dst, rhs); @@ -4353,7 +4353,7 @@ void TurboAssembler::AddF32(DoubleRegister dst, DoubleRegister lhs, } } -void TurboAssembler::SubF32(DoubleRegister dst, DoubleRegister lhs, +void MacroAssembler::SubF32(DoubleRegister dst, DoubleRegister lhs, DoubleRegister rhs) { if (dst == lhs) { sebr(dst, rhs); @@ -4366,7 +4366,7 @@ void TurboAssembler::SubF32(DoubleRegister dst, DoubleRegister lhs, } } -void TurboAssembler::MulF32(DoubleRegister dst, DoubleRegister lhs, +void MacroAssembler::MulF32(DoubleRegister dst, DoubleRegister lhs, DoubleRegister rhs) { if (dst == lhs) { meebr(dst, rhs); @@ -4378,7 +4378,7 @@ void 
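// The scalar FP helpers here cope with s390's two-operand arithmetic forms: when dst aliases lhs a single instruction suffices, when dst aliases rhs the commutative ops just swap operands (the non-commutative ones shuffle through another register), and otherwise lhs is copied into dst first.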
TurboAssembler::MulF32(DoubleRegister dst, DoubleRegister lhs, } } -void TurboAssembler::DivF32(DoubleRegister dst, DoubleRegister lhs, +void MacroAssembler::DivF32(DoubleRegister dst, DoubleRegister lhs, DoubleRegister rhs) { if (dst == lhs) { debr(dst, rhs); @@ -4394,7 +4394,7 @@ void TurboAssembler::DivF32(DoubleRegister dst, DoubleRegister lhs, } } -void TurboAssembler::AddF64(DoubleRegister dst, DoubleRegister lhs, +void MacroAssembler::AddF64(DoubleRegister dst, DoubleRegister lhs, DoubleRegister rhs) { if (dst == lhs) { adbr(dst, rhs); @@ -4406,7 +4406,7 @@ void TurboAssembler::AddF64(DoubleRegister dst, DoubleRegister lhs, } } -void TurboAssembler::SubF64(DoubleRegister dst, DoubleRegister lhs, +void MacroAssembler::SubF64(DoubleRegister dst, DoubleRegister lhs, DoubleRegister rhs) { if (dst == lhs) { sdbr(dst, rhs); @@ -4419,7 +4419,7 @@ void TurboAssembler::SubF64(DoubleRegister dst, DoubleRegister lhs, } } -void TurboAssembler::MulF64(DoubleRegister dst, DoubleRegister lhs, +void MacroAssembler::MulF64(DoubleRegister dst, DoubleRegister lhs, DoubleRegister rhs) { if (dst == lhs) { mdbr(dst, rhs); @@ -4431,7 +4431,7 @@ void TurboAssembler::MulF64(DoubleRegister dst, DoubleRegister lhs, } } -void TurboAssembler::DivF64(DoubleRegister dst, DoubleRegister lhs, +void MacroAssembler::DivF64(DoubleRegister dst, DoubleRegister lhs, DoubleRegister rhs) { if (dst == lhs) { ddbr(dst, rhs); @@ -4447,7 +4447,7 @@ void TurboAssembler::DivF64(DoubleRegister dst, DoubleRegister lhs, } } -void TurboAssembler::AddFloat32(DoubleRegister dst, const MemOperand& opnd, +void MacroAssembler::AddFloat32(DoubleRegister dst, const MemOperand& opnd, DoubleRegister scratch) { if (is_uint12(opnd.offset())) { aeb(dst, opnd); @@ -4457,7 +4457,7 @@ void TurboAssembler::AddFloat32(DoubleRegister dst, const MemOperand& opnd, } } -void TurboAssembler::AddFloat64(DoubleRegister dst, const MemOperand& opnd, +void MacroAssembler::AddFloat64(DoubleRegister dst, const MemOperand& opnd, DoubleRegister scratch) { if (is_uint12(opnd.offset())) { adb(dst, opnd); @@ -4467,7 +4467,7 @@ void TurboAssembler::AddFloat64(DoubleRegister dst, const MemOperand& opnd, } } -void TurboAssembler::SubFloat32(DoubleRegister dst, const MemOperand& opnd, +void MacroAssembler::SubFloat32(DoubleRegister dst, const MemOperand& opnd, DoubleRegister scratch) { if (is_uint12(opnd.offset())) { seb(dst, opnd); @@ -4477,7 +4477,7 @@ void TurboAssembler::SubFloat32(DoubleRegister dst, const MemOperand& opnd, } } -void TurboAssembler::SubFloat64(DoubleRegister dst, const MemOperand& opnd, +void MacroAssembler::SubFloat64(DoubleRegister dst, const MemOperand& opnd, DoubleRegister scratch) { if (is_uint12(opnd.offset())) { sdb(dst, opnd); @@ -4487,7 +4487,7 @@ void TurboAssembler::SubFloat64(DoubleRegister dst, const MemOperand& opnd, } } -void TurboAssembler::MulFloat32(DoubleRegister dst, const MemOperand& opnd, +void MacroAssembler::MulFloat32(DoubleRegister dst, const MemOperand& opnd, DoubleRegister scratch) { if (is_uint12(opnd.offset())) { meeb(dst, opnd); @@ -4497,7 +4497,7 @@ void TurboAssembler::MulFloat32(DoubleRegister dst, const MemOperand& opnd, } } -void TurboAssembler::MulFloat64(DoubleRegister dst, const MemOperand& opnd, +void MacroAssembler::MulFloat64(DoubleRegister dst, const MemOperand& opnd, DoubleRegister scratch) { if (is_uint12(opnd.offset())) { mdb(dst, opnd); @@ -4507,7 +4507,7 @@ void TurboAssembler::MulFloat64(DoubleRegister dst, const MemOperand& opnd, } } -void TurboAssembler::DivFloat32(DoubleRegister dst, const 
MemOperand& opnd, +void MacroAssembler::DivFloat32(DoubleRegister dst, const MemOperand& opnd, DoubleRegister scratch) { if (is_uint12(opnd.offset())) { deb(dst, opnd); @@ -4517,7 +4517,7 @@ void TurboAssembler::DivFloat32(DoubleRegister dst, const MemOperand& opnd, } } -void TurboAssembler::DivFloat64(DoubleRegister dst, const MemOperand& opnd, +void MacroAssembler::DivFloat64(DoubleRegister dst, const MemOperand& opnd, DoubleRegister scratch) { if (is_uint12(opnd.offset())) { ddb(dst, opnd); @@ -4527,7 +4527,7 @@ void TurboAssembler::DivFloat64(DoubleRegister dst, const MemOperand& opnd, } } -void TurboAssembler::LoadF32AsF64(DoubleRegister dst, const MemOperand& opnd, +void MacroAssembler::LoadF32AsF64(DoubleRegister dst, const MemOperand& opnd, DoubleRegister scratch) { if (is_uint12(opnd.offset())) { ldeb(dst, opnd); @@ -4539,7 +4539,7 @@ void TurboAssembler::LoadF32AsF64(DoubleRegister dst, const MemOperand& opnd, // Variable length depending on whether offset fits into immediate field // MemOperand of RX or RXY format -void TurboAssembler::StoreU32(Register src, const MemOperand& mem, +void MacroAssembler::StoreU32(Register src, const MemOperand& mem, Register scratch) { Register base = mem.rb(); int offset = mem.offset(); @@ -4570,7 +4570,7 @@ void TurboAssembler::StoreU32(Register src, const MemOperand& mem, } } -void TurboAssembler::LoadS16(Register dst, Register src) { +void MacroAssembler::LoadS16(Register dst, Register src) { #if V8_TARGET_ARCH_S390X lghr(dst, src); #else @@ -4580,8 +4580,8 @@ void TurboAssembler::LoadS16(Register dst, Register src) { // Loads 16-bits half-word value from memory and sign extends to pointer // sized register -void TurboAssembler::LoadS16(Register dst, const MemOperand& mem, - Register scratch) { +void MacroAssembler::LoadS16(Register dst, const MemOperand& mem, + Register scratch) { Register base = mem.rb(); int offset = mem.offset(); @@ -4608,7 +4608,7 @@ void TurboAssembler::LoadS16(Register dst, const MemOperand& mem, // Variable length depending on whether offset fits into immediate field // MemOperand current only supports d-form -void TurboAssembler::StoreU16(Register src, const MemOperand& mem, +void MacroAssembler::StoreU16(Register src, const MemOperand& mem, Register scratch) { Register base = mem.rb(); int offset = mem.offset(); @@ -4626,7 +4626,7 @@ void TurboAssembler::StoreU16(Register src, const MemOperand& mem, // Variable length depending on whether offset fits into immediate field // MemOperand current only supports d-form -void TurboAssembler::StoreU8(Register src, const MemOperand& mem, +void MacroAssembler::StoreU8(Register src, const MemOperand& mem, Register scratch) { Register base = mem.rb(); int offset = mem.offset(); @@ -4643,13 +4643,13 @@ void TurboAssembler::StoreU8(Register src, const MemOperand& mem, } // Shift left logical for 32-bit integer types. -void TurboAssembler::ShiftLeftU32(Register dst, Register src, +void MacroAssembler::ShiftLeftU32(Register dst, Register src, const Operand& val) { ShiftLeftU32(dst, src, r0, val); } // Shift left logical for 32-bit integer types. -void TurboAssembler::ShiftLeftU32(Register dst, Register src, Register val, +void MacroAssembler::ShiftLeftU32(Register dst, Register src, Register val, const Operand& val2) { if (dst == src) { sll(dst, val, val2); @@ -4663,25 +4663,25 @@ void TurboAssembler::ShiftLeftU32(Register dst, Register src, Register val, } // Shift left logical for 32-bit integer types. 
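// Unlike the 32-bit helpers above, sllg is a three-operand 64-bit shift: it reads src, writes dst, and takes the shift amount from (val + val2) modulo 64, so no dst == src special-casing is needed.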
-void TurboAssembler::ShiftLeftU64(Register dst, Register src, +void MacroAssembler::ShiftLeftU64(Register dst, Register src, const Operand& val) { ShiftLeftU64(dst, src, r0, val); } // Shift left logical for 32-bit integer types. -void TurboAssembler::ShiftLeftU64(Register dst, Register src, Register val, +void MacroAssembler::ShiftLeftU64(Register dst, Register src, Register val, const Operand& val2) { sllg(dst, src, val, val2); } // Shift right logical for 32-bit integer types. -void TurboAssembler::ShiftRightU32(Register dst, Register src, +void MacroAssembler::ShiftRightU32(Register dst, Register src, const Operand& val) { ShiftRightU32(dst, src, r0, val); } // Shift right logical for 32-bit integer types. -void TurboAssembler::ShiftRightU32(Register dst, Register src, Register val, +void MacroAssembler::ShiftRightU32(Register dst, Register src, Register val, const Operand& val2) { if (dst == src) { srl(dst, val, val2); @@ -4694,25 +4694,25 @@ void TurboAssembler::ShiftRightU32(Register dst, Register src, Register val, } } -void TurboAssembler::ShiftRightU64(Register dst, Register src, Register val, +void MacroAssembler::ShiftRightU64(Register dst, Register src, Register val, const Operand& val2) { srlg(dst, src, val, val2); } // Shift right logical for 64-bit integer types. -void TurboAssembler::ShiftRightU64(Register dst, Register src, +void MacroAssembler::ShiftRightU64(Register dst, Register src, const Operand& val) { ShiftRightU64(dst, src, r0, val); } // Shift right arithmetic for 32-bit integer types. -void TurboAssembler::ShiftRightS32(Register dst, Register src, +void MacroAssembler::ShiftRightS32(Register dst, Register src, const Operand& val) { ShiftRightS32(dst, src, r0, val); } // Shift right arithmetic for 32-bit integer types. -void TurboAssembler::ShiftRightS32(Register dst, Register src, Register val, +void MacroAssembler::ShiftRightS32(Register dst, Register src, Register val, const Operand& val2) { if (dst == src) { sra(dst, val, val2); @@ -4726,19 +4726,19 @@ void TurboAssembler::ShiftRightS32(Register dst, Register src, Register val, } // Shift right arithmetic for 64-bit integer types. -void TurboAssembler::ShiftRightS64(Register dst, Register src, +void MacroAssembler::ShiftRightS64(Register dst, Register src, const Operand& val) { ShiftRightS64(dst, src, r0, val); } // Shift right arithmetic for 64-bit integer types. 
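// The arithmetic forms replicate the sign bit where the logical forms above shift in zeros: shifting int64_t{-8} right by 1 arithmetically yields -4, while shifting the same bit pattern logically yields 0x7FFFFFFFFFFFFFFC.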
-void TurboAssembler::ShiftRightS64(Register dst, Register src, Register val, +void MacroAssembler::ShiftRightS64(Register dst, Register src, Register val, const Operand& val2) { srag(dst, src, val, val2); } // Clear right most # of bits -void TurboAssembler::ClearRightImm(Register dst, Register src, +void MacroAssembler::ClearRightImm(Register dst, Register src, const Operand& val) { int numBitsToClear = val.immediate() % (kSystemPointerSize * 8); @@ -4765,7 +4765,7 @@ void TurboAssembler::ClearRightImm(Register dst, Register src, } } -void TurboAssembler::Popcnt32(Register dst, Register src) { +void MacroAssembler::Popcnt32(Register dst, Register src) { DCHECK(src != r0); DCHECK(dst != r0); @@ -4778,7 +4778,7 @@ void TurboAssembler::Popcnt32(Register dst, Register src) { } #ifdef V8_TARGET_ARCH_S390X -void TurboAssembler::Popcnt64(Register dst, Register src) { +void MacroAssembler::Popcnt64(Register dst, Register src) { DCHECK(src != r0); DCHECK(dst != r0); @@ -4793,7 +4793,7 @@ void TurboAssembler::Popcnt64(Register dst, Register src) { } #endif -void TurboAssembler::SwapP(Register src, Register dst, Register scratch) { +void MacroAssembler::SwapP(Register src, Register dst, Register scratch) { if (src == dst) return; DCHECK(!AreAliased(src, dst, scratch)); mov(scratch, src); @@ -4801,7 +4801,7 @@ void TurboAssembler::SwapP(Register src, Register dst, Register scratch) { mov(dst, scratch); } -void TurboAssembler::SwapP(Register src, MemOperand dst, Register scratch) { +void MacroAssembler::SwapP(Register src, MemOperand dst, Register scratch) { if (dst.rx() != r0) DCHECK(!AreAliased(src, dst.rx(), scratch)); if (dst.rb() != r0) DCHECK(!AreAliased(src, dst.rb(), scratch)); DCHECK(!AreAliased(src, scratch)); @@ -4810,7 +4810,7 @@ void TurboAssembler::SwapP(Register src, MemOperand dst, Register scratch) { StoreU64(scratch, dst); } -void TurboAssembler::SwapP(MemOperand src, MemOperand dst, Register scratch_0, +void MacroAssembler::SwapP(MemOperand src, MemOperand dst, Register scratch_0, Register scratch_1) { if (src.rx() != r0) DCHECK(!AreAliased(src.rx(), scratch_0, scratch_1)); if (src.rb() != r0) DCHECK(!AreAliased(src.rb(), scratch_0, scratch_1)); @@ -4823,7 +4823,7 @@ void TurboAssembler::SwapP(MemOperand src, MemOperand dst, Register scratch_0, StoreU64(scratch_1, src); } -void TurboAssembler::SwapFloat32(DoubleRegister src, DoubleRegister dst, +void MacroAssembler::SwapFloat32(DoubleRegister src, DoubleRegister dst, DoubleRegister scratch) { if (src == dst) return; DCHECK(!AreAliased(src, dst, scratch)); @@ -4832,7 +4832,7 @@ void TurboAssembler::SwapFloat32(DoubleRegister src, DoubleRegister dst, ldr(dst, scratch); } -void TurboAssembler::SwapFloat32(DoubleRegister src, MemOperand dst, +void MacroAssembler::SwapFloat32(DoubleRegister src, MemOperand dst, DoubleRegister scratch) { DCHECK(!AreAliased(src, scratch)); ldr(scratch, src); @@ -4840,7 +4840,7 @@ void TurboAssembler::SwapFloat32(DoubleRegister src, MemOperand dst, StoreF32(scratch, dst); } -void TurboAssembler::SwapFloat32(MemOperand src, MemOperand dst, +void MacroAssembler::SwapFloat32(MemOperand src, MemOperand dst, DoubleRegister scratch) { // push d0, to be used as scratch lay(sp, MemOperand(sp, -kDoubleSize)); @@ -4854,7 +4854,7 @@ void TurboAssembler::SwapFloat32(MemOperand src, MemOperand dst, lay(sp, MemOperand(sp, kDoubleSize)); } -void TurboAssembler::SwapDouble(DoubleRegister src, DoubleRegister dst, +void MacroAssembler::SwapDouble(DoubleRegister src, DoubleRegister dst, DoubleRegister scratch) { if (src == 
dst) return; DCHECK(!AreAliased(src, dst, scratch)); @@ -4863,7 +4863,7 @@ void TurboAssembler::SwapDouble(DoubleRegister src, DoubleRegister dst, ldr(dst, scratch); } -void TurboAssembler::SwapDouble(DoubleRegister src, MemOperand dst, +void MacroAssembler::SwapDouble(DoubleRegister src, MemOperand dst, DoubleRegister scratch) { DCHECK(!AreAliased(src, scratch)); ldr(scratch, src); @@ -4871,7 +4871,7 @@ void TurboAssembler::SwapDouble(DoubleRegister src, MemOperand dst, StoreF64(scratch, dst); } -void TurboAssembler::SwapDouble(MemOperand src, MemOperand dst, +void MacroAssembler::SwapDouble(MemOperand src, MemOperand dst, DoubleRegister scratch) { // push d0, to be used as scratch lay(sp, MemOperand(sp, -kDoubleSize)); @@ -4885,7 +4885,7 @@ void TurboAssembler::SwapDouble(MemOperand src, MemOperand dst, lay(sp, MemOperand(sp, kDoubleSize)); } -void TurboAssembler::SwapSimd128(Simd128Register src, Simd128Register dst, +void MacroAssembler::SwapSimd128(Simd128Register src, Simd128Register dst, Simd128Register scratch) { if (src == dst) return; vlr(scratch, src, Condition(0), Condition(0), Condition(0)); @@ -4893,7 +4893,7 @@ void TurboAssembler::SwapSimd128(Simd128Register src, Simd128Register dst, vlr(dst, scratch, Condition(0), Condition(0), Condition(0)); } -void TurboAssembler::SwapSimd128(Simd128Register src, MemOperand dst, +void MacroAssembler::SwapSimd128(Simd128Register src, MemOperand dst, Simd128Register scratch) { DCHECK(!AreAliased(src, scratch)); vlr(scratch, src, Condition(0), Condition(0), Condition(0)); @@ -4901,7 +4901,7 @@ void TurboAssembler::SwapSimd128(Simd128Register src, MemOperand dst, StoreV128(scratch, dst, ip); } -void TurboAssembler::SwapSimd128(MemOperand src, MemOperand dst, +void MacroAssembler::SwapSimd128(MemOperand src, MemOperand dst, Simd128Register scratch) { // push d0, to be used as scratch lay(sp, MemOperand(sp, -kSimd128Size)); @@ -4915,27 +4915,27 @@ void TurboAssembler::SwapSimd128(MemOperand src, MemOperand dst, lay(sp, MemOperand(sp, kSimd128Size)); } -void TurboAssembler::ComputeCodeStartAddress(Register dst) { +void MacroAssembler::ComputeCodeStartAddress(Register dst) { larl(dst, Operand(-pc_offset() / 2)); } -void TurboAssembler::LoadPC(Register dst) { +void MacroAssembler::LoadPC(Register dst) { Label current_pc; larl(dst, &current_pc); bind(&current_pc); } -void TurboAssembler::JumpIfEqual(Register x, int32_t y, Label* dest) { +void MacroAssembler::JumpIfEqual(Register x, int32_t y, Label* dest) { CmpS32(x, Operand(y)); beq(dest); } -void TurboAssembler::JumpIfLessThan(Register x, int32_t y, Label* dest) { +void MacroAssembler::JumpIfLessThan(Register x, int32_t y, Label* dest) { CmpS32(x, Operand(y)); blt(dest); } -void TurboAssembler::LoadEntryFromBuiltinIndex(Register builtin_index) { +void MacroAssembler::LoadEntryFromBuiltinIndex(Register builtin_index) { static_assert(kSystemPointerSize == 8); static_assert(kSmiTagSize == 1); static_assert(kSmiTag == 0); @@ -4952,31 +4952,31 @@ void TurboAssembler::LoadEntryFromBuiltinIndex(Register builtin_index) { IsolateData::builtin_entry_table_offset())); } -void TurboAssembler::CallBuiltinByIndex(Register builtin_index) { +void MacroAssembler::CallBuiltinByIndex(Register builtin_index) { LoadEntryFromBuiltinIndex(builtin_index); Call(builtin_index); } -void TurboAssembler::LoadEntryFromBuiltin(Builtin builtin, +void MacroAssembler::LoadEntryFromBuiltin(Builtin builtin, Register destination) { ASM_CODE_COMMENT(this); LoadU64(destination, EntryFromBuiltinAsOperand(builtin)); } -MemOperand
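// Builtin entry points live in a flat table in IsolateData, addressed off kRootRegister, so resolving a Builtin to its call target here is a single root-relative load and never embeds an absolute code address.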
TurboAssembler::EntryFromBuiltinAsOperand(Builtin builtin) { +MemOperand MacroAssembler::EntryFromBuiltinAsOperand(Builtin builtin) { ASM_CODE_COMMENT(this); DCHECK(root_array_available()); return MemOperand(kRootRegister, IsolateData::BuiltinEntrySlotOffset(builtin)); } -void TurboAssembler::LoadCodeEntry(Register destination, Register code_object) { +void MacroAssembler::LoadCodeEntry(Register destination, Register code_object) { ASM_CODE_COMMENT(this); LoadU64(destination, FieldMemOperand(code_object, Code::kCodeEntryPointOffset)); } -void TurboAssembler::LoadCodeInstructionStreamNonBuiltin(Register destination, +void MacroAssembler::LoadCodeInstructionStreamNonBuiltin(Register destination, Register code_object) { ASM_CODE_COMMENT(this); // Compute the InstructionStream object pointer from the code entry point. @@ -4986,20 +4986,20 @@ void TurboAssembler::LoadCodeInstructionStreamNonBuiltin(Register destination, Operand(InstructionStream::kHeaderSize - kHeapObjectTag)); } -void TurboAssembler::CallCodeObject(Register code_object) { +void MacroAssembler::CallCodeObject(Register code_object) { ASM_CODE_COMMENT(this); LoadCodeEntry(code_object, code_object); Call(code_object); } -void TurboAssembler::JumpCodeObject(Register code_object, JumpMode jump_mode) { +void MacroAssembler::JumpCodeObject(Register code_object, JumpMode jump_mode) { ASM_CODE_COMMENT(this); DCHECK_EQ(JumpMode::kJump, jump_mode); LoadCodeEntry(code_object, code_object); Jump(code_object); } -void TurboAssembler::StoreReturnAddressAndCall(Register target) { +void MacroAssembler::StoreReturnAddressAndCall(Register target) { // This generates the final instruction sequence for calls to C functions // once an exit frame has been constructed. // @@ -5017,7 +5017,7 @@ void TurboAssembler::StoreReturnAddressAndCall(Register target) { bind(&return_label); } -void TurboAssembler::CallForDeoptimization(Builtin target, int, Label* exit, +void MacroAssembler::CallForDeoptimization(Builtin target, int, Label* exit, DeoptimizeKind kind, Label* ret, Label*) { ASM_CODE_COMMENT(this); @@ -5029,10 +5029,10 @@ void TurboAssembler::CallForDeoptimization(Builtin target, int, Label* exit, : Deoptimizer::kEagerDeoptExitSize); } -void TurboAssembler::Trap() { stop(); } -void TurboAssembler::DebugBreak() { stop(); } +void MacroAssembler::Trap() { stop(); } +void MacroAssembler::DebugBreak() { stop(); } -void TurboAssembler::CountLeadingZerosU32(Register dst, Register src, +void MacroAssembler::CountLeadingZerosU32(Register dst, Register src, Register scratch_pair) { llgfr(dst, src); flogr(scratch_pair, @@ -5040,14 +5040,14 @@ void TurboAssembler::CountLeadingZerosU32(Register dst, Register src, AddS32(dst, scratch_pair, Operand(-32)); } -void TurboAssembler::CountLeadingZerosU64(Register dst, Register src, +void MacroAssembler::CountLeadingZerosU64(Register dst, Register src, Register scratch_pair) { flogr(scratch_pair, src); // will modify a register pair scratch and scratch + 1 mov(dst, scratch_pair); } -void TurboAssembler::CountTrailingZerosU32(Register dst, Register src, +void MacroAssembler::CountTrailingZerosU32(Register dst, Register src, Register scratch_pair) { Register scratch0 = scratch_pair; Register scratch1 = Register::from_code(scratch_pair.code() + 1); @@ -5068,7 +5068,7 @@ void TurboAssembler::CountTrailingZerosU32(Register dst, Register src, bind(&done); } -void TurboAssembler::CountTrailingZerosU64(Register dst, Register src, +void MacroAssembler::CountTrailingZerosU64(Register dst, Register src, Register scratch_pair) { 
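// A standard way to count trailing zeros with only a leading-zero instruction (flogr) is the identity ctz(x) == 63 - clz(x & -x) for x != 0, since x & -x isolates the lowest set bit; the x == 0 case produces 64.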
Register scratch0 = scratch_pair; Register scratch1 = Register::from_code(scratch_pair.code() + 1); @@ -5088,7 +5088,7 @@ void TurboAssembler::CountTrailingZerosU64(Register dst, Register src, bind(&done); } -void TurboAssembler::AtomicCmpExchangeHelper(Register addr, Register output, +void MacroAssembler::AtomicCmpExchangeHelper(Register addr, Register output, Register old_value, Register new_value, int start, int end, int shift_amount, @@ -5106,7 +5106,7 @@ void TurboAssembler::AtomicCmpExchangeHelper(Register addr, Register output, Operand(64 - shift_amount), true); } -void TurboAssembler::AtomicCmpExchangeU8(Register addr, Register output, +void MacroAssembler::AtomicCmpExchangeU8(Register addr, Register output, Register old_value, Register new_value, Register temp0, Register temp1) { #ifdef V8_TARGET_BIG_ENDIAN @@ -5155,7 +5155,7 @@ void TurboAssembler::AtomicCmpExchangeU8(Register addr, Register output, bind(&done); } -void TurboAssembler::AtomicCmpExchangeU16(Register addr, Register output, +void MacroAssembler::AtomicCmpExchangeU16(Register addr, Register output, Register old_value, Register new_value, Register temp0, Register temp1) { @@ -5193,7 +5193,7 @@ void TurboAssembler::AtomicCmpExchangeU16(Register addr, Register output, bind(&done); } -void TurboAssembler::AtomicExchangeHelper(Register addr, Register value, +void MacroAssembler::AtomicExchangeHelper(Register addr, Register value, Register output, int start, int end, int shift_amount, int offset, Register scratch) { @@ -5208,7 +5208,7 @@ void TurboAssembler::AtomicExchangeHelper(Register addr, Register value, srl(output, Operand(shift_amount)); } -void TurboAssembler::AtomicExchangeU8(Register addr, Register value, +void MacroAssembler::AtomicExchangeU8(Register addr, Register value, Register output, Register scratch) { #ifdef V8_TARGET_BIG_ENDIAN #define ATOMIC_EXCHANGE_BYTE(i) \ @@ -5260,7 +5260,7 @@ void TurboAssembler::AtomicExchangeU8(Register addr, Register value, bind(&done); } -void TurboAssembler::AtomicExchangeU16(Register addr, Register value, +void MacroAssembler::AtomicExchangeU16(Register addr, Register value, Register output, Register scratch) { #ifdef V8_TARGET_BIG_ENDIAN #define ATOMIC_EXCHANGE_HALFWORD(i) \ @@ -5301,77 +5301,77 @@ void TurboAssembler::AtomicExchangeU16(Register addr, Register value, } // Simd Support. 
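// z/Architecture numbers vector lanes from the most-significant element, so the helpers below mirror a little-endian (WebAssembly) lane index i to hardware lane (kLaneCount - 1 - i); that is why the lane operands read, e.g., Operand(3 - imm_lane_idx) for the four-lane shapes. A minimal sketch of the mapping, assuming kLaneCount is the lane count of the shape: constexpr int MirroredLane(int kLaneCount, int i) { return kLaneCount - 1 - i; }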
-void TurboAssembler::F64x2Splat(Simd128Register dst, Simd128Register src) { +void MacroAssembler::F64x2Splat(Simd128Register dst, Simd128Register src) { vrep(dst, src, Operand(0), Condition(3)); } -void TurboAssembler::F32x4Splat(Simd128Register dst, Simd128Register src) { +void MacroAssembler::F32x4Splat(Simd128Register dst, Simd128Register src) { vrep(dst, src, Operand(0), Condition(2)); } -void TurboAssembler::I64x2Splat(Simd128Register dst, Register src) { +void MacroAssembler::I64x2Splat(Simd128Register dst, Register src) { vlvg(dst, src, MemOperand(r0, 0), Condition(3)); vrep(dst, dst, Operand(0), Condition(3)); } -void TurboAssembler::I32x4Splat(Simd128Register dst, Register src) { +void MacroAssembler::I32x4Splat(Simd128Register dst, Register src) { vlvg(dst, src, MemOperand(r0, 0), Condition(2)); vrep(dst, dst, Operand(0), Condition(2)); } -void TurboAssembler::I16x8Splat(Simd128Register dst, Register src) { +void MacroAssembler::I16x8Splat(Simd128Register dst, Register src) { vlvg(dst, src, MemOperand(r0, 0), Condition(1)); vrep(dst, dst, Operand(0), Condition(1)); } -void TurboAssembler::I8x16Splat(Simd128Register dst, Register src) { +void MacroAssembler::I8x16Splat(Simd128Register dst, Register src) { vlvg(dst, src, MemOperand(r0, 0), Condition(0)); vrep(dst, dst, Operand(0), Condition(0)); } -void TurboAssembler::F64x2ExtractLane(DoubleRegister dst, Simd128Register src, +void MacroAssembler::F64x2ExtractLane(DoubleRegister dst, Simd128Register src, uint8_t imm_lane_idx, Register) { vrep(dst, src, Operand(1 - imm_lane_idx), Condition(3)); } -void TurboAssembler::F32x4ExtractLane(DoubleRegister dst, Simd128Register src, +void MacroAssembler::F32x4ExtractLane(DoubleRegister dst, Simd128Register src, uint8_t imm_lane_idx, Register) { vrep(dst, src, Operand(3 - imm_lane_idx), Condition(2)); } -void TurboAssembler::I64x2ExtractLane(Register dst, Simd128Register src, +void MacroAssembler::I64x2ExtractLane(Register dst, Simd128Register src, uint8_t imm_lane_idx, Register) { vlgv(dst, src, MemOperand(r0, 1 - imm_lane_idx), Condition(3)); } -void TurboAssembler::I32x4ExtractLane(Register dst, Simd128Register src, +void MacroAssembler::I32x4ExtractLane(Register dst, Simd128Register src, uint8_t imm_lane_idx, Register) { vlgv(dst, src, MemOperand(r0, 3 - imm_lane_idx), Condition(2)); } -void TurboAssembler::I16x8ExtractLaneU(Register dst, Simd128Register src, +void MacroAssembler::I16x8ExtractLaneU(Register dst, Simd128Register src, uint8_t imm_lane_idx, Register) { vlgv(dst, src, MemOperand(r0, 7 - imm_lane_idx), Condition(1)); } -void TurboAssembler::I16x8ExtractLaneS(Register dst, Simd128Register src, +void MacroAssembler::I16x8ExtractLaneS(Register dst, Simd128Register src, uint8_t imm_lane_idx, Register scratch) { vlgv(scratch, src, MemOperand(r0, 7 - imm_lane_idx), Condition(1)); lghr(dst, scratch); } -void TurboAssembler::I8x16ExtractLaneU(Register dst, Simd128Register src, +void MacroAssembler::I8x16ExtractLaneU(Register dst, Simd128Register src, uint8_t imm_lane_idx, Register) { vlgv(dst, src, MemOperand(r0, 15 - imm_lane_idx), Condition(0)); } -void TurboAssembler::I8x16ExtractLaneS(Register dst, Simd128Register src, +void MacroAssembler::I8x16ExtractLaneS(Register dst, Simd128Register src, uint8_t imm_lane_idx, Register scratch) { vlgv(scratch, src, MemOperand(r0, 15 - imm_lane_idx), Condition(0)); lgbr(dst, scratch); } -void TurboAssembler::F64x2ReplaceLane(Simd128Register dst, Simd128Register src1, +void MacroAssembler::F64x2ReplaceLane(Simd128Register dst, Simd128Register 
src1, DoubleRegister src2, uint8_t imm_lane_idx, Register scratch) { vlgv(scratch, src2, MemOperand(r0, 0), Condition(3)); @@ -5381,7 +5381,7 @@ void TurboAssembler::F64x2ReplaceLane(Simd128Register dst, Simd128Register src1, vlvg(dst, scratch, MemOperand(r0, 1 - imm_lane_idx), Condition(3)); } -void TurboAssembler::F32x4ReplaceLane(Simd128Register dst, Simd128Register src1, +void MacroAssembler::F32x4ReplaceLane(Simd128Register dst, Simd128Register src1, DoubleRegister src2, uint8_t imm_lane_idx, Register scratch) { vlgv(scratch, src2, MemOperand(r0, 0), Condition(2)); @@ -5391,7 +5391,7 @@ void TurboAssembler::F32x4ReplaceLane(Simd128Register dst, Simd128Register src1, vlvg(dst, scratch, MemOperand(r0, 3 - imm_lane_idx), Condition(2)); } -void TurboAssembler::I64x2ReplaceLane(Simd128Register dst, Simd128Register src1, +void MacroAssembler::I64x2ReplaceLane(Simd128Register dst, Simd128Register src1, Register src2, uint8_t imm_lane_idx, Register) { if (src1 != dst) { @@ -5400,7 +5400,7 @@ void TurboAssembler::I64x2ReplaceLane(Simd128Register dst, Simd128Register src1, vlvg(dst, src2, MemOperand(r0, 1 - imm_lane_idx), Condition(3)); } -void TurboAssembler::I32x4ReplaceLane(Simd128Register dst, Simd128Register src1, +void MacroAssembler::I32x4ReplaceLane(Simd128Register dst, Simd128Register src1, Register src2, uint8_t imm_lane_idx, Register) { if (src1 != dst) { @@ -5409,7 +5409,7 @@ void TurboAssembler::I32x4ReplaceLane(Simd128Register dst, Simd128Register src1, vlvg(dst, src2, MemOperand(r0, 3 - imm_lane_idx), Condition(2)); } -void TurboAssembler::I16x8ReplaceLane(Simd128Register dst, Simd128Register src1, +void MacroAssembler::I16x8ReplaceLane(Simd128Register dst, Simd128Register src1, Register src2, uint8_t imm_lane_idx, Register) { if (src1 != dst) { @@ -5418,7 +5418,7 @@ void TurboAssembler::I16x8ReplaceLane(Simd128Register dst, Simd128Register src1, vlvg(dst, src2, MemOperand(r0, 7 - imm_lane_idx), Condition(1)); } -void TurboAssembler::I8x16ReplaceLane(Simd128Register dst, Simd128Register src1, +void MacroAssembler::I8x16ReplaceLane(Simd128Register dst, Simd128Register src1, Register src2, uint8_t imm_lane_idx, Register) { if (src1 != dst) { @@ -5427,19 +5427,19 @@ void TurboAssembler::I8x16ReplaceLane(Simd128Register dst, Simd128Register src1, vlvg(dst, src2, MemOperand(r0, 15 - imm_lane_idx), Condition(0)); } -void TurboAssembler::S128Not(Simd128Register dst, Simd128Register src) { +void MacroAssembler::S128Not(Simd128Register dst, Simd128Register src) { vno(dst, src, src, Condition(0), Condition(0), Condition(0)); } -void TurboAssembler::S128Zero(Simd128Register dst, Simd128Register src) { +void MacroAssembler::S128Zero(Simd128Register dst, Simd128Register src) { vx(dst, src, src, Condition(0), Condition(0), Condition(0)); } -void TurboAssembler::S128AllOnes(Simd128Register dst, Simd128Register src) { +void MacroAssembler::S128AllOnes(Simd128Register dst, Simd128Register src) { vceq(dst, src, src, Condition(0), Condition(3)); } -void TurboAssembler::S128Select(Simd128Register dst, Simd128Register src1, +void MacroAssembler::S128Select(Simd128Register dst, Simd128Register src1, Simd128Register src2, Simd128Register mask) { vsel(dst, src1, src2, mask, Condition(0), Condition(0)); } @@ -5482,7 +5482,7 @@ void TurboAssembler::S128Select(Simd128Register dst, Simd128Register src1, V(I8x16Popcnt, vpopct, 0, 0, 0) #define EMIT_SIMD_UNOP_VRR_A(name, op, c1, c2, c3) \ - void TurboAssembler::name(Simd128Register dst, Simd128Register src) { \ + void MacroAssembler::name(Simd128Register dst, 
Simd128Register src) { \ op(dst, src, Condition(c1), Condition(c2), Condition(c3)); \ } SIMD_UNOP_LIST_VRR_A(EMIT_SIMD_UNOP_VRR_A) @@ -5503,7 +5503,7 @@ SIMD_UNOP_LIST_VRR_A(EMIT_SIMD_UNOP_VRR_A) V(I8x16GtU, vchl, 0, 0) #define EMIT_SIMD_BINOP_VRR_B(name, op, c1, c2) \ - void TurboAssembler::name(Simd128Register dst, Simd128Register src1, \ + void MacroAssembler::name(Simd128Register dst, Simd128Register src1, \ Simd128Register src2) { \ op(dst, src1, src2, Condition(c1), Condition(c2)); \ } @@ -5560,7 +5560,7 @@ SIMD_BINOP_LIST_VRR_B(EMIT_SIMD_BINOP_VRR_B) V(S128AndNot, vnc, 0, 0, 0) #define EMIT_SIMD_BINOP_VRR_C(name, op, c1, c2, c3) \ - void TurboAssembler::name(Simd128Register dst, Simd128Register src1, \ + void MacroAssembler::name(Simd128Register dst, Simd128Register src1, \ Simd128Register src2) { \ op(dst, src1, src2, Condition(c1), Condition(c2), Condition(c3)); \ } @@ -5583,13 +5583,13 @@ SIMD_BINOP_LIST_VRR_C(EMIT_SIMD_BINOP_VRR_C) V(I8x16ShrU, vesrlv, 0) #define EMIT_SIMD_SHIFT(name, op, c1) \ - void TurboAssembler::name(Simd128Register dst, Simd128Register src1, \ + void MacroAssembler::name(Simd128Register dst, Simd128Register src1, \ Register src2, Simd128Register scratch) { \ vlvg(scratch, src2, MemOperand(r0, 0), Condition(c1)); \ vrep(scratch, scratch, Operand(0), Condition(c1)); \ op(dst, src1, scratch, Condition(0), Condition(0), Condition(c1)); \ } \ - void TurboAssembler::name(Simd128Register dst, Simd128Register src1, \ + void MacroAssembler::name(Simd128Register dst, Simd128Register src1, \ const Operand& src2, Register scratch1, \ Simd128Register scratch2) { \ mov(scratch1, src2); \ @@ -5614,7 +5614,7 @@ SIMD_SHIFT_LIST(EMIT_SIMD_SHIFT) V(I16x8ExtMulHighI8x16U, vmle, vmlo, vmrh, 0) #define EMIT_SIMD_EXT_MUL(name, mul_even, mul_odd, merge, mode) \ - void TurboAssembler::name(Simd128Register dst, Simd128Register src1, \ + void MacroAssembler::name(Simd128Register dst, Simd128Register src1, \ Simd128Register src2, Simd128Register scratch) { \ mul_even(scratch, src1, src2, Condition(0), Condition(0), \ Condition(mode)); \ @@ -5632,7 +5632,7 @@ SIMD_EXT_MUL_LIST(EMIT_SIMD_EXT_MUL) V(I8x16AllTrue, 0) #define EMIT_SIMD_ALL_TRUE(name, mode) \ - void TurboAssembler::name(Register dst, Simd128Register src, \ + void MacroAssembler::name(Register dst, Simd128Register src, \ Register scratch1, Simd128Register scratch2) { \ mov(scratch1, Operand(1)); \ xgr(dst, dst); \ @@ -5653,7 +5653,7 @@ SIMD_ALL_TRUE_LIST(EMIT_SIMD_ALL_TRUE) V(F32x4Qfms, vfnms, 2) #define EMIT_SIMD_QFM(name, op, c1) \ - void TurboAssembler::name(Simd128Register dst, Simd128Register src1, \ + void MacroAssembler::name(Simd128Register dst, Simd128Register src1, \ Simd128Register src2, Simd128Register src3) { \ op(dst, src1, src2, src3, Condition(c1), Condition(0)); \ } @@ -5661,7 +5661,7 @@ SIMD_QFM_LIST(EMIT_SIMD_QFM) #undef EMIT_SIMD_QFM #undef SIMD_QFM_LIST -void TurboAssembler::I64x2Mul(Simd128Register dst, Simd128Register src1, +void MacroAssembler::I64x2Mul(Simd128Register dst, Simd128Register src1, Simd128Register src2, Register scratch1, Register scratch2, Register scratch3) { Register scratch_1 = scratch1; @@ -5676,112 +5676,112 @@ void TurboAssembler::I64x2Mul(Simd128Register dst, Simd128Register src1, vlvgp(dst, scratch1, scratch2); } -void TurboAssembler::F64x2Ne(Simd128Register dst, Simd128Register src1, +void MacroAssembler::F64x2Ne(Simd128Register dst, Simd128Register src1, Simd128Register src2) { vfce(dst, src1, src2, Condition(0), Condition(0), Condition(3)); vno(dst, dst, dst, Condition(0), 
Condition(0), Condition(3)); } -void TurboAssembler::F64x2Lt(Simd128Register dst, Simd128Register src1, +void MacroAssembler::F64x2Lt(Simd128Register dst, Simd128Register src1, Simd128Register src2) { vfch(dst, src2, src1, Condition(0), Condition(0), Condition(3)); } -void TurboAssembler::F64x2Le(Simd128Register dst, Simd128Register src1, +void MacroAssembler::F64x2Le(Simd128Register dst, Simd128Register src1, Simd128Register src2) { vfche(dst, src2, src1, Condition(0), Condition(0), Condition(3)); } -void TurboAssembler::F32x4Ne(Simd128Register dst, Simd128Register src1, +void MacroAssembler::F32x4Ne(Simd128Register dst, Simd128Register src1, Simd128Register src2) { vfce(dst, src1, src2, Condition(0), Condition(0), Condition(2)); vno(dst, dst, dst, Condition(0), Condition(0), Condition(2)); } -void TurboAssembler::F32x4Lt(Simd128Register dst, Simd128Register src1, +void MacroAssembler::F32x4Lt(Simd128Register dst, Simd128Register src1, Simd128Register src2) { vfch(dst, src2, src1, Condition(0), Condition(0), Condition(2)); } -void TurboAssembler::F32x4Le(Simd128Register dst, Simd128Register src1, +void MacroAssembler::F32x4Le(Simd128Register dst, Simd128Register src1, Simd128Register src2) { vfche(dst, src2, src1, Condition(0), Condition(0), Condition(2)); } -void TurboAssembler::I64x2Ne(Simd128Register dst, Simd128Register src1, +void MacroAssembler::I64x2Ne(Simd128Register dst, Simd128Register src1, Simd128Register src2) { vceq(dst, src1, src2, Condition(0), Condition(3)); vno(dst, dst, dst, Condition(0), Condition(0), Condition(3)); } -void TurboAssembler::I64x2GeS(Simd128Register dst, Simd128Register src1, +void MacroAssembler::I64x2GeS(Simd128Register dst, Simd128Register src1, Simd128Register src2) { // Compute !(B > A) which is equal to A >= B. vch(dst, src2, src1, Condition(0), Condition(3)); vno(dst, dst, dst, Condition(0), Condition(0), Condition(3)); } -void TurboAssembler::I32x4Ne(Simd128Register dst, Simd128Register src1, +void MacroAssembler::I32x4Ne(Simd128Register dst, Simd128Register src1, Simd128Register src2) { vceq(dst, src1, src2, Condition(0), Condition(2)); vno(dst, dst, dst, Condition(0), Condition(0), Condition(2)); } -void TurboAssembler::I32x4GeS(Simd128Register dst, Simd128Register src1, +void MacroAssembler::I32x4GeS(Simd128Register dst, Simd128Register src1, Simd128Register src2) { // Compute !(B > A) which is equal to A >= B. vch(dst, src2, src1, Condition(0), Condition(2)); vno(dst, dst, dst, Condition(0), Condition(0), Condition(2)); } -void TurboAssembler::I32x4GeU(Simd128Register dst, Simd128Register src1, +void MacroAssembler::I32x4GeU(Simd128Register dst, Simd128Register src1, Simd128Register src2, Simd128Register scratch) { vceq(scratch, src1, src2, Condition(0), Condition(2)); vchl(dst, src1, src2, Condition(0), Condition(2)); vo(dst, dst, scratch, Condition(0), Condition(0), Condition(2)); } -void TurboAssembler::I16x8Ne(Simd128Register dst, Simd128Register src1, +void MacroAssembler::I16x8Ne(Simd128Register dst, Simd128Register src1, Simd128Register src2) { vceq(dst, src1, src2, Condition(0), Condition(1)); vno(dst, dst, dst, Condition(0), Condition(0), Condition(1)); } -void TurboAssembler::I16x8GeS(Simd128Register dst, Simd128Register src1, +void MacroAssembler::I16x8GeS(Simd128Register dst, Simd128Register src1, Simd128Register src2) { // Compute !(B > A) which is equal to A >= B. 
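// (For a total order, A >= B is exactly !(B > A): vch sets a lane to all ones where B > A and to all zeros otherwise, and vno with every operand equal to dst is a bitwise NOT, which flips that mask into the per-lane >= result.)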
vch(dst, src2, src1, Condition(0), Condition(1)); vno(dst, dst, dst, Condition(0), Condition(0), Condition(1)); } -void TurboAssembler::I16x8GeU(Simd128Register dst, Simd128Register src1, +void MacroAssembler::I16x8GeU(Simd128Register dst, Simd128Register src1, Simd128Register src2, Simd128Register scratch) { vceq(scratch, src1, src2, Condition(0), Condition(1)); vchl(dst, src1, src2, Condition(0), Condition(1)); vo(dst, dst, scratch, Condition(0), Condition(0), Condition(1)); } -void TurboAssembler::I8x16Ne(Simd128Register dst, Simd128Register src1, +void MacroAssembler::I8x16Ne(Simd128Register dst, Simd128Register src1, Simd128Register src2) { vceq(dst, src1, src2, Condition(0), Condition(0)); vno(dst, dst, dst, Condition(0), Condition(0), Condition(0)); } -void TurboAssembler::I8x16GeS(Simd128Register dst, Simd128Register src1, +void MacroAssembler::I8x16GeS(Simd128Register dst, Simd128Register src1, Simd128Register src2) { // Compute !(B > A) which is equal to A >= B. vch(dst, src2, src1, Condition(0), Condition(0)); vno(dst, dst, dst, Condition(0), Condition(0), Condition(0)); } -void TurboAssembler::I8x16GeU(Simd128Register dst, Simd128Register src1, +void MacroAssembler::I8x16GeU(Simd128Register dst, Simd128Register src1, Simd128Register src2, Simd128Register scratch) { vceq(scratch, src1, src2, Condition(0), Condition(0)); vchl(dst, src1, src2, Condition(0), Condition(0)); vo(dst, dst, scratch, Condition(0), Condition(0), Condition(0)); } -void TurboAssembler::I64x2BitMask(Register dst, Simd128Register src, +void MacroAssembler::I64x2BitMask(Register dst, Simd128Register src, Register scratch1, Simd128Register scratch2) { mov(scratch1, Operand(0x8080808080800040)); vlvg(scratch2, scratch1, MemOperand(r0, 1), Condition(3)); @@ -5789,7 +5789,7 @@ void TurboAssembler::I64x2BitMask(Register dst, Simd128Register src, vlgv(dst, scratch2, MemOperand(r0, 7), Condition(0)); } -void TurboAssembler::I32x4BitMask(Register dst, Simd128Register src, +void MacroAssembler::I32x4BitMask(Register dst, Simd128Register src, Register scratch1, Simd128Register scratch2) { mov(scratch1, Operand(0x8080808000204060)); vlvg(scratch2, scratch1, MemOperand(r0, 1), Condition(3)); @@ -5797,7 +5797,7 @@ void TurboAssembler::I32x4BitMask(Register dst, Simd128Register src, vlgv(dst, scratch2, MemOperand(r0, 7), Condition(0)); } -void TurboAssembler::I16x8BitMask(Register dst, Simd128Register src, +void MacroAssembler::I16x8BitMask(Register dst, Simd128Register src, Register scratch1, Simd128Register scratch2) { mov(scratch1, Operand(0x10203040506070)); vlvg(scratch2, scratch1, MemOperand(r0, 1), Condition(3)); @@ -5805,19 +5805,19 @@ void TurboAssembler::I16x8BitMask(Register dst, Simd128Register src, vlgv(dst, scratch2, MemOperand(r0, 7), Condition(0)); } -void TurboAssembler::F64x2ConvertLowI32x4S(Simd128Register dst, +void MacroAssembler::F64x2ConvertLowI32x4S(Simd128Register dst, Simd128Register src) { vupl(dst, src, Condition(0), Condition(0), Condition(2)); vcdg(dst, dst, Condition(4), Condition(0), Condition(3)); } -void TurboAssembler::F64x2ConvertLowI32x4U(Simd128Register dst, +void MacroAssembler::F64x2ConvertLowI32x4U(Simd128Register dst, Simd128Register src) { vupll(dst, src, Condition(0), Condition(0), Condition(2)); vcdlg(dst, dst, Condition(4), Condition(0), Condition(3)); } -void TurboAssembler::I8x16BitMask(Register dst, Simd128Register src, +void MacroAssembler::I8x16BitMask(Register dst, Simd128Register src, Register scratch1, Register scratch2, Simd128Register scratch3) { mov(scratch1, 
Operand(0x4048505860687078)); @@ -5827,7 +5827,7 @@ void TurboAssembler::I8x16BitMask(Register dst, Simd128Register src, vlgv(dst, scratch3, MemOperand(r0, 3), Condition(1)); } -void TurboAssembler::V128AnyTrue(Register dst, Simd128Register src, +void MacroAssembler::V128AnyTrue(Register dst, Simd128Register src, Register scratch) { mov(dst, Operand(1)); xgr(scratch, scratch); @@ -5842,7 +5842,7 @@ void TurboAssembler::V128AnyTrue(Register dst, Simd128Register src, convert(scratch2, scratch1, kRoundToZero); \ vlvg(dst, scratch2, MemOperand(r0, index), Condition(2)); \ } -void TurboAssembler::I32x4SConvertF32x4(Simd128Register dst, +void MacroAssembler::I32x4SConvertF32x4(Simd128Register dst, Simd128Register src, Simd128Register scratch1, Register scratch2) { @@ -5856,7 +5856,7 @@ void TurboAssembler::I32x4SConvertF32x4(Simd128Register dst, } } -void TurboAssembler::I32x4UConvertF32x4(Simd128Register dst, +void MacroAssembler::I32x4UConvertF32x4(Simd128Register dst, Simd128Register src, Simd128Register scratch1, Register scratch2) { @@ -5878,7 +5878,7 @@ void TurboAssembler::I32x4UConvertF32x4(Simd128Register dst, MovFloatToInt(scratch2, scratch1); \ vlvg(dst, scratch2, MemOperand(r0, index), Condition(2)); \ } -void TurboAssembler::F32x4SConvertI32x4(Simd128Register dst, +void MacroAssembler::F32x4SConvertI32x4(Simd128Register dst, Simd128Register src, Simd128Register scratch1, Register scratch2) { @@ -5888,7 +5888,7 @@ void TurboAssembler::F32x4SConvertI32x4(Simd128Register dst, CONVERT_INT32_TO_FLOAT(ConvertIntToFloat, dst, src, scratch1, scratch2) } } -void TurboAssembler::F32x4UConvertI32x4(Simd128Register dst, +void MacroAssembler::F32x4UConvertI32x4(Simd128Register dst, Simd128Register src, Simd128Register scratch1, Register scratch2) { @@ -5901,13 +5901,13 @@ void TurboAssembler::F32x4UConvertI32x4(Simd128Register dst, } #undef CONVERT_INT32_TO_FLOAT -void TurboAssembler::I16x8SConvertI32x4(Simd128Register dst, +void MacroAssembler::I16x8SConvertI32x4(Simd128Register dst, Simd128Register src1, Simd128Register src2) { vpks(dst, src2, src1, Condition(0), Condition(2)); } -void TurboAssembler::I8x16SConvertI16x8(Simd128Register dst, +void MacroAssembler::I8x16SConvertI16x8(Simd128Register dst, Simd128Register src1, Simd128Register src2) { vpks(dst, src2, src1, Condition(0), Condition(1)); @@ -5919,7 +5919,7 @@ void TurboAssembler::I8x16SConvertI16x8(Simd128Register dst, vmx(scratch, src1, kDoubleRegZero, Condition(0), Condition(0), \ Condition(mode)); \ vmx(dst, src2, kDoubleRegZero, Condition(0), Condition(0), Condition(mode)); -void TurboAssembler::I16x8UConvertI32x4(Simd128Register dst, +void MacroAssembler::I16x8UConvertI32x4(Simd128Register dst, Simd128Register src1, Simd128Register src2, Simd128Register scratch) { @@ -5928,7 +5928,7 @@ void TurboAssembler::I16x8UConvertI32x4(Simd128Register dst, vpkls(dst, dst, scratch, Condition(0), Condition(2)); } -void TurboAssembler::I8x16UConvertI16x8(Simd128Register dst, +void MacroAssembler::I8x16UConvertI16x8(Simd128Register dst, Simd128Register src1, Simd128Register src2, Simd128Register scratch) { @@ -5950,7 +5950,7 @@ void TurboAssembler::I8x16UConvertI16x8(Simd128Register dst, extract_low(scratch2, src2, Condition(0), Condition(0), Condition(mode)); \ op(scratch1, scratch1, scratch2, Condition(0), Condition(0), \ Condition(mode + 1)); -void TurboAssembler::I16x8AddSatS(Simd128Register dst, Simd128Register src1, +void MacroAssembler::I16x8AddSatS(Simd128Register dst, Simd128Register src1, Simd128Register src2, Simd128Register 
scratch1, Simd128Register scratch2) { @@ -5958,7 +5958,7 @@ void TurboAssembler::I16x8AddSatS(Simd128Register dst, Simd128Register src1, vpks(dst, dst, scratch1, Condition(0), Condition(2)); } -void TurboAssembler::I16x8SubSatS(Simd128Register dst, Simd128Register src1, +void MacroAssembler::I16x8SubSatS(Simd128Register dst, Simd128Register src1, Simd128Register src2, Simd128Register scratch1, Simd128Register scratch2) { @@ -5966,7 +5966,7 @@ void TurboAssembler::I16x8SubSatS(Simd128Register dst, Simd128Register src1, vpks(dst, dst, scratch1, Condition(0), Condition(2)); } -void TurboAssembler::I16x8AddSatU(Simd128Register dst, Simd128Register src1, +void MacroAssembler::I16x8AddSatU(Simd128Register dst, Simd128Register src1, Simd128Register src2, Simd128Register scratch1, Simd128Register scratch2) { @@ -5974,7 +5974,7 @@ void TurboAssembler::I16x8AddSatU(Simd128Register dst, Simd128Register src1, vpkls(dst, dst, scratch1, Condition(0), Condition(2)); } -void TurboAssembler::I16x8SubSatU(Simd128Register dst, Simd128Register src1, +void MacroAssembler::I16x8SubSatU(Simd128Register dst, Simd128Register src1, Simd128Register src2, Simd128Register scratch1, Simd128Register scratch2) { @@ -5988,7 +5988,7 @@ void TurboAssembler::I16x8SubSatU(Simd128Register dst, Simd128Register src1, vpkls(dst, dst, scratch1, Condition(0), Condition(2)); } -void TurboAssembler::I8x16AddSatS(Simd128Register dst, Simd128Register src1, +void MacroAssembler::I8x16AddSatS(Simd128Register dst, Simd128Register src1, Simd128Register src2, Simd128Register scratch1, Simd128Register scratch2) { @@ -5996,7 +5996,7 @@ void TurboAssembler::I8x16AddSatS(Simd128Register dst, Simd128Register src1, vpks(dst, dst, scratch1, Condition(0), Condition(1)); } -void TurboAssembler::I8x16SubSatS(Simd128Register dst, Simd128Register src1, +void MacroAssembler::I8x16SubSatS(Simd128Register dst, Simd128Register src1, Simd128Register src2, Simd128Register scratch1, Simd128Register scratch2) { @@ -6004,7 +6004,7 @@ void TurboAssembler::I8x16SubSatS(Simd128Register dst, Simd128Register src1, vpks(dst, dst, scratch1, Condition(0), Condition(1)); } -void TurboAssembler::I8x16AddSatU(Simd128Register dst, Simd128Register src1, +void MacroAssembler::I8x16AddSatU(Simd128Register dst, Simd128Register src1, Simd128Register src2, Simd128Register scratch1, Simd128Register scratch2) { @@ -6012,7 +6012,7 @@ void TurboAssembler::I8x16AddSatU(Simd128Register dst, Simd128Register src1, vpkls(dst, dst, scratch1, Condition(0), Condition(1)); } -void TurboAssembler::I8x16SubSatU(Simd128Register dst, Simd128Register src1, +void MacroAssembler::I8x16SubSatU(Simd128Register dst, Simd128Register src1, Simd128Register src2, Simd128Register scratch1, Simd128Register scratch2) { @@ -6027,7 +6027,7 @@ void TurboAssembler::I8x16SubSatU(Simd128Register dst, Simd128Register src1, } #undef BINOP_EXTRACT -void TurboAssembler::F64x2PromoteLowF32x4(Simd128Register dst, +void MacroAssembler::F64x2PromoteLowF32x4(Simd128Register dst, Simd128Register src, Simd128Register scratch1, Register scratch2, Register scratch3, @@ -6043,7 +6043,7 @@ void TurboAssembler::F64x2PromoteLowF32x4(Simd128Register dst, vlvgp(dst, scratch3, scratch4); } -void TurboAssembler::F32x4DemoteF64x2Zero(Simd128Register dst, +void MacroAssembler::F32x4DemoteF64x2Zero(Simd128Register dst, Simd128Register src, Simd128Register scratch1, Register scratch2, Register scratch3, @@ -6071,14 +6071,14 @@ void TurboAssembler::F32x4DemoteF64x2Zero(Simd128Register dst, Condition(lane_size)); \ va(dst, scratch1, 
scratch2, Condition(0), Condition(0), \ Condition(lane_size + 1)); -void TurboAssembler::I32x4ExtAddPairwiseI16x8S(Simd128Register dst, +void MacroAssembler::I32x4ExtAddPairwiseI16x8S(Simd128Register dst, Simd128Register src, Simd128Register scratch1, Simd128Register scratch2) { EXT_ADD_PAIRWISE(dst, src, scratch1, scratch2, 1, vme, vmo) } -void TurboAssembler::I32x4ExtAddPairwiseI16x8U(Simd128Register dst, +void MacroAssembler::I32x4ExtAddPairwiseI16x8U(Simd128Register dst, Simd128Register src, Simd128Register scratch, Simd128Register scratch2) { @@ -6086,14 +6086,14 @@ void TurboAssembler::I32x4ExtAddPairwiseI16x8U(Simd128Register dst, vsum(dst, src, scratch, Condition(0), Condition(0), Condition(1)); } -void TurboAssembler::I16x8ExtAddPairwiseI8x16S(Simd128Register dst, +void MacroAssembler::I16x8ExtAddPairwiseI8x16S(Simd128Register dst, Simd128Register src, Simd128Register scratch1, Simd128Register scratch2) { EXT_ADD_PAIRWISE(dst, src, scratch1, scratch2, 0, vme, vmo) } -void TurboAssembler::I16x8ExtAddPairwiseI8x16U(Simd128Register dst, +void MacroAssembler::I16x8ExtAddPairwiseI8x16U(Simd128Register dst, Simd128Register src, Simd128Register scratch1, Simd128Register scratch2) { @@ -6101,7 +6101,7 @@ void TurboAssembler::I16x8ExtAddPairwiseI8x16U(Simd128Register dst, } #undef EXT_ADD_PAIRWISE -void TurboAssembler::I32x4TruncSatF64x2SZero(Simd128Register dst, +void MacroAssembler::I32x4TruncSatF64x2SZero(Simd128Register dst, Simd128Register src, Simd128Register scratch) { // NaN to 0. @@ -6113,7 +6113,7 @@ void TurboAssembler::I32x4TruncSatF64x2SZero(Simd128Register dst, vpks(dst, dst, scratch, Condition(0), Condition(3)); } -void TurboAssembler::I32x4TruncSatF64x2UZero(Simd128Register dst, +void MacroAssembler::I32x4TruncSatF64x2UZero(Simd128Register dst, Simd128Register src, Simd128Register scratch) { vclgd(scratch, src, Condition(5), Condition(0), Condition(3)); @@ -6121,14 +6121,14 @@ void TurboAssembler::I32x4TruncSatF64x2UZero(Simd128Register dst, vpkls(dst, dst, scratch, Condition(0), Condition(3)); } -void TurboAssembler::S128Const(Simd128Register dst, uint64_t high, uint64_t low, +void MacroAssembler::S128Const(Simd128Register dst, uint64_t high, uint64_t low, Register scratch1, Register scratch2) { mov(scratch1, Operand(low)); mov(scratch2, Operand(high)); vlvgp(dst, scratch2, scratch1); } -void TurboAssembler::I8x16Swizzle(Simd128Register dst, Simd128Register src1, +void MacroAssembler::I8x16Swizzle(Simd128Register dst, Simd128Register src1, Simd128Register src2, Register scratch1, Register scratch2, Simd128Register scratch3, Simd128Register scratch4) { @@ -6148,7 +6148,7 @@ void TurboAssembler::I8x16Swizzle(Simd128Register dst, Simd128Register src1, vperm(dst, dst, scratch3, scratch4, Condition(0), Condition(0)); } -void TurboAssembler::I8x16Shuffle(Simd128Register dst, Simd128Register src1, +void MacroAssembler::I8x16Shuffle(Simd128Register dst, Simd128Register src1, Simd128Register src2, uint64_t high, uint64_t low, Register scratch1, Register scratch2, Simd128Register scratch3) { @@ -6158,7 +6158,7 @@ void TurboAssembler::I8x16Shuffle(Simd128Register dst, Simd128Register src1, vperm(dst, src1, src2, scratch3, Condition(0), Condition(0)); } -void TurboAssembler::I32x4DotI16x8S(Simd128Register dst, Simd128Register src1, +void MacroAssembler::I32x4DotI16x8S(Simd128Register dst, Simd128Register src1, Simd128Register src2, Simd128Register scratch) { vme(scratch, src1, src2, Condition(0), Condition(0), Condition(1)); @@ -6176,7 +6176,7 @@ void 
TurboAssembler::I32x4DotI16x8S(Simd128Register dst, Simd128Register src1, vrepi(scratch, Operand(15), Condition(2)); \ vesrav(accumulator, accumulator, scratch, Condition(0), Condition(0), \ Condition(2)); -void TurboAssembler::I16x8Q15MulRSatS(Simd128Register dst, Simd128Register src1, +void MacroAssembler::I16x8Q15MulRSatS(Simd128Register dst, Simd128Register src1, Simd128Register src2, Simd128Register scratch1, Simd128Register scratch2, @@ -6206,7 +6206,7 @@ void TurboAssembler::I16x8Q15MulRSatS(Simd128Register dst, Simd128Register src1, V(8x16, vlrep, LoadU8, 0) #define LOAD_SPLAT(name, vector_instr, scalar_instr, condition) \ - void TurboAssembler::LoadAndSplat##name##LE( \ + void MacroAssembler::LoadAndSplat##name##LE( \ Simd128Register dst, const MemOperand& mem, Register scratch) { \ if (CAN_LOAD_STORE_REVERSE && is_uint12(mem.offset())) { \ vector_instr(dst, mem, Condition(condition)); \ @@ -6229,7 +6229,7 @@ LOAD_SPLAT_LIST(LOAD_SPLAT) V(8x8S, vuph, 0) #define LOAD_EXTEND(name, unpack_instr, condition) \ - void TurboAssembler::LoadAndExtend##name##LE( \ + void MacroAssembler::LoadAndExtend##name##LE( \ Simd128Register dst, const MemOperand& mem, Register scratch) { \ if (CAN_LOAD_STORE_REVERSE && is_uint12(mem.offset())) { \ vlebrg(dst, mem, Condition(0)); \ @@ -6243,7 +6243,7 @@ LOAD_EXTEND_LIST(LOAD_EXTEND) #undef LOAD_EXTEND #undef LOAD_EXTEND -void TurboAssembler::LoadV32ZeroLE(Simd128Register dst, const MemOperand& mem, +void MacroAssembler::LoadV32ZeroLE(Simd128Register dst, const MemOperand& mem, Register scratch) { vx(dst, dst, dst, Condition(0), Condition(0), Condition(0)); if (CAN_LOAD_STORE_REVERSE && is_uint12(mem.offset())) { @@ -6254,7 +6254,7 @@ void TurboAssembler::LoadV32ZeroLE(Simd128Register dst, const MemOperand& mem, vlvg(dst, scratch, MemOperand(r0, 3), Condition(2)); } -void TurboAssembler::LoadV64ZeroLE(Simd128Register dst, const MemOperand& mem, +void MacroAssembler::LoadV64ZeroLE(Simd128Register dst, const MemOperand& mem, Register scratch) { vx(dst, dst, dst, Condition(0), Condition(0), Condition(0)); if (CAN_LOAD_STORE_REVERSE && is_uint12(mem.offset())) { @@ -6272,7 +6272,7 @@ void TurboAssembler::LoadV64ZeroLE(Simd128Register dst, const MemOperand& mem, V(8, vleb, LoadU8, 0) #define LOAD_LANE(name, vector_instr, scalar_instr, condition) \ - void TurboAssembler::LoadLane##name##LE(Simd128Register dst, \ + void MacroAssembler::LoadLane##name##LE(Simd128Register dst, \ const MemOperand& mem, int lane, \ Register scratch) { \ if (CAN_LOAD_STORE_REVERSE && is_uint12(mem.offset())) { \ @@ -6293,7 +6293,7 @@ LOAD_LANE_LIST(LOAD_LANE) V(8, vsteb, StoreU8, 0) #define STORE_LANE(name, vector_instr, scalar_instr, condition) \ - void TurboAssembler::StoreLane##name##LE(Simd128Register src, \ + void MacroAssembler::StoreLane##name##LE(Simd128Register src, \ const MemOperand& mem, int lane, \ Register scratch) { \ if (CAN_LOAD_STORE_REVERSE && is_uint12(mem.offset())) { \ @@ -6317,10 +6317,10 @@ void MacroAssembler::LoadStackLimit(Register destination, StackLimitKind kind) { kind == StackLimitKind::kRealStackLimit ? 
ExternalReference::address_of_real_jslimit(isolate) : ExternalReference::address_of_jslimit(isolate); - DCHECK(TurboAssembler::IsAddressableThroughRootRegister(isolate, limit)); + DCHECK(MacroAssembler::IsAddressableThroughRootRegister(isolate, limit)); intptr_t offset = - TurboAssembler::RootRegisterOffsetForExternalReference(isolate, limit); + MacroAssembler::RootRegisterOffsetForExternalReference(isolate, limit); CHECK(is_int32(offset)); LoadU64(destination, MemOperand(kRootRegister, offset)); } diff --git a/src/codegen/s390/macro-assembler-s390.h b/src/codegen/s390/macro-assembler-s390.h index a008a3ab7d..1ada98dd21 100644 --- a/src/codegen/s390/macro-assembler-s390.h +++ b/src/codegen/s390/macro-assembler-s390.h @@ -41,9 +41,9 @@ Register GetRegisterThatIsNotOneOf(Register reg1, Register reg2 = no_reg, Register reg5 = no_reg, Register reg6 = no_reg); -class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase { +class V8_EXPORT_PRIVATE MacroAssembler : public MacroAssemblerBase { public: - using TurboAssemblerBase::TurboAssemblerBase; + using MacroAssemblerBase::MacroAssemblerBase; void CallBuiltin(Builtin builtin, Condition cond = al); void TailCallBuiltin(Builtin builtin, Condition cond = al); @@ -1502,22 +1502,6 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase { void CountTrailingZerosU64(Register dst, Register src, Register scratch_pair = r0); - private: - static const int kSmiShift = kSmiTagSize + kSmiShiftSize; - - void CallCFunctionHelper(Register function, int num_reg_arguments, - int num_double_arguments); - - void Jump(intptr_t target, RelocInfo::Mode rmode, Condition cond = al); - int CalculateStackPassedWords(int num_reg_arguments, - int num_double_arguments); -}; - -// MacroAssembler implements a collection of frequently used macros. -class V8_EXPORT_PRIVATE MacroAssembler : public TurboAssembler { - public: - using TurboAssembler::TurboAssembler; - void LoadStackLimit(Register destination, StackLimitKind kind); // It assumes that the arguments are located below the stack pointer. // argc is the number of arguments not including the receiver. @@ -1803,6 +1787,14 @@ class V8_EXPORT_PRIVATE MacroAssembler : public TurboAssembler { private: static const int kSmiShift = kSmiTagSize + kSmiShiftSize; + + void CallCFunctionHelper(Register function, int num_reg_arguments, + int num_double_arguments); + + void Jump(intptr_t target, RelocInfo::Mode rmode, Condition cond = al); + int CalculateStackPassedWords(int num_reg_arguments, + int num_double_arguments); + // Helper functions for generating invokes. void InvokePrologue(Register expected_parameter_count, Register actual_parameter_count, Label* done, diff --git a/src/codegen/shared-ia32-x64/macro-assembler-shared-ia32-x64.cc b/src/codegen/shared-ia32-x64/macro-assembler-shared-ia32-x64.cc index 60e1bce38e..fb7fb8d582 100644 --- a/src/codegen/shared-ia32-x64/macro-assembler-shared-ia32-x64.cc +++ b/src/codegen/shared-ia32-x64/macro-assembler-shared-ia32-x64.cc @@ -27,7 +27,7 @@ namespace v8 { namespace internal { -void SharedTurboAssembler::Move(Register dst, uint32_t src) { +void SharedMacroAssemblerBase::Move(Register dst, uint32_t src) { // Helper to paper over the different assembler function names. 
#if V8_TARGET_ARCH_IA32 mov(dst, Immediate(src)); @@ -38,7 +38,7 @@ void SharedTurboAssembler::Move(Register dst, uint32_t src) { #endif } -void SharedTurboAssembler::Move(Register dst, Register src) { +void SharedMacroAssemblerBase::Move(Register dst, Register src) { // Helper to paper over the different assembler function names. if (dst != src) { #if V8_TARGET_ARCH_IA32 @@ -51,7 +51,7 @@ void SharedTurboAssembler::Move(Register dst, Register src) { } } -void SharedTurboAssembler::Add(Register dst, Immediate src) { +void SharedMacroAssemblerBase::Add(Register dst, Immediate src) { // Helper to paper over the different assembler function names. #if V8_TARGET_ARCH_IA32 add(dst, src); @@ -62,7 +62,7 @@ void SharedTurboAssembler::Add(Register dst, Immediate src) { #endif } -void SharedTurboAssembler::And(Register dst, Immediate src) { +void SharedMacroAssemblerBase::And(Register dst, Immediate src) { // Helper to paper over the different assembler function names. #if V8_TARGET_ARCH_IA32 and_(dst, src); @@ -77,8 +77,8 @@ void SharedTurboAssembler::And(Register dst, Immediate src) { #endif } -void SharedTurboAssembler::Movhps(XMMRegister dst, XMMRegister src1, - Operand src2) { +void SharedMacroAssemblerBase::Movhps(XMMRegister dst, XMMRegister src1, + Operand src2) { if (CpuFeatures::IsSupported(AVX)) { CpuFeatureScope scope(this, AVX); vmovhps(dst, src1, src2); @@ -90,8 +90,8 @@ void SharedTurboAssembler::Movhps(XMMRegister dst, XMMRegister src1, } } -void SharedTurboAssembler::Movlps(XMMRegister dst, XMMRegister src1, - Operand src2) { +void SharedMacroAssemblerBase::Movlps(XMMRegister dst, XMMRegister src1, + Operand src2) { if (CpuFeatures::IsSupported(AVX)) { CpuFeatureScope scope(this, AVX); vmovlps(dst, src1, src2); @@ -102,8 +102,8 @@ void SharedTurboAssembler::Movlps(XMMRegister dst, XMMRegister src1, movlps(dst, src2); } } -void SharedTurboAssembler::Blendvpd(XMMRegister dst, XMMRegister src1, - XMMRegister src2, XMMRegister mask) { +void SharedMacroAssemblerBase::Blendvpd(XMMRegister dst, XMMRegister src1, + XMMRegister src2, XMMRegister mask) { if (CpuFeatures::IsSupported(AVX)) { CpuFeatureScope scope(this, AVX); vblendvpd(dst, src1, src2, mask); @@ -115,8 +115,8 @@ void SharedTurboAssembler::Blendvpd(XMMRegister dst, XMMRegister src1, } } -void SharedTurboAssembler::Blendvps(XMMRegister dst, XMMRegister src1, - XMMRegister src2, XMMRegister mask) { +void SharedMacroAssemblerBase::Blendvps(XMMRegister dst, XMMRegister src1, + XMMRegister src2, XMMRegister mask) { if (CpuFeatures::IsSupported(AVX)) { CpuFeatureScope scope(this, AVX); vblendvps(dst, src1, src2, mask); @@ -128,8 +128,8 @@ void SharedTurboAssembler::Blendvps(XMMRegister dst, XMMRegister src1, } } -void SharedTurboAssembler::Pblendvb(XMMRegister dst, XMMRegister src1, - XMMRegister src2, XMMRegister mask) { +void SharedMacroAssemblerBase::Pblendvb(XMMRegister dst, XMMRegister src1, + XMMRegister src2, XMMRegister mask) { if (CpuFeatures::IsSupported(AVX)) { CpuFeatureScope scope(this, AVX); vpblendvb(dst, src1, src2, mask); @@ -141,8 +141,8 @@ void SharedTurboAssembler::Pblendvb(XMMRegister dst, XMMRegister src1, } } -void SharedTurboAssembler::Shufps(XMMRegister dst, XMMRegister src1, - XMMRegister src2, uint8_t imm8) { +void SharedMacroAssemblerBase::Shufps(XMMRegister dst, XMMRegister src1, + XMMRegister src2, uint8_t imm8) { if (CpuFeatures::IsSupported(AVX)) { CpuFeatureScope avx_scope(this, AVX); vshufps(dst, src1, src2, imm8); @@ -154,8 +154,8 @@ void SharedTurboAssembler::Shufps(XMMRegister dst, XMMRegister 
src1, } } -void SharedTurboAssembler::F64x2ExtractLane(DoubleRegister dst, XMMRegister src, - uint8_t lane) { +void SharedMacroAssemblerBase::F64x2ExtractLane(DoubleRegister dst, + XMMRegister src, uint8_t lane) { ASM_CODE_COMMENT(this); if (lane == 0) { if (dst != src) { @@ -173,8 +173,10 @@ void SharedTurboAssembler::F64x2ExtractLane(DoubleRegister dst, XMMRegister src, } } -void SharedTurboAssembler::F64x2ReplaceLane(XMMRegister dst, XMMRegister src, - DoubleRegister rep, uint8_t lane) { +void SharedMacroAssemblerBase::F64x2ReplaceLane(XMMRegister dst, + XMMRegister src, + DoubleRegister rep, + uint8_t lane) { ASM_CODE_COMMENT(this); if (CpuFeatures::IsSupported(AVX)) { CpuFeatureScope scope(this, AVX); @@ -197,8 +199,8 @@ void SharedTurboAssembler::F64x2ReplaceLane(XMMRegister dst, XMMRegister src, } } -void SharedTurboAssembler::F32x4Min(XMMRegister dst, XMMRegister lhs, - XMMRegister rhs, XMMRegister scratch) { +void SharedMacroAssemblerBase::F32x4Min(XMMRegister dst, XMMRegister lhs, + XMMRegister rhs, XMMRegister scratch) { ASM_CODE_COMMENT(this); // The minps instruction doesn't propagate NaNs and +0's in its first // operand. Perform minps in both orders, merge the results, and adjust. @@ -226,8 +228,8 @@ void SharedTurboAssembler::F32x4Min(XMMRegister dst, XMMRegister lhs, Andnps(dst, dst, scratch); } -void SharedTurboAssembler::F32x4Max(XMMRegister dst, XMMRegister lhs, - XMMRegister rhs, XMMRegister scratch) { +void SharedMacroAssemblerBase::F32x4Max(XMMRegister dst, XMMRegister lhs, + XMMRegister rhs, XMMRegister scratch) { ASM_CODE_COMMENT(this); // The maxps instruction doesn't propagate NaNs and +0's in its first // operand. Perform maxps in both orders, merge the results, and adjust. @@ -258,8 +260,8 @@ void SharedTurboAssembler::F32x4Max(XMMRegister dst, XMMRegister lhs, Andnps(dst, dst, scratch); } -void SharedTurboAssembler::F64x2Min(XMMRegister dst, XMMRegister lhs, - XMMRegister rhs, XMMRegister scratch) { +void SharedMacroAssemblerBase::F64x2Min(XMMRegister dst, XMMRegister lhs, + XMMRegister rhs, XMMRegister scratch) { ASM_CODE_COMMENT(this); if (CpuFeatures::IsSupported(AVX)) { CpuFeatureScope scope(this, AVX); @@ -296,8 +298,8 @@ void SharedTurboAssembler::F64x2Min(XMMRegister dst, XMMRegister lhs, } } -void SharedTurboAssembler::F64x2Max(XMMRegister dst, XMMRegister lhs, - XMMRegister rhs, XMMRegister scratch) { +void SharedMacroAssemblerBase::F64x2Max(XMMRegister dst, XMMRegister lhs, + XMMRegister rhs, XMMRegister scratch) { ASM_CODE_COMMENT(this); if (CpuFeatures::IsSupported(AVX)) { CpuFeatureScope scope(this, AVX); @@ -336,7 +338,7 @@ void SharedTurboAssembler::F64x2Max(XMMRegister dst, XMMRegister lhs, } } -void SharedTurboAssembler::F32x4Splat(XMMRegister dst, DoubleRegister src) { +void SharedMacroAssemblerBase::F32x4Splat(XMMRegister dst, DoubleRegister src) { ASM_CODE_COMMENT(this); if (CpuFeatures::IsSupported(AVX2)) { CpuFeatureScope avx2_scope(this, AVX2); @@ -354,8 +356,8 @@ void SharedTurboAssembler::F32x4Splat(XMMRegister dst, DoubleRegister src) { } } -void SharedTurboAssembler::F32x4ExtractLane(FloatRegister dst, XMMRegister src, - uint8_t lane) { +void SharedMacroAssemblerBase::F32x4ExtractLane(FloatRegister dst, + XMMRegister src, uint8_t lane) { ASM_CODE_COMMENT(this); DCHECK_LT(lane, 4); // These instructions are shorter than insertps, but will leave junk in @@ -376,8 +378,8 @@ void SharedTurboAssembler::F32x4ExtractLane(FloatRegister dst, XMMRegister src, } } -void SharedTurboAssembler::S128Store32Lane(Operand dst, XMMRegister src, - 
uint8_t laneidx) { +void SharedMacroAssemblerBase::S128Store32Lane(Operand dst, XMMRegister src, + uint8_t laneidx) { ASM_CODE_COMMENT(this); if (laneidx == 0) { Movss(dst, src); @@ -388,8 +390,8 @@ void SharedTurboAssembler::S128Store32Lane(Operand dst, XMMRegister src, } template -void SharedTurboAssembler::I8x16SplatPreAvx2(XMMRegister dst, Op src, - XMMRegister scratch) { +void SharedMacroAssemblerBase::I8x16SplatPreAvx2(XMMRegister dst, Op src, + XMMRegister scratch) { ASM_CODE_COMMENT(this); DCHECK(!CpuFeatures::IsSupported(AVX2)); CpuFeatureScope ssse3_scope(this, SSSE3); @@ -398,8 +400,8 @@ void SharedTurboAssembler::I8x16SplatPreAvx2(XMMRegister dst, Op src, Pshufb(dst, scratch); } -void SharedTurboAssembler::I8x16Splat(XMMRegister dst, Register src, - XMMRegister scratch) { +void SharedMacroAssemblerBase::I8x16Splat(XMMRegister dst, Register src, + XMMRegister scratch) { ASM_CODE_COMMENT(this); if (CpuFeatures::IsSupported(AVX2)) { CpuFeatureScope avx2_scope(this, AVX2); @@ -410,8 +412,8 @@ void SharedTurboAssembler::I8x16Splat(XMMRegister dst, Register src, } } -void SharedTurboAssembler::I8x16Splat(XMMRegister dst, Operand src, - XMMRegister scratch) { +void SharedMacroAssemblerBase::I8x16Splat(XMMRegister dst, Operand src, + XMMRegister scratch) { ASM_CODE_COMMENT(this); DCHECK_OPERAND_IS_NOT_REG(src); if (CpuFeatures::IsSupported(AVX2)) { @@ -422,9 +424,9 @@ void SharedTurboAssembler::I8x16Splat(XMMRegister dst, Operand src, } } -void SharedTurboAssembler::I8x16Shl(XMMRegister dst, XMMRegister src1, - uint8_t src2, Register tmp1, - XMMRegister tmp2) { +void SharedMacroAssemblerBase::I8x16Shl(XMMRegister dst, XMMRegister src1, + uint8_t src2, Register tmp1, + XMMRegister tmp2) { ASM_CODE_COMMENT(this); DCHECK_NE(dst, tmp2); // Perform 16-bit shift, then mask away low bits. @@ -444,9 +446,9 @@ void SharedTurboAssembler::I8x16Shl(XMMRegister dst, XMMRegister src1, Pand(dst, tmp2); } -void SharedTurboAssembler::I8x16Shl(XMMRegister dst, XMMRegister src1, - Register src2, Register tmp1, - XMMRegister tmp2, XMMRegister tmp3) { +void SharedMacroAssemblerBase::I8x16Shl(XMMRegister dst, XMMRegister src1, + Register src2, Register tmp1, + XMMRegister tmp2, XMMRegister tmp3) { ASM_CODE_COMMENT(this); DCHECK(!AreAliased(dst, tmp2, tmp3)); DCHECK(!AreAliased(src1, tmp2, tmp3)); @@ -471,8 +473,8 @@ void SharedTurboAssembler::I8x16Shl(XMMRegister dst, XMMRegister src1, Psllw(dst, dst, tmp3); } -void SharedTurboAssembler::I8x16ShrS(XMMRegister dst, XMMRegister src1, - uint8_t src2, XMMRegister tmp) { +void SharedMacroAssemblerBase::I8x16ShrS(XMMRegister dst, XMMRegister src1, + uint8_t src2, XMMRegister tmp) { ASM_CODE_COMMENT(this); // Unpack bytes into words, do word (16-bit) shifts, and repack. 
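// (Editor's note, sketching the idea behind the line above: SSE2 has no
// per-byte arithmetic shift, so each byte is widened to a 16-bit lane by
// interleaving the input with itself, shifted with a word shift of
// src2 + 8 so the original sign bit is the one that gets extended, and
// narrowed back with a saturating pack. The intermediate results stay in
// int8 range, so the pack never actually saturates.)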
DCHECK_NE(dst, tmp); @@ -485,9 +487,9 @@ void SharedTurboAssembler::I8x16ShrS(XMMRegister dst, XMMRegister src1, Packsswb(dst, tmp); } -void SharedTurboAssembler::I8x16ShrS(XMMRegister dst, XMMRegister src1, - Register src2, Register tmp1, - XMMRegister tmp2, XMMRegister tmp3) { +void SharedMacroAssemblerBase::I8x16ShrS(XMMRegister dst, XMMRegister src1, + Register src2, Register tmp1, + XMMRegister tmp2, XMMRegister tmp3) { ASM_CODE_COMMENT(this); DCHECK(!AreAliased(dst, tmp2, tmp3)); DCHECK_NE(src1, tmp2); @@ -506,9 +508,9 @@ void SharedTurboAssembler::I8x16ShrS(XMMRegister dst, XMMRegister src1, Packsswb(dst, tmp2); } -void SharedTurboAssembler::I8x16ShrU(XMMRegister dst, XMMRegister src1, - uint8_t src2, Register tmp1, - XMMRegister tmp2) { +void SharedMacroAssemblerBase::I8x16ShrU(XMMRegister dst, XMMRegister src1, + uint8_t src2, Register tmp1, + XMMRegister tmp2) { ASM_CODE_COMMENT(this); DCHECK_NE(dst, tmp2); if (!CpuFeatures::IsSupported(AVX) && (dst != src1)) { @@ -528,9 +530,9 @@ void SharedTurboAssembler::I8x16ShrU(XMMRegister dst, XMMRegister src1, Pand(dst, tmp2); } -void SharedTurboAssembler::I8x16ShrU(XMMRegister dst, XMMRegister src1, - Register src2, Register tmp1, - XMMRegister tmp2, XMMRegister tmp3) { +void SharedMacroAssemblerBase::I8x16ShrU(XMMRegister dst, XMMRegister src1, + Register src2, Register tmp1, + XMMRegister tmp2, XMMRegister tmp3) { ASM_CODE_COMMENT(this); DCHECK(!AreAliased(dst, tmp2, tmp3)); DCHECK_NE(src1, tmp2); @@ -550,14 +552,14 @@ void SharedTurboAssembler::I8x16ShrU(XMMRegister dst, XMMRegister src1, } template -void SharedTurboAssembler::I16x8SplatPreAvx2(XMMRegister dst, Op src) { +void SharedMacroAssemblerBase::I16x8SplatPreAvx2(XMMRegister dst, Op src) { DCHECK(!CpuFeatures::IsSupported(AVX2)); Movd(dst, src); Pshuflw(dst, dst, uint8_t{0x0}); Punpcklqdq(dst, dst); } -void SharedTurboAssembler::I16x8Splat(XMMRegister dst, Register src) { +void SharedMacroAssemblerBase::I16x8Splat(XMMRegister dst, Register src) { ASM_CODE_COMMENT(this); if (CpuFeatures::IsSupported(AVX2)) { CpuFeatureScope avx2_scope(this, AVX2); @@ -568,7 +570,7 @@ void SharedTurboAssembler::I16x8Splat(XMMRegister dst, Register src) { } } -void SharedTurboAssembler::I16x8Splat(XMMRegister dst, Operand src) { +void SharedMacroAssemblerBase::I16x8Splat(XMMRegister dst, Operand src) { ASM_CODE_COMMENT(this); DCHECK_OPERAND_IS_NOT_REG(src); if (CpuFeatures::IsSupported(AVX2)) { @@ -579,18 +581,20 @@ void SharedTurboAssembler::I16x8Splat(XMMRegister dst, Operand src) { } } -void SharedTurboAssembler::I16x8ExtMulLow(XMMRegister dst, XMMRegister src1, - XMMRegister src2, XMMRegister scratch, - bool is_signed) { +void SharedMacroAssemblerBase::I16x8ExtMulLow(XMMRegister dst, XMMRegister src1, + XMMRegister src2, + XMMRegister scratch, + bool is_signed) { ASM_CODE_COMMENT(this); is_signed ? Pmovsxbw(scratch, src1) : Pmovzxbw(scratch, src1); is_signed ? 
Pmovsxbw(dst, src2) : Pmovzxbw(dst, src2); Pmullw(dst, scratch); } -void SharedTurboAssembler::I16x8ExtMulHighS(XMMRegister dst, XMMRegister src1, - XMMRegister src2, - XMMRegister scratch) { +void SharedMacroAssemblerBase::I16x8ExtMulHighS(XMMRegister dst, + XMMRegister src1, + XMMRegister src2, + XMMRegister scratch) { ASM_CODE_COMMENT(this); if (CpuFeatures::IsSupported(AVX)) { CpuFeatureScope avx_scope(this, AVX); @@ -612,9 +616,10 @@ void SharedTurboAssembler::I16x8ExtMulHighS(XMMRegister dst, XMMRegister src1, } } -void SharedTurboAssembler::I16x8ExtMulHighU(XMMRegister dst, XMMRegister src1, - XMMRegister src2, - XMMRegister scratch) { +void SharedMacroAssemblerBase::I16x8ExtMulHighU(XMMRegister dst, + XMMRegister src1, + XMMRegister src2, + XMMRegister scratch) { ASM_CODE_COMMENT(this); // The logic here is slightly complicated to handle all the cases of register // aliasing. This allows flexibility for callers in TurboFan and Liftoff. @@ -662,8 +667,8 @@ void SharedTurboAssembler::I16x8ExtMulHighU(XMMRegister dst, XMMRegister src1, } } -void SharedTurboAssembler::I16x8SConvertI8x16High(XMMRegister dst, - XMMRegister src) { +void SharedMacroAssemblerBase::I16x8SConvertI8x16High(XMMRegister dst, + XMMRegister src) { ASM_CODE_COMMENT(this); if (CpuFeatures::IsSupported(AVX)) { CpuFeatureScope avx_scope(this, AVX); @@ -685,9 +690,9 @@ void SharedTurboAssembler::I16x8SConvertI8x16High(XMMRegister dst, } } -void SharedTurboAssembler::I16x8UConvertI8x16High(XMMRegister dst, - XMMRegister src, - XMMRegister scratch) { +void SharedMacroAssemblerBase::I16x8UConvertI8x16High(XMMRegister dst, + XMMRegister src, + XMMRegister scratch) { ASM_CODE_COMMENT(this); if (CpuFeatures::IsSupported(AVX)) { CpuFeatureScope avx_scope(this, AVX); @@ -711,9 +716,10 @@ void SharedTurboAssembler::I16x8UConvertI8x16High(XMMRegister dst, } } -void SharedTurboAssembler::I16x8Q15MulRSatS(XMMRegister dst, XMMRegister src1, - XMMRegister src2, - XMMRegister scratch) { +void SharedMacroAssemblerBase::I16x8Q15MulRSatS(XMMRegister dst, + XMMRegister src1, + XMMRegister src2, + XMMRegister scratch) { ASM_CODE_COMMENT(this); // k = i16x8.splat(0x8000) Pcmpeqd(scratch, scratch); @@ -729,9 +735,9 @@ void SharedTurboAssembler::I16x8Q15MulRSatS(XMMRegister dst, XMMRegister src1, Pxor(dst, scratch); } -void SharedTurboAssembler::I16x8DotI8x16I7x16S(XMMRegister dst, - XMMRegister src1, - XMMRegister src2) { +void SharedMacroAssemblerBase::I16x8DotI8x16I7x16S(XMMRegister dst, + XMMRegister src1, + XMMRegister src2) { ASM_CODE_COMMENT(this); if (CpuFeatures::IsSupported(AVX)) { CpuFeatureScope avx_scope(this, AVX); @@ -744,7 +750,7 @@ void SharedTurboAssembler::I16x8DotI8x16I7x16S(XMMRegister dst, } } -void SharedTurboAssembler::I32x4DotI8x16I7x16AddS( +void SharedMacroAssemblerBase::I32x4DotI8x16I7x16AddS( XMMRegister dst, XMMRegister src1, XMMRegister src2, XMMRegister src3, XMMRegister scratch, XMMRegister splat_reg) { ASM_CODE_COMMENT(this); @@ -768,9 +774,9 @@ void SharedTurboAssembler::I32x4DotI8x16I7x16AddS( } } -void SharedTurboAssembler::I32x4ExtAddPairwiseI16x8U(XMMRegister dst, - XMMRegister src, - XMMRegister tmp) { +void SharedMacroAssemblerBase::I32x4ExtAddPairwiseI16x8U(XMMRegister dst, + XMMRegister src, + XMMRegister tmp) { ASM_CODE_COMMENT(this); if (CpuFeatures::IsSupported(AVX)) { CpuFeatureScope avx_scope(this, AVX); @@ -812,9 +818,10 @@ void SharedTurboAssembler::I32x4ExtAddPairwiseI16x8U(XMMRegister dst, // 1. Multiply low word into scratch. // 2. Multiply high word (can be signed or unsigned) into dst. 
// 3. Unpack and interleave scratch and dst into dst. -void SharedTurboAssembler::I32x4ExtMul(XMMRegister dst, XMMRegister src1, - XMMRegister src2, XMMRegister scratch, - bool low, bool is_signed) { +void SharedMacroAssemblerBase::I32x4ExtMul(XMMRegister dst, XMMRegister src1, + XMMRegister src2, + XMMRegister scratch, bool low, + bool is_signed) { ASM_CODE_COMMENT(this); if (CpuFeatures::IsSupported(AVX)) { CpuFeatureScope avx_scope(this, AVX); @@ -830,8 +837,8 @@ void SharedTurboAssembler::I32x4ExtMul(XMMRegister dst, XMMRegister src1, } } -void SharedTurboAssembler::I32x4SConvertI16x8High(XMMRegister dst, - XMMRegister src) { +void SharedMacroAssemblerBase::I32x4SConvertI16x8High(XMMRegister dst, + XMMRegister src) { ASM_CODE_COMMENT(this); if (CpuFeatures::IsSupported(AVX)) { CpuFeatureScope avx_scope(this, AVX); @@ -853,9 +860,9 @@ void SharedTurboAssembler::I32x4SConvertI16x8High(XMMRegister dst, } } -void SharedTurboAssembler::I32x4UConvertI16x8High(XMMRegister dst, - XMMRegister src, - XMMRegister scratch) { +void SharedMacroAssemblerBase::I32x4UConvertI16x8High(XMMRegister dst, + XMMRegister src, + XMMRegister scratch) { ASM_CODE_COMMENT(this); if (CpuFeatures::IsSupported(AVX)) { CpuFeatureScope avx_scope(this, AVX); @@ -879,8 +886,8 @@ void SharedTurboAssembler::I32x4UConvertI16x8High(XMMRegister dst, } } -void SharedTurboAssembler::I64x2Neg(XMMRegister dst, XMMRegister src, - XMMRegister scratch) { +void SharedMacroAssemblerBase::I64x2Neg(XMMRegister dst, XMMRegister src, + XMMRegister scratch) { ASM_CODE_COMMENT(this); if (CpuFeatures::IsSupported(AVX)) { CpuFeatureScope scope(this, AVX); @@ -896,8 +903,8 @@ void SharedTurboAssembler::I64x2Neg(XMMRegister dst, XMMRegister src, } } -void SharedTurboAssembler::I64x2Abs(XMMRegister dst, XMMRegister src, - XMMRegister scratch) { +void SharedMacroAssemblerBase::I64x2Abs(XMMRegister dst, XMMRegister src, + XMMRegister scratch) { ASM_CODE_COMMENT(this); if (CpuFeatures::IsSupported(AVX)) { CpuFeatureScope avx_scope(this, AVX); @@ -917,8 +924,8 @@ void SharedTurboAssembler::I64x2Abs(XMMRegister dst, XMMRegister src, } } -void SharedTurboAssembler::I64x2GtS(XMMRegister dst, XMMRegister src0, - XMMRegister src1, XMMRegister scratch) { +void SharedMacroAssemblerBase::I64x2GtS(XMMRegister dst, XMMRegister src0, + XMMRegister src1, XMMRegister scratch) { ASM_CODE_COMMENT(this); if (CpuFeatures::IsSupported(AVX)) { CpuFeatureScope avx_scope(this, AVX); @@ -951,8 +958,8 @@ void SharedTurboAssembler::I64x2GtS(XMMRegister dst, XMMRegister src0, } } -void SharedTurboAssembler::I64x2GeS(XMMRegister dst, XMMRegister src0, - XMMRegister src1, XMMRegister scratch) { +void SharedMacroAssemblerBase::I64x2GeS(XMMRegister dst, XMMRegister src0, + XMMRegister src1, XMMRegister scratch) { ASM_CODE_COMMENT(this); if (CpuFeatures::IsSupported(AVX)) { CpuFeatureScope avx_scope(this, AVX); @@ -986,8 +993,8 @@ void SharedTurboAssembler::I64x2GeS(XMMRegister dst, XMMRegister src0, } } -void SharedTurboAssembler::I64x2ShrS(XMMRegister dst, XMMRegister src, - uint8_t shift, XMMRegister xmm_tmp) { +void SharedMacroAssemblerBase::I64x2ShrS(XMMRegister dst, XMMRegister src, + uint8_t shift, XMMRegister xmm_tmp) { ASM_CODE_COMMENT(this); DCHECK_GT(64, shift); DCHECK_NE(xmm_tmp, dst); @@ -1019,10 +1026,10 @@ void SharedTurboAssembler::I64x2ShrS(XMMRegister dst, XMMRegister src, Psubq(dst, xmm_tmp); } -void SharedTurboAssembler::I64x2ShrS(XMMRegister dst, XMMRegister src, - Register shift, XMMRegister xmm_tmp, - XMMRegister xmm_shift, - Register tmp_shift) { +void 
SharedMacroAssemblerBase::I64x2ShrS(XMMRegister dst, XMMRegister src, + Register shift, XMMRegister xmm_tmp, + XMMRegister xmm_shift, + Register tmp_shift) { ASM_CODE_COMMENT(this); DCHECK_NE(xmm_tmp, dst); DCHECK_NE(xmm_tmp, src); @@ -1049,9 +1056,9 @@ void SharedTurboAssembler::I64x2ShrS(XMMRegister dst, XMMRegister src, Psubq(dst, xmm_tmp); } -void SharedTurboAssembler::I64x2Mul(XMMRegister dst, XMMRegister lhs, - XMMRegister rhs, XMMRegister tmp1, - XMMRegister tmp2) { +void SharedMacroAssemblerBase::I64x2Mul(XMMRegister dst, XMMRegister lhs, + XMMRegister rhs, XMMRegister tmp1, + XMMRegister tmp2) { ASM_CODE_COMMENT(this); DCHECK(!AreAliased(dst, tmp1, tmp2)); DCHECK(!AreAliased(lhs, tmp1, tmp2)); @@ -1099,9 +1106,10 @@ void SharedTurboAssembler::I64x2Mul(XMMRegister dst, XMMRegister lhs, // 2. Unpack src1, src0 into even-number elements of dst. // 3. Multiply 1. with 2. // For non-AVX, use non-destructive pshufd instead of punpckldq/punpckhdq. -void SharedTurboAssembler::I64x2ExtMul(XMMRegister dst, XMMRegister src1, - XMMRegister src2, XMMRegister scratch, - bool low, bool is_signed) { +void SharedMacroAssemblerBase::I64x2ExtMul(XMMRegister dst, XMMRegister src1, + XMMRegister src2, + XMMRegister scratch, bool low, + bool is_signed) { ASM_CODE_COMMENT(this); if (CpuFeatures::IsSupported(AVX)) { CpuFeatureScope avx_scope(this, AVX); @@ -1130,8 +1138,8 @@ void SharedTurboAssembler::I64x2ExtMul(XMMRegister dst, XMMRegister src1, } } -void SharedTurboAssembler::I64x2SConvertI32x4High(XMMRegister dst, - XMMRegister src) { +void SharedMacroAssemblerBase::I64x2SConvertI32x4High(XMMRegister dst, + XMMRegister src) { ASM_CODE_COMMENT(this); if (CpuFeatures::IsSupported(AVX)) { CpuFeatureScope avx_scope(this, AVX); @@ -1148,9 +1156,9 @@ void SharedTurboAssembler::I64x2SConvertI32x4High(XMMRegister dst, } } -void SharedTurboAssembler::I64x2UConvertI32x4High(XMMRegister dst, - XMMRegister src, - XMMRegister scratch) { +void SharedMacroAssemblerBase::I64x2UConvertI32x4High(XMMRegister dst, + XMMRegister src, + XMMRegister scratch) { ASM_CODE_COMMENT(this); if (CpuFeatures::IsSupported(AVX)) { CpuFeatureScope avx_scope(this, AVX); @@ -1170,8 +1178,8 @@ void SharedTurboAssembler::I64x2UConvertI32x4High(XMMRegister dst, } } -void SharedTurboAssembler::S128Not(XMMRegister dst, XMMRegister src, - XMMRegister scratch) { +void SharedMacroAssemblerBase::S128Not(XMMRegister dst, XMMRegister src, + XMMRegister scratch) { ASM_CODE_COMMENT(this); if (dst == src) { Pcmpeqd(scratch, scratch); @@ -1182,9 +1190,9 @@ void SharedTurboAssembler::S128Not(XMMRegister dst, XMMRegister src, } } -void SharedTurboAssembler::S128Select(XMMRegister dst, XMMRegister mask, - XMMRegister src1, XMMRegister src2, - XMMRegister scratch) { +void SharedMacroAssemblerBase::S128Select(XMMRegister dst, XMMRegister mask, + XMMRegister src1, XMMRegister src2, + XMMRegister scratch) { ASM_CODE_COMMENT(this); // v128.select = v128.or(v128.and(v1, c), v128.andnot(v2, c)). // pandn(x, y) = !x & y, so we have to flip the mask and input. @@ -1203,8 +1211,8 @@ void SharedTurboAssembler::S128Select(XMMRegister dst, XMMRegister mask, } } -void SharedTurboAssembler::S128Load8Splat(XMMRegister dst, Operand src, - XMMRegister scratch) { +void SharedMacroAssemblerBase::S128Load8Splat(XMMRegister dst, Operand src, + XMMRegister scratch) { ASM_CODE_COMMENT(this); // The trap handler uses the current pc to create a landing, so that it can // determine if a trap occurred in Wasm code due to an OOB load.
Make sure the @@ -1226,8 +1234,8 @@ void SharedTurboAssembler::S128Load8Splat(XMMRegister dst, Operand src, } } -void SharedTurboAssembler::S128Load16Splat(XMMRegister dst, Operand src, - XMMRegister scratch) { +void SharedMacroAssemblerBase::S128Load16Splat(XMMRegister dst, Operand src, + XMMRegister scratch) { ASM_CODE_COMMENT(this); // The trap handler uses the current pc to create a landing, so that it can // determine if a trap occurred in Wasm code due to an OOB load. Make sure the @@ -1248,7 +1256,7 @@ void SharedTurboAssembler::S128Load16Splat(XMMRegister dst, Operand src, } } -void SharedTurboAssembler::S128Load32Splat(XMMRegister dst, Operand src) { +void SharedMacroAssemblerBase::S128Load32Splat(XMMRegister dst, Operand src) { ASM_CODE_COMMENT(this); // The trap handler uses the current pc to create a landing, so that it can // determine if a trap occurred in Wasm code due to an OOB load. Make sure the @@ -1262,8 +1270,8 @@ void SharedTurboAssembler::S128Load32Splat(XMMRegister dst, Operand src) { } } -void SharedTurboAssembler::S128Store64Lane(Operand dst, XMMRegister src, - uint8_t laneidx) { +void SharedMacroAssemblerBase::S128Store64Lane(Operand dst, XMMRegister src, + uint8_t laneidx) { ASM_CODE_COMMENT(this); if (laneidx == 0) { Movlps(dst, src); @@ -1342,27 +1350,27 @@ void SharedTurboAssembler::S128Store64Lane(Operand dst, XMMRegister src, sub##ps_or_pd(dst, tmp); \ } -void SharedTurboAssembler::F32x4Qfma(XMMRegister dst, XMMRegister src1, - XMMRegister src2, XMMRegister src3, - XMMRegister tmp) { +void SharedMacroAssemblerBase::F32x4Qfma(XMMRegister dst, XMMRegister src1, + XMMRegister src2, XMMRegister src3, + XMMRegister tmp) { QFMA(ps) } -void SharedTurboAssembler::F32x4Qfms(XMMRegister dst, XMMRegister src1, - XMMRegister src2, XMMRegister src3, - XMMRegister tmp) { +void SharedMacroAssemblerBase::F32x4Qfms(XMMRegister dst, XMMRegister src1, + XMMRegister src2, XMMRegister src3, + XMMRegister tmp) { QFMS(ps) } -void SharedTurboAssembler::F64x2Qfma(XMMRegister dst, XMMRegister src1, - XMMRegister src2, XMMRegister src3, - XMMRegister tmp) { +void SharedMacroAssemblerBase::F64x2Qfma(XMMRegister dst, XMMRegister src1, + XMMRegister src2, XMMRegister src3, + XMMRegister tmp) { QFMA(pd); } -void SharedTurboAssembler::F64x2Qfms(XMMRegister dst, XMMRegister src1, - XMMRegister src2, XMMRegister src3, - XMMRegister tmp) { +void SharedMacroAssemblerBase::F64x2Qfms(XMMRegister dst, XMMRegister src1, + XMMRegister src2, XMMRegister src3, + XMMRegister tmp) { QFMS(pd); } diff --git a/src/codegen/shared-ia32-x64/macro-assembler-shared-ia32-x64.h b/src/codegen/shared-ia32-x64/macro-assembler-shared-ia32-x64.h index 66106f90fb..ae97572783 100644 --- a/src/codegen/shared-ia32-x64/macro-assembler-shared-ia32-x64.h +++ b/src/codegen/shared-ia32-x64/macro-assembler-shared-ia32-x64.h @@ -8,7 +8,7 @@ #include "src/base/macros.h" #include "src/codegen/cpu-features.h" #include "src/codegen/external-reference.h" -#include "src/codegen/turbo-assembler.h" +#include "src/codegen/macro-assembler-base.h" #if V8_TARGET_ARCH_IA32 #include "src/codegen/ia32/register-ia32.h" @@ -30,15 +30,15 @@ constexpr int kStackSavedSavedFPSize = 2 * kDoubleSize; constexpr int kStackSavedSavedFPSize = kDoubleSize; #endif // V8_ENABLE_WEBASSEMBLY -// Base class for SharedTurboAssemblerBase. This class contains macro-assembler +// Base class for SharedMacroAssembler. This class contains macro-assembler // functions that can be shared across ia32 and x64 without any template // machinery, i.e.
does not require the CRTP pattern that -SharedTurboAssemblerBase exposes. This allows us to keep the bulk of +SharedMacroAssembler exposes. This allows us to keep the bulk of // definition inside a separate source file, rather than putting everything // inside this header. -class V8_EXPORT_PRIVATE SharedTurboAssembler : public TurboAssemblerBase { +class V8_EXPORT_PRIVATE SharedMacroAssemblerBase : public MacroAssemblerBase { public: - using TurboAssemblerBase::TurboAssemblerBase; + using MacroAssemblerBase::MacroAssemblerBase; void Move(Register dst, uint32_t src); // Move if registers are not identical. @@ -530,41 +530,41 @@ class V8_EXPORT_PRIVATE SharedTurboAssembler : public TurboAssemblerBase { void I16x8SplatPreAvx2(XMMRegister dst, Op src); }; -// Common base class template shared by ia32 and x64 TurboAssembler. This uses +// Common base class template shared by ia32 and x64 MacroAssembler. This uses // the Curiously Recurring Template Pattern (CRTP), where Impl is the actual -// class (subclass of SharedTurboAssemblerBase instantiated with the actual +// class (subclass of SharedMacroAssembler instantiated with the actual // class). This allows static polymorphism, where member functions can be moved -// into SharedTurboAssembler, and we can also call into member functions -// defined in ia32 or x64 specific TurboAssembler from within this template +// into SharedMacroAssemblerBase, and we can also call into member functions +// defined in ia32 or x64 specific MacroAssembler from within this template // class, via Impl. // // Note: all member functions must be defined in this header file so that the // compiler can generate code for the function definitions. See // https://isocpp.org/wiki/faq/templates#templates-defn-vs-decl for rationale. -// If a function does not need polymorphism, move it into SharedTurboAssembler, -// and define it outside of this header. +// If a function does not need polymorphism, move it into +// SharedMacroAssemblerBase, and define it outside of this header.
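// Editor's note: for readers unfamiliar with CRTP, a minimal sketch of the
// dispatch shape described above; the names below are hypothetical, not
// part of this patch:
//
//   template <typename Impl>
//   class SharedOps {
//    public:
//     void MoveTwice() {
//       // Resolved statically into the derived class; no vtable involved.
//       impl()->Move();
//       impl()->Move();
//     }
//    private:
//     Impl* impl() { return static_cast<Impl*>(this); }
//   };
//
//   class X64Ops : public SharedOps<X64Ops> {
//    public:
//     void Move() { /* x64-specific encoding */ }
//   };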
template -class V8_EXPORT_PRIVATE SharedTurboAssemblerBase : public SharedTurboAssembler { - using SharedTurboAssembler::SharedTurboAssembler; +class V8_EXPORT_PRIVATE SharedMacroAssembler : public SharedMacroAssemblerBase { + using SharedMacroAssemblerBase::SharedMacroAssemblerBase; public: void Abspd(XMMRegister dst, XMMRegister src, Register tmp) { - FloatUnop(dst, src, tmp, &SharedTurboAssembler::Andps, + FloatUnop(dst, src, tmp, &SharedMacroAssemblerBase::Andps, ExternalReference::address_of_double_abs_constant()); } void Absps(XMMRegister dst, XMMRegister src, Register tmp) { - FloatUnop(dst, src, tmp, &SharedTurboAssembler::Andps, + FloatUnop(dst, src, tmp, &SharedMacroAssemblerBase::Andps, ExternalReference::address_of_float_abs_constant()); } void Negpd(XMMRegister dst, XMMRegister src, Register tmp) { - FloatUnop(dst, src, tmp, &SharedTurboAssembler::Xorps, + FloatUnop(dst, src, tmp, &SharedMacroAssemblerBase::Xorps, ExternalReference::address_of_double_neg_constant()); } void Negps(XMMRegister dst, XMMRegister src, Register tmp) { - FloatUnop(dst, src, tmp, &SharedTurboAssembler::Xorps, + FloatUnop(dst, src, tmp, &SharedMacroAssemblerBase::Xorps, ExternalReference::address_of_float_neg_constant()); } #undef FLOAT_UNOP @@ -975,15 +975,16 @@ class V8_EXPORT_PRIVATE SharedTurboAssemblerBase : public SharedTurboAssembler { return impl()->ExternalReferenceAsOperand(reference, scratch); } - using FloatInstruction = void (SharedTurboAssembler::*)(XMMRegister, - XMMRegister, Operand); + using FloatInstruction = void (SharedMacroAssemblerBase::*)(XMMRegister, + XMMRegister, + Operand); void FloatUnop(XMMRegister dst, XMMRegister src, Register tmp, FloatInstruction op, ExternalReference ext) { if (!CpuFeatures::IsSupported(AVX) && (dst != src)) { movaps(dst, src); src = dst; } - SharedTurboAssembler* assm = this; + SharedMacroAssemblerBase* assm = this; (assm->*op)(dst, src, ExternalReferenceAsOperand(ext, tmp)); } }; diff --git a/src/codegen/x64/macro-assembler-x64.cc b/src/codegen/x64/macro-assembler-x64.cc index cab8b87d8d..b8d2120c84 100644 --- a/src/codegen/x64/macro-assembler-x64.cc +++ b/src/codegen/x64/macro-assembler-x64.cc @@ -81,7 +81,7 @@ void MacroAssembler::Store(ExternalReference destination, Register source) { } } -void TurboAssembler::LoadFromConstantsTable(Register destination, +void MacroAssembler::LoadFromConstantsTable(Register destination, int constant_index) { DCHECK(RootsTable::IsImmortalImmovable(RootIndex::kBuiltinsConstantsTable)); LoadRoot(destination, RootIndex::kBuiltinsConstantsTable); @@ -90,7 +90,7 @@ void TurboAssembler::LoadFromConstantsTable(Register destination, FieldOperand(destination, FixedArray::OffsetOfElementAt(constant_index))); } -void TurboAssembler::LoadRootRegisterOffset(Register destination, +void MacroAssembler::LoadRootRegisterOffset(Register destination, intptr_t offset) { DCHECK(is_int32(offset)); if (offset == 0) { @@ -100,11 +100,11 @@ void TurboAssembler::LoadRootRegisterOffset(Register destination, } } -void TurboAssembler::LoadRootRelative(Register destination, int32_t offset) { +void MacroAssembler::LoadRootRelative(Register destination, int32_t offset) { movq(destination, Operand(kRootRegister, offset)); } -void TurboAssembler::LoadAddress(Register destination, +void MacroAssembler::LoadAddress(Register destination, ExternalReference source) { if (root_array_available_ && options().enable_root_relative_access) { intptr_t delta = RootRegisterOffsetForExternalReference(isolate(), source); @@ -124,7 +124,7 @@ void 
TurboAssembler::LoadAddress(Register destination, Move(destination, source); } -Operand TurboAssembler::ExternalReferenceAsOperand(ExternalReference reference, +Operand MacroAssembler::ExternalReferenceAsOperand(ExternalReference reference, Register scratch) { if (root_array_available_ && options().enable_root_relative_access) { int64_t delta = @@ -158,12 +158,12 @@ void MacroAssembler::PushAddress(ExternalReference source) { Push(kScratchRegister); } -Operand TurboAssembler::RootAsOperand(RootIndex index) { +Operand MacroAssembler::RootAsOperand(RootIndex index) { DCHECK(root_array_available()); return Operand(kRootRegister, RootRegisterOffsetForRootIndex(index)); } -void TurboAssembler::LoadTaggedRoot(Register destination, RootIndex index) { +void MacroAssembler::LoadTaggedRoot(Register destination, RootIndex index) { if (V8_STATIC_ROOTS_BOOL && RootsTable::IsReadOnly(index)) { mov_tagged(destination, Immediate(ReadOnlyRootPtr(index))); return; @@ -172,7 +172,7 @@ void TurboAssembler::LoadTaggedRoot(Register destination, RootIndex index) { movq(destination, RootAsOperand(index)); } -void TurboAssembler::LoadRoot(Register destination, RootIndex index) { +void MacroAssembler::LoadRoot(Register destination, RootIndex index) { if (V8_STATIC_ROOTS_BOOL && RootsTable::IsReadOnly(index)) { DecompressTaggedPointer(destination, ReadOnlyRootPtr(index)); return; @@ -186,7 +186,7 @@ void MacroAssembler::PushRoot(RootIndex index) { Push(RootAsOperand(index)); } -void TurboAssembler::CompareRoot(Register with, RootIndex index) { +void MacroAssembler::CompareRoot(Register with, RootIndex index) { if (V8_STATIC_ROOTS_BOOL && RootsTable::IsReadOnly(index)) { cmp_tagged(with, Immediate(ReadOnlyRootPtr(index))); return; @@ -201,7 +201,7 @@ void TurboAssembler::CompareRoot(Register with, RootIndex index) { } } -void TurboAssembler::CompareRoot(Operand with, RootIndex index) { +void MacroAssembler::CompareRoot(Operand with, RootIndex index) { if (V8_STATIC_ROOTS_BOOL && RootsTable::IsReadOnly(index)) { cmp_tagged(with, Immediate(ReadOnlyRootPtr(index))); return; @@ -219,7 +219,7 @@ void TurboAssembler::CompareRoot(Operand with, RootIndex index) { } } -void TurboAssembler::LoadMap(Register destination, Register object) { +void MacroAssembler::LoadMap(Register destination, Register object) { LoadTaggedPointerField(destination, FieldOperand(object, HeapObject::kMapOffset)); #ifdef V8_MAP_PACKING @@ -227,7 +227,7 @@ void TurboAssembler::LoadMap(Register destination, Register object) { #endif } -void TurboAssembler::LoadTaggedPointerField(Register destination, +void MacroAssembler::LoadTaggedPointerField(Register destination, Operand field_operand) { if (COMPRESS_POINTERS_BOOL) { DecompressTaggedPointer(destination, field_operand); @@ -236,7 +236,7 @@ void TurboAssembler::LoadTaggedPointerField(Register destination, } } -void TurboAssembler::LoadTaggedPointerField(TaggedRegister destination, +void MacroAssembler::LoadTaggedPointerField(TaggedRegister destination, Operand field_operand) { if (COMPRESS_POINTERS_BOOL) { movl(destination.reg(), field_operand); @@ -246,7 +246,7 @@ void TurboAssembler::LoadTaggedPointerField(TaggedRegister destination, } #ifdef V8_MAP_PACKING -void TurboAssembler::UnpackMapWord(Register r) { +void MacroAssembler::UnpackMapWord(Register r) { // Clear the top two bytes (which may include metadata). Must be in sync with // MapWord::Unpack, and vice versa. 
shlq(r, Immediate(16)); @@ -255,7 +255,7 @@ void TurboAssembler::UnpackMapWord(Register r) { } #endif -void TurboAssembler::LoadTaggedSignedField(Register destination, +void MacroAssembler::LoadTaggedSignedField(Register destination, Operand field_operand) { if (COMPRESS_POINTERS_BOOL) { DecompressTaggedSigned(destination, field_operand); @@ -264,7 +264,7 @@ void TurboAssembler::LoadTaggedSignedField(Register destination, } } -void TurboAssembler::LoadAnyTaggedField(Register destination, +void MacroAssembler::LoadAnyTaggedField(Register destination, Operand field_operand) { if (COMPRESS_POINTERS_BOOL) { DecompressAnyTagged(destination, field_operand); @@ -273,7 +273,7 @@ void TurboAssembler::LoadAnyTaggedField(Register destination, } } -void TurboAssembler::LoadAnyTaggedField(TaggedRegister destination, +void MacroAssembler::LoadAnyTaggedField(TaggedRegister destination, Operand field_operand) { if (COMPRESS_POINTERS_BOOL) { movl(destination.reg(), field_operand); @@ -282,7 +282,7 @@ void TurboAssembler::LoadAnyTaggedField(TaggedRegister destination, } } -void TurboAssembler::PushTaggedPointerField(Operand field_operand, +void MacroAssembler::PushTaggedPointerField(Operand field_operand, Register scratch) { if (COMPRESS_POINTERS_BOOL) { DCHECK(!field_operand.AddressUsesRegister(scratch)); @@ -293,7 +293,7 @@ void TurboAssembler::PushTaggedPointerField(Operand field_operand, } } -void TurboAssembler::PushTaggedAnyField(Operand field_operand, +void MacroAssembler::PushTaggedAnyField(Operand field_operand, Register scratch) { if (COMPRESS_POINTERS_BOOL) { DCHECK(!field_operand.AddressUsesRegister(scratch)); @@ -304,15 +304,15 @@ void TurboAssembler::PushTaggedAnyField(Operand field_operand, } } -void TurboAssembler::SmiUntagField(Register dst, Operand src) { +void MacroAssembler::SmiUntagField(Register dst, Operand src) { SmiUntag(dst, src); } -void TurboAssembler::SmiUntagFieldUnsigned(Register dst, Operand src) { +void MacroAssembler::SmiUntagFieldUnsigned(Register dst, Operand src) { SmiUntagUnsigned(dst, src); } -void TurboAssembler::StoreTaggedField(Operand dst_field_operand, +void MacroAssembler::StoreTaggedField(Operand dst_field_operand, Immediate value) { if (COMPRESS_POINTERS_BOOL) { movl(dst_field_operand, value); @@ -321,7 +321,7 @@ void TurboAssembler::StoreTaggedField(Operand dst_field_operand, } } -void TurboAssembler::StoreTaggedField(Operand dst_field_operand, +void MacroAssembler::StoreTaggedField(Operand dst_field_operand, Register value) { if (COMPRESS_POINTERS_BOOL) { movl(dst_field_operand, value); @@ -330,7 +330,7 @@ void TurboAssembler::StoreTaggedField(Operand dst_field_operand, } } -void TurboAssembler::StoreTaggedSignedField(Operand dst_field_operand, +void MacroAssembler::StoreTaggedSignedField(Operand dst_field_operand, Smi value) { if (SmiValuesAre32Bits()) { Move(kScratchRegister, value); @@ -340,7 +340,7 @@ void TurboAssembler::StoreTaggedSignedField(Operand dst_field_operand, } } -void TurboAssembler::AtomicStoreTaggedField(Operand dst_field_operand, +void MacroAssembler::AtomicStoreTaggedField(Operand dst_field_operand, Register value) { if (COMPRESS_POINTERS_BOOL) { movl(kScratchRegister, value); @@ -351,34 +351,34 @@ void TurboAssembler::AtomicStoreTaggedField(Operand dst_field_operand, } } -void TurboAssembler::DecompressTaggedSigned(Register destination, +void MacroAssembler::DecompressTaggedSigned(Register destination, Operand field_operand) { ASM_CODE_COMMENT(this); movl(destination, field_operand); } -void 
TurboAssembler::DecompressTaggedPointer(Register destination, +void MacroAssembler::DecompressTaggedPointer(Register destination, Operand field_operand) { ASM_CODE_COMMENT(this); movl(destination, field_operand); addq(destination, kPtrComprCageBaseRegister); } -void TurboAssembler::DecompressTaggedPointer(Register destination, +void MacroAssembler::DecompressTaggedPointer(Register destination, Register source) { ASM_CODE_COMMENT(this); movl(destination, source); addq(destination, kPtrComprCageBaseRegister); } -void TurboAssembler::DecompressAnyTagged(Register destination, +void MacroAssembler::DecompressAnyTagged(Register destination, Operand field_operand) { ASM_CODE_COMMENT(this); movl(destination, field_operand); addq(destination, kPtrComprCageBaseRegister); } -void TurboAssembler::DecompressTaggedPointer(Register destination, +void MacroAssembler::DecompressTaggedPointer(Register destination, Tagged_t immediate) { ASM_CODE_COMMENT(this); leaq(destination, @@ -427,7 +427,7 @@ void MacroAssembler::RecordWriteField(Register object, int offset, } } -void TurboAssembler::EncodeSandboxedPointer(Register value) { +void MacroAssembler::EncodeSandboxedPointer(Register value) { ASM_CODE_COMMENT(this); #ifdef V8_ENABLE_SANDBOX subq(value, kPtrComprCageBaseRegister); @@ -437,7 +437,7 @@ void TurboAssembler::EncodeSandboxedPointer(Register value) { #endif } -void TurboAssembler::DecodeSandboxedPointer(Register value) { +void MacroAssembler::DecodeSandboxedPointer(Register value) { ASM_CODE_COMMENT(this); #ifdef V8_ENABLE_SANDBOX shrq(value, Immediate(kSandboxedPointerShift)); @@ -447,14 +447,14 @@ void TurboAssembler::DecodeSandboxedPointer(Register value) { #endif } -void TurboAssembler::LoadSandboxedPointerField(Register destination, +void MacroAssembler::LoadSandboxedPointerField(Register destination, Operand field_operand) { ASM_CODE_COMMENT(this); movq(destination, field_operand); DecodeSandboxedPointer(destination); } -void TurboAssembler::StoreSandboxedPointerField(Operand dst_field_operand, +void MacroAssembler::StoreSandboxedPointerField(Operand dst_field_operand, Register value) { ASM_CODE_COMMENT(this); DCHECK(!AreAliased(value, kScratchRegister)); @@ -464,7 +464,7 @@ void TurboAssembler::StoreSandboxedPointerField(Operand dst_field_operand, movq(dst_field_operand, kScratchRegister); } -void TurboAssembler::LoadExternalPointerField( +void MacroAssembler::LoadExternalPointerField( Register destination, Operand field_operand, ExternalPointerTag tag, Register scratch, IsolateRootLocation isolateRootLocation) { DCHECK(!AreAliased(destination, scratch)); @@ -493,7 +493,7 @@ void TurboAssembler::LoadExternalPointerField( #endif // V8_ENABLE_SANDBOX } -void TurboAssembler::CallEphemeronKeyBarrier(Register object, +void MacroAssembler::CallEphemeronKeyBarrier(Register object, Register slot_address, SaveFPRegsMode fp_mode) { ASM_CODE_COMMENT(this); @@ -508,12 +508,12 @@ void TurboAssembler::CallEphemeronKeyBarrier(Register object, MovePair(slot_address_parameter, slot_address, object_parameter, object); Call(isolate()->builtins()->code_handle( - Builtins::GetEphemeronKeyBarrierStub(fp_mode)), - RelocInfo::CODE_TARGET); + Builtins::GetEphemeronKeyBarrierStub(fp_mode)), + RelocInfo::CODE_TARGET); PopAll(registers); } -void TurboAssembler::CallRecordWriteStubSaveRegisters(Register object, +void MacroAssembler::CallRecordWriteStubSaveRegisters(Register object, Register slot_address, SaveFPRegsMode fp_mode, StubCallMode mode) { @@ -531,7 +531,7 @@ void 
TurboAssembler::CallRecordWriteStubSaveRegisters(Register object, PopAll(registers); } -void TurboAssembler::CallRecordWriteStub(Register object, Register slot_address, +void MacroAssembler::CallRecordWriteStub(Register object, Register slot_address, SaveFPRegsMode fp_mode, StubCallMode mode) { ASM_CODE_COMMENT(this); @@ -554,7 +554,7 @@ void TurboAssembler::CallRecordWriteStub(Register object, Register slot_address, } #ifdef V8_IS_TSAN -void TurboAssembler::CallTSANStoreStub(Register address, Register value, +void MacroAssembler::CallTSANStoreStub(Register address, Register value, SaveFPRegsMode fp_mode, int size, StubCallMode mode, std::memory_order order) { @@ -600,7 +600,7 @@ void TurboAssembler::CallTSANStoreStub(Register address, Register value, PopAll(registers); } -void TurboAssembler::CallTSANRelaxedLoadStub(Register address, +void MacroAssembler::CallTSANRelaxedLoadStub(Register address, SaveFPRegsMode fp_mode, int size, StubCallMode mode) { TSANLoadDescriptor descriptor; @@ -694,7 +694,7 @@ void MacroAssembler::RecordWrite(Register object, Register slot_address, } } -void TurboAssembler::Check(Condition cc, AbortReason reason) { +void MacroAssembler::Check(Condition cc, AbortReason reason) { Label L; j(cc, &L, Label::kNear); Abort(reason); @@ -702,7 +702,7 @@ void TurboAssembler::Check(Condition cc, AbortReason reason) { bind(&L); } -void TurboAssembler::CheckStackAlignment() { +void MacroAssembler::CheckStackAlignment() { int frame_alignment = base::OS::ActivationFrameAlignment(); int frame_alignment_mask = frame_alignment - 1; if (frame_alignment > kSystemPointerSize) { @@ -717,7 +717,7 @@ void TurboAssembler::CheckStackAlignment() { } } -void TurboAssembler::Abort(AbortReason reason) { +void MacroAssembler::Abort(AbortReason reason) { ASM_CODE_COMMENT(this); if (v8_flags.code_comments) { const char* msg = GetAbortReason(reason); @@ -959,7 +959,7 @@ void MacroAssembler::OptimizeCodeOrTailCallOptimizedCodeSlot( jump_mode); } -int TurboAssembler::RequiredStackSizeForCallerSaved(SaveFPRegsMode fp_mode, +int MacroAssembler::RequiredStackSizeForCallerSaved(SaveFPRegsMode fp_mode, Register exclusion) const { int bytes = 0; RegList saved_regs = kCallerSaved - exclusion; @@ -973,7 +973,7 @@ int TurboAssembler::RequiredStackSizeForCallerSaved(SaveFPRegsMode fp_mode, return bytes; } -int TurboAssembler::PushCallerSaved(SaveFPRegsMode fp_mode, +int MacroAssembler::PushCallerSaved(SaveFPRegsMode fp_mode, Register exclusion) { ASM_CODE_COMMENT(this); int bytes = 0; @@ -985,7 +985,7 @@ int TurboAssembler::PushCallerSaved(SaveFPRegsMode fp_mode, return bytes; } -int TurboAssembler::PopCallerSaved(SaveFPRegsMode fp_mode, Register exclusion) { +int MacroAssembler::PopCallerSaved(SaveFPRegsMode fp_mode, Register exclusion) { ASM_CODE_COMMENT(this); int bytes = 0; if (fp_mode == SaveFPRegsMode::kSave) { @@ -996,7 +996,7 @@ int TurboAssembler::PopCallerSaved(SaveFPRegsMode fp_mode, Register exclusion) { return bytes; } -int TurboAssembler::PushAll(RegList registers) { +int MacroAssembler::PushAll(RegList registers) { int bytes = 0; for (Register reg : registers) { pushq(reg); @@ -1005,7 +1005,7 @@ int TurboAssembler::PushAll(RegList registers) { return bytes; } -int TurboAssembler::PopAll(RegList registers) { +int MacroAssembler::PopAll(RegList registers) { int bytes = 0; for (Register reg : base::Reversed(registers)) { popq(reg); @@ -1014,7 +1014,7 @@ int TurboAssembler::PopAll(RegList registers) { return bytes; } -int TurboAssembler::PushAll(DoubleRegList registers, int stack_slot_size) { +int 
MacroAssembler::PushAll(DoubleRegList registers, int stack_slot_size) { if (registers.is_empty()) return 0; const int delta = stack_slot_size * registers.Count(); AllocateStackSpace(delta); @@ -1032,7 +1032,7 @@ int TurboAssembler::PushAll(DoubleRegList registers, int stack_slot_size) { return delta; } -int TurboAssembler::PopAll(DoubleRegList registers, int stack_slot_size) { +int MacroAssembler::PopAll(DoubleRegList registers, int stack_slot_size) { if (registers.is_empty()) return 0; int slot = 0; for (XMMRegister reg : registers) { @@ -1049,7 +1049,7 @@ int TurboAssembler::PopAll(DoubleRegList registers, int stack_slot_size) { return slot; } -void TurboAssembler::Movq(XMMRegister dst, Register src) { +void MacroAssembler::Movq(XMMRegister dst, Register src) { if (CpuFeatures::IsSupported(AVX)) { CpuFeatureScope avx_scope(this, AVX); vmovq(dst, src); @@ -1058,7 +1058,7 @@ void TurboAssembler::Movq(XMMRegister dst, Register src) { } } -void TurboAssembler::Movq(Register dst, XMMRegister src) { +void MacroAssembler::Movq(Register dst, XMMRegister src) { if (CpuFeatures::IsSupported(AVX)) { CpuFeatureScope avx_scope(this, AVX); vmovq(dst, src); @@ -1067,7 +1067,7 @@ void TurboAssembler::Movq(Register dst, XMMRegister src) { } } -void TurboAssembler::Pextrq(Register dst, XMMRegister src, int8_t imm8) { +void MacroAssembler::Pextrq(Register dst, XMMRegister src, int8_t imm8) { if (CpuFeatures::IsSupported(AVX)) { CpuFeatureScope avx_scope(this, AVX); vpextrq(dst, src, imm8); @@ -1077,7 +1077,7 @@ void TurboAssembler::Pextrq(Register dst, XMMRegister src, int8_t imm8) { } } -void TurboAssembler::Cvtss2sd(XMMRegister dst, XMMRegister src) { +void MacroAssembler::Cvtss2sd(XMMRegister dst, XMMRegister src) { if (CpuFeatures::IsSupported(AVX)) { CpuFeatureScope scope(this, AVX); vcvtss2sd(dst, src, src); @@ -1086,7 +1086,7 @@ void TurboAssembler::Cvtss2sd(XMMRegister dst, XMMRegister src) { } } -void TurboAssembler::Cvtss2sd(XMMRegister dst, Operand src) { +void MacroAssembler::Cvtss2sd(XMMRegister dst, Operand src) { if (CpuFeatures::IsSupported(AVX)) { CpuFeatureScope scope(this, AVX); vcvtss2sd(dst, dst, src); @@ -1095,7 +1095,7 @@ void TurboAssembler::Cvtss2sd(XMMRegister dst, Operand src) { } } -void TurboAssembler::Cvtsd2ss(XMMRegister dst, XMMRegister src) { +void MacroAssembler::Cvtsd2ss(XMMRegister dst, XMMRegister src) { if (CpuFeatures::IsSupported(AVX)) { CpuFeatureScope scope(this, AVX); vcvtsd2ss(dst, src, src); @@ -1104,7 +1104,7 @@ void TurboAssembler::Cvtsd2ss(XMMRegister dst, XMMRegister src) { } } -void TurboAssembler::Cvtsd2ss(XMMRegister dst, Operand src) { +void MacroAssembler::Cvtsd2ss(XMMRegister dst, Operand src) { if (CpuFeatures::IsSupported(AVX)) { CpuFeatureScope scope(this, AVX); vcvtsd2ss(dst, dst, src); @@ -1113,7 +1113,7 @@ void TurboAssembler::Cvtsd2ss(XMMRegister dst, Operand src) { } } -void TurboAssembler::Cvtlsi2sd(XMMRegister dst, Register src) { +void MacroAssembler::Cvtlsi2sd(XMMRegister dst, Register src) { if (CpuFeatures::IsSupported(AVX)) { CpuFeatureScope scope(this, AVX); vcvtlsi2sd(dst, kScratchDoubleReg, src); @@ -1123,7 +1123,7 @@ void TurboAssembler::Cvtlsi2sd(XMMRegister dst, Register src) { } } -void TurboAssembler::Cvtlsi2sd(XMMRegister dst, Operand src) { +void MacroAssembler::Cvtlsi2sd(XMMRegister dst, Operand src) { if (CpuFeatures::IsSupported(AVX)) { CpuFeatureScope scope(this, AVX); vcvtlsi2sd(dst, kScratchDoubleReg, src); @@ -1133,7 +1133,7 @@ void TurboAssembler::Cvtlsi2sd(XMMRegister dst, Operand src) { } } -void 
TurboAssembler::Cvtlsi2ss(XMMRegister dst, Register src) { +void MacroAssembler::Cvtlsi2ss(XMMRegister dst, Register src) { if (CpuFeatures::IsSupported(AVX)) { CpuFeatureScope scope(this, AVX); vcvtlsi2ss(dst, kScratchDoubleReg, src); @@ -1143,7 +1143,7 @@ void TurboAssembler::Cvtlsi2ss(XMMRegister dst, Register src) { } } -void TurboAssembler::Cvtlsi2ss(XMMRegister dst, Operand src) { +void MacroAssembler::Cvtlsi2ss(XMMRegister dst, Operand src) { if (CpuFeatures::IsSupported(AVX)) { CpuFeatureScope scope(this, AVX); vcvtlsi2ss(dst, kScratchDoubleReg, src); @@ -1153,7 +1153,7 @@ void TurboAssembler::Cvtlsi2ss(XMMRegister dst, Operand src) { } } -void TurboAssembler::Cvtqsi2ss(XMMRegister dst, Register src) { +void MacroAssembler::Cvtqsi2ss(XMMRegister dst, Register src) { if (CpuFeatures::IsSupported(AVX)) { CpuFeatureScope scope(this, AVX); vcvtqsi2ss(dst, kScratchDoubleReg, src); @@ -1163,7 +1163,7 @@ void TurboAssembler::Cvtqsi2ss(XMMRegister dst, Register src) { } } -void TurboAssembler::Cvtqsi2ss(XMMRegister dst, Operand src) { +void MacroAssembler::Cvtqsi2ss(XMMRegister dst, Operand src) { if (CpuFeatures::IsSupported(AVX)) { CpuFeatureScope scope(this, AVX); vcvtqsi2ss(dst, kScratchDoubleReg, src); @@ -1173,7 +1173,7 @@ void TurboAssembler::Cvtqsi2ss(XMMRegister dst, Operand src) { } } -void TurboAssembler::Cvtqsi2sd(XMMRegister dst, Register src) { +void MacroAssembler::Cvtqsi2sd(XMMRegister dst, Register src) { if (CpuFeatures::IsSupported(AVX)) { CpuFeatureScope scope(this, AVX); vcvtqsi2sd(dst, kScratchDoubleReg, src); @@ -1183,7 +1183,7 @@ void TurboAssembler::Cvtqsi2sd(XMMRegister dst, Register src) { } } -void TurboAssembler::Cvtqsi2sd(XMMRegister dst, Operand src) { +void MacroAssembler::Cvtqsi2sd(XMMRegister dst, Operand src) { if (CpuFeatures::IsSupported(AVX)) { CpuFeatureScope scope(this, AVX); vcvtqsi2sd(dst, kScratchDoubleReg, src); @@ -1193,31 +1193,31 @@ void TurboAssembler::Cvtqsi2sd(XMMRegister dst, Operand src) { } } -void TurboAssembler::Cvtlui2ss(XMMRegister dst, Register src) { +void MacroAssembler::Cvtlui2ss(XMMRegister dst, Register src) { // Zero-extend the 32 bit value to 64 bit. movl(kScratchRegister, src); Cvtqsi2ss(dst, kScratchRegister); } -void TurboAssembler::Cvtlui2ss(XMMRegister dst, Operand src) { +void MacroAssembler::Cvtlui2ss(XMMRegister dst, Operand src) { // Zero-extend the 32 bit value to 64 bit. movl(kScratchRegister, src); Cvtqsi2ss(dst, kScratchRegister); } -void TurboAssembler::Cvtlui2sd(XMMRegister dst, Register src) { +void MacroAssembler::Cvtlui2sd(XMMRegister dst, Register src) { // Zero-extend the 32 bit value to 64 bit. movl(kScratchRegister, src); Cvtqsi2sd(dst, kScratchRegister); } -void TurboAssembler::Cvtlui2sd(XMMRegister dst, Operand src) { +void MacroAssembler::Cvtlui2sd(XMMRegister dst, Operand src) { // Zero-extend the 32 bit value to 64 bit. 
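// (Editor's note: this works because a uint32 always falls in the
// non-negative half of the int64 range, so the signed 64-bit convert is
// exact. movl implicitly zeroes the upper 32 bits of the destination on
// x86-64, and SSE2/AVX offer no unsigned-integer-to-double instruction;
// AVX-512 is the first to add one.)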
movl(kScratchRegister, src); Cvtqsi2sd(dst, kScratchRegister); } -void TurboAssembler::Cvtqui2ss(XMMRegister dst, Register src) { +void MacroAssembler::Cvtqui2ss(XMMRegister dst, Register src) { Label done; Cvtqsi2ss(dst, src); testq(src, src); @@ -1236,12 +1236,12 @@ void TurboAssembler::Cvtqui2ss(XMMRegister dst, Register src) { bind(&done); } -void TurboAssembler::Cvtqui2ss(XMMRegister dst, Operand src) { +void MacroAssembler::Cvtqui2ss(XMMRegister dst, Operand src) { movq(kScratchRegister, src); Cvtqui2ss(dst, kScratchRegister); } -void TurboAssembler::Cvtqui2sd(XMMRegister dst, Register src) { +void MacroAssembler::Cvtqui2sd(XMMRegister dst, Register src) { Label done; Cvtqsi2sd(dst, src); testq(src, src); @@ -1260,12 +1260,12 @@ void TurboAssembler::Cvtqui2sd(XMMRegister dst, Register src) { bind(&done); } -void TurboAssembler::Cvtqui2sd(XMMRegister dst, Operand src) { +void MacroAssembler::Cvtqui2sd(XMMRegister dst, Operand src) { movq(kScratchRegister, src); Cvtqui2sd(dst, kScratchRegister); } -void TurboAssembler::Cvttss2si(Register dst, XMMRegister src) { +void MacroAssembler::Cvttss2si(Register dst, XMMRegister src) { if (CpuFeatures::IsSupported(AVX)) { CpuFeatureScope scope(this, AVX); vcvttss2si(dst, src); @@ -1274,7 +1274,7 @@ void TurboAssembler::Cvttss2si(Register dst, XMMRegister src) { } } -void TurboAssembler::Cvttss2si(Register dst, Operand src) { +void MacroAssembler::Cvttss2si(Register dst, Operand src) { if (CpuFeatures::IsSupported(AVX)) { CpuFeatureScope scope(this, AVX); vcvttss2si(dst, src); @@ -1283,7 +1283,7 @@ void TurboAssembler::Cvttss2si(Register dst, Operand src) { } } -void TurboAssembler::Cvttsd2si(Register dst, XMMRegister src) { +void MacroAssembler::Cvttsd2si(Register dst, XMMRegister src) { if (CpuFeatures::IsSupported(AVX)) { CpuFeatureScope scope(this, AVX); vcvttsd2si(dst, src); @@ -1292,7 +1292,7 @@ void TurboAssembler::Cvttsd2si(Register dst, XMMRegister src) { } } -void TurboAssembler::Cvttsd2si(Register dst, Operand src) { +void MacroAssembler::Cvttsd2si(Register dst, Operand src) { if (CpuFeatures::IsSupported(AVX)) { CpuFeatureScope scope(this, AVX); vcvttsd2si(dst, src); @@ -1301,7 +1301,7 @@ void TurboAssembler::Cvttsd2si(Register dst, Operand src) { } } -void TurboAssembler::Cvttss2siq(Register dst, XMMRegister src) { +void MacroAssembler::Cvttss2siq(Register dst, XMMRegister src) { if (CpuFeatures::IsSupported(AVX)) { CpuFeatureScope scope(this, AVX); vcvttss2siq(dst, src); @@ -1310,7 +1310,7 @@ void TurboAssembler::Cvttss2siq(Register dst, XMMRegister src) { } } -void TurboAssembler::Cvttss2siq(Register dst, Operand src) { +void MacroAssembler::Cvttss2siq(Register dst, Operand src) { if (CpuFeatures::IsSupported(AVX)) { CpuFeatureScope scope(this, AVX); vcvttss2siq(dst, src); @@ -1319,7 +1319,7 @@ void TurboAssembler::Cvttss2siq(Register dst, Operand src) { } } -void TurboAssembler::Cvttsd2siq(Register dst, XMMRegister src) { +void MacroAssembler::Cvttsd2siq(Register dst, XMMRegister src) { if (CpuFeatures::IsSupported(AVX)) { CpuFeatureScope scope(this, AVX); vcvttsd2siq(dst, src); @@ -1328,7 +1328,7 @@ void TurboAssembler::Cvttsd2siq(Register dst, XMMRegister src) { } } -void TurboAssembler::Cvttsd2siq(Register dst, Operand src) { +void MacroAssembler::Cvttsd2siq(Register dst, Operand src) { if (CpuFeatures::IsSupported(AVX)) { CpuFeatureScope scope(this, AVX); vcvttsd2siq(dst, src); @@ -1339,115 +1339,115 @@ void TurboAssembler::Cvttsd2siq(Register dst, Operand src) { namespace { template -void ConvertFloatToUint64(TurboAssembler* 
tasm, Register dst, +void ConvertFloatToUint64(MacroAssembler* masm, Register dst, OperandOrXMMRegister src, Label* fail) { Label success; // There does not exist a native float-to-uint instruction, so we have to use // a float-to-int, and postprocess the result. if (is_double) { - tasm->Cvttsd2siq(dst, src); + masm->Cvttsd2siq(dst, src); } else { - tasm->Cvttss2siq(dst, src); + masm->Cvttss2siq(dst, src); } // If the result of the conversion is positive, we are already done. - tasm->testq(dst, dst); - tasm->j(positive, &success); + masm->testq(dst, dst); + masm->j(positive, &success); // The result of the first conversion was negative, which means that the // input value was not within the positive int64 range. We subtract 2^63 // and convert it again to see if it is within the uint64 range. if (is_double) { - tasm->Move(kScratchDoubleReg, -9223372036854775808.0); - tasm->Addsd(kScratchDoubleReg, src); - tasm->Cvttsd2siq(dst, kScratchDoubleReg); + masm->Move(kScratchDoubleReg, -9223372036854775808.0); + masm->Addsd(kScratchDoubleReg, src); + masm->Cvttsd2siq(dst, kScratchDoubleReg); } else { - tasm->Move(kScratchDoubleReg, -9223372036854775808.0f); - tasm->Addss(kScratchDoubleReg, src); - tasm->Cvttss2siq(dst, kScratchDoubleReg); + masm->Move(kScratchDoubleReg, -9223372036854775808.0f); + masm->Addss(kScratchDoubleReg, src); + masm->Cvttss2siq(dst, kScratchDoubleReg); } - tasm->testq(dst, dst); + masm->testq(dst, dst); // The only possible negative value here is 0x8000000000000000, which is // used on x64 to indicate an integer overflow. - tasm->j(negative, fail ? fail : &success); + masm->j(negative, fail ? fail : &success); // The input value is within uint64 range and the second conversion worked // successfully, but we still have to undo the subtraction we did // earlier. - tasm->Move(kScratchRegister, 0x8000000000000000); - tasm->orq(dst, kScratchRegister); - tasm->bind(&success); + masm->Move(kScratchRegister, 0x8000000000000000); + masm->orq(dst, kScratchRegister); + masm->bind(&success); } template -void ConvertFloatToUint32(TurboAssembler* tasm, Register dst, +void ConvertFloatToUint32(MacroAssembler* masm, Register dst, OperandOrXMMRegister src, Label* fail) { Label success; // There does not exist a native float-to-uint instruction, so we have to use // a float-to-int, and postprocess the result. if (is_double) { - tasm->Cvttsd2si(dst, src); + masm->Cvttsd2si(dst, src); } else { - tasm->Cvttss2si(dst, src); + masm->Cvttss2si(dst, src); } // If the result of the conversion is positive, we are already done. - tasm->testl(dst, dst); - tasm->j(positive, &success); + masm->testl(dst, dst); + masm->j(positive, &success); // The result of the first conversion was negative, which means that the // input value was not within the positive int32 range. We subtract 2^31 // and convert it again to see if it is within the uint32 range. 
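// (A standalone C++ model of this fallback — an assumed sketch, not the V8
// API; Cvttsd2si stands for the truncating conversion, which yields
// INT32_MIN, the x64 overflow sentinel, for out-of-range input:
//   bool DoubleToUint32(double src, uint32_t* dst) {
//     int32_t r = Cvttsd2si(src);                     // signed conversion
//     if (r >= 0) { *dst = r; return true; }          // already in [0, 2^31)
//     r = Cvttsd2si(src - 2147483648.0);              // retry, shifted by 2^31
//     if (r < 0) return false;                        // overflow or negative input
//     *dst = static_cast<uint32_t>(r) | 0x80000000u;  // undo the shift
//     return true;
//   }
// ConvertFloatToUint64 above is the same dance with 2^63 in place of 2^31.)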
if (is_double) { - tasm->Move(kScratchDoubleReg, -2147483648.0); - tasm->Addsd(kScratchDoubleReg, src); - tasm->Cvttsd2si(dst, kScratchDoubleReg); + masm->Move(kScratchDoubleReg, -2147483648.0); + masm->Addsd(kScratchDoubleReg, src); + masm->Cvttsd2si(dst, kScratchDoubleReg); } else { - tasm->Move(kScratchDoubleReg, -2147483648.0f); - tasm->Addss(kScratchDoubleReg, src); - tasm->Cvttss2si(dst, kScratchDoubleReg); + masm->Move(kScratchDoubleReg, -2147483648.0f); + masm->Addss(kScratchDoubleReg, src); + masm->Cvttss2si(dst, kScratchDoubleReg); } - tasm->testl(dst, dst); + masm->testl(dst, dst); // The only possible negative value here is 0x80000000, which is // used on x64 to indicate an integer overflow. - tasm->j(negative, fail ? fail : &success); + masm->j(negative, fail ? fail : &success); // The input value is within uint32 range and the second conversion worked // successfully, but we still have to undo the subtraction we did // earlier. - tasm->Move(kScratchRegister, 0x80000000); - tasm->orl(dst, kScratchRegister); - tasm->bind(&success); + masm->Move(kScratchRegister, 0x80000000); + masm->orl(dst, kScratchRegister); + masm->bind(&success); } } // namespace -void TurboAssembler::Cvttsd2uiq(Register dst, Operand src, Label* fail) { +void MacroAssembler::Cvttsd2uiq(Register dst, Operand src, Label* fail) { ConvertFloatToUint64(this, dst, src, fail); } -void TurboAssembler::Cvttsd2uiq(Register dst, XMMRegister src, Label* fail) { +void MacroAssembler::Cvttsd2uiq(Register dst, XMMRegister src, Label* fail) { ConvertFloatToUint64(this, dst, src, fail); } -void TurboAssembler::Cvttsd2ui(Register dst, Operand src, Label* fail) { +void MacroAssembler::Cvttsd2ui(Register dst, Operand src, Label* fail) { ConvertFloatToUint32(this, dst, src, fail); } -void TurboAssembler::Cvttsd2ui(Register dst, XMMRegister src, Label* fail) { +void MacroAssembler::Cvttsd2ui(Register dst, XMMRegister src, Label* fail) { ConvertFloatToUint32(this, dst, src, fail); } -void TurboAssembler::Cvttss2uiq(Register dst, Operand src, Label* fail) { +void MacroAssembler::Cvttss2uiq(Register dst, Operand src, Label* fail) { ConvertFloatToUint64(this, dst, src, fail); } -void TurboAssembler::Cvttss2uiq(Register dst, XMMRegister src, Label* fail) { +void MacroAssembler::Cvttss2uiq(Register dst, XMMRegister src, Label* fail) { ConvertFloatToUint64(this, dst, src, fail); } -void TurboAssembler::Cvttss2ui(Register dst, Operand src, Label* fail) { +void MacroAssembler::Cvttss2ui(Register dst, Operand src, Label* fail) { ConvertFloatToUint32(this, dst, src, fail); } -void TurboAssembler::Cvttss2ui(Register dst, XMMRegister src, Label* fail) { +void MacroAssembler::Cvttss2ui(Register dst, XMMRegister src, Label* fail) { ConvertFloatToUint32(this, dst, src, fail); } -void TurboAssembler::Cmpeqss(XMMRegister dst, XMMRegister src) { +void MacroAssembler::Cmpeqss(XMMRegister dst, XMMRegister src) { if (CpuFeatures::IsSupported(AVX)) { CpuFeatureScope avx_scope(this, AVX); vcmpeqss(dst, src); @@ -1456,7 +1456,7 @@ void TurboAssembler::Cmpeqss(XMMRegister dst, XMMRegister src) { } } -void TurboAssembler::Cmpeqsd(XMMRegister dst, XMMRegister src) { +void MacroAssembler::Cmpeqsd(XMMRegister dst, XMMRegister src) { if (CpuFeatures::IsSupported(AVX)) { CpuFeatureScope avx_scope(this, AVX); vcmpeqsd(dst, src); @@ -1468,12 +1468,12 @@ void TurboAssembler::Cmpeqsd(XMMRegister dst, XMMRegister src) { // ---------------------------------------------------------------------------- // Smi tagging, untagging and tag detection. 
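// (All of the Smi helpers below rely on one encoding. A standalone model —
// an assumption-laden sketch, not the V8 API: kSmiTag == 0, a one-bit tag,
// and either a 32-bit payload in the upper word or, with pointer
// compression, a 31-bit payload shifted left by one:
//   uint64_t SmiTag64(int32_t v) {    // shlq(reg, kSmiShift), kSmiShift == 32
//     return static_cast<uint64_t>(static_cast<int64_t>(v)) << 32;
//   }
//   int32_t SmiUntag64(uint64_t s) {  // sarq: arithmetic shift sign-extends
//     return static_cast<int32_t>(static_cast<int64_t>(s) >> 32);
//   }
//   uint32_t SmiTag31(int32_t v) {    // addl(reg, reg) when compressed
//     return static_cast<uint32_t>(v) << 1;
//   }
//   bool IsSmi(uint64_t tagged) {     // testb(reg, Immediate(kSmiTagMask))
//     return (tagged & 1) == 0;
//   })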
-Register TurboAssembler::GetSmiConstant(Smi source) { +Register MacroAssembler::GetSmiConstant(Smi source) { Move(kScratchRegister, source); return kScratchRegister; } -void TurboAssembler::Cmp(Register dst, int32_t src) { +void MacroAssembler::Cmp(Register dst, int32_t src) { if (src == 0) { testl(dst, dst); } else { @@ -1481,7 +1481,7 @@ void TurboAssembler::Cmp(Register dst, int32_t src) { } } -void TurboAssembler::SmiTag(Register reg) { +void MacroAssembler::SmiTag(Register reg) { static_assert(kSmiTag == 0); DCHECK(SmiValuesAre32Bits() || SmiValuesAre31Bits()); if (COMPRESS_POINTERS_BOOL) { @@ -1492,7 +1492,7 @@ void TurboAssembler::SmiTag(Register reg) { } } -void TurboAssembler::SmiTag(Register dst, Register src) { +void MacroAssembler::SmiTag(Register dst, Register src) { DCHECK(dst != src); if (COMPRESS_POINTERS_BOOL) { movl(dst, src); @@ -1502,7 +1502,7 @@ void TurboAssembler::SmiTag(Register dst, Register src) { SmiTag(dst); } -void TurboAssembler::SmiUntag(Register reg) { +void MacroAssembler::SmiUntag(Register reg) { static_assert(kSmiTag == 0); DCHECK(SmiValuesAre32Bits() || SmiValuesAre31Bits()); // TODO(v8:7703): Is there a way to avoid this sign extension when pointer @@ -1513,7 +1513,7 @@ void TurboAssembler::SmiUntag(Register reg) { sarq(reg, Immediate(kSmiShift)); } -void TurboAssembler::SmiUntagUnsigned(Register reg) { +void MacroAssembler::SmiUntagUnsigned(Register reg) { static_assert(kSmiTag == 0); DCHECK(SmiValuesAre32Bits() || SmiValuesAre31Bits()); if (COMPRESS_POINTERS_BOOL) { @@ -1524,7 +1524,7 @@ void TurboAssembler::SmiUntagUnsigned(Register reg) { } } -void TurboAssembler::SmiUntag(Register dst, Register src) { +void MacroAssembler::SmiUntag(Register dst, Register src) { DCHECK(dst != src); if (COMPRESS_POINTERS_BOOL) { movsxlq(dst, src); @@ -1538,7 +1538,7 @@ void TurboAssembler::SmiUntag(Register dst, Register src) { sarq(dst, Immediate(kSmiShift)); } -void TurboAssembler::SmiUntag(Register dst, Operand src) { +void MacroAssembler::SmiUntag(Register dst, Operand src) { if (SmiValuesAre32Bits()) { // Sign extend to 64-bit. movsxlq(dst, Operand(src, kSmiShift / kBitsPerByte)); @@ -1553,7 +1553,7 @@ void TurboAssembler::SmiUntag(Register dst, Operand src) { } } -void TurboAssembler::SmiUntagUnsigned(Register dst, Operand src) { +void MacroAssembler::SmiUntagUnsigned(Register dst, Operand src) { if (SmiValuesAre32Bits()) { // Zero extend to 64-bit. 
movl(dst, Operand(src, kSmiShift / kBitsPerByte)); @@ -1570,7 +1570,7 @@ void TurboAssembler::SmiUntagUnsigned(Register dst, Operand src) { } } -void TurboAssembler::SmiToInt32(Register reg) { +void MacroAssembler::SmiToInt32(Register reg) { static_assert(kSmiTag == 0); DCHECK(SmiValuesAre32Bits() || SmiValuesAre31Bits()); if (COMPRESS_POINTERS_BOOL) { @@ -1580,24 +1580,24 @@ void TurboAssembler::SmiToInt32(Register reg) { } } -void TurboAssembler::SmiToInt32(Register dst, Register src) { +void MacroAssembler::SmiToInt32(Register dst, Register src) { DCHECK(dst != src); mov_tagged(dst, src); SmiToInt32(dst); } -void TurboAssembler::SmiCompare(Register smi1, Register smi2) { +void MacroAssembler::SmiCompare(Register smi1, Register smi2) { AssertSmi(smi1); AssertSmi(smi2); cmp_tagged(smi1, smi2); } -void TurboAssembler::SmiCompare(Register dst, Smi src) { +void MacroAssembler::SmiCompare(Register dst, Smi src) { AssertSmi(dst); Cmp(dst, src); } -void TurboAssembler::Cmp(Register dst, Smi src) { +void MacroAssembler::Cmp(Register dst, Smi src) { if (src.value() == 0) { test_tagged(dst, dst); } else if (COMPRESS_POINTERS_BOOL) { @@ -1609,19 +1609,19 @@ void TurboAssembler::Cmp(Register dst, Smi src) { } } -void TurboAssembler::SmiCompare(Register dst, Operand src) { +void MacroAssembler::SmiCompare(Register dst, Operand src) { AssertSmi(dst); AssertSmi(src); cmp_tagged(dst, src); } -void TurboAssembler::SmiCompare(Operand dst, Register src) { +void MacroAssembler::SmiCompare(Operand dst, Register src) { AssertSmi(dst); AssertSmi(src); cmp_tagged(dst, src); } -void TurboAssembler::SmiCompare(Operand dst, Smi src) { +void MacroAssembler::SmiCompare(Operand dst, Smi src) { AssertSmi(dst); if (SmiValuesAre32Bits()) { cmpl(Operand(dst, kSmiShift / kBitsPerByte), Immediate(src.value())); @@ -1631,44 +1631,44 @@ void TurboAssembler::SmiCompare(Operand dst, Smi src) { } } -void TurboAssembler::Cmp(Operand dst, Smi src) { +void MacroAssembler::Cmp(Operand dst, Smi src) { // The Operand cannot use the smi register. 
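// (Two notes on the Smi memory accesses above, sketched: with 32-bit Smis
// the payload is the slot's upper word, so SmiCompare(Operand, Smi) and
// SmiUntag(dst, Operand) read at byte offset kSmiShift / kBitsPerByte ==
// 32 / 8 == 4 and skip untagging entirely, morally
//   int32_t payload = *reinterpret_cast<int32_t*>(slot_address + 4);
// And here, GetSmiConstant materializes src in kScratchRegister, which is
// why the DCHECK below rejects operands that address through that register.)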
Register smi_reg = GetSmiConstant(src); DCHECK(!dst.AddressUsesRegister(smi_reg)); cmp_tagged(dst, smi_reg); } -Condition TurboAssembler::CheckSmi(Register src) { +Condition MacroAssembler::CheckSmi(Register src) { static_assert(kSmiTag == 0); testb(src, Immediate(kSmiTagMask)); return zero; } -Condition TurboAssembler::CheckSmi(Operand src) { +Condition MacroAssembler::CheckSmi(Operand src) { static_assert(kSmiTag == 0); testb(src, Immediate(kSmiTagMask)); return zero; } -void TurboAssembler::JumpIfSmi(Register src, Label* on_smi, +void MacroAssembler::JumpIfSmi(Register src, Label* on_smi, Label::Distance near_jump) { Condition smi = CheckSmi(src); j(smi, on_smi, near_jump); } -void TurboAssembler::JumpIfNotSmi(Register src, Label* on_not_smi, +void MacroAssembler::JumpIfNotSmi(Register src, Label* on_not_smi, Label::Distance near_jump) { Condition smi = CheckSmi(src); j(NegateCondition(smi), on_not_smi, near_jump); } -void TurboAssembler::JumpIfNotSmi(Operand src, Label* on_not_smi, +void MacroAssembler::JumpIfNotSmi(Operand src, Label* on_not_smi, Label::Distance near_jump) { Condition smi = CheckSmi(src); j(NegateCondition(smi), on_not_smi, near_jump); } -void TurboAssembler::SmiAddConstant(Operand dst, Smi constant) { +void MacroAssembler::SmiAddConstant(Operand dst, Smi constant) { if (constant.value() != 0) { if (SmiValuesAre32Bits()) { addl(Operand(dst, kSmiShift / kBitsPerByte), Immediate(constant.value())); @@ -1688,7 +1688,7 @@ void TurboAssembler::SmiAddConstant(Operand dst, Smi constant) { } } -SmiIndex TurboAssembler::SmiToIndex(Register dst, Register src, int shift) { +SmiIndex MacroAssembler::SmiToIndex(Register dst, Register src, int shift) { if (SmiValuesAre32Bits()) { DCHECK(is_uint6(shift)); // There is a possible optimization if shift is in the range 60-63, but that @@ -1719,7 +1719,7 @@ SmiIndex TurboAssembler::SmiToIndex(Register dst, Register src, int shift) { } } -void TurboAssembler::Switch(Register scratch, Register reg, int case_value_base, +void MacroAssembler::Switch(Register scratch, Register reg, int case_value_base, Label** labels, int num_labels) { Register table = scratch; Label fallthrough, jump_table; @@ -1739,7 +1739,7 @@ void TurboAssembler::Switch(Register scratch, Register reg, int case_value_base, bind(&fallthrough); } -void TurboAssembler::Push(Smi source) { +void MacroAssembler::Push(Smi source) { intptr_t smi = static_cast(source.ptr()); if (is_int32(smi)) { Push(Immediate(static_cast(smi))); @@ -1760,7 +1760,7 @@ void TurboAssembler::Push(Smi source) { // ---------------------------------------------------------------------------- -void TurboAssembler::Move(Register dst, Smi source) { +void MacroAssembler::Move(Register dst, Smi source) { static_assert(kSmiTag == 0); int value = source.value(); if (value == 0) { @@ -1773,7 +1773,7 @@ void TurboAssembler::Move(Register dst, Smi source) { } } -void TurboAssembler::Move(Operand dst, intptr_t x) { +void MacroAssembler::Move(Operand dst, intptr_t x) { if (is_int32(x)) { movq(dst, Immediate(static_cast(x))); } else { @@ -1782,7 +1782,7 @@ void TurboAssembler::Move(Operand dst, intptr_t x) { } } -void TurboAssembler::Move(Register dst, ExternalReference ext) { +void MacroAssembler::Move(Register dst, ExternalReference ext) { // TODO(jgruber,v8:8887): Also consider a root-relative load when generating // non-isolate-independent code. In many cases it might be cheaper than // embedding the relocatable value. 
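// (The Switch above compiles to a bounds check plus an indirect jump through
// an inline table of code addresses; in ordinary C++ the shape is — a sketch
// under the obvious assumptions, not the generated code:
//   using Handler = void (*)();
//   void Dispatch(const Handler* table, int value, int case_value_base,
//                 size_t num_labels) {
//     size_t index = static_cast<size_t>(value - case_value_base);
//     if (index >= num_labels) return;  // one unsigned compare also catches
//                                       // values below the base (fallthrough)
//     table[index]();                   // jmp through the jump table
//   })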
@@ -1793,14 +1793,14 @@ void TurboAssembler::Move(Register dst, ExternalReference ext) { movq(dst, Immediate64(ext.address(), RelocInfo::EXTERNAL_REFERENCE)); } -void TurboAssembler::Move(Register dst, Register src) { +void MacroAssembler::Move(Register dst, Register src) { if (dst != src) { movq(dst, src); } } -void TurboAssembler::Move(Register dst, Operand src) { movq(dst, src); } -void TurboAssembler::Move(Register dst, Immediate src) { +void MacroAssembler::Move(Register dst, Operand src) { movq(dst, src); } +void MacroAssembler::Move(Register dst, Immediate src) { if (src.rmode() == RelocInfo::Mode::NO_INFO) { Move(dst, src.value()); } else { @@ -1808,13 +1808,13 @@ void TurboAssembler::Move(Register dst, Immediate src) { } } -void TurboAssembler::Move(XMMRegister dst, XMMRegister src) { +void MacroAssembler::Move(XMMRegister dst, XMMRegister src) { if (dst != src) { Movaps(dst, src); } } -void TurboAssembler::MovePair(Register dst0, Register src0, Register dst1, +void MacroAssembler::MovePair(Register dst0, Register src0, Register dst1, Register src1) { if (dst0 != src1) { // Normal case: Writing to dst0 does not destroy src1. @@ -1833,7 +1833,7 @@ void TurboAssembler::MovePair(Register dst0, Register src0, Register dst1, } } -void TurboAssembler::MoveNumber(Register dst, double value) { +void MacroAssembler::MoveNumber(Register dst, double value) { int32_t smi; if (DoubleToSmiInteger(value, &smi)) { Move(dst, Smi::FromInt(smi)); @@ -1842,7 +1842,7 @@ void TurboAssembler::MoveNumber(Register dst, double value) { } } -void TurboAssembler::Move(XMMRegister dst, uint32_t src) { +void MacroAssembler::Move(XMMRegister dst, uint32_t src) { if (src == 0) { Xorps(dst, dst); } else { @@ -1861,7 +1861,7 @@ void TurboAssembler::Move(XMMRegister dst, uint32_t src) { } } -void TurboAssembler::Move(XMMRegister dst, uint64_t src) { +void MacroAssembler::Move(XMMRegister dst, uint64_t src) { if (src == 0) { Xorpd(dst, dst); } else { @@ -1886,7 +1886,7 @@ void TurboAssembler::Move(XMMRegister dst, uint64_t src) { } } -void TurboAssembler::Move(XMMRegister dst, uint64_t high, uint64_t low) { +void MacroAssembler::Move(XMMRegister dst, uint64_t high, uint64_t low) { if (high == low) { Move(dst, low); Punpcklqdq(dst, dst); @@ -1967,12 +1967,12 @@ void MacroAssembler::JumpIfIsInRange(Register value, unsigned lower_limit, j(below_equal, on_in_range, near_jump); } -void TurboAssembler::Push(Handle source) { +void MacroAssembler::Push(Handle source) { Move(kScratchRegister, source); Push(kScratchRegister); } -void TurboAssembler::PushArray(Register array, Register size, Register scratch, +void MacroAssembler::PushArray(Register array, Register size, Register scratch, PushArrayOrder order) { DCHECK(!AreAliased(array, size, scratch)); Register counter = scratch; @@ -1997,7 +1997,7 @@ void TurboAssembler::PushArray(Register array, Register size, Register scratch, } } -void TurboAssembler::Move(Register result, Handle object, +void MacroAssembler::Move(Register result, Handle object, RelocInfo::Mode rmode) { // TODO(jgruber,v8:8887): Also consider a root-relative load when generating // non-isolate-independent code. 
In many cases it might be cheaper than @@ -2016,7 +2016,7 @@ void TurboAssembler::Move(Register result, Handle object, } } -void TurboAssembler::Move(Operand dst, Handle object, +void MacroAssembler::Move(Operand dst, Handle object, RelocInfo::Mode rmode) { Move(kScratchRegister, object, rmode); movq(dst, kScratchRegister); @@ -2041,7 +2041,7 @@ void MacroAssembler::DropUnderReturnAddress(int stack_elements, PushReturnAddressFrom(scratch); } -void TurboAssembler::DropArguments(Register count, ArgumentsCountType type, +void MacroAssembler::DropArguments(Register count, ArgumentsCountType type, ArgumentsCountMode mode) { int receiver_bytes = (mode == kCountExcludesReceiver) ? kSystemPointerSize : 0; @@ -2066,7 +2066,7 @@ void TurboAssembler::DropArguments(Register count, ArgumentsCountType type, } } -void TurboAssembler::DropArguments(Register count, Register scratch, +void MacroAssembler::DropArguments(Register count, Register scratch, ArgumentsCountType type, ArgumentsCountMode mode) { DCHECK(!AreAliased(count, scratch)); @@ -2075,7 +2075,7 @@ void TurboAssembler::DropArguments(Register count, Register scratch, PushReturnAddressFrom(scratch); } -void TurboAssembler::DropArgumentsAndPushNewReceiver(Register argc, +void MacroAssembler::DropArgumentsAndPushNewReceiver(Register argc, Register receiver, Register scratch, ArgumentsCountType type, @@ -2087,7 +2087,7 @@ void TurboAssembler::DropArgumentsAndPushNewReceiver(Register argc, PushReturnAddressFrom(scratch); } -void TurboAssembler::DropArgumentsAndPushNewReceiver(Register argc, +void MacroAssembler::DropArgumentsAndPushNewReceiver(Register argc, Operand receiver, Register scratch, ArgumentsCountType type, @@ -2100,13 +2100,13 @@ void TurboAssembler::DropArgumentsAndPushNewReceiver(Register argc, PushReturnAddressFrom(scratch); } -void TurboAssembler::Push(Register src) { pushq(src); } +void MacroAssembler::Push(Register src) { pushq(src); } -void TurboAssembler::Push(Operand src) { pushq(src); } +void MacroAssembler::Push(Operand src) { pushq(src); } void MacroAssembler::PushQuad(Operand src) { pushq(src); } -void TurboAssembler::Push(Immediate value) { pushq(value); } +void MacroAssembler::Push(Immediate value) { pushq(value); } void MacroAssembler::PushImm32(int32_t imm32) { pushq_imm32(imm32); } @@ -2116,27 +2116,27 @@ void MacroAssembler::Pop(Operand dst) { popq(dst); } void MacroAssembler::PopQuad(Operand dst) { popq(dst); } -void TurboAssembler::Jump(const ExternalReference& reference) { +void MacroAssembler::Jump(const ExternalReference& reference) { DCHECK(root_array_available()); jmp(Operand(kRootRegister, RootRegisterOffsetForExternalReferenceTableEntry( isolate(), reference))); } -void TurboAssembler::Jump(Operand op) { jmp(op); } +void MacroAssembler::Jump(Operand op) { jmp(op); } -void TurboAssembler::Jump(Operand op, Condition cc) { +void MacroAssembler::Jump(Operand op, Condition cc) { Label skip; j(NegateCondition(cc), &skip, Label::kNear); Jump(op); bind(&skip); } -void TurboAssembler::Jump(Address destination, RelocInfo::Mode rmode) { +void MacroAssembler::Jump(Address destination, RelocInfo::Mode rmode) { Move(kScratchRegister, destination, rmode); jmp(kScratchRegister); } -void TurboAssembler::Jump(Address destination, RelocInfo::Mode rmode, +void MacroAssembler::Jump(Address destination, RelocInfo::Mode rmode, Condition cc) { Label skip; j(NegateCondition(cc), &skip, Label::kNear); @@ -2144,7 +2144,7 @@ void TurboAssembler::Jump(Address destination, RelocInfo::Mode rmode, bind(&skip); } -void 
TurboAssembler::Jump(Handle code_object, RelocInfo::Mode rmode) { +void MacroAssembler::Jump(Handle code_object, RelocInfo::Mode rmode) { DCHECK_IMPLIES(options().isolate_independent_code, Builtins::IsIsolateIndependentBuiltin(*code_object)); Builtin builtin = Builtin::kNoBuiltinId; @@ -2156,7 +2156,7 @@ void TurboAssembler::Jump(Handle code_object, RelocInfo::Mode rmode) { jmp(code_object, rmode); } -void TurboAssembler::Jump(Handle code_object, RelocInfo::Mode rmode, +void MacroAssembler::Jump(Handle code_object, RelocInfo::Mode rmode, Condition cc) { DCHECK_IMPLIES(options().isolate_independent_code, Builtins::IsIsolateIndependentBuiltin(*code_object)); @@ -2174,12 +2174,12 @@ void MacroAssembler::JumpToOffHeapInstructionStream(Address entry) { jmp(kOffHeapTrampolineRegister); } -void TurboAssembler::Call(ExternalReference ext) { +void MacroAssembler::Call(ExternalReference ext) { LoadAddress(kScratchRegister, ext); call(kScratchRegister); } -void TurboAssembler::Call(Operand op) { +void MacroAssembler::Call(Operand op) { if (!CpuFeatures::IsSupported(INTEL_ATOM)) { call(op); } else { @@ -2188,12 +2188,12 @@ void TurboAssembler::Call(Operand op) { } } -void TurboAssembler::Call(Address destination, RelocInfo::Mode rmode) { +void MacroAssembler::Call(Address destination, RelocInfo::Mode rmode) { Move(kScratchRegister, destination, rmode); call(kScratchRegister); } -void TurboAssembler::Call(Handle code_object, RelocInfo::Mode rmode) { +void MacroAssembler::Call(Handle code_object, RelocInfo::Mode rmode) { DCHECK_IMPLIES(options().isolate_independent_code, Builtins::IsIsolateIndependentBuiltin(*code_object)); Builtin builtin = Builtin::kNoBuiltinId; @@ -2205,12 +2205,12 @@ void TurboAssembler::Call(Handle code_object, RelocInfo::Mode rmode) { call(code_object, rmode); } -Operand TurboAssembler::EntryFromBuiltinAsOperand(Builtin builtin) { +Operand MacroAssembler::EntryFromBuiltinAsOperand(Builtin builtin) { DCHECK(root_array_available()); return Operand(kRootRegister, IsolateData::BuiltinEntrySlotOffset(builtin)); } -Operand TurboAssembler::EntryFromBuiltinIndexAsOperand(Register builtin_index) { +Operand MacroAssembler::EntryFromBuiltinIndexAsOperand(Register builtin_index) { if (SmiValuesAre32Bits()) { // The builtin_index register contains the builtin index as a Smi. 
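// (Sketch of the arithmetic that follows, with an assumed name —
// kBuiltinEntryTableOffset is illustrative, not the real constant:
//   offset = SmiToInt(builtin_index) * kSystemPointerSize
//            + kBuiltinEntryTableOffset
// i.e. untag the Smi index, scale by the slot size, and index off
// kRootRegister.)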
SmiUntagUnsigned(builtin_index); @@ -2227,11 +2227,11 @@ Operand TurboAssembler::EntryFromBuiltinIndexAsOperand(Register builtin_index) { } } -void TurboAssembler::CallBuiltinByIndex(Register builtin_index) { +void MacroAssembler::CallBuiltinByIndex(Register builtin_index) { Call(EntryFromBuiltinIndexAsOperand(builtin_index)); } -void TurboAssembler::CallBuiltin(Builtin builtin) { +void MacroAssembler::CallBuiltin(Builtin builtin) { ASM_CODE_COMMENT_STRING(this, CommentForOffHeapTrampoline("call", builtin)); switch (options().builtin_call_jump_mode) { case BuiltinCallJumpMode::kAbsolute: @@ -2251,7 +2251,7 @@ void TurboAssembler::CallBuiltin(Builtin builtin) { } } -void TurboAssembler::TailCallBuiltin(Builtin builtin) { +void MacroAssembler::TailCallBuiltin(Builtin builtin) { ASM_CODE_COMMENT_STRING(this, CommentForOffHeapTrampoline("tail call", builtin)); switch (options().builtin_call_jump_mode) { @@ -2272,7 +2272,7 @@ void TurboAssembler::TailCallBuiltin(Builtin builtin) { } } -void TurboAssembler::TailCallBuiltin(Builtin builtin, Condition cc) { +void MacroAssembler::TailCallBuiltin(Builtin builtin, Condition cc) { ASM_CODE_COMMENT_STRING(this, CommentForOffHeapTrampoline("tail call", builtin)); switch (options().builtin_call_jump_mode) { @@ -2293,12 +2293,12 @@ void TurboAssembler::TailCallBuiltin(Builtin builtin, Condition cc) { } } -void TurboAssembler::LoadCodeEntry(Register destination, Register code_object) { +void MacroAssembler::LoadCodeEntry(Register destination, Register code_object) { ASM_CODE_COMMENT(this); movq(destination, FieldOperand(code_object, Code::kCodeEntryPointOffset)); } -void TurboAssembler::LoadCodeInstructionStreamNonBuiltin(Register destination, +void MacroAssembler::LoadCodeInstructionStreamNonBuiltin(Register destination, Register code_object) { ASM_CODE_COMMENT(this); // Compute the InstructionStream object pointer from the code entry point. 
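// (The computation below is plain pointer arithmetic; in C++ terms — a
// sketch with the constants taken as given:
//   uintptr_t ObjectFromEntry(uintptr_t entry, uintptr_t header_size,
//                             uintptr_t heap_object_tag) {
//     // entry points just past the InstructionStream header; the tagged
//     // object pointer is the header start plus the heap-object tag.
//     return entry - (header_size - heap_object_tag);
//   })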
@@ -2306,12 +2306,12 @@ void TurboAssembler::LoadCodeInstructionStreamNonBuiltin(Register destination, subq(destination, Immediate(InstructionStream::kHeaderSize - kHeapObjectTag)); } -void TurboAssembler::CallCodeObject(Register code_object) { +void MacroAssembler::CallCodeObject(Register code_object) { LoadCodeEntry(code_object, code_object); call(code_object); } -void TurboAssembler::JumpCodeObject(Register code_object, JumpMode jump_mode) { +void MacroAssembler::JumpCodeObject(Register code_object, JumpMode jump_mode) { LoadCodeEntry(code_object, code_object); switch (jump_mode) { case JumpMode::kJump: @@ -2324,7 +2324,7 @@ void TurboAssembler::JumpCodeObject(Register code_object, JumpMode jump_mode) { } } -void TurboAssembler::PextrdPreSse41(Register dst, XMMRegister src, +void MacroAssembler::PextrdPreSse41(Register dst, XMMRegister src, uint8_t imm8) { if (imm8 == 0) { Movd(dst, src); @@ -2337,42 +2337,42 @@ void TurboAssembler::PextrdPreSse41(Register dst, XMMRegister src, namespace { template -void PinsrdPreSse41Helper(TurboAssembler* tasm, XMMRegister dst, Op src, +void PinsrdPreSse41Helper(MacroAssembler* masm, XMMRegister dst, Op src, uint8_t imm8, uint32_t* load_pc_offset) { - tasm->Movd(kScratchDoubleReg, src); - if (load_pc_offset) *load_pc_offset = tasm->pc_offset(); + masm->Movd(kScratchDoubleReg, src); + if (load_pc_offset) *load_pc_offset = masm->pc_offset(); if (imm8 == 1) { - tasm->punpckldq(dst, kScratchDoubleReg); + masm->punpckldq(dst, kScratchDoubleReg); } else { DCHECK_EQ(0, imm8); - tasm->Movss(dst, kScratchDoubleReg); + masm->Movss(dst, kScratchDoubleReg); } } } // namespace -void TurboAssembler::PinsrdPreSse41(XMMRegister dst, Register src, uint8_t imm8, +void MacroAssembler::PinsrdPreSse41(XMMRegister dst, Register src, uint8_t imm8, uint32_t* load_pc_offset) { PinsrdPreSse41Helper(this, dst, src, imm8, load_pc_offset); } -void TurboAssembler::PinsrdPreSse41(XMMRegister dst, Operand src, uint8_t imm8, +void MacroAssembler::PinsrdPreSse41(XMMRegister dst, Operand src, uint8_t imm8, uint32_t* load_pc_offset) { PinsrdPreSse41Helper(this, dst, src, imm8, load_pc_offset); } -void TurboAssembler::Pinsrq(XMMRegister dst, XMMRegister src1, Register src2, +void MacroAssembler::Pinsrq(XMMRegister dst, XMMRegister src1, Register src2, uint8_t imm8, uint32_t* load_pc_offset) { PinsrHelper(this, &Assembler::vpinsrq, &Assembler::pinsrq, dst, src1, src2, imm8, load_pc_offset, {SSE4_1}); } -void TurboAssembler::Pinsrq(XMMRegister dst, XMMRegister src1, Operand src2, +void MacroAssembler::Pinsrq(XMMRegister dst, XMMRegister src1, Operand src2, uint8_t imm8, uint32_t* load_pc_offset) { PinsrHelper(this, &Assembler::vpinsrq, &Assembler::pinsrq, dst, src1, src2, imm8, load_pc_offset, {SSE4_1}); } -void TurboAssembler::Lzcntl(Register dst, Register src) { +void MacroAssembler::Lzcntl(Register dst, Register src) { if (CpuFeatures::IsSupported(LZCNT)) { CpuFeatureScope scope(this, LZCNT); lzcntl(dst, src); @@ -2386,7 +2386,7 @@ void TurboAssembler::Lzcntl(Register dst, Register src) { xorl(dst, Immediate(31)); // for x in [0..31], 31^x == 31 - x } -void TurboAssembler::Lzcntl(Register dst, Operand src) { +void MacroAssembler::Lzcntl(Register dst, Operand src) { if (CpuFeatures::IsSupported(LZCNT)) { CpuFeatureScope scope(this, LZCNT); lzcntl(dst, src); @@ -2400,7 +2400,7 @@ void TurboAssembler::Lzcntl(Register dst, Operand src) { xorl(dst, Immediate(31)); // for x in [0..31], 31^x == 31 - x } -void TurboAssembler::Lzcntq(Register dst, Register src) { +void 
MacroAssembler::Lzcntq(Register dst, Register src) { if (CpuFeatures::IsSupported(LZCNT)) { CpuFeatureScope scope(this, LZCNT); lzcntq(dst, src); @@ -2414,7 +2414,7 @@ void TurboAssembler::Lzcntq(Register dst, Register src) { xorl(dst, Immediate(63)); // for x in [0..63], 63^x == 63 - x } -void TurboAssembler::Lzcntq(Register dst, Operand src) { +void MacroAssembler::Lzcntq(Register dst, Operand src) { if (CpuFeatures::IsSupported(LZCNT)) { CpuFeatureScope scope(this, LZCNT); lzcntq(dst, src); @@ -2428,7 +2428,7 @@ void TurboAssembler::Lzcntq(Register dst, Operand src) { xorl(dst, Immediate(63)); // for x in [0..63], 63^x == 63 - x } -void TurboAssembler::Tzcntq(Register dst, Register src) { +void MacroAssembler::Tzcntq(Register dst, Register src) { if (CpuFeatures::IsSupported(BMI1)) { CpuFeatureScope scope(this, BMI1); tzcntq(dst, src); @@ -2442,7 +2442,7 @@ void TurboAssembler::Tzcntq(Register dst, Register src) { bind(&not_zero_src); } -void TurboAssembler::Tzcntq(Register dst, Operand src) { +void MacroAssembler::Tzcntq(Register dst, Operand src) { if (CpuFeatures::IsSupported(BMI1)) { CpuFeatureScope scope(this, BMI1); tzcntq(dst, src); @@ -2456,7 +2456,7 @@ void TurboAssembler::Tzcntq(Register dst, Operand src) { bind(&not_zero_src); } -void TurboAssembler::Tzcntl(Register dst, Register src) { +void MacroAssembler::Tzcntl(Register dst, Register src) { if (CpuFeatures::IsSupported(BMI1)) { CpuFeatureScope scope(this, BMI1); tzcntl(dst, src); @@ -2469,7 +2469,7 @@ void TurboAssembler::Tzcntl(Register dst, Register src) { bind(&not_zero_src); } -void TurboAssembler::Tzcntl(Register dst, Operand src) { +void MacroAssembler::Tzcntl(Register dst, Operand src) { if (CpuFeatures::IsSupported(BMI1)) { CpuFeatureScope scope(this, BMI1); tzcntl(dst, src); @@ -2482,7 +2482,7 @@ void TurboAssembler::Tzcntl(Register dst, Operand src) { bind(&not_zero_src); } -void TurboAssembler::Popcntl(Register dst, Register src) { +void MacroAssembler::Popcntl(Register dst, Register src) { if (CpuFeatures::IsSupported(POPCNT)) { CpuFeatureScope scope(this, POPCNT); popcntl(dst, src); @@ -2491,7 +2491,7 @@ void TurboAssembler::Popcntl(Register dst, Register src) { UNREACHABLE(); } -void TurboAssembler::Popcntl(Register dst, Operand src) { +void MacroAssembler::Popcntl(Register dst, Operand src) { if (CpuFeatures::IsSupported(POPCNT)) { CpuFeatureScope scope(this, POPCNT); popcntl(dst, src); @@ -2500,7 +2500,7 @@ void TurboAssembler::Popcntl(Register dst, Operand src) { UNREACHABLE(); } -void TurboAssembler::Popcntq(Register dst, Register src) { +void MacroAssembler::Popcntq(Register dst, Register src) { if (CpuFeatures::IsSupported(POPCNT)) { CpuFeatureScope scope(this, POPCNT); popcntq(dst, src); @@ -2509,7 +2509,7 @@ void TurboAssembler::Popcntq(Register dst, Register src) { UNREACHABLE(); } -void TurboAssembler::Popcntq(Register dst, Operand src) { +void MacroAssembler::Popcntq(Register dst, Operand src) { if (CpuFeatures::IsSupported(POPCNT)) { CpuFeatureScope scope(this, POPCNT); popcntq(dst, src); @@ -2542,9 +2542,9 @@ void MacroAssembler::PopStackHandler() { addq(rsp, Immediate(StackHandlerConstants::kSize - kSystemPointerSize)); } -void TurboAssembler::Ret() { ret(0); } +void MacroAssembler::Ret() { ret(0); } -void TurboAssembler::Ret(int bytes_dropped, Register scratch) { +void MacroAssembler::Ret(int bytes_dropped, Register scratch) { if (is_uint16(bytes_dropped)) { ret(bytes_dropped); } else { @@ -2555,7 +2555,7 @@ void TurboAssembler::Ret(int bytes_dropped, Register scratch) { } } -void 
TurboAssembler::IncsspqIfSupported(Register number_of_words, +void MacroAssembler::IncsspqIfSupported(Register number_of_words, Register scratch) { // Optimized code can validate at runtime whether the cpu supports the // incsspq instruction, so it shouldn't use this method. @@ -2578,7 +2578,7 @@ void MacroAssembler::CmpObjectType(Register heap_object, InstanceType type, CmpInstanceType(map, type); } -void TurboAssembler::CmpInstanceType(Register map, InstanceType type) { +void MacroAssembler::CmpInstanceType(Register map, InstanceType type) { cmpw(FieldOperand(map, Map::kInstanceTypeOffset), Immediate(type)); } @@ -2602,28 +2602,28 @@ Immediate MacroAssembler::ClearedValue() const { } #ifdef V8_ENABLE_DEBUG_CODE -void TurboAssembler::AssertNotSmi(Register object) { +void MacroAssembler::AssertNotSmi(Register object) { if (!v8_flags.debug_code) return; ASM_CODE_COMMENT(this); Condition is_smi = CheckSmi(object); Check(NegateCondition(is_smi), AbortReason::kOperandIsASmi); } -void TurboAssembler::AssertSmi(Register object) { +void MacroAssembler::AssertSmi(Register object) { if (!v8_flags.debug_code) return; ASM_CODE_COMMENT(this); Condition is_smi = CheckSmi(object); Check(is_smi, AbortReason::kOperandIsNotASmi); } -void TurboAssembler::AssertSmi(Operand object) { +void MacroAssembler::AssertSmi(Operand object) { if (!v8_flags.debug_code) return; ASM_CODE_COMMENT(this); Condition is_smi = CheckSmi(object); Check(is_smi, AbortReason::kOperandIsNotASmi); } -void TurboAssembler::AssertZeroExtended(Register int32_register) { +void MacroAssembler::AssertZeroExtended(Register int32_register) { if (!v8_flags.debug_code) return; ASM_CODE_COMMENT(this); DCHECK_NE(int32_register, kScratchRegister); @@ -2632,7 +2632,7 @@ void TurboAssembler::AssertZeroExtended(Register int32_register) { Check(below_equal, AbortReason::k32BitValueInRegisterIsNotZeroExtended); } -void TurboAssembler::AssertSignedBitOfSmiIsZero(Register smi_register) { +void MacroAssembler::AssertSignedBitOfSmiIsZero(Register smi_register) { if (!v8_flags.debug_code) return; ASM_CODE_COMMENT(this); DCHECK(COMPRESS_POINTERS_BOOL); @@ -2640,7 +2640,7 @@ void TurboAssembler::AssertSignedBitOfSmiIsZero(Register smi_register) { Check(zero, AbortReason::kSignedBitOfSmiIsNotZero); } -void TurboAssembler::AssertMap(Register object) { +void MacroAssembler::AssertMap(Register object) { if (!v8_flags.debug_code) return; ASM_CODE_COMMENT(this); testb(object, Immediate(kSmiTagMask)); @@ -2652,7 +2652,7 @@ void TurboAssembler::AssertMap(Register object) { Check(equal, AbortReason::kOperandIsNotAMap); } -void TurboAssembler::AssertCode(Register object) { +void MacroAssembler::AssertCode(Register object) { if (!v8_flags.debug_code) return; ASM_CODE_COMMENT(this); testb(object, Immediate(kSmiTagMask)); @@ -2749,11 +2749,11 @@ void MacroAssembler::AssertUndefinedOrAllocationSite(Register object) { bind(&done_checking); } -void TurboAssembler::Assert(Condition cc, AbortReason reason) { +void MacroAssembler::Assert(Condition cc, AbortReason reason) { if (v8_flags.debug_code) Check(cc, reason); } -void TurboAssembler::AssertUnreachable(AbortReason reason) { +void MacroAssembler::AssertUnreachable(AbortReason reason) { if (v8_flags.debug_code) Abort(reason); } #endif // V8_ENABLE_DEBUG_CODE @@ -2884,10 +2884,10 @@ Operand MacroAssembler::StackLimitAsOperand(StackLimitKind kind) { kind == StackLimitKind::kRealStackLimit ? 
ExternalReference::address_of_real_jslimit(isolate) : ExternalReference::address_of_jslimit(isolate); - DCHECK(TurboAssembler::IsAddressableThroughRootRegister(isolate, limit)); + DCHECK(MacroAssembler::IsAddressableThroughRootRegister(isolate, limit)); intptr_t offset = - TurboAssembler::RootRegisterOffsetForExternalReference(isolate, limit); + MacroAssembler::RootRegisterOffsetForExternalReference(isolate, limit); CHECK(is_int32(offset)); return Operand(kRootRegister, static_cast(offset)); } @@ -3015,14 +3015,14 @@ void MacroAssembler::CallDebugOnFunctionCall(Register fun, Register new_target, SmiUntag(expected_parameter_count); } -void TurboAssembler::StubPrologue(StackFrame::Type type) { +void MacroAssembler::StubPrologue(StackFrame::Type type) { ASM_CODE_COMMENT(this); pushq(rbp); // Caller's frame pointer. movq(rbp, rsp); Push(Immediate(StackFrame::TypeToMarker(type))); } -void TurboAssembler::Prologue() { +void MacroAssembler::Prologue() { ASM_CODE_COMMENT(this); pushq(rbp); // Caller's frame pointer. movq(rbp, rsp); @@ -3031,7 +3031,7 @@ void TurboAssembler::Prologue() { Push(kJavaScriptCallArgCountRegister); // Actual argument count. } -void TurboAssembler::EnterFrame(StackFrame::Type type) { +void MacroAssembler::EnterFrame(StackFrame::Type type) { ASM_CODE_COMMENT(this); pushq(rbp); movq(rbp, rsp); @@ -3043,7 +3043,7 @@ void TurboAssembler::EnterFrame(StackFrame::Type type) { #endif // V8_ENABLE_WEBASSEMBLY } -void TurboAssembler::LeaveFrame(StackFrame::Type type) { +void MacroAssembler::LeaveFrame(StackFrame::Type type) { ASM_CODE_COMMENT(this); // TODO(v8:11429): Consider passing BASELINE instead, and checking for // IsJSFrame or similar. Could then unify with manual frame leaves in the @@ -3058,7 +3058,7 @@ void TurboAssembler::LeaveFrame(StackFrame::Type type) { } #if defined(V8_TARGET_OS_WIN) || defined(V8_TARGET_OS_MACOS) -void TurboAssembler::AllocateStackSpace(Register bytes_scratch) { +void MacroAssembler::AllocateStackSpace(Register bytes_scratch) { ASM_CODE_COMMENT(this); // On Windows and on macOS, we cannot increment the stack size by more than // one page (minimum page size is 4KB) without accessing at least one byte on @@ -3080,7 +3080,7 @@ void TurboAssembler::AllocateStackSpace(Register bytes_scratch) { subq(rsp, bytes_scratch); } -void TurboAssembler::AllocateStackSpace(int bytes) { +void MacroAssembler::AllocateStackSpace(int bytes) { ASM_CODE_COMMENT(this); DCHECK_GE(bytes, 0); while (bytes >= kStackPageSize) { @@ -3260,7 +3260,7 @@ void MacroAssembler::TryLoadOptimizedOsrCode(Register scratch_and_result, Move(scratch_and_result, 0); } -int TurboAssembler::ArgumentStackSlotsForCFunctionCall(int num_arguments) { +int MacroAssembler::ArgumentStackSlotsForCFunctionCall(int num_arguments) { // On Windows 64 stack slots are reserved by the caller for all arguments // including the ones passed in registers, and space is always allocated for // the four register arguments even if the function takes fewer than four @@ -3278,7 +3278,7 @@ int TurboAssembler::ArgumentStackSlotsForCFunctionCall(int num_arguments) { #endif } -void TurboAssembler::PrepareCallCFunction(int num_arguments) { +void MacroAssembler::PrepareCallCFunction(int num_arguments) { ASM_CODE_COMMENT(this); int frame_alignment = base::OS::ActivationFrameAlignment(); DCHECK_NE(frame_alignment, 0); @@ -3295,14 +3295,14 @@ void TurboAssembler::PrepareCallCFunction(int num_arguments) { kScratchRegister); } -void TurboAssembler::CallCFunction(ExternalReference function, +void 
MacroAssembler::CallCFunction(ExternalReference function, int num_arguments) { ASM_CODE_COMMENT(this); LoadAddress(rax, function); CallCFunction(rax, num_arguments); } -void TurboAssembler::CallCFunction(Register function, int num_arguments) { +void MacroAssembler::CallCFunction(Register function, int num_arguments) { ASM_CODE_COMMENT(this); DCHECK_LE(num_arguments, kMaxCParameters); DCHECK(has_frame()); @@ -3376,7 +3376,7 @@ void TurboAssembler::CallCFunction(Register function, int num_arguments) { movq(rsp, Operand(rsp, argument_slots_on_stack * kSystemPointerSize)); } -void TurboAssembler::CheckPageFlag(Register object, Register scratch, int mask, +void MacroAssembler::CheckPageFlag(Register object, Register scratch, int mask, Condition cc, Label* condition_met, Label::Distance condition_met_distance) { ASM_CODE_COMMENT(this); @@ -3396,7 +3396,7 @@ void TurboAssembler::CheckPageFlag(Register object, Register scratch, int mask, j(cc, condition_met, condition_met_distance); } -void TurboAssembler::ComputeCodeStartAddress(Register dst) { +void MacroAssembler::ComputeCodeStartAddress(Register dst) { Label current; bind(&current); int pc = pc_offset(); @@ -3411,7 +3411,7 @@ // the flags in the referenced {Code} object; // 2. test kMarkedForDeoptimizationBit in those flags; and // 3. if it is not zero then it jumps to the builtin. -void TurboAssembler::BailoutIfDeoptimized(Register scratch) { +void MacroAssembler::BailoutIfDeoptimized(Register scratch) { int offset = InstructionStream::kCodeOffset - InstructionStream::kHeaderSize; LoadTaggedPointerField(scratch, Operand(kJavaScriptCallCodeStartRegister, offset)); @@ -3421,12 +3421,12 @@ void TurboAssembler::BailoutIfDeoptimized(Register scratch) { RelocInfo::CODE_TARGET, not_zero); } -void TurboAssembler::CallForDeoptimization(Builtin target, int, Label* exit, +void MacroAssembler::CallForDeoptimization(Builtin target, int, Label* exit, DeoptimizeKind kind, Label* ret, Label*) { ASM_CODE_COMMENT(this); // Note: Assembler::call is used here on purpose to guarantee fixed-size - // exits even on Atom CPUs; see TurboAssembler::Call for Atom-specific + // exits even on Atom CPUs; see MacroAssembler::Call for Atom-specific // performance tuning which emits a different instruction sequence. 
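// (The fixed size matters because deopt exits are located by index
// arithmetic — a sketch of the invariant implied by the DCHECK below:
//   exit_pc(i) == exits_start + i * kEagerDeoptExitSize
// so every exit must occupy exactly kEagerDeoptExitSize, respectively
// kLazyDeoptExitSize, bytes.)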
call(EntryFromBuiltinAsOperand(target)); DCHECK_EQ(SizeOfCodeGeneratedSince(exit), @@ -3434,8 +3434,8 @@ void TurboAssembler::CallForDeoptimization(Builtin target, int, Label* exit, : Deoptimizer::kEagerDeoptExitSize); } -void TurboAssembler::Trap() { int3(); } -void TurboAssembler::DebugBreak() { int3(); } +void MacroAssembler::Trap() { int3(); } +void MacroAssembler::DebugBreak() { int3(); } } // namespace internal } // namespace v8 diff --git a/src/codegen/x64/macro-assembler-x64.h b/src/codegen/x64/macro-assembler-x64.h index 5003555b00..3a526e2cbc 100644 --- a/src/codegen/x64/macro-assembler-x64.h +++ b/src/codegen/x64/macro-assembler-x64.h @@ -55,10 +55,10 @@ class StackArgumentsAccessor { DISALLOW_IMPLICIT_CONSTRUCTORS(StackArgumentsAccessor); }; -class V8_EXPORT_PRIVATE TurboAssembler - : public SharedTurboAssemblerBase { +class V8_EXPORT_PRIVATE MacroAssembler + : public SharedMacroAssembler { public: - using SharedTurboAssemblerBase::SharedTurboAssemblerBase; + using SharedMacroAssembler::SharedMacroAssembler; void PushReturnAddressFrom(Register src) { pushq(src); } void PopReturnAddressTo(Register dst) { popq(dst); } @@ -653,23 +653,6 @@ class V8_EXPORT_PRIVATE TurboAssembler IsolateRootLocation isolateRootLocation = IsolateRootLocation::kInRootRegister); - protected: - static const int kSmiShift = kSmiTagSize + kSmiShiftSize; - - // Returns a register holding the smi value. The register MUST NOT be - // modified. It may be the "smi 1 constant" register. - Register GetSmiConstant(Smi value); - - // Drops arguments assuming that the return address was already popped. - void DropArguments(Register count, ArgumentsCountType type = kCountIsInteger, - ArgumentsCountMode mode = kCountExcludesReceiver); -}; - -// MacroAssembler implements a collection of frequently used macros. -class V8_EXPORT_PRIVATE MacroAssembler : public TurboAssembler { - public: - using TurboAssembler::TurboAssembler; - // Loads and stores the value of an external reference. // Special case code for load and store to take advantage of // load_rax/store_rax if possible/necessary. @@ -781,7 +764,6 @@ class V8_EXPORT_PRIVATE MacroAssembler : public TurboAssembler { // --------------------------------------------------------------------------- // Macro instructions. - using TurboAssembler::Cmp; void Cmp(Register dst, Handle source); void Cmp(Operand dst, Handle source); @@ -945,6 +927,17 @@ class V8_EXPORT_PRIVATE MacroAssembler : public TurboAssembler { // In-place weak references. void LoadWeakValue(Register in_out, Label* target_if_cleared); + protected: + static const int kSmiShift = kSmiTagSize + kSmiShiftSize; + + // Returns a register holding the smi value. The register MUST NOT be + // modified. It may be the "smi 1 constant" register. + Register GetSmiConstant(Smi value); + + // Drops arguments assuming that the return address was already popped. + void DropArguments(Register count, ArgumentsCountType type = kCountIsInteger, + ArgumentsCountMode mode = kCountExcludesReceiver); + private: // Helper functions for generating invokes. void InvokePrologue(Register expected_parameter_count, diff --git a/src/compiler/backend/arm/code-generator-arm.cc b/src/compiler/backend/arm/code-generator-arm.cc index 4d7ab00bce..66ed969eb7 100644 --- a/src/compiler/backend/arm/code-generator-arm.cc +++ b/src/compiler/backend/arm/code-generator-arm.cc @@ -29,7 +29,7 @@ namespace v8 { namespace internal { namespace compiler { -#define __ tasm()-> +#define __ masm()-> // Adds Arm-specific methods to convert InstructionOperands. 
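// (The macro-assembler-x64.h hunk above collapses the old two-level
// hierarchy into a single class; the resulting x64 shape, sketched from the
// diff — the CRTP template parameter <MacroAssembler> is an assumption:
//   class V8_EXPORT_PRIVATE MacroAssembler
//       : public SharedMacroAssembler<MacroAssembler> {
//    public:
//     using SharedMacroAssembler::SharedMacroAssembler;
//     // ... former TurboAssembler and MacroAssembler members, merged ...
//    protected:
//     static const int kSmiShift = kSmiTagSize + kSmiShiftSize;
//     Register GetSmiConstant(Smi value);
//     void DropArguments(Register count, ArgumentsCountType type,
//                        ArgumentsCountMode mode);
//   };)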
class ArmOperandConverter final : public InstructionOperandConverter { @@ -415,7 +415,7 @@ Condition FlagsConditionToCondition(FlagsCondition condition) { do { \ /* TODO(bmeurer): We should really get rid of this special instruction, */ \ /* and generate a CallAddress instruction instead. */ \ - FrameScope scope(tasm(), StackFrame::MANUAL); \ + FrameScope scope(masm(), StackFrame::MANUAL); \ __ PrepareCallCFunction(0, 2); \ __ MovToFloatParameters(i.InputDoubleRegister(0), \ i.InputDoubleRegister(1)); \ @@ -429,7 +429,7 @@ Condition FlagsConditionToCondition(FlagsCondition condition) { do { \ /* TODO(bmeurer): We should really get rid of this special instruction, */ \ /* and generate a CallAddress instruction instead. */ \ - FrameScope scope(tasm(), StackFrame::MANUAL); \ + FrameScope scope(masm(), StackFrame::MANUAL); \ __ PrepareCallCFunction(0, 1); \ __ MovToFloatParameter(i.InputDoubleRegister(0)); \ __ CallCFunction(ExternalReference::ieee754_##name##_function(), 0, 1); \ @@ -473,7 +473,7 @@ Condition FlagsConditionToCondition(FlagsCondition condition) { if (instr->InputAt(1)->IsImmediate()) { \ __ asm_imm(dt, dst, src, i.InputInt##width(1)); \ } else { \ - UseScratchRegisterScope temps(tasm()); \ + UseScratchRegisterScope temps(masm()); \ Simd128Register tmp = temps.AcquireQ(); \ Register shift = temps.Acquire(); \ constexpr int mask = (1 << width) - 1; \ @@ -493,7 +493,7 @@ Condition FlagsConditionToCondition(FlagsCondition condition) { if (instr->InputAt(1)->IsImmediate()) { \ __ asm_imm(dt, dst, src, i.InputInt##width(1)); \ } else { \ - UseScratchRegisterScope temps(tasm()); \ + UseScratchRegisterScope temps(masm()); \ Simd128Register tmp = temps.AcquireQ(); \ Register shift = temps.Acquire(); \ constexpr int mask = (1 << width) - 1; \ @@ -518,20 +518,20 @@ void CodeGenerator::AssemblePrepareTailCall() { namespace { -void FlushPendingPushRegisters(TurboAssembler* tasm, +void FlushPendingPushRegisters(MacroAssembler* masm, FrameAccessState* frame_access_state, ZoneVector* pending_pushes) { switch (pending_pushes->size()) { case 0: break; case 1: - tasm->push((*pending_pushes)[0]); + masm->push((*pending_pushes)[0]); break; case 2: - tasm->Push((*pending_pushes)[0], (*pending_pushes)[1]); + masm->Push((*pending_pushes)[0], (*pending_pushes)[1]); break; case 3: - tasm->Push((*pending_pushes)[0], (*pending_pushes)[1], + masm->Push((*pending_pushes)[0], (*pending_pushes)[1], (*pending_pushes)[2]); break; default: @@ -542,7 +542,7 @@ void FlushPendingPushRegisters(TurboAssembler* tasm, } void AdjustStackPointerForTailCall( - TurboAssembler* tasm, FrameAccessState* state, int new_slot_above_sp, + MacroAssembler* masm, FrameAccessState* state, int new_slot_above_sp, ZoneVector* pending_pushes = nullptr, bool allow_shrinkage = true) { int current_sp_offset = state->GetSPToFPSlotCount() + @@ -550,15 +550,15 @@ void AdjustStackPointerForTailCall( int stack_slot_delta = new_slot_above_sp - current_sp_offset; if (stack_slot_delta > 0) { if (pending_pushes != nullptr) { - FlushPendingPushRegisters(tasm, state, pending_pushes); + FlushPendingPushRegisters(masm, state, pending_pushes); } - tasm->AllocateStackSpace(stack_slot_delta * kSystemPointerSize); + masm->AllocateStackSpace(stack_slot_delta * kSystemPointerSize); state->IncreaseSPDelta(stack_slot_delta); } else if (allow_shrinkage && stack_slot_delta < 0) { if (pending_pushes != nullptr) { - FlushPendingPushRegisters(tasm, state, pending_pushes); + FlushPendingPushRegisters(masm, state, pending_pushes); } - tasm->add(sp, sp, 
Operand(-stack_slot_delta * kSystemPointerSize)); + masm->add(sp, sp, Operand(-stack_slot_delta * kSystemPointerSize)); state->IncreaseSPDelta(stack_slot_delta); } } @@ -601,7 +601,7 @@ void CodeGenerator::AssembleTailCallBeforeGap(Instruction* instr, LocationOperand::cast(move->destination())); InstructionOperand source(move->source()); AdjustStackPointerForTailCall( - tasm(), frame_access_state(), + masm(), frame_access_state(), destination_location.index() - pending_pushes.size(), &pending_pushes); // Pushes of non-register data types are not supported. @@ -611,26 +611,26 @@ void CodeGenerator::AssembleTailCallBeforeGap(Instruction* instr, // TODO(arm): We can push more than 3 registers at once. Add support in // the macro-assembler for pushing a list of registers. if (pending_pushes.size() == 3) { - FlushPendingPushRegisters(tasm(), frame_access_state(), + FlushPendingPushRegisters(masm(), frame_access_state(), &pending_pushes); } move->Eliminate(); } - FlushPendingPushRegisters(tasm(), frame_access_state(), &pending_pushes); + FlushPendingPushRegisters(masm(), frame_access_state(), &pending_pushes); } - AdjustStackPointerForTailCall(tasm(), frame_access_state(), + AdjustStackPointerForTailCall(masm(), frame_access_state(), first_unused_slot_offset, nullptr, false); } void CodeGenerator::AssembleTailCallAfterGap(Instruction* instr, int first_unused_slot_offset) { - AdjustStackPointerForTailCall(tasm(), frame_access_state(), + AdjustStackPointerForTailCall(masm(), frame_access_state(), first_unused_slot_offset); } // Check that {kJavaScriptCallCodeStartRegister} is correct. void CodeGenerator::AssembleCodeStartRegisterCheck() { - UseScratchRegisterScope temps(tasm()); + UseScratchRegisterScope temps(masm()); Register scratch = temps.Acquire(); __ ComputeCodeStartAddress(scratch); __ cmp(scratch, kJavaScriptCallCodeStartRegister); @@ -645,7 +645,7 @@ void CodeGenerator::AssembleCodeStartRegisterCheck() { // 2. test kMarkedForDeoptimizationBit in those flags; and // 3. if it is not zero then it jumps to the builtin. void CodeGenerator::BailoutIfDeoptimized() { - UseScratchRegisterScope temps(tasm()); + UseScratchRegisterScope temps(masm()); Register scratch = temps.Acquire(); int offset = InstructionStream::kCodeOffset - InstructionStream::kHeaderSize; __ ldr(scratch, MemOperand(kJavaScriptCallCodeStartRegister, offset)); @@ -747,7 +747,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction( case kArchCallJSFunction: { Register func = i.InputRegister(0); if (v8_flags.debug_code) { - UseScratchRegisterScope temps(tasm()); + UseScratchRegisterScope temps(masm()); Register scratch = temps.Acquire(); // Check the function's context matches the context argument. __ ldr(scratch, FieldMemOperand(func, JSFunction::kContextOffset)); @@ -858,7 +858,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction( { // We don't actually want to generate a pile of code for this, so just // claim there is a stack frame, without generating one. 
- FrameScope scope(tasm(), StackFrame::NO_FRAME_TYPE); + FrameScope scope(masm(), StackFrame::NO_FRAME_TYPE); __ Call(isolate()->builtins()->code_handle(Builtin::kAbortCSADcheck), RelocInfo::CODE_TARGET); } @@ -1069,7 +1069,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction( i.InputRegister(2), i.OutputSBit()); break; case kArmMls: { - CpuFeatureScope scope(tasm(), ARMv7); + CpuFeatureScope scope(masm(), ARMv7); __ mls(i.OutputRegister(), i.InputRegister(0), i.InputRegister(1), i.InputRegister(2)); DCHECK_EQ(LeaveCC, i.OutputSBit()); @@ -1093,13 +1093,13 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction( i.InputRegister(1), i.OutputSBit()); break; case kArmSdiv: { - CpuFeatureScope scope(tasm(), SUDIV); + CpuFeatureScope scope(masm(), SUDIV); __ sdiv(i.OutputRegister(), i.InputRegister(0), i.InputRegister(1)); DCHECK_EQ(LeaveCC, i.OutputSBit()); break; } case kArmUdiv: { - CpuFeatureScope scope(tasm(), SUDIV); + CpuFeatureScope scope(masm(), SUDIV); __ udiv(i.OutputRegister(), i.InputRegister(0), i.InputRegister(1)); DCHECK_EQ(LeaveCC, i.OutputSBit()); break; @@ -1127,20 +1127,20 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction( i.OutputSBit()); break; case kArmBfc: { - CpuFeatureScope scope(tasm(), ARMv7); + CpuFeatureScope scope(masm(), ARMv7); __ bfc(i.OutputRegister(), i.InputInt8(1), i.InputInt8(2)); DCHECK_EQ(LeaveCC, i.OutputSBit()); break; } case kArmUbfx: { - CpuFeatureScope scope(tasm(), ARMv7); + CpuFeatureScope scope(masm(), ARMv7); __ ubfx(i.OutputRegister(), i.InputRegister(0), i.InputInt8(1), i.InputInt8(2)); DCHECK_EQ(LeaveCC, i.OutputSBit()); break; } case kArmSbfx: { - CpuFeatureScope scope(tasm(), ARMv7); + CpuFeatureScope scope(masm(), ARMv7); __ sbfx(i.OutputRegister(), i.InputRegister(0), i.InputInt8(1), i.InputInt8(2)); DCHECK_EQ(LeaveCC, i.OutputSBit()); @@ -1183,7 +1183,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction( DCHECK_EQ(LeaveCC, i.OutputSBit()); break; case kArmRbit: { - CpuFeatureScope scope(tasm(), ARMv7); + CpuFeatureScope scope(masm(), ARMv7); __ rbit(i.OutputRegister(), i.InputRegister(0)); DCHECK_EQ(LeaveCC, i.OutputSBit()); break; @@ -1378,7 +1378,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction( case kArmVmodF64: { // TODO(bmeurer): We should really get rid of this special instruction, // and generate a CallAddress instruction instead. 
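// (What this block amounts to — a sketch, assuming the callee has fmod
// semantics: marshal the two doubles into parameter registers, make the C
// call, and read back the result, i.e. morally
//   double result = fmod(lhs, rhs);
// MovToFloatParameters / MovFromFloatResult handle the register moves on
// either side of CallCFunction.)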
- FrameScope scope(tasm(), StackFrame::MANUAL); + FrameScope scope(masm(), StackFrame::MANUAL); __ PrepareCallCFunction(0, 2); __ MovToFloatParameters(i.InputDoubleRegister(0), i.InputDoubleRegister(1)); @@ -1398,7 +1398,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction( __ vneg(i.OutputDoubleRegister(), i.InputDoubleRegister(0)); break; case kArmVrintmF32: { - CpuFeatureScope scope(tasm(), ARMv8); + CpuFeatureScope scope(masm(), ARMv8); if (instr->InputAt(0)->IsSimd128Register()) { __ vrintm(NeonS32, i.OutputSimd128Register(), i.InputSimd128Register(0)); @@ -1408,12 +1408,12 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction( break; } case kArmVrintmF64: { - CpuFeatureScope scope(tasm(), ARMv8); + CpuFeatureScope scope(masm(), ARMv8); __ vrintm(i.OutputDoubleRegister(), i.InputDoubleRegister(0)); break; } case kArmVrintpF32: { - CpuFeatureScope scope(tasm(), ARMv8); + CpuFeatureScope scope(masm(), ARMv8); if (instr->InputAt(0)->IsSimd128Register()) { __ vrintp(NeonS32, i.OutputSimd128Register(), i.InputSimd128Register(0)); @@ -1423,12 +1423,12 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction( break; } case kArmVrintpF64: { - CpuFeatureScope scope(tasm(), ARMv8); + CpuFeatureScope scope(masm(), ARMv8); __ vrintp(i.OutputDoubleRegister(), i.InputDoubleRegister(0)); break; } case kArmVrintzF32: { - CpuFeatureScope scope(tasm(), ARMv8); + CpuFeatureScope scope(masm(), ARMv8); if (instr->InputAt(0)->IsSimd128Register()) { __ vrintz(NeonS32, i.OutputSimd128Register(), i.InputSimd128Register(0)); @@ -1438,17 +1438,17 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction( break; } case kArmVrintzF64: { - CpuFeatureScope scope(tasm(), ARMv8); + CpuFeatureScope scope(masm(), ARMv8); __ vrintz(i.OutputDoubleRegister(), i.InputDoubleRegister(0)); break; } case kArmVrintaF64: { - CpuFeatureScope scope(tasm(), ARMv8); + CpuFeatureScope scope(masm(), ARMv8); __ vrinta(i.OutputDoubleRegister(), i.InputDoubleRegister(0)); break; } case kArmVrintnF32: { - CpuFeatureScope scope(tasm(), ARMv8); + CpuFeatureScope scope(masm(), ARMv8); if (instr->InputAt(0)->IsSimd128Register()) { __ vrintn(NeonS32, i.OutputSimd128Register(), i.InputSimd128Register(0)); @@ -1458,7 +1458,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction( break; } case kArmVrintnF64: { - CpuFeatureScope scope(tasm(), ARMv8); + CpuFeatureScope scope(masm(), ARMv8); __ vrintn(i.OutputDoubleRegister(), i.InputDoubleRegister(0)); break; } @@ -1473,7 +1473,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction( break; } case kArmVcvtF32S32: { - UseScratchRegisterScope temps(tasm()); + UseScratchRegisterScope temps(masm()); SwVfpRegister scratch = temps.AcquireS(); __ vmov(scratch, i.InputRegister(0)); __ vcvt_f32_s32(i.OutputFloatRegister(), scratch); @@ -1481,7 +1481,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction( break; } case kArmVcvtF32U32: { - UseScratchRegisterScope temps(tasm()); + UseScratchRegisterScope temps(masm()); SwVfpRegister scratch = temps.AcquireS(); __ vmov(scratch, i.InputRegister(0)); __ vcvt_f32_u32(i.OutputFloatRegister(), scratch); @@ -1489,7 +1489,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction( break; } case kArmVcvtF64S32: { - UseScratchRegisterScope temps(tasm()); + UseScratchRegisterScope temps(masm()); SwVfpRegister scratch = temps.AcquireS(); __ vmov(scratch, i.InputRegister(0)); __ vcvt_f64_s32(i.OutputDoubleRegister(), scratch); @@ -1497,7 +1497,7 
@@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction( break; } case kArmVcvtF64U32: { - UseScratchRegisterScope temps(tasm()); + UseScratchRegisterScope temps(masm()); SwVfpRegister scratch = temps.AcquireS(); __ vmov(scratch, i.InputRegister(0)); __ vcvt_f64_u32(i.OutputDoubleRegister(), scratch); @@ -1505,7 +1505,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction( break; } case kArmVcvtS32F32: { - UseScratchRegisterScope temps(tasm()); + UseScratchRegisterScope temps(masm()); SwVfpRegister scratch = temps.AcquireS(); __ vcvt_s32_f32(scratch, i.InputFloatRegister(0)); __ vmov(i.OutputRegister(), scratch); @@ -1520,7 +1520,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction( break; } case kArmVcvtU32F32: { - UseScratchRegisterScope temps(tasm()); + UseScratchRegisterScope temps(masm()); SwVfpRegister scratch = temps.AcquireS(); __ vcvt_u32_f32(scratch, i.InputFloatRegister(0)); __ vmov(i.OutputRegister(), scratch); @@ -1535,7 +1535,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction( break; } case kArmVcvtS32F64: { - UseScratchRegisterScope temps(tasm()); + UseScratchRegisterScope temps(masm()); SwVfpRegister scratch = temps.AcquireS(); __ vcvt_s32_f64(scratch, i.InputDoubleRegister(0)); __ vmov(i.OutputRegister(), scratch); @@ -1543,7 +1543,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction( break; } case kArmVcvtU32F64: { - UseScratchRegisterScope temps(tasm()); + UseScratchRegisterScope temps(masm()); SwVfpRegister scratch = temps.AcquireS(); __ vcvt_u32_f64(scratch, i.InputDoubleRegister(0)); __ vmov(i.OutputRegister(), scratch); @@ -1762,7 +1762,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction( __ vldr(i.OutputFloatRegister(), MemOperand(fp, offset)); } else { DCHECK_EQ(MachineRepresentation::kSimd128, op->representation()); - UseScratchRegisterScope temps(tasm()); + UseScratchRegisterScope temps(masm()); Register scratch = temps.Acquire(); __ add(scratch, fp, Operand(offset)); __ vld1(Neon8, NeonListOperand(i.OutputSimd128Register()), @@ -1899,7 +1899,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction( } #undef ASSEMBLE_F64X2_ARITHMETIC_BINOP case kArmF64x2Eq: { - UseScratchRegisterScope temps(tasm()); + UseScratchRegisterScope temps(masm()); Register scratch = temps.Acquire(); __ mov(scratch, Operand(0)); __ VFPCompareAndSetFlags(i.InputSimd128Register(0).low(), @@ -1915,7 +1915,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction( break; } case kArmF64x2Ne: { - UseScratchRegisterScope temps(tasm()); + UseScratchRegisterScope temps(masm()); Register scratch = temps.Acquire(); __ mov(scratch, Operand(0)); __ VFPCompareAndSetFlags(i.InputSimd128Register(0).low(), @@ -1931,7 +1931,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction( break; } case kArmF64x2Lt: { - UseScratchRegisterScope temps(tasm()); + UseScratchRegisterScope temps(masm()); Register scratch = temps.Acquire(); __ VFPCompareAndSetFlags(i.InputSimd128Register(0).low(), i.InputSimd128Register(1).low()); @@ -1947,7 +1947,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction( break; } case kArmF64x2Le: { - UseScratchRegisterScope temps(tasm()); + UseScratchRegisterScope temps(masm()); Register scratch = temps.Acquire(); __ VFPCompareAndSetFlags(i.InputSimd128Register(0).low(), i.InputSimd128Register(1).low()); @@ -1989,7 +1989,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction( break; } case 
kArmF64x2Ceil: { - CpuFeatureScope scope(tasm(), ARMv8); + CpuFeatureScope scope(masm(), ARMv8); Simd128Register dst = i.OutputSimd128Register(); Simd128Register src = i.InputSimd128Register(0); __ vrintp(dst.low(), src.low()); @@ -1997,7 +1997,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction( break; } case kArmF64x2Floor: { - CpuFeatureScope scope(tasm(), ARMv8); + CpuFeatureScope scope(masm(), ARMv8); Simd128Register dst = i.OutputSimd128Register(); Simd128Register src = i.InputSimd128Register(0); __ vrintm(dst.low(), src.low()); @@ -2005,7 +2005,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction( break; } case kArmF64x2Trunc: { - CpuFeatureScope scope(tasm(), ARMv8); + CpuFeatureScope scope(masm(), ARMv8); Simd128Register dst = i.OutputSimd128Register(); Simd128Register src = i.InputSimd128Register(0); __ vrintz(dst.low(), src.low()); @@ -2013,7 +2013,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction( break; } case kArmF64x2NearestInt: { - CpuFeatureScope scope(tasm(), ARMv8); + CpuFeatureScope scope(masm(), ARMv8); Simd128Register dst = i.OutputSimd128Register(); Simd128Register src = i.InputSimd128Register(0); __ vrintn(dst.low(), src.low()); @@ -2060,7 +2060,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction( break; } case kArmI64x2Mul: { - UseScratchRegisterScope temps(tasm()); + UseScratchRegisterScope temps(masm()); QwNeonRegister dst = i.OutputSimd128Register(); QwNeonRegister left = i.InputSimd128Register(0); QwNeonRegister right = i.InputSimd128Register(1); @@ -2447,7 +2447,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction( } case kArmI32x4BitMask: { Register dst = i.OutputRegister(); - UseScratchRegisterScope temps(tasm()); + UseScratchRegisterScope temps(masm()); Simd128Register src = i.InputSimd128Register(0); Simd128Register tmp = temps.AcquireQ(); Simd128Register mask = i.TempSimd128Register(0); @@ -2468,7 +2468,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction( Simd128Register lhs = i.InputSimd128Register(0); Simd128Register rhs = i.InputSimd128Register(1); Simd128Register tmp1 = i.TempSimd128Register(0); - UseScratchRegisterScope temps(tasm()); + UseScratchRegisterScope temps(masm()); Simd128Register scratch = temps.AcquireQ(); __ vmull(NeonS16, tmp1, lhs.low(), rhs.low()); __ vmull(NeonS16, scratch, lhs.high(), rhs.high()); @@ -2650,7 +2650,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction( break; } case kArmI16x8BitMask: { - UseScratchRegisterScope temps(tasm()); + UseScratchRegisterScope temps(masm()); Register dst = i.OutputRegister(); Simd128Register src = i.InputSimd128Register(0); Simd128Register tmp = temps.AcquireQ(); @@ -2805,7 +2805,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction( break; } case kArmI8x16BitMask: { - UseScratchRegisterScope temps(tasm()); + UseScratchRegisterScope temps(masm()); Register dst = i.OutputRegister(); Simd128Register src = i.InputSimd128Register(0); Simd128Register tmp = temps.AcquireQ(); @@ -2906,7 +2906,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction( Simd128Register dst = i.OutputSimd128Register(), src1 = i.InputSimd128Register(1); DCHECK(dst == i.InputSimd128Register(0)); - UseScratchRegisterScope temps(tasm()); + UseScratchRegisterScope temps(masm()); Simd128Register scratch = temps.AcquireQ(); // src0 = [0, 1, 2, 3], src1 = [4, 5, 6, 7] __ vmov(scratch, src1); @@ -2917,7 +2917,7 @@ CodeGenerator::CodeGenResult 
CodeGenerator::AssembleArchInstruction( Simd128Register dst = i.OutputSimd128Register(), src1 = i.InputSimd128Register(1); DCHECK(dst == i.InputSimd128Register(0)); - UseScratchRegisterScope temps(tasm()); + UseScratchRegisterScope temps(masm()); Simd128Register scratch = temps.AcquireQ(); // src0 = [4, 5, 6, 7], src1 = [0, 1, 2, 3] (flipped from UnzipLeft). __ vmov(scratch, src1); @@ -2928,7 +2928,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction( Simd128Register dst = i.OutputSimd128Register(), src1 = i.InputSimd128Register(1); DCHECK(dst == i.InputSimd128Register(0)); - UseScratchRegisterScope temps(tasm()); + UseScratchRegisterScope temps(masm()); Simd128Register scratch = temps.AcquireQ(); // src0 = [0, 1, 2, 3], src1 = [4, 5, 6, 7] __ vmov(scratch, src1); @@ -2961,7 +2961,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction( case kArmS32x4TransposeRight: { Simd128Register dst = i.OutputSimd128Register(), src1 = i.InputSimd128Register(1); - UseScratchRegisterScope temps(tasm()); + UseScratchRegisterScope temps(masm()); Simd128Register scratch = temps.AcquireQ(); DCHECK(dst == i.InputSimd128Register(0)); // src0 = [4, 5, 6, 7], src1 = [0, 1, 2, 3] (flipped from TransposeLeft). @@ -2990,7 +2990,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction( case kArmS16x8UnzipLeft: { Simd128Register dst = i.OutputSimd128Register(), src1 = i.InputSimd128Register(1); - UseScratchRegisterScope temps(tasm()); + UseScratchRegisterScope temps(masm()); Simd128Register scratch = temps.AcquireQ(); DCHECK(dst == i.InputSimd128Register(0)); // src0 = [0, 1, 2, 3, ... 7], src1 = [8, 9, 10, 11, ... 15] @@ -3001,7 +3001,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction( case kArmS16x8UnzipRight: { Simd128Register dst = i.OutputSimd128Register(), src1 = i.InputSimd128Register(1); - UseScratchRegisterScope temps(tasm()); + UseScratchRegisterScope temps(masm()); Simd128Register scratch = temps.AcquireQ(); DCHECK(dst == i.InputSimd128Register(0)); // src0 = [8, 9, 10, 11, ... 15], src1 = [0, 1, 2, 3, ... 7] (flipped). @@ -3012,7 +3012,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction( case kArmS16x8TransposeLeft: { Simd128Register dst = i.OutputSimd128Register(), src1 = i.InputSimd128Register(1); - UseScratchRegisterScope temps(tasm()); + UseScratchRegisterScope temps(masm()); Simd128Register scratch = temps.AcquireQ(); DCHECK(dst == i.InputSimd128Register(0)); // src0 = [0, 1, 2, 3, ... 7], src1 = [8, 9, 10, 11, ... 15] @@ -3023,7 +3023,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction( case kArmS16x8TransposeRight: { Simd128Register dst = i.OutputSimd128Register(), src1 = i.InputSimd128Register(1); - UseScratchRegisterScope temps(tasm()); + UseScratchRegisterScope temps(masm()); Simd128Register scratch = temps.AcquireQ(); DCHECK(dst == i.InputSimd128Register(0)); // src0 = [8, 9, 10, 11, ... 15], src1 = [0, 1, 2, 3, ... 7] (flipped). @@ -3052,7 +3052,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction( case kArmS8x16UnzipLeft: { Simd128Register dst = i.OutputSimd128Register(), src1 = i.InputSimd128Register(1); - UseScratchRegisterScope temps(tasm()); + UseScratchRegisterScope temps(masm()); Simd128Register scratch = temps.AcquireQ(); DCHECK(dst == i.InputSimd128Register(0)); // src0 = [0, 1, 2, 3, ... 15], src1 = [16, 17, 18, 19, ... 
31] @@ -3063,7 +3063,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction( case kArmS8x16UnzipRight: { Simd128Register dst = i.OutputSimd128Register(), src1 = i.InputSimd128Register(1); - UseScratchRegisterScope temps(tasm()); + UseScratchRegisterScope temps(masm()); Simd128Register scratch = temps.AcquireQ(); DCHECK(dst == i.InputSimd128Register(0)); // src0 = [16, 17, 18, 19, ... 31], src1 = [0, 1, 2, 3, ... 15] (flipped). @@ -3074,7 +3074,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction( case kArmS8x16TransposeLeft: { Simd128Register dst = i.OutputSimd128Register(), src1 = i.InputSimd128Register(1); - UseScratchRegisterScope temps(tasm()); + UseScratchRegisterScope temps(masm()); Simd128Register scratch = temps.AcquireQ(); DCHECK(dst == i.InputSimd128Register(0)); // src0 = [0, 1, 2, 3, ... 15], src1 = [16, 17, 18, 19, ... 31] @@ -3085,7 +3085,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction( case kArmS8x16TransposeRight: { Simd128Register dst = i.OutputSimd128Register(), src1 = i.InputSimd128Register(1); - UseScratchRegisterScope temps(tasm()); + UseScratchRegisterScope temps(masm()); Simd128Register scratch = temps.AcquireQ(); DCHECK(dst == i.InputSimd128Register(0)); // src0 = [16, 17, 18, 19, ... 31], src1 = [0, 1, 2, 3, ... 15] (flipped). @@ -3112,7 +3112,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction( src0 = i.InputSimd128Register(0), src1 = i.InputSimd128Register(1); DwVfpRegister table_base = src0.low(); - UseScratchRegisterScope temps(tasm()); + UseScratchRegisterScope temps(masm()); Simd128Register scratch = temps.AcquireQ(); // If unary shuffle, table is src0 (2 d-registers), otherwise src0 and // src1. They must be consecutive. @@ -3163,7 +3163,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction( } case kArmV128AnyTrue: { const QwNeonRegister& src = i.InputSimd128Register(0); - UseScratchRegisterScope temps(tasm()); + UseScratchRegisterScope temps(masm()); DwVfpRegister scratch = temps.AcquireD(); __ vpmax(NeonU32, scratch, src.low(), src.high()); __ vpmax(NeonU32, scratch, scratch, scratch); @@ -3178,7 +3178,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction( } case kArmI32x4AllTrue: { const QwNeonRegister& src = i.InputSimd128Register(0); - UseScratchRegisterScope temps(tasm()); + UseScratchRegisterScope temps(masm()); DwVfpRegister scratch = temps.AcquireD(); __ vpmin(NeonU32, scratch, src.low(), src.high()); __ vpmin(NeonU32, scratch, scratch, scratch); @@ -3189,7 +3189,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction( } case kArmI16x8AllTrue: { const QwNeonRegister& src = i.InputSimd128Register(0); - UseScratchRegisterScope temps(tasm()); + UseScratchRegisterScope temps(masm()); DwVfpRegister scratch = temps.AcquireD(); __ vpmin(NeonU16, scratch, src.low(), src.high()); __ vpmin(NeonU16, scratch, scratch, scratch); @@ -3201,7 +3201,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction( } case kArmI8x16AllTrue: { const QwNeonRegister& src = i.InputSimd128Register(0); - UseScratchRegisterScope temps(tasm()); + UseScratchRegisterScope temps(masm()); DwVfpRegister scratch = temps.AcquireD(); __ vpmin(NeonU8, scratch, src.low(), src.high()); __ vpmin(NeonU8, scratch, scratch, scratch); @@ -3747,7 +3747,7 @@ void CodeGenerator::AssembleConstructFrame() { // exception unconditionally. Thereby we can avoid the integer overflow // check in the condition code. 
if (required_slots * kSystemPointerSize < v8_flags.stack_size * KB) { - UseScratchRegisterScope temps(tasm()); + UseScratchRegisterScope temps(masm()); Register scratch = temps.Acquire(); __ ldr(scratch, FieldMemOperand( kWasmInstanceRegister, @@ -3873,8 +3873,8 @@ void CodeGenerator::AssembleReturn(InstructionOperand* additional_pop_count) { __ cmp(argc_reg, Operand(parameter_slots)); __ mov(argc_reg, Operand(parameter_slots), LeaveCC, lt); } - __ DropArguments(argc_reg, TurboAssembler::kCountIsInteger, - TurboAssembler::kCountIncludesReceiver); + __ DropArguments(argc_reg, MacroAssembler::kCountIsInteger, + MacroAssembler::kCountIncludesReceiver); } else if (additional_pop_count->IsImmediate()) { DCHECK_EQ(Constant::kInt32, g.ToConstant(additional_pop_count).type()); int additional_count = g.ToConstant(additional_pop_count).ToInt32(); @@ -3944,7 +3944,7 @@ void CodeGenerator::AssembleMove(InstructionOperand* source, } else if (source->IsDoubleRegister()) { __ vstr(g.ToDoubleRegister(source), dst); } else { - UseScratchRegisterScope temps(tasm()); + UseScratchRegisterScope temps(masm()); Register temp = temps.Acquire(); QwNeonRegister src = g.ToSimd128Register(source); __ add(temp, dst.rn(), Operand(dst.offset())); @@ -3965,7 +3965,7 @@ void CodeGenerator::AssembleMove(InstructionOperand* source, } else if (source->IsDoubleStackSlot()) { __ vldr(g.ToDoubleRegister(destination), src); } else { - UseScratchRegisterScope temps(tasm()); + UseScratchRegisterScope temps(masm()); Register temp = temps.Acquire(); QwNeonRegister dst = g.ToSimd128Register(destination); __ add(temp, src.rn(), Operand(src.offset())); @@ -3976,7 +3976,7 @@ void CodeGenerator::AssembleMove(InstructionOperand* source, case MoveType::kStackToStack: { MemOperand src = g.ToMemOperand(source); MemOperand dst = g.ToMemOperand(destination); - UseScratchRegisterScope temps(tasm()); + UseScratchRegisterScope temps(masm()); if (source->IsStackSlot() || source->IsFloatStackSlot()) { SwVfpRegister temp = temps.AcquireS(); __ vldr(temp, src); @@ -4014,27 +4014,27 @@ void CodeGenerator::AssembleMove(InstructionOperand* source, Constant src = g.ToConstant(source); MemOperand dst = g.ToMemOperand(destination); if (destination->IsStackSlot()) { - UseScratchRegisterScope temps(tasm()); + UseScratchRegisterScope temps(masm()); // Acquire a S register instead of a general purpose register in case // `vstr` needs one to compute the address of `dst`. SwVfpRegister s_temp = temps.AcquireS(); { // TODO(arm): This sequence could be optimized further if necessary by // writing the constant directly into `s_temp`. - UseScratchRegisterScope temps(tasm()); + UseScratchRegisterScope temps(masm()); Register temp = temps.Acquire(); MoveConstantToRegister(temp, src); __ vmov(s_temp, temp); } __ vstr(s_temp, dst); } else if (destination->IsFloatStackSlot()) { - UseScratchRegisterScope temps(tasm()); + UseScratchRegisterScope temps(masm()); SwVfpRegister temp = temps.AcquireS(); __ vmov(temp, Float32::FromBits(src.ToFloat32AsInt())); __ vstr(temp, dst); } else { DCHECK(destination->IsDoubleStackSlot()); - UseScratchRegisterScope temps(tasm()); + UseScratchRegisterScope temps(masm()); DwVfpRegister temp = temps.AcquireD(); // TODO(arm): Look into optimizing this further if possible. Supporting // the NEON version of VMOV may help. 
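The hunks above and below all follow the same scratch-register discipline, so it is worth spelling out once: UseScratchRegisterScope is an RAII helper that borrows temporaries from the assembler's scratch pool and returns them when the scope closes, which is why each use site can be re-pointed from tasm() to masm() without further changes. A minimal sketch of the pattern (illustrative only, not part of this patch; SwapStackSlots is a made-up name, and V8's arm codegen headers are assumed):

    // Swap two stack slots through two single-precision scratch registers.
    void SwapStackSlots(MacroAssembler* masm, MemOperand src, MemOperand dst) {
      UseScratchRegisterScope temps(masm);      // borrow from masm's pool
      SwVfpRegister temp_0 = temps.AcquireS();  // scratch S-register #1
      SwVfpRegister temp_1 = temps.AcquireS();  // scratch S-register #2
      masm->vldr(temp_0, dst);
      masm->vldr(temp_1, src);
      masm->vstr(temp_0, src);                  // dst's old value -> src
      masm->vstr(temp_1, dst);                  // src's old value -> dst
    }  // scope destructor releases both registers back to the pool

Only the constructor argument changes under this CL; the acquire/release protocol itself is untouched.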
@@ -4060,7 +4060,7 @@ AllocatedOperand CodeGenerator::Push(InstructionOperand* source) { __ push(g.ToRegister(source)); frame_access_state()->IncreaseSPDelta(new_slots); } else if (source->IsStackSlot()) { - UseScratchRegisterScope temps(tasm()); + UseScratchRegisterScope temps(masm()); Register scratch = temps.Acquire(); __ ldr(scratch, g.ToMemOperand(source)); __ push(scratch); @@ -4083,7 +4083,7 @@ void CodeGenerator::Pop(InstructionOperand* dest, MachineRepresentation rep) { if (dest->IsRegister()) { __ pop(g.ToRegister(dest)); } else if (dest->IsStackSlot()) { - UseScratchRegisterScope temps(tasm()); + UseScratchRegisterScope temps(masm()); Register scratch = temps.Acquire(); __ pop(scratch); __ str(scratch, g.ToMemOperand(dest)); @@ -4110,7 +4110,7 @@ void CodeGenerator::PopTempStackSlots() { void CodeGenerator::MoveToTempLocation(InstructionOperand* source, MachineRepresentation rep) { // Must be kept in sync with {MoveTempLocationTo}. - move_cycle_.temps.emplace(tasm()); + move_cycle_.temps.emplace(masm()); auto& temps = *move_cycle_.temps; // Temporarily exclude the reserved scratch registers while we pick a // location to resolve the cycle. Re-include them immediately afterwards so @@ -4184,7 +4184,7 @@ void CodeGenerator::SetPendingMove(MoveOperands* move) { InstructionOperand& destination = move->destination(); MoveType::Type move_type = MoveType::InferMove(&move->source(), &move->destination()); - UseScratchRegisterScope temps(tasm()); + UseScratchRegisterScope temps(masm()); if (move_type == MoveType::kStackToStack) { if (source.IsStackSlot() || source.IsFloatStackSlot()) { SwVfpRegister temp = temps.AcquireS(); @@ -4224,7 +4224,7 @@ void CodeGenerator::AssembleSwap(InstructionOperand* source, DCHECK(destination->IsFloatRegister()); // GapResolver may give us reg codes that don't map to actual // s-registers. Generate code to work around those cases. 
- UseScratchRegisterScope temps(tasm()); + UseScratchRegisterScope temps(masm()); LowDwVfpRegister temp = temps.AcquireLowD(); int src_code = LocationOperand::cast(source)->register_code(); int dst_code = LocationOperand::cast(destination)->register_code(); @@ -4241,20 +4241,20 @@ void CodeGenerator::AssembleSwap(InstructionOperand* source, MemOperand dst = g.ToMemOperand(destination); if (source->IsRegister()) { Register src = g.ToRegister(source); - UseScratchRegisterScope temps(tasm()); + UseScratchRegisterScope temps(masm()); SwVfpRegister temp = temps.AcquireS(); __ vmov(temp, src); __ ldr(src, dst); __ vstr(temp, dst); } else if (source->IsFloatRegister()) { int src_code = LocationOperand::cast(source)->register_code(); - UseScratchRegisterScope temps(tasm()); + UseScratchRegisterScope temps(masm()); LowDwVfpRegister temp = temps.AcquireLowD(); __ VmovExtended(temp.low().code(), src_code); __ VmovExtended(src_code, dst); __ vstr(temp.low(), dst); } else if (source->IsDoubleRegister()) { - UseScratchRegisterScope temps(tasm()); + UseScratchRegisterScope temps(masm()); DwVfpRegister temp = temps.AcquireD(); DwVfpRegister src = g.ToDoubleRegister(source); __ Move(temp, src); @@ -4262,7 +4262,7 @@ void CodeGenerator::AssembleSwap(InstructionOperand* source, __ vstr(temp, dst); } else { QwNeonRegister src = g.ToSimd128Register(source); - UseScratchRegisterScope temps(tasm()); + UseScratchRegisterScope temps(masm()); Register temp = temps.Acquire(); QwNeonRegister temp_q = temps.AcquireQ(); __ Move(temp_q, src); @@ -4276,7 +4276,7 @@ void CodeGenerator::AssembleSwap(InstructionOperand* source, MemOperand src = g.ToMemOperand(source); MemOperand dst = g.ToMemOperand(destination); if (source->IsStackSlot() || source->IsFloatStackSlot()) { - UseScratchRegisterScope temps(tasm()); + UseScratchRegisterScope temps(masm()); SwVfpRegister temp_0 = temps.AcquireS(); SwVfpRegister temp_1 = temps.AcquireS(); __ vldr(temp_0, dst); @@ -4284,7 +4284,7 @@ void CodeGenerator::AssembleSwap(InstructionOperand* source, __ vstr(temp_0, src); __ vstr(temp_1, dst); } else if (source->IsDoubleStackSlot()) { - UseScratchRegisterScope temps(tasm()); + UseScratchRegisterScope temps(masm()); LowDwVfpRegister temp = temps.AcquireLowD(); if (temps.CanAcquireD()) { DwVfpRegister temp_0 = temp; @@ -4317,7 +4317,7 @@ void CodeGenerator::AssembleSwap(InstructionOperand* source, MemOperand dst0 = dst; MemOperand src1(src.rn(), src.offset() + kDoubleSize); MemOperand dst1(dst.rn(), dst.offset() + kDoubleSize); - UseScratchRegisterScope temps(tasm()); + UseScratchRegisterScope temps(masm()); DwVfpRegister temp_0 = temps.AcquireD(); DwVfpRegister temp_1 = temps.AcquireD(); __ vldr(temp_0, dst0); diff --git a/src/compiler/backend/arm/instruction-selector-arm.cc b/src/compiler/backend/arm/instruction-selector-arm.cc index 8733aff787..48e649051b 100644 --- a/src/compiler/backend/arm/instruction-selector-arm.cc +++ b/src/compiler/backend/arm/instruction-selector-arm.cc @@ -397,7 +397,7 @@ void EmitLoad(InstructionSelector* selector, InstructionCode opcode, if (int_matcher.HasResolvedValue()) { ptrdiff_t const delta = int_matcher.ResolvedValue() + - TurboAssemblerBase::RootRegisterOffsetForExternalReference( + MacroAssemblerBase::RootRegisterOffsetForExternalReference( selector->isolate(), m.ResolvedValue()); input_count = 1; inputs[0] = g.UseImmediate(static_cast<int32_t>(delta)); @@ -753,7 +753,7 @@ void VisitStoreCommon(InstructionSelector* selector, Node* node, if (int_matcher.HasResolvedValue()) { ptrdiff_t const delta =
int_matcher.ResolvedValue() + - TurboAssemblerBase::RootRegisterOffsetForExternalReference( + MacroAssemblerBase::RootRegisterOffsetForExternalReference( selector->isolate(), m.ResolvedValue()); int input_count = 2; InstructionOperand inputs[2]; diff --git a/src/compiler/backend/arm64/code-generator-arm64.cc b/src/compiler/backend/arm64/code-generator-arm64.cc index 764309f677..e2f202eb3b 100644 --- a/src/compiler/backend/arm64/code-generator-arm64.cc +++ b/src/compiler/backend/arm64/code-generator-arm64.cc @@ -24,7 +24,7 @@ namespace v8 { namespace internal { namespace compiler { -#define __ tasm()-> +#define __ masm()-> // Adds Arm64-specific methods to convert InstructionOperands. class Arm64OperandConverter final : public InstructionOperandConverter { @@ -238,13 +238,13 @@ class Arm64OperandConverter final : public InstructionOperandConverter { UNREACHABLE(); } - MemOperand ToMemOperand(InstructionOperand* op, TurboAssembler* tasm) const { + MemOperand ToMemOperand(InstructionOperand* op, MacroAssembler* masm) const { DCHECK_NOT_NULL(op); DCHECK(op->IsStackSlot() || op->IsFPStackSlot()); - return SlotToMemOperand(AllocatedOperand::cast(op)->index(), tasm); + return SlotToMemOperand(AllocatedOperand::cast(op)->index(), masm); } - MemOperand SlotToMemOperand(int slot, TurboAssembler* tasm) const { + MemOperand SlotToMemOperand(int slot, MacroAssembler* masm) const { FrameOffset offset = frame_access_state()->GetFrameOffset(slot); if (offset.from_frame_pointer()) { int from_sp = offset.offset() + frame_access_state()->GetSPToFPOffset(); @@ -294,7 +294,7 @@ class OutOfLineRecordWrite final : public OutOfLineCode { : SaveFPRegsMode::kIgnore; if (must_save_lr_) { // We need to save and restore lr if the frame was elided. - __ Push<TurboAssembler::kSignLR>(lr, padreg); + __ Push<MacroAssembler::kSignLR>(lr, padreg); unwinding_info_writer_->MarkLinkRegisterOnTopOfStack(__ pc_offset(), sp); } if (mode_ == RecordWriteMode::kValueIsEphemeronKey) { @@ -311,7 +311,7 @@ class OutOfLineRecordWrite final : public OutOfLineCode { __ CallRecordWriteStubSaveRegisters(object_, offset_, save_fp_mode); } if (must_save_lr_) { - __ Pop<TurboAssembler::kAuthLR>(padreg, lr); + __ Pop<MacroAssembler::kAuthLR>(padreg, lr); unwinding_info_writer_->MarkPopLinkRegisterFromTopOfStack(__ pc_offset()); } } @@ -459,14 +459,14 @@ void EmitOOLTrapIfNeeded(Zone* zone, CodeGenerator* codegen, // Handles unary ops that work for float (scalar), double (scalar), or NEON. template <typename Fn> -void EmitFpOrNeonUnop(TurboAssembler* tasm, Fn fn, Instruction* instr, +void EmitFpOrNeonUnop(MacroAssembler* masm, Fn fn, Instruction* instr, Arm64OperandConverter i, VectorFormat scalar, VectorFormat vector) { VectorFormat f = instr->InputAt(0)->IsSimd128Register() ?
vector : scalar; VRegister output = VRegister::Create(i.OutputDoubleRegister().code(), f); VRegister input = VRegister::Create(i.InputDoubleRegister(0).code(), f); - (tasm->*fn)(output, input); + (masm->*fn)(output, input); } } // namespace @@ -539,13 +539,13 @@ void EmitFpOrNeonUnop(TurboAssembler* tasm, Fn fn, Instruction* instr, #define ASSEMBLE_IEEE754_BINOP(name) \ do { \ - FrameScope scope(tasm(), StackFrame::MANUAL); \ + FrameScope scope(masm(), StackFrame::MANUAL); \ __ CallCFunction(ExternalReference::ieee754_##name##_function(), 0, 2); \ } while (0) #define ASSEMBLE_IEEE754_UNOP(name) \ do { \ - FrameScope scope(tasm(), StackFrame::MANUAL); \ + FrameScope scope(masm(), StackFrame::MANUAL); \ __ CallCFunction(ExternalReference::ieee754_##name##_function(), 0, 1); \ } while (0) @@ -558,7 +558,7 @@ void EmitFpOrNeonUnop(TurboAssembler* tasm, Fn fn, Instruction* instr, __ asm_imm(i.OutputSimd128Register().format(), \ i.InputSimd128Register(0).format(), i.InputInt##width(1)); \ } else { \ - UseScratchRegisterScope temps(tasm()); \ + UseScratchRegisterScope temps(masm()); \ VRegister tmp = temps.AcquireQ(); \ Register shift = temps.Acquire##gp(); \ constexpr int mask = (1 << width) - 1; \ @@ -578,7 +578,7 @@ void EmitFpOrNeonUnop(TurboAssembler* tasm, Fn fn, Instruction* instr, __ asm_imm(i.OutputSimd128Register().format(), \ i.InputSimd128Register(0).format(), i.InputInt##width(1)); \ } else { \ - UseScratchRegisterScope temps(tasm()); \ + UseScratchRegisterScope temps(masm()); \ VRegister tmp = temps.AcquireQ(); \ Register shift = temps.Acquire##gp(); \ constexpr int mask = (1 << width) - 1; \ @@ -592,7 +592,7 @@ void CodeGenerator::AssembleDeconstructFrame() { __ Mov(sp, fp); - __ Pop<TurboAssembler::kAuthLR>(fp, lr); + __ Pop<MacroAssembler::kAuthLR>(fp, lr); unwinding_info_writer_.MarkFrameDeconstructed(__ pc_offset()); } @@ -606,7 +606,7 @@ void CodeGenerator::AssemblePrepareTailCall() { namespace { -void AdjustStackPointerForTailCall(TurboAssembler* tasm, +void AdjustStackPointerForTailCall(MacroAssembler* masm, FrameAccessState* state, int new_slot_above_sp, bool allow_shrinkage = true) { @@ -615,10 +615,10 @@ void AdjustStackPointerForTailCall(TurboAssembler* tasm, int stack_slot_delta = new_slot_above_sp - current_sp_offset; DCHECK_EQ(stack_slot_delta % 2, 0); if (stack_slot_delta > 0) { - tasm->Claim(stack_slot_delta); + masm->Claim(stack_slot_delta); state->IncreaseSPDelta(stack_slot_delta); } else if (allow_shrinkage && stack_slot_delta < 0) { - tasm->Drop(-stack_slot_delta); + masm->Drop(-stack_slot_delta); state->IncreaseSPDelta(stack_slot_delta); } } @@ -627,14 +627,14 @@ void AdjustStackPointerForTailCall(TurboAssembler* tasm, void CodeGenerator::AssembleTailCallBeforeGap(Instruction* instr, int first_unused_slot_offset) { - AdjustStackPointerForTailCall(tasm(), frame_access_state(), + AdjustStackPointerForTailCall(masm(), frame_access_state(), first_unused_slot_offset, false); } void CodeGenerator::AssembleTailCallAfterGap(Instruction* instr, int first_unused_slot_offset) { DCHECK_EQ(first_unused_slot_offset % 2, 0); - AdjustStackPointerForTailCall(tasm(), frame_access_state(), + AdjustStackPointerForTailCall(masm(), frame_access_state(), first_unused_slot_offset); DCHECK(instr->IsTailCall()); InstructionOperandConverter g(this, instr); @@ -646,7 +646,7 @@ void CodeGenerator::AssembleTailCallAfterGap(Instruction* instr, // Check that {kJavaScriptCallCodeStartRegister} is correct.
void CodeGenerator::AssembleCodeStartRegisterCheck() { - UseScratchRegisterScope temps(tasm()); + UseScratchRegisterScope temps(masm()); Register scratch = temps.AcquireX(); __ ComputeCodeStartAddress(scratch); __ cmp(scratch, kJavaScriptCallCodeStartRegister); @@ -705,7 +705,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction( __ Jump(wasm_code, constant.rmode()); } else { Register target = i.InputRegister(0); - UseScratchRegisterScope temps(tasm()); + UseScratchRegisterScope temps(masm()); temps.Exclude(x17); __ Mov(x17, target); __ Jump(x17); @@ -737,7 +737,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction( DCHECK_IMPLIES( instr->HasCallDescriptorFlag(CallDescriptor::kFixedTargetRegister), reg == kJavaScriptCallCodeStartRegister); - UseScratchRegisterScope temps(tasm()); + UseScratchRegisterScope temps(masm()); temps.Exclude(x17); __ Mov(x17, reg); __ Jump(x17); @@ -750,7 +750,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction( Register func = i.InputRegister(0); if (v8_flags.debug_code) { // Check the function's context matches the context argument. - UseScratchRegisterScope scope(tasm()); + UseScratchRegisterScope scope(masm()); Register temp = scope.AcquireX(); __ LoadTaggedPointerField( temp, FieldMemOperand(func, JSFunction::kContextOffset)); @@ -860,7 +860,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction( { // We don't actually want to generate a pile of code for this, so just // claim there is a stack frame, without generating one. - FrameScope scope(tasm(), StackFrame::NO_FRAME_TYPE); + FrameScope scope(masm(), StackFrame::NO_FRAME_TYPE); __ Call(BUILTIN_CODE(isolate(), AbortCSADcheck), RelocInfo::CODE_TARGET); } @@ -1051,39 +1051,39 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction( ASSEMBLE_IEEE754_UNOP(tanh); break; case kArm64Float32RoundDown: - EmitFpOrNeonUnop(tasm(), &TurboAssembler::Frintm, instr, i, kFormatS, + EmitFpOrNeonUnop(masm(), &MacroAssembler::Frintm, instr, i, kFormatS, kFormat4S); break; case kArm64Float64RoundDown: - EmitFpOrNeonUnop(tasm(), &TurboAssembler::Frintm, instr, i, kFormatD, + EmitFpOrNeonUnop(masm(), &MacroAssembler::Frintm, instr, i, kFormatD, kFormat2D); break; case kArm64Float32RoundUp: - EmitFpOrNeonUnop(tasm(), &TurboAssembler::Frintp, instr, i, kFormatS, + EmitFpOrNeonUnop(masm(), &MacroAssembler::Frintp, instr, i, kFormatS, kFormat4S); break; case kArm64Float64RoundUp: - EmitFpOrNeonUnop(tasm(), &TurboAssembler::Frintp, instr, i, kFormatD, + EmitFpOrNeonUnop(masm(), &MacroAssembler::Frintp, instr, i, kFormatD, kFormat2D); break; case kArm64Float64RoundTiesAway: - EmitFpOrNeonUnop(tasm(), &TurboAssembler::Frinta, instr, i, kFormatD, + EmitFpOrNeonUnop(masm(), &MacroAssembler::Frinta, instr, i, kFormatD, kFormat2D); break; case kArm64Float32RoundTruncate: - EmitFpOrNeonUnop(tasm(), &TurboAssembler::Frintz, instr, i, kFormatS, + EmitFpOrNeonUnop(masm(), &MacroAssembler::Frintz, instr, i, kFormatS, kFormat4S); break; case kArm64Float64RoundTruncate: - EmitFpOrNeonUnop(tasm(), &TurboAssembler::Frintz, instr, i, kFormatD, + EmitFpOrNeonUnop(masm(), &MacroAssembler::Frintz, instr, i, kFormatD, kFormat2D); break; case kArm64Float32RoundTiesEven: - EmitFpOrNeonUnop(tasm(), &TurboAssembler::Frintn, instr, i, kFormatS, + EmitFpOrNeonUnop(masm(), &MacroAssembler::Frintn, instr, i, kFormatS, kFormat4S); break; case kArm64Float64RoundTiesEven: - EmitFpOrNeonUnop(tasm(), &TurboAssembler::Frintn, instr, i, kFormatD, + EmitFpOrNeonUnop(masm(), 
&MacroAssembler::Frintn, instr, i, kFormatD, kFormat2D); break; case kArm64Add: @@ -1314,14 +1314,14 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction( __ Udiv(i.OutputRegister32(), i.InputRegister32(0), i.InputRegister32(1)); break; case kArm64Imod: { - UseScratchRegisterScope scope(tasm()); + UseScratchRegisterScope scope(masm()); Register temp = scope.AcquireX(); __ Sdiv(temp, i.InputRegister(0), i.InputRegister(1)); __ Msub(i.OutputRegister(), temp, i.InputRegister(1), i.InputRegister(0)); break; } case kArm64Imod32: { - UseScratchRegisterScope scope(tasm()); + UseScratchRegisterScope scope(masm()); Register temp = scope.AcquireW(); __ Sdiv(temp, i.InputRegister32(0), i.InputRegister32(1)); __ Msub(i.OutputRegister32(), temp, i.InputRegister32(1), @@ -1329,14 +1329,14 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction( break; } case kArm64Umod: { - UseScratchRegisterScope scope(tasm()); + UseScratchRegisterScope scope(masm()); Register temp = scope.AcquireX(); __ Udiv(temp, i.InputRegister(0), i.InputRegister(1)); __ Msub(i.OutputRegister(), temp, i.InputRegister(1), i.InputRegister(0)); break; } case kArm64Umod32: { - UseScratchRegisterScope scope(tasm()); + UseScratchRegisterScope scope(masm()); Register temp = scope.AcquireW(); __ Udiv(temp, i.InputRegister32(0), i.InputRegister32(1)); __ Msub(i.OutputRegister32(), temp, i.InputRegister32(1), @@ -1650,7 +1650,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction( break; case kArm64Float64Mod: { // TODO(turbofan): implement directly. - FrameScope scope(tasm(), StackFrame::MANUAL); + FrameScope scope(masm(), StackFrame::MANUAL); DCHECK_EQ(d0, i.InputDoubleRegister(0)); DCHECK_EQ(d1, i.InputDoubleRegister(1)); DCHECK_EQ(d0, i.OutputDoubleRegister()); @@ -2369,7 +2369,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction( SIMD_BINOP_LANE_SIZE_CASE(kArm64IAdd, Add); SIMD_BINOP_LANE_SIZE_CASE(kArm64ISub, Sub); case kArm64I64x2Mul: { - UseScratchRegisterScope scope(tasm()); + UseScratchRegisterScope scope(masm()); VRegister dst = i.OutputSimd128Register(); VRegister src1 = i.InputSimd128Register(0); VRegister src2 = i.InputSimd128Register(1); @@ -2470,7 +2470,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction( SIMD_BINOP_LANE_SIZE_CASE(kArm64IGtU, Cmhi); SIMD_BINOP_LANE_SIZE_CASE(kArm64IGeU, Cmhs); case kArm64I32x4BitMask: { - UseScratchRegisterScope scope(tasm()); + UseScratchRegisterScope scope(masm()); Register dst = i.OutputRegister32(); VRegister src = i.InputSimd128Register(0); VRegister tmp = scope.AcquireQ(); @@ -2486,7 +2486,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction( break; } case kArm64I32x4DotI16x8S: { - UseScratchRegisterScope scope(tasm()); + UseScratchRegisterScope scope(masm()); VRegister lhs = i.InputSimd128Register(0); VRegister rhs = i.InputSimd128Register(1); VRegister tmp1 = scope.AcquireV(kFormat4S); @@ -2497,7 +2497,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction( break; } case kArm64I16x8DotI8x16S: { - UseScratchRegisterScope scope(tasm()); + UseScratchRegisterScope scope(masm()); VRegister lhs = i.InputSimd128Register(0); VRegister rhs = i.InputSimd128Register(1); VRegister tmp1 = scope.AcquireV(kFormat8H); @@ -2515,7 +2515,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction( i.InputSimd128Register(1).V16B()); } else { - UseScratchRegisterScope scope(tasm()); + UseScratchRegisterScope scope(masm()); VRegister lhs = i.InputSimd128Register(0); 
VRegister rhs = i.InputSimd128Register(1); VRegister tmp1 = scope.AcquireV(kFormat8H); @@ -2553,7 +2553,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction( VRegister dst = i.OutputSimd128Register(), src0 = i.InputSimd128Register(0), src1 = i.InputSimd128Register(1); - UseScratchRegisterScope scope(tasm()); + UseScratchRegisterScope scope(masm()); VRegister temp = scope.AcquireV(kFormat4S); if (dst == src1) { __ Mov(temp, src1.V4S()); @@ -2574,7 +2574,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction( VRegister dst = i.OutputSimd128Register(), src0 = i.InputSimd128Register(0), src1 = i.InputSimd128Register(1); - UseScratchRegisterScope scope(tasm()); + UseScratchRegisterScope scope(masm()); VRegister temp = scope.AcquireV(kFormat4S); if (dst == src1) { __ Mov(temp, src1.V4S()); @@ -2588,7 +2588,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction( SIMD_BINOP_LANE_SIZE_CASE(kArm64ISubSatU, Uqsub); SIMD_BINOP_CASE(kArm64I16x8Q15MulRSatS, Sqrdmulh, 8H); case kArm64I16x8BitMask: { - UseScratchRegisterScope scope(tasm()); + UseScratchRegisterScope scope(masm()); Register dst = i.OutputRegister32(); VRegister src = i.InputSimd128Register(0); VRegister tmp = scope.AcquireQ(); @@ -2615,7 +2615,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction( VRegister dst = i.OutputSimd128Register(), src0 = i.InputSimd128Register(0), src1 = i.InputSimd128Register(1); - UseScratchRegisterScope scope(tasm()); + UseScratchRegisterScope scope(masm()); VRegister temp = scope.AcquireV(kFormat8H); if (dst == src1) { __ Mov(temp, src1.V8H()); @@ -2633,7 +2633,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction( VRegister dst = i.OutputSimd128Register(), src0 = i.InputSimd128Register(0), src1 = i.InputSimd128Register(1); - UseScratchRegisterScope scope(tasm()); + UseScratchRegisterScope scope(masm()); VRegister temp = scope.AcquireV(kFormat8H); if (dst == src1) { __ Mov(temp, src1.V8H()); @@ -2644,7 +2644,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction( break; } case kArm64I8x16BitMask: { - UseScratchRegisterScope scope(tasm()); + UseScratchRegisterScope scope(masm()); Register dst = i.OutputRegister32(); VRegister src = i.InputSimd128Register(0); VRegister tmp = scope.AcquireQ(); @@ -2733,7 +2733,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction( src1 = i.InputSimd128Register(1).V4S(); // Check for in-place shuffles. // If dst == src0 == src1, then the shuffle is unary and we only use src0. - UseScratchRegisterScope scope(tasm()); + UseScratchRegisterScope scope(masm()); VRegister temp = scope.AcquireV(kFormat4S); if (dst == src0) { __ Mov(temp, src0); @@ -2799,7 +2799,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction( DCHECK_EQ(0, (imm1 | imm2) & (src0 == src1 ? 0xF0F0F0F0F0F0F0F0 : 0xE0E0E0E0E0E0E0E0)); - UseScratchRegisterScope scope(tasm()); + UseScratchRegisterScope scope(masm()); VRegister temp = scope.AcquireV(kFormat16B); __ Movi(temp, imm2, imm1); @@ -2878,7 +2878,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction( break; } case kArm64V128AnyTrue: { - UseScratchRegisterScope scope(tasm()); + UseScratchRegisterScope scope(masm()); // For AnyTrue, the format does not matter; also, we would like to avoid // an expensive horizontal reduction. 
VRegister temp = scope.AcquireV(kFormat4S); @@ -2891,7 +2891,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction( } #define SIMD_REDUCE_OP_CASE(Op, Instr, format, FORMAT) \ case Op: { \ - UseScratchRegisterScope scope(tasm()); \ + UseScratchRegisterScope scope(masm()); \ VRegister temp = scope.AcquireV(format); \ __ Instr(temp, i.InputSimd128Register(0).V##FORMAT()); \ __ Umov(i.OutputRegister32(), temp, 0); \ @@ -3045,7 +3045,7 @@ void CodeGenerator::AssembleArchBinarySearchSwitch(Instruction* instr) { void CodeGenerator::AssembleArchTableSwitch(Instruction* instr) { Arm64OperandConverter i(this, instr); - UseScratchRegisterScope scope(tasm()); + UseScratchRegisterScope scope(masm()); Register input = i.InputRegister32(0); Register temp = scope.AcquireX(); size_t const case_count = instr->InputCount() - 2; @@ -3066,7 +3066,7 @@ void CodeGenerator::AssembleArchTableSwitch(Instruction* instr) { { const size_t instruction_count = case_count * instructions_per_case + instructions_per_jump_target; - TurboAssembler::BlockPoolsScope block_pools(tasm(), + MacroAssembler::BlockPoolsScope block_pools(masm(), instruction_count * kInstrSize); __ Bind(&table); for (size_t index = 0; index < case_count; ++index) { @@ -3125,10 +3125,10 @@ void CodeGenerator::AssembleConstructFrame() { DCHECK_EQ(required_slots % 2, 1); __ Prologue(); // Update required_slots count since we have just claimed one extra slot. - static_assert(TurboAssembler::kExtraSlotClaimedByPrologue == 1); - required_slots -= TurboAssembler::kExtraSlotClaimedByPrologue; + static_assert(MacroAssembler::kExtraSlotClaimedByPrologue == 1); + required_slots -= MacroAssembler::kExtraSlotClaimedByPrologue; } else { - __ Push<TurboAssembler::kSignLR>(lr, fp); + __ Push<MacroAssembler::kSignLR>(lr, fp); __ Mov(fp, sp); } unwinding_info_writer_.MarkFrameConstructed(__ pc_offset()); @@ -3151,7 +3151,7 @@ void CodeGenerator::AssembleConstructFrame() { // One unoptimized frame slot has already been claimed when the actual // arguments count was pushed. required_slots -= - unoptimized_frame_slots - TurboAssembler::kExtraSlotClaimedByPrologue; + unoptimized_frame_slots - MacroAssembler::kExtraSlotClaimedByPrologue; } #if V8_ENABLE_WEBASSEMBLY @@ -3165,7 +3165,7 @@ void CodeGenerator::AssembleConstructFrame() { // exception unconditionally. Thereby we can avoid the integer overflow // check in the condition code. if (required_slots * kSystemPointerSize < v8_flags.stack_size * KB) { - UseScratchRegisterScope scope(tasm()); + UseScratchRegisterScope scope(masm()); Register scratch = scope.AcquireX(); __ Ldr(scratch, FieldMemOperand( kWasmInstanceRegister, @@ -3178,7 +3178,7 @@ void CodeGenerator::AssembleConstructFrame() { { // Finish the frame that hasn't been fully built yet.
- UseScratchRegisterScope temps(tasm()); + UseScratchRegisterScope temps(masm()); Register scratch = temps.AcquireX(); __ Mov(scratch, StackFrame::TypeToMarker(info()->GetOutputStackFrameType())); @@ -3209,7 +3209,7 @@ void CodeGenerator::AssembleConstructFrame() { __ Claim(required_slots); break; case CallDescriptor::kCallCodeObject: { - UseScratchRegisterScope temps(tasm()); + UseScratchRegisterScope temps(masm()); Register scratch = temps.AcquireX(); __ Mov(scratch, StackFrame::TypeToMarker(info()->GetOutputStackFrameType())); @@ -3225,7 +3225,7 @@ } #if V8_ENABLE_WEBASSEMBLY case CallDescriptor::kCallWasmFunction: { - UseScratchRegisterScope temps(tasm()); + UseScratchRegisterScope temps(masm()); Register scratch = temps.AcquireX(); __ Mov(scratch, StackFrame::TypeToMarker(info()->GetOutputStackFrameType())); @@ -3235,7 +3235,7 @@ } case CallDescriptor::kCallWasmImportWrapper: case CallDescriptor::kCallWasmCapiFunction: { - UseScratchRegisterScope temps(tasm()); + UseScratchRegisterScope temps(masm()); Register scratch = temps.AcquireX(); __ Mov(scratch, StackFrame::TypeToMarker(info()->GetOutputStackFrameType())); @@ -3254,7 +3254,7 @@ case CallDescriptor::kCallAddress: #if V8_ENABLE_WEBASSEMBLY if (info()->GetOutputStackFrameType() == StackFrame::C_WASM_ENTRY) { - UseScratchRegisterScope temps(tasm()); + UseScratchRegisterScope temps(masm()); Register scratch = temps.AcquireX(); __ Mov(scratch, StackFrame::TypeToMarker(StackFrame::C_WASM_ENTRY)); __ Push(scratch, padreg); @@ -3392,7 +3392,7 @@ void CodeGenerator::PrepareForDeoptimizationExits( } // Emit the jumps to deoptimization entries. - UseScratchRegisterScope scope(tasm()); + UseScratchRegisterScope scope(masm()); Register scratch = scope.AcquireX(); static_assert(static_cast<int>(kFirstDeoptimizeKind) == 0); for (int i = 0; i < kDeoptimizeKindCount; i++) { @@ -3417,9 +3417,9 @@ AllocatedOperand CodeGenerator::Push(InstructionOperand* source) { __ Push(padreg, g.ToRegister(source)); frame_access_state()->IncreaseSPDelta(new_slots); } else if (source->IsStackSlot()) { - UseScratchRegisterScope temps(tasm()); + UseScratchRegisterScope temps(masm()); Register scratch = temps.AcquireX(); - __ Ldr(scratch, g.ToMemOperand(source, tasm())); + __ Ldr(scratch, g.ToMemOperand(source, masm())); __ Push(padreg, scratch); frame_access_state()->IncreaseSPDelta(new_slots); } else { @@ -3440,10 +3440,10 @@ void CodeGenerator::Pop(InstructionOperand* dest, MachineRepresentation rep) { if (dest->IsRegister()) { __ Pop(g.ToRegister(dest), padreg); } else if (dest->IsStackSlot()) { - UseScratchRegisterScope temps(tasm()); + UseScratchRegisterScope temps(masm()); Register scratch = temps.AcquireX(); __ Pop(scratch, padreg); - __ Str(scratch, g.ToMemOperand(dest, tasm())); + __ Str(scratch, g.ToMemOperand(dest, masm())); } else { int last_frame_slot_id = frame_access_state_->frame()->GetTotalFrameSlotCount() - 1; @@ -3468,7 +3468,7 @@ void CodeGenerator::MoveToTempLocation(InstructionOperand* source, MachineRepresentation rep) { // Must be kept in sync with {MoveTempLocationTo}. DCHECK(!source->IsImmediate()); - move_cycle_.temps.emplace(tasm()); + move_cycle_.temps.emplace(masm()); auto& temps = *move_cycle_.temps; // Temporarily exclude the reserved scratch registers while we pick one to // resolve the move cycle.
Re-include them immediately afterwards as they @@ -3506,7 +3506,7 @@ void CodeGenerator::MoveToTempLocation(InstructionOperand* source, scratch_reg.code()); Arm64OperandConverter g(this, nullptr); if (source->IsStackSlot()) { - __ Ldr(g.ToDoubleRegister(&scratch), g.ToMemOperand(source, tasm())); + __ Ldr(g.ToDoubleRegister(&scratch), g.ToMemOperand(source, masm())); } else { DCHECK(source->IsRegister()); __ fmov(g.ToDoubleRegister(&scratch), g.ToRegister(source)); @@ -3535,7 +3535,7 @@ void CodeGenerator::MoveTempLocationTo(InstructionOperand* dest, move_cycle_.scratch_reg->code()); Arm64OperandConverter g(this, nullptr); if (dest->IsStackSlot()) { - __ Str(g.ToDoubleRegister(&scratch), g.ToMemOperand(dest, tasm())); + __ Str(g.ToDoubleRegister(&scratch), g.ToMemOperand(dest, masm())); } else { DCHECK(dest->IsRegister()); __ fmov(g.ToRegister(dest), g.ToDoubleRegister(&scratch)); @@ -3557,9 +3557,9 @@ void CodeGenerator::SetPendingMove(MoveOperands* move) { auto move_type = MoveType::InferMove(&move->source(), &move->destination()); if (move_type == MoveType::kStackToStack) { Arm64OperandConverter g(this, nullptr); - MemOperand src = g.ToMemOperand(&move->source(), tasm()); - MemOperand dst = g.ToMemOperand(&move->destination(), tasm()); - UseScratchRegisterScope temps(tasm()); + MemOperand src = g.ToMemOperand(&move->source(), masm()); + MemOperand dst = g.ToMemOperand(&move->destination(), masm()); + UseScratchRegisterScope temps(masm()); if (move->source().IsSimd128StackSlot()) { VRegister temp = temps.AcquireQ(); move_cycle_.scratch_fp_regs.set(temp); @@ -3574,11 +3574,11 @@ void CodeGenerator::SetPendingMove(MoveOperands* move) { // Offset doesn't fit into the immediate field so the assembler will emit // two instructions and use a second temp register. 
if ((src.IsImmediateOffset() && - !tasm()->IsImmLSScaled(src_offset, src_size) && - !tasm()->IsImmLSUnscaled(src_offset)) || + !masm()->IsImmLSScaled(src_offset, src_size) && + !masm()->IsImmLSUnscaled(src_offset)) || (dst.IsImmediateOffset() && - !tasm()->IsImmLSScaled(dst_offset, dst_size) && - !tasm()->IsImmLSUnscaled(dst_offset))) { + !masm()->IsImmLSScaled(dst_offset, dst_size) && + !masm()->IsImmLSUnscaled(dst_offset))) { Register temp = temps.AcquireX(); move_cycle_.scratch_regs.set(temp); } @@ -3627,7 +3627,7 @@ void CodeGenerator::AssembleMove(InstructionOperand* source, } return; case MoveType::kRegisterToStack: { - MemOperand dst = g.ToMemOperand(destination, tasm()); + MemOperand dst = g.ToMemOperand(destination, masm()); if (source->IsRegister()) { __ Str(g.ToRegister(source), dst); } else { @@ -3642,7 +3642,7 @@ void CodeGenerator::AssembleMove(InstructionOperand* source, return; } case MoveType::kStackToRegister: { - MemOperand src = g.ToMemOperand(source, tasm()); + MemOperand src = g.ToMemOperand(source, masm()); if (destination->IsRegister()) { __ Ldr(g.ToRegister(destination), src); } else { @@ -3657,15 +3657,15 @@ void CodeGenerator::AssembleMove(InstructionOperand* source, return; } case MoveType::kStackToStack: { - MemOperand src = g.ToMemOperand(source, tasm()); - MemOperand dst = g.ToMemOperand(destination, tasm()); + MemOperand src = g.ToMemOperand(source, masm()); + MemOperand dst = g.ToMemOperand(destination, masm()); if (source->IsSimd128StackSlot()) { - UseScratchRegisterScope scope(tasm()); + UseScratchRegisterScope scope(masm()); VRegister temp = scope.AcquireQ(); __ Ldr(temp, src); __ Str(temp, dst); } else { - UseScratchRegisterScope scope(tasm()); + UseScratchRegisterScope scope(masm()); Register temp = scope.AcquireX(); __ Ldr(temp, src); __ Str(temp, dst); @@ -3689,9 +3689,9 @@ void CodeGenerator::AssembleMove(InstructionOperand* source, } case MoveType::kConstantToStack: { Constant src = g.ToConstant(source); - MemOperand dst = g.ToMemOperand(destination, tasm()); + MemOperand dst = g.ToMemOperand(destination, masm()); if (destination->IsStackSlot()) { - UseScratchRegisterScope scope(tasm()); + UseScratchRegisterScope scope(masm()); Register temp = scope.AcquireX(); MoveConstantToRegister(temp, src); __ Str(temp, dst); @@ -3699,7 +3699,7 @@ void CodeGenerator::AssembleMove(InstructionOperand* source, if (base::bit_cast<int32_t>(src.ToFloat32()) == 0) { __ Str(wzr, dst); } else { - UseScratchRegisterScope scope(tasm()); + UseScratchRegisterScope scope(masm()); VRegister temp = scope.AcquireS(); __ Fmov(temp, src.ToFloat32()); __ Str(temp, dst); @@ -3709,7 +3709,7 @@ void CodeGenerator::AssembleMove(InstructionOperand* source, if (src.ToFloat64().AsUint64() == 0) { __ Str(xzr, dst); } else { - UseScratchRegisterScope scope(tasm()); + UseScratchRegisterScope scope(masm()); VRegister temp = scope.AcquireD(); __ Fmov(temp, src.ToFloat64().value()); __ Str(temp, dst); @@ -3740,8 +3740,8 @@ void CodeGenerator::AssembleSwap(InstructionOperand* source, } return; case MoveType::kRegisterToStack: { - UseScratchRegisterScope scope(tasm()); - MemOperand dst = g.ToMemOperand(destination, tasm()); + UseScratchRegisterScope scope(masm()); + MemOperand dst = g.ToMemOperand(destination, masm()); if (source->IsRegister()) { Register temp = scope.AcquireX(); Register src = g.ToRegister(source); __ Mov(temp, src); __ Ldr(src, dst); __ Str(temp, dst); } else { - UseScratchRegisterScope scope(tasm()); +
UseScratchRegisterScope scope(masm()); VRegister src = g.ToDoubleRegister(source); if (source->IsFloatRegister() || source->IsDoubleRegister()) { VRegister temp = scope.AcquireD(); @@ -3767,9 +3767,9 @@ void CodeGenerator::AssembleSwap(InstructionOperand* source, return; } case MoveType::kStackToStack: { - UseScratchRegisterScope scope(tasm()); - MemOperand src = g.ToMemOperand(source, tasm()); - MemOperand dst = g.ToMemOperand(destination, tasm()); + UseScratchRegisterScope scope(masm()); + MemOperand src = g.ToMemOperand(source, masm()); + MemOperand dst = g.ToMemOperand(destination, masm()); VRegister temp_0 = scope.AcquireD(); VRegister temp_1 = scope.AcquireD(); if (source->IsSimd128StackSlot()) { diff --git a/src/compiler/backend/arm64/instruction-selector-arm64.cc b/src/compiler/backend/arm64/instruction-selector-arm64.cc index 5c0c6415c5..730519846e 100644 --- a/src/compiler/backend/arm64/instruction-selector-arm64.cc +++ b/src/compiler/backend/arm64/instruction-selector-arm64.cc @@ -623,7 +623,7 @@ void EmitLoad(InstructionSelector* selector, Node* node, InstructionCode opcode, selector->CanAddressRelativeToRootsRegister(m.ResolvedValue())) { ptrdiff_t const delta = g.GetIntegerConstantValue(index) + - TurboAssemblerBase::RootRegisterOffsetForExternalReference( + MacroAssemblerBase::RootRegisterOffsetForExternalReference( selector->isolate(), m.ResolvedValue()); input_count = 1; // Check that the delta is a 32-bit integer due to the limitations of @@ -988,7 +988,7 @@ void InstructionSelector::VisitStore(Node* node) { CanAddressRelativeToRootsRegister(m.ResolvedValue())) { ptrdiff_t const delta = g.GetIntegerConstantValue(index) + - TurboAssemblerBase::RootRegisterOffsetForExternalReference( + MacroAssemblerBase::RootRegisterOffsetForExternalReference( isolate(), m.ResolvedValue()); if (is_int32(delta)) { input_count = 2; diff --git a/src/compiler/backend/code-generator-impl.h b/src/compiler/backend/code-generator-impl.h index b8238a36a7..4a2c770b88 100644 --- a/src/compiler/backend/code-generator-impl.h +++ b/src/compiler/backend/code-generator-impl.h @@ -266,14 +266,14 @@ class OutOfLineCode : public ZoneObject { Label* entry() { return &entry_; } Label* exit() { return &exit_; } const Frame* frame() const { return frame_; } - TurboAssembler* tasm() { return tasm_; } + MacroAssembler* masm() { return masm_; } OutOfLineCode* next() const { return next_; } private: Label entry_; Label exit_; const Frame* const frame_; - TurboAssembler* const tasm_; + MacroAssembler* const masm_; OutOfLineCode* const next_; }; diff --git a/src/compiler/backend/code-generator.cc b/src/compiler/backend/code-generator.cc index 4d747d7fc5..9f1f45b90c 100644 --- a/src/compiler/backend/code-generator.cc +++ b/src/compiler/backend/code-generator.cc @@ -64,7 +64,7 @@ CodeGenerator::CodeGenerator( current_block_(RpoNumber::Invalid()), start_source_position_(start_source_position), current_source_position_(SourcePosition::Unknown()), - tasm_(isolate, options, CodeObjectRequired::kNo, + masm_(isolate, options, CodeObjectRequired::kNo, #if V8_ENABLE_WEBASSEMBLY buffer_cache ? 
buffer_cache->GetAssemblerBuffer( AssemblerBase::kDefaultBufferSize) @@ -98,15 +98,15 @@ CodeGenerator::CodeGenerator( } CreateFrameAccessState(frame); CHECK_EQ(info->is_osr(), osr_helper_.has_value()); - tasm_.set_jump_optimization_info(jump_opt); + masm_.set_jump_optimization_info(jump_opt); CodeKind code_kind = info->code_kind(); if (code_kind == CodeKind::WASM_FUNCTION || code_kind == CodeKind::WASM_TO_CAPI_FUNCTION || code_kind == CodeKind::WASM_TO_JS_FUNCTION || code_kind == CodeKind::JS_TO_WASM_FUNCTION) { - tasm_.set_abort_hard(true); + masm_.set_abort_hard(true); } - tasm_.set_builtin(builtin); + masm_.set_builtin(builtin); } bool CodeGenerator::wasm_runtime_exception_support() const { @@ -173,19 +173,19 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleDeoptimizerCall( Label* jump_deoptimization_entry_label = &jump_deoptimization_entry_labels_[static_cast<int>(deopt_kind)]; if (info()->source_positions()) { - tasm()->RecordDeoptReason(deoptimization_reason, exit->node_id(), + masm()->RecordDeoptReason(deoptimization_reason, exit->node_id(), exit->pos(), deoptimization_id); } if (deopt_kind == DeoptimizeKind::kLazy) { ++lazy_deopt_count_; - tasm()->BindExceptionHandler(exit->label()); + masm()->BindExceptionHandler(exit->label()); } else { ++eager_deopt_count_; - tasm()->bind(exit->label()); + masm()->bind(exit->label()); } Builtin target = Deoptimizer::GetDeoptimizationEntry(deopt_kind); - tasm()->CallForDeoptimization(target, deoptimization_id, exit->label(), + masm()->CallForDeoptimization(target, deoptimization_id, exit->label(), deopt_kind, exit->continue_label(), jump_deoptimization_entry_label); @@ -195,7 +195,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleDeoptimizerCall( } void CodeGenerator::MaybeEmitOutOfLineConstantPool() { - tasm()->MaybeEmitOutOfLineConstantPool(); + masm()->MaybeEmitOutOfLineConstantPool(); } void CodeGenerator::AssembleCode() { @@ -204,27 +204,27 @@ void CodeGenerator::AssembleCode() { // Open a frame scope to indicate that there is a frame on the stack. The // MANUAL indicates that the scope shouldn't actually generate code to set up // the frame (that is done in AssemblePrologue). - FrameScope frame_scope(tasm(), StackFrame::MANUAL); + FrameScope frame_scope(masm(), StackFrame::MANUAL); if (info->source_positions()) { AssembleSourcePosition(start_source_position()); } - offsets_info_.code_start_register_check = tasm()->pc_offset(); - tasm()->CodeEntry(); + offsets_info_.code_start_register_check = masm()->pc_offset(); + masm()->CodeEntry(); // Check that {kJavaScriptCallCodeStartRegister} has been set correctly. if (v8_flags.debug_code && info->called_with_code_start_register()) { - tasm()->RecordComment("-- Prologue: check code start register --"); + masm()->RecordComment("-- Prologue: check code start register --"); AssembleCodeStartRegisterCheck(); } - offsets_info_.deopt_check = tasm()->pc_offset(); + offsets_info_.deopt_check = masm()->pc_offset(); // We want to bailout only from JS functions, which are the only ones // that are optimized. if (info->IsOptimizing()) { DCHECK(linkage()->GetIncomingDescriptor()->IsJSFunctionCall()); - tasm()->RecordComment("-- Prologue: check for deoptimization --"); + masm()->RecordComment("-- Prologue: check for deoptimization --"); BailoutIfDeoptimized(); } @@ -258,22 +258,22 @@ void CodeGenerator::AssembleCode() { instr_starts_.assign(instructions()->instructions().size(), {}); } // Assemble instructions in assembly order.
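// Sketch (illustration only, not part of the patch): the out-of-line code
// that AssembleCode() drains after the main block loop below is registered
// through an intrusive, push-front linked list; each stub's constructor
// links itself onto the code generator, which is why the walk over ools_
// visits the newest stub first. The constructor, as renamed further down
// in this file's hunks:
//
//   OutOfLineCode::OutOfLineCode(CodeGenerator* gen)
//       : frame_(gen->frame()), masm_(gen->masm()), next_(gen->ools_) {
//     gen->ools_ = this;  // push-front
//   }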
- offsets_info_.blocks_start = tasm()->pc_offset(); + offsets_info_.blocks_start = masm()->pc_offset(); for (const InstructionBlock* block : instructions()->ao_blocks()) { // Align loop headers on vendor recommended boundaries. - if (!tasm()->jump_optimization_info()) { + if (!masm()->jump_optimization_info()) { if (block->ShouldAlignLoopHeader()) { - tasm()->LoopHeaderAlign(); + masm()->LoopHeaderAlign(); } else if (block->ShouldAlignCodeTarget()) { - tasm()->CodeTargetAlign(); + masm()->CodeTargetAlign(); } } if (info->trace_turbo_json()) { - block_starts_[block->rpo_number().ToInt()] = tasm()->pc_offset(); + block_starts_[block->rpo_number().ToInt()] = masm()->pc_offset(); } // Bind a label for a block. current_block_ = block->rpo_number(); - unwinding_info_writer_.BeginInstructionBlock(tasm()->pc_offset(), block); + unwinding_info_writer_.BeginInstructionBlock(masm()->pc_offset(), block); if (v8_flags.code_comments) { std::ostringstream buffer; buffer << "-- B" << block->rpo_number().ToInt() << " start"; @@ -289,12 +289,12 @@ void CodeGenerator::AssembleCode() { buffer << " (in loop " << block->loop_header().ToInt() << ")"; } buffer << " --"; - tasm()->RecordComment(buffer.str().c_str()); + masm()->RecordComment(buffer.str().c_str()); } frame_access_state()->MarkHasFrame(block->needs_frame()); - tasm()->bind(GetLabel(current_block_)); + masm()->bind(GetLabel(current_block_)); if (block->must_construct_frame()) { AssembleConstructFrame(); @@ -303,7 +303,7 @@ void CodeGenerator::AssembleCode() { // using the roots. // TODO(mtrofin): investigate how we can avoid doing this repeatedly. if (linkage()->GetIncomingDescriptor()->InitializeRootRegister()) { - tasm()->InitializeRootRegister(); + masm()->InitializeRootRegister(); } } #ifdef V8_TARGET_ARCH_RISCV64 @@ -312,10 +312,10 @@ void CodeGenerator::AssembleCode() { // back between blocks. the Rvv instruction may get an incorrect vtype. so // here VectorUnit needs to be cleared to ensure that the vtype is correct // within the block. - tasm()->VU.clear(); + masm()->VU.clear(); #endif if (V8_EMBEDDED_CONSTANT_POOL_BOOL && !block->needs_frame()) { - ConstantPoolUnavailableScope constant_pool_unavailable(tasm()); + ConstantPoolUnavailableScope constant_pool_unavailable(masm()); result_ = AssembleBlock(block); } else { result_ = AssembleBlock(block); @@ -325,29 +325,29 @@ void CodeGenerator::AssembleCode() { } // Assemble all out-of-line code. - offsets_info_.out_of_line_code = tasm()->pc_offset(); + offsets_info_.out_of_line_code = masm()->pc_offset(); if (ools_) { - tasm()->RecordComment("-- Out of line code --"); + masm()->RecordComment("-- Out of line code --"); for (OutOfLineCode* ool = ools_; ool; ool = ool->next()) { - tasm()->bind(ool->entry()); + masm()->bind(ool->entry()); ool->Generate(); - if (ool->exit()->is_bound()) tasm()->jmp(ool->exit()); + if (ool->exit()->is_bound()) masm()->jmp(ool->exit()); } } // This nop operation is needed to ensure that the trampoline is not // confused with the pc of the call before deoptimization. // The test regress/regress-259 is an example of where we need it. - tasm()->nop(); + masm()->nop(); // For some targets, we must make sure that constant and veneer pools are // emitted before emitting the deoptimization exits. PrepareForDeoptimizationExits(&deoptimization_exits_); - deopt_exit_start_offset_ = tasm()->pc_offset(); + deopt_exit_start_offset_ = masm()->pc_offset(); // Assemble deoptimization exits. 
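// Sketch (illustration only): the ordering invariant established just below
// is that lazy deoptimization exits are emitted after all eager ones,
// because lazy deopts may need additional instructions. Assuming a kind()
// accessor on DeoptimizationExit, the requirement can be expressed as a
// stable sort on "is lazy":
//
//   std::stable_sort(deoptimization_exits_.begin(),
//                    deoptimization_exits_.end(),
//                    [](const DeoptimizationExit* a,
//                       const DeoptimizationExit* b) {
//                      return (a->kind() == DeoptimizeKind::kLazy) <
//                             (b->kind() == DeoptimizeKind::kLazy);
//                    });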
- offsets_info_.deoptimization_exits = tasm()->pc_offset(); + offsets_info_.deoptimization_exits = masm()->pc_offset(); int last_updated = 0; // We sort the deoptimization exits here so that the lazy ones will be visited // last. We need this as lazy deopts might need additional instructions. @@ -367,7 +367,7 @@ void CodeGenerator::AssembleCode() { { #ifdef V8_TARGET_ARCH_PPC64 v8::internal::Assembler::BlockTrampolinePoolScope block_trampoline_pool( - tasm()); + masm()); #endif for (DeoptimizationExit* exit : deoptimization_exits_) { if (exit->emitted()) continue; @@ -388,19 +388,19 @@ void CodeGenerator::AssembleCode() { } } - offsets_info_.pools = tasm()->pc_offset(); + offsets_info_.pools = masm()->pc_offset(); // TODO(jgruber): Move all inlined metadata generation into a new, // architecture-independent version of FinishCode. Currently, this includes // the safepoint table, handler table, constant pool, and code comments, in // that order. FinishCode(); - offsets_info_.jump_tables = tasm()->pc_offset(); + offsets_info_.jump_tables = masm()->pc_offset(); // Emit the jump tables. if (jump_tables_) { - tasm()->Align(kSystemPointerSize); + masm()->Align(kSystemPointerSize); for (JumpTable* table = jump_tables_; table; table = table->next()) { - tasm()->bind(table->label()); + masm()->bind(table->label()); AssembleJumpTable(table->targets(), table->target_count()); } } @@ -408,24 +408,24 @@ void CodeGenerator::AssembleCode() { // The LinuxPerfJitLogger logs code up until here, excluding the safepoint // table. Resolve the unwinding info now so it is aware of the same code // size as reported by perf. - unwinding_info_writer_.Finish(tasm()->pc_offset()); + unwinding_info_writer_.Finish(masm()->pc_offset()); // Final alignment before starting on the metadata section. - tasm()->Align(InstructionStream::kMetadataAlignment); + masm()->Align(InstructionStream::kMetadataAlignment); - safepoints()->Emit(tasm(), frame()->GetTotalFrameSlotCount()); + safepoints()->Emit(masm(), frame()->GetTotalFrameSlotCount()); // Emit the exception handler table. 
if (!handlers_.empty()) { - handler_table_offset_ = HandlerTable::EmitReturnTableStart(tasm()); + handler_table_offset_ = HandlerTable::EmitReturnTableStart(masm()); for (size_t i = 0; i < handlers_.size(); ++i) { - HandlerTable::EmitReturnEntry(tasm(), handlers_[i].pc_offset, + HandlerTable::EmitReturnEntry(masm(), handlers_[i].pc_offset, handlers_[i].handler->pos()); } } - tasm()->MaybeEmitOutOfLineConstantPool(); - tasm()->FinalizeJumpOptimizationInfo(); + masm()->MaybeEmitOutOfLineConstantPool(); + masm()->FinalizeJumpOptimizationInfo(); result_ = kSuccess; } @@ -435,7 +435,7 @@ void CodeGenerator::AssembleArchBinarySearchSwitchRange( std::pair<int32_t, Label*>* end) { if (end - begin < kBinarySearchSwitchMinimalCases) { while (begin != end) { - tasm()->JumpIfEqual(input, begin->first, begin->second); + masm()->JumpIfEqual(input, begin->first, begin->second); ++begin; } AssembleArchJumpRegardlessOfAssemblyOrder(def_block); @@ -443,9 +443,9 @@ void CodeGenerator::AssembleArchBinarySearchSwitchRange( } auto middle = begin + (end - begin) / 2; Label less_label; - tasm()->JumpIfLessThan(input, middle->first, &less_label); + masm()->JumpIfLessThan(input, middle->first, &less_label); AssembleArchBinarySearchSwitchRange(input, def_block, middle, end); - tasm()->bind(&less_label); + masm()->bind(&less_label); AssembleArchBinarySearchSwitchRange(input, def_block, begin, middle); } @@ -469,7 +469,7 @@ base::OwnedVector<byte> CodeGenerator::GetProtectedInstructionsData() { MaybeHandle<Code> CodeGenerator::FinalizeCode() { if (result_ != kSuccess) { - tasm()->AbortedCodeGeneration(); + masm()->AbortedCodeGeneration(); return {}; } // Allocate and install the code. CodeDesc desc; - tasm()->GetCode(isolate(), &desc, safepoints(), handler_table_offset_); + masm()->GetCode(isolate(), &desc, safepoints(), handler_table_offset_); #if defined(V8_OS_WIN64) if (Builtins::IsBuiltinId(info_->builtin())) { - isolate_->SetBuiltinUnwindData(info_->builtin(), tasm()->GetUnwindInfo()); + isolate_->SetBuiltinUnwindData(info_->builtin(), masm()->GetUnwindInfo()); } #endif // V8_OS_WIN64 @@ -508,7 +508,7 @@ MaybeHandle<Code> CodeGenerator::FinalizeCode() { Handle<Code> code; if (!maybe_code.ToHandle(&code)) { - tasm()->AbortedCodeGeneration(); + masm()->AbortedCodeGeneration(); return {}; } @@ -527,7 +527,7 @@ bool CodeGenerator::IsNextInAssemblyOrder(RpoNumber block) const { } void CodeGenerator::RecordSafepoint(ReferenceMap* references) { - auto safepoint = safepoints()->DefineSafepoint(tasm()); + auto safepoint = safepoints()->DefineSafepoint(masm()); int frame_header_offset = frame()->GetFixedSlotCount(); for (const InstructionOperand& operand : references->reference_operands()) { if (operand.IsStackSlot()) { @@ -558,7 +558,7 @@ bool CodeGenerator::IsMaterializableFromRoot(Handle<HeapObject> object, CodeGenerator::CodeGenResult CodeGenerator::AssembleBlock( const InstructionBlock* block) { if (block->IsHandler()) { - tasm()->ExceptionHandler(); + masm()->ExceptionHandler(); } for (int i = block->code_start(); i < block->code_end(); ++i) { CodeGenResult result = AssembleInstruction(i, block); @@ -718,7 +718,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleInstruction( int instruction_index, const InstructionBlock* block) { Instruction* instr = instructions()->InstructionAt(instruction_index); if (info()->trace_turbo_json()) { - instr_starts_[instruction_index].gap_pc_offset = tasm()->pc_offset(); + instr_starts_[instruction_index].gap_pc_offset = masm()->pc_offset(); } int
first_unused_stack_slot; FlagsMode mode = FlagsModeField::decode(instr->opcode()); @@ -738,14 +738,14 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleInstruction( AssembleDeconstructFrame(); } if (info()->trace_turbo_json()) { - instr_starts_[instruction_index].arch_instr_pc_offset = tasm()->pc_offset(); + instr_starts_[instruction_index].arch_instr_pc_offset = masm()->pc_offset(); } // Assemble architecture-specific code for the instruction. CodeGenResult result = AssembleArchInstruction(instr); if (result != kSuccess) return result; if (info()->trace_turbo_json()) { - instr_starts_[instruction_index].condition_pc_offset = tasm()->pc_offset(); + instr_starts_[instruction_index].condition_pc_offset = masm()->pc_offset(); } FlagsCondition condition = FlagsConditionField::decode(instr->opcode()); @@ -779,7 +779,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleInstruction( branch.false_label = exit->continue_label(); branch.fallthru = true; AssembleArchDeoptBranch(instr, &branch); - tasm()->bind(exit->continue_label()); + masm()->bind(exit->continue_label()); break; } case kFlags_set: { @@ -818,7 +818,7 @@ void CodeGenerator::AssembleSourcePosition(SourcePosition source_position) { if (source_position == current_source_position_) return; current_source_position_ = source_position; if (!source_position.IsKnown()) return; - source_position_table_builder_.AddPosition(tasm()->pc_offset(), + source_position_table_builder_.AddPosition(masm()->pc_offset(), source_position, false); if (v8_flags.code_comments) { OptimizedCompilationInfo* info = this->info(); @@ -833,8 +833,8 @@ void CodeGenerator::AssembleSourcePosition(SourcePosition source_position) { buffer << "-- "; // Turbolizer only needs the source position, as it can reconstruct // the inlining stack from other information. 
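// Worked example (sketch) for AssembleArchBinarySearchSwitchRange above:
// with sorted cases {10, 20, 30, 40} on input r and a minimal-case
// threshold larger than two, the emitted decision tree is
//
//   JumpIfLessThan r, 30, less    ; split at the middle case
//   JumpIfEqual    r, 30, L30     ; linear chain for the upper half
//   JumpIfEqual    r, 40, L40
//   jump default
//  less:
//   JumpIfEqual    r, 10, L10     ; linear chain for the lower half
//   JumpIfEqual    r, 20, L20
//   jump default
//
// i.e. O(log n) comparisons to reach a short linear scan.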
- if (info->trace_turbo_json() || !tasm()->isolate() || - tasm()->isolate()->concurrent_recompilation_enabled()) { + if (info->trace_turbo_json() || !masm()->isolate() || + masm()->isolate()->concurrent_recompilation_enabled()) { buffer << source_position; } else { AllowGarbageCollection allocation; @@ -843,7 +843,7 @@ void CodeGenerator::AssembleSourcePosition(SourcePosition source_position) { buffer << source_position.InliningStack(info); } buffer << " --"; - tasm()->RecordComment(buffer.str().c_str()); + masm()->RecordComment(buffer.str().c_str()); } } @@ -981,7 +981,7 @@ void CodeGenerator::RecordCallPosition(Instruction* instr) { RpoNumber handler_rpo = i.InputRpo(instr->InputCount() - 1); DCHECK(instructions()->InstructionBlockAt(handler_rpo)->IsHandler()); handlers_.push_back( - {GetLabel(handler_rpo), tasm()->pc_offset_for_safepoint()}); + {GetLabel(handler_rpo), masm()->pc_offset_for_safepoint()}); } if (needs_frame_state) { @@ -991,7 +991,7 @@ void CodeGenerator::RecordCallPosition(Instruction* instr) { size_t frame_state_offset = 1; FrameStateDescriptor* descriptor = GetDeoptimizationEntry(instr, frame_state_offset).descriptor(); - int pc_offset = tasm()->pc_offset_for_safepoint(); + int pc_offset = masm()->pc_offset_for_safepoint(); BuildTranslation(instr, pc_offset, frame_state_offset, 0, descriptor->state_combine()); } @@ -1325,7 +1325,7 @@ void CodeGenerator::AddTranslationForOperand(Instruction* instr, } void CodeGenerator::MarkLazyDeoptSite() { - last_lazy_deopt_pc_ = tasm()->pc_offset(); + last_lazy_deopt_pc_ = masm()->pc_offset(); } DeoptimizationExit* CodeGenerator::AddDeoptimizationExit( @@ -1336,7 +1336,7 @@ DeoptimizationExit* CodeGenerator::AddDeoptimizationExit( } OutOfLineCode::OutOfLineCode(CodeGenerator* gen) - : frame_(gen->frame()), tasm_(gen->tasm()), next_(gen->ools_) { + : frame_(gen->frame()), masm_(gen->masm()), next_(gen->ools_) { gen->ools_ = this; } diff --git a/src/compiler/backend/code-generator.h b/src/compiler/backend/code-generator.h index 288d67f4df..f6feb42378 100644 --- a/src/compiler/backend/code-generator.h +++ b/src/compiler/backend/code-generator.h @@ -188,7 +188,7 @@ class V8_EXPORT_PRIVATE CodeGenerator final : public GapResolver::Assembler { void RecordSafepoint(ReferenceMap* references); Zone* zone() const { return zone_; } - TurboAssembler* tasm() { return &tasm_; } + MacroAssembler* masm() { return &masm_; } SafepointTableBuilder* safepoint_table_builder() { return &safepoints_; } size_t handler_table_offset() const { return handler_table_offset_; } @@ -448,7 +448,7 @@ class V8_EXPORT_PRIVATE CodeGenerator final : public GapResolver::Assembler { RpoNumber current_block_; SourcePosition start_source_position_; SourcePosition current_source_position_; - TurboAssembler tasm_; + MacroAssembler masm_; GapResolver resolver_; SafepointTableBuilder safepoints_; ZoneVector<HandlerInfo> handlers_; diff --git a/src/compiler/backend/ia32/code-generator-ia32.cc b/src/compiler/backend/ia32/code-generator-ia32.cc index 4c9724a3a8..5ced2002c1 100644 --- a/src/compiler/backend/ia32/code-generator-ia32.cc +++ b/src/compiler/backend/ia32/code-generator-ia32.cc @@ -29,7 +29,7 @@ namespace v8 { namespace internal { namespace compiler { -#define __ tasm()-> +#define __ masm()-> #define kScratchDoubleReg xmm0 @@ -202,11 +202,11 @@ class IA32OperandConverter : public InstructionOperandConverter { void MoveInstructionOperandToRegister(Register destination, InstructionOperand* op) { if (op->IsImmediate() || op->IsConstant()) { - gen_->tasm()->mov(destination,
ToImmediate(op)); + gen_->masm()->mov(destination, ToImmediate(op)); } else if (op->IsRegister()) { - gen_->tasm()->Move(destination, ToRegister(op)); + gen_->masm()->Move(destination, ToRegister(op)); } else { - gen_->tasm()->mov(destination, ToOperand(op)); + gen_->masm()->mov(destination, ToOperand(op)); } } }; @@ -475,7 +475,7 @@ class OutOfLineRecordWrite final : public OutOfLineCode { XMMRegister src0 = i.InputSimd128Register(0); \ Operand src1 = i.InputOperand(instr->InputCount() == 2 ? 1 : 0); \ if (CpuFeatures::IsSupported(AVX)) { \ - CpuFeatureScope avx_scope(tasm(), AVX); \ + CpuFeatureScope avx_scope(masm(), AVX); \ __ v##opcode(i.OutputSimd128Register(), src0, src1); \ } else { \ DCHECK_EQ(i.OutputSimd128Register(), src0); \ @@ -485,11 +485,11 @@ class OutOfLineRecordWrite final : public OutOfLineCode { #define ASSEMBLE_SIMD_IMM_SHUFFLE(opcode, SSELevel, imm) \ if (CpuFeatures::IsSupported(AVX)) { \ - CpuFeatureScope avx_scope(tasm(), AVX); \ + CpuFeatureScope avx_scope(masm(), AVX); \ __ v##opcode(i.OutputSimd128Register(), i.InputSimd128Register(0), \ i.InputOperand(1), imm); \ } else { \ - CpuFeatureScope sse_scope(tasm(), SSELevel); \ + CpuFeatureScope sse_scope(masm(), SSELevel); \ DCHECK_EQ(i.OutputSimd128Register(), i.InputSimd128Register(0)); \ __ opcode(i.OutputSimd128Register(), i.InputOperand(1), imm); \ } @@ -532,26 +532,25 @@ class OutOfLineRecordWrite final : public OutOfLineCode { int8_t laneidx = i.InputInt8(1); \ if (HasAddressingMode(instr)) { \ if (CpuFeatures::IsSupported(AVX)) { \ - CpuFeatureScope avx_scope(tasm(), AVX); \ + CpuFeatureScope avx_scope(masm(), AVX); \ __ v##OPCODE(dst, src, i.MemoryOperand(2), laneidx); \ } else { \ DCHECK_EQ(dst, src); \ - CpuFeatureScope sse_scope(tasm(), CPU_FEATURE); \ + CpuFeatureScope sse_scope(masm(), CPU_FEATURE); \ __ OPCODE(dst, i.MemoryOperand(2), laneidx); \ } \ } else { \ if (CpuFeatures::IsSupported(AVX)) { \ - CpuFeatureScope avx_scope(tasm(), AVX); \ + CpuFeatureScope avx_scope(masm(), AVX); \ __ v##OPCODE(dst, src, i.InputOperand(2), laneidx); \ } else { \ DCHECK_EQ(dst, src); \ - CpuFeatureScope sse_scope(tasm(), CPU_FEATURE); \ + CpuFeatureScope sse_scope(masm(), CPU_FEATURE); \ __ OPCODE(dst, i.InputOperand(2), laneidx); \ } \ } \ } while (false) - void CodeGenerator::AssembleDeconstructFrame() { __ mov(esp, ebp); __ pop(ebp); @@ -566,7 +565,7 @@ void CodeGenerator::AssemblePrepareTailCall() { namespace { -void AdjustStackPointerForTailCall(TurboAssembler* tasm, +void AdjustStackPointerForTailCall(MacroAssembler* masm, FrameAccessState* state, int new_slot_above_sp, bool allow_shrinkage = true) { @@ -574,10 +573,10 @@ void AdjustStackPointerForTailCall(TurboAssembler* tasm, StandardFrameConstants::kFixedSlotCountAboveFp; int stack_slot_delta = new_slot_above_sp - current_sp_offset; if (stack_slot_delta > 0) { - tasm->AllocateStackSpace(stack_slot_delta * kSystemPointerSize); + masm->AllocateStackSpace(stack_slot_delta * kSystemPointerSize); state->IncreaseSPDelta(stack_slot_delta); } else if (allow_shrinkage && stack_slot_delta < 0) { - tasm->add(esp, Immediate(-stack_slot_delta * kSystemPointerSize)); + masm->add(esp, Immediate(-stack_slot_delta * kSystemPointerSize)); state->IncreaseSPDelta(stack_slot_delta); } } @@ -617,7 +616,7 @@ void CodeGenerator::AssembleTailCallBeforeGap(Instruction* instr, LocationOperand destination_location( LocationOperand::cast(move->destination())); InstructionOperand source(move->source()); - AdjustStackPointerForTailCall(tasm(), frame_access_state(), + 
AdjustStackPointerForTailCall(masm(), frame_access_state(), destination_location.index()); if (source.IsStackSlot()) { LocationOperand source_location(LocationOperand::cast(source)); @@ -635,13 +634,13 @@ void CodeGenerator::AssembleTailCallBeforeGap(Instruction* instr, move->Eliminate(); } } - AdjustStackPointerForTailCall(tasm(), frame_access_state(), + AdjustStackPointerForTailCall(masm(), frame_access_state(), first_unused_slot_offset, false); } void CodeGenerator::AssembleTailCallAfterGap(Instruction* instr, int first_unused_slot_offset) { - AdjustStackPointerForTailCall(tasm(), frame_access_state(), + AdjustStackPointerForTailCall(masm(), frame_access_state(), first_unused_slot_offset); } @@ -884,7 +883,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction( { // We don't actually want to generate a pile of code for this, so just // claim there is a stack frame, without generating one. - FrameScope scope(tasm(), StackFrame::NO_FRAME_TYPE); + FrameScope scope(masm(), StackFrame::NO_FRAME_TYPE); __ Call(isolate()->builtins()->code_handle(Builtin::kAbortCSADcheck), RelocInfo::CODE_TARGET); } @@ -1262,7 +1261,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction( __ Sqrtss(i.OutputDoubleRegister(), i.InputOperand(0)); break; case kIA32Float32Round: { - CpuFeatureScope sse_scope(tasm(), SSE4_1); + CpuFeatureScope sse_scope(masm(), SSE4_1); RoundingMode const mode = static_cast<RoundingMode>(MiscField::decode(instr->opcode())); __ Roundss(i.OutputDoubleRegister(), i.InputDoubleRegister(0), mode); @@ -2112,12 +2111,12 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction( } case kIA32Insertps: { if (CpuFeatures::IsSupported(AVX)) { - CpuFeatureScope avx_scope(tasm(), AVX); + CpuFeatureScope avx_scope(masm(), AVX); __ vinsertps(i.OutputSimd128Register(), i.InputSimd128Register(0), i.InputOperand(2), i.InputInt8(1) << 4); } else { DCHECK_EQ(i.OutputSimd128Register(), i.InputSimd128Register(0)); - CpuFeatureScope sse_scope(tasm(), SSE4_1); + CpuFeatureScope sse_scope(masm(), SSE4_1); __ insertps(i.OutputSimd128Register(), i.InputOperand(2), i.InputInt8(1) << 4); } @@ -2315,12 +2314,12 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction( XMMRegister src1 = i.InputSimd128Register(0); XMMRegister src2 = i.InputSimd128Register(1); if (CpuFeatures::IsSupported(AVX)) { - CpuFeatureScope avx_scope(tasm(), AVX); + CpuFeatureScope avx_scope(masm(), AVX); __ vpminsd(kScratchDoubleReg, src1, src2); __ vpcmpeqd(dst, kScratchDoubleReg, src2); } else { DCHECK_EQ(dst, src1); - CpuFeatureScope sse_scope(tasm(), SSE4_1); + CpuFeatureScope sse_scope(masm(), SSE4_1); __ pminsd(dst, src2); __ pcmpeqd(dst, src2); } @@ -2328,7 +2327,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction( } case kSSEI32x4UConvertF32x4: { DCHECK_EQ(i.OutputSimd128Register(), i.InputSimd128Register(0)); - CpuFeatureScope sse_scope(tasm(), SSE4_1); + CpuFeatureScope sse_scope(masm(), SSE4_1); XMMRegister dst = i.OutputSimd128Register(); XMMRegister tmp = i.TempSimd128Register(0); // NAN->0, negative->0 @@ -2356,7 +2355,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction( } case kAVXI32x4UConvertF32x4: { DCHECK_EQ(i.OutputSimd128Register(), i.InputSimd128Register(0)); - CpuFeatureScope avx_scope(tasm(), AVX); + CpuFeatureScope avx_scope(masm(), AVX); XMMRegister dst = i.OutputSimd128Register(); XMMRegister tmp = i.TempSimd128Register(0); // NAN->0, negative->0 @@ -2406,7 +2405,7 @@ CodeGenerator::CodeGenResult
CodeGenerator::AssembleArchInstruction( } case kSSEI32x4GtU: { DCHECK_EQ(i.OutputSimd128Register(), i.InputSimd128Register(0)); - CpuFeatureScope sse_scope(tasm(), SSE4_1); + CpuFeatureScope sse_scope(masm(), SSE4_1); XMMRegister dst = i.OutputSimd128Register(); Operand src = i.InputOperand(1); __ pmaxud(dst, src); @@ -2416,7 +2415,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction( break; } case kAVXI32x4GtU: { - CpuFeatureScope avx_scope(tasm(), AVX); + CpuFeatureScope avx_scope(masm(), AVX); XMMRegister dst = i.OutputSimd128Register(); XMMRegister src1 = i.InputSimd128Register(0); Operand src2 = i.InputOperand(1); @@ -2428,7 +2427,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction( } case kSSEI32x4GeU: { DCHECK_EQ(i.OutputSimd128Register(), i.InputSimd128Register(0)); - CpuFeatureScope sse_scope(tasm(), SSE4_1); + CpuFeatureScope sse_scope(masm(), SSE4_1); XMMRegister dst = i.OutputSimd128Register(); Operand src = i.InputOperand(1); __ pminud(dst, src); @@ -2436,7 +2435,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction( break; } case kAVXI32x4GeU: { - CpuFeatureScope avx_scope(tasm(), AVX); + CpuFeatureScope avx_scope(masm(), AVX); XMMRegister src1 = i.InputSimd128Register(0); Operand src2 = i.InputOperand(1); __ vpminud(kScratchDoubleReg, src1, src2); @@ -2552,7 +2551,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction( break; } case kAVXI16x8Ne: { - CpuFeatureScope avx_scope(tasm(), AVX); + CpuFeatureScope avx_scope(masm(), AVX); __ vpcmpeqw(i.OutputSimd128Register(), i.InputSimd128Register(0), i.InputOperand(1)); __ vpcmpeqw(kScratchDoubleReg, kScratchDoubleReg, kScratchDoubleReg); @@ -2574,7 +2573,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction( break; } case kAVXI16x8GeS: { - CpuFeatureScope avx_scope(tasm(), AVX); + CpuFeatureScope avx_scope(masm(), AVX); XMMRegister src1 = i.InputSimd128Register(0); Operand src2 = i.InputOperand(1); __ vpminsw(kScratchDoubleReg, src1, src2); @@ -2621,7 +2620,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction( } case kSSEI16x8GtU: { DCHECK_EQ(i.OutputSimd128Register(), i.InputSimd128Register(0)); - CpuFeatureScope sse_scope(tasm(), SSE4_1); + CpuFeatureScope sse_scope(masm(), SSE4_1); XMMRegister dst = i.OutputSimd128Register(); Operand src = i.InputOperand(1); __ pmaxuw(dst, src); @@ -2631,7 +2630,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction( break; } case kAVXI16x8GtU: { - CpuFeatureScope avx_scope(tasm(), AVX); + CpuFeatureScope avx_scope(masm(), AVX); XMMRegister dst = i.OutputSimd128Register(); XMMRegister src1 = i.InputSimd128Register(0); Operand src2 = i.InputOperand(1); @@ -2643,7 +2642,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction( } case kSSEI16x8GeU: { DCHECK_EQ(i.OutputSimd128Register(), i.InputSimd128Register(0)); - CpuFeatureScope sse_scope(tasm(), SSE4_1); + CpuFeatureScope sse_scope(masm(), SSE4_1); XMMRegister dst = i.OutputSimd128Register(); Operand src = i.InputOperand(1); __ pminuw(dst, src); @@ -2651,7 +2650,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction( break; } case kAVXI16x8GeU: { - CpuFeatureScope avx_scope(tasm(), AVX); + CpuFeatureScope avx_scope(masm(), AVX); XMMRegister src1 = i.InputSimd128Register(0); Operand src2 = i.InputOperand(1); __ vpminuw(kScratchDoubleReg, src1, src2); @@ -2844,7 +2843,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction( break; } case kAVXI8x16Ne: { - 
CpuFeatureScope avx_scope(tasm(), AVX); + CpuFeatureScope avx_scope(masm(), AVX); __ vpcmpeqb(i.OutputSimd128Register(), i.InputSimd128Register(0), i.InputOperand(1)); __ vpcmpeqb(kScratchDoubleReg, kScratchDoubleReg, kScratchDoubleReg); @@ -2859,7 +2858,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction( } case kSSEI8x16GeS: { DCHECK_EQ(i.OutputSimd128Register(), i.InputSimd128Register(0)); - CpuFeatureScope sse_scope(tasm(), SSE4_1); + CpuFeatureScope sse_scope(masm(), SSE4_1); XMMRegister dst = i.OutputSimd128Register(); Operand src = i.InputOperand(1); __ pminsb(dst, src); @@ -2867,7 +2866,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction( break; } case kAVXI8x16GeS: { - CpuFeatureScope avx_scope(tasm(), AVX); + CpuFeatureScope avx_scope(masm(), AVX); XMMRegister src1 = i.InputSimd128Register(0); Operand src2 = i.InputOperand(1); __ vpminsb(kScratchDoubleReg, src1, src2); @@ -2925,7 +2924,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction( break; } case kAVXI8x16GtU: { - CpuFeatureScope avx_scope(tasm(), AVX); + CpuFeatureScope avx_scope(masm(), AVX); XMMRegister dst = i.OutputSimd128Register(); XMMRegister src1 = i.InputSimd128Register(0); Operand src2 = i.InputOperand(1); @@ -2944,7 +2943,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction( break; } case kAVXI8x16GeU: { - CpuFeatureScope avx_scope(tasm(), AVX); + CpuFeatureScope avx_scope(masm(), AVX); XMMRegister src1 = i.InputSimd128Register(0); Operand src2 = i.InputOperand(1); __ vpminub(kScratchDoubleReg, src1, src2); @@ -3183,7 +3182,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction( XMMRegister src = i.InputSimd128Register(0); uint8_t lane = i.InputUint8(1) & 0xf; if (CpuFeatures::IsSupported(AVX)) { - CpuFeatureScope avx_scope(tasm(), AVX); + CpuFeatureScope avx_scope(masm(), AVX); if (lane < 8) { __ vpunpcklbw(dst, src, src); } else { @@ -3234,7 +3233,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction( ASSEMBLE_SIMD_PUNPCK_SHUFFLE(punpcklbw); break; case kSSES16x8UnzipHigh: { - CpuFeatureScope sse_scope(tasm(), SSE4_1); + CpuFeatureScope sse_scope(masm(), SSE4_1); XMMRegister dst = i.OutputSimd128Register(); XMMRegister src2 = dst; DCHECK_EQ(dst, i.InputSimd128Register(0)); @@ -3248,7 +3247,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction( break; } case kAVXS16x8UnzipHigh: { - CpuFeatureScope avx_scope(tasm(), AVX); + CpuFeatureScope avx_scope(masm(), AVX); XMMRegister dst = i.OutputSimd128Register(); XMMRegister src2 = dst; if (instr->InputCount() == 2) { @@ -3260,7 +3259,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction( break; } case kSSES16x8UnzipLow: { - CpuFeatureScope sse_scope(tasm(), SSE4_1); + CpuFeatureScope sse_scope(masm(), SSE4_1); XMMRegister dst = i.OutputSimd128Register(); XMMRegister src2 = dst; DCHECK_EQ(dst, i.InputSimd128Register(0)); @@ -3274,7 +3273,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction( break; } case kAVXS16x8UnzipLow: { - CpuFeatureScope avx_scope(tasm(), AVX); + CpuFeatureScope avx_scope(masm(), AVX); XMMRegister dst = i.OutputSimd128Register(); XMMRegister src2 = dst; __ vpxor(kScratchDoubleReg, kScratchDoubleReg, kScratchDoubleReg); @@ -3301,7 +3300,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction( break; } case kAVXS8x16UnzipHigh: { - CpuFeatureScope avx_scope(tasm(), AVX); + CpuFeatureScope avx_scope(masm(), AVX); XMMRegister dst = i.OutputSimd128Register(); 
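// Sketch (illustration only) of the root-register folding used by the
// instruction-selector hunks below: when an external reference sits at a
// fixed offset from kRootRegister, a "base + constant index" access can be
// rewritten into a single root-relative operand, dropping the separate
// base pointer entirely:
//
//   ptrdiff_t delta =
//       constant_index +
//       MacroAssemblerBase::RootRegisterOffsetForExternalReference(isolate,
//                                                                  ref);
//   // if is_int32(delta): emit the access as [kRootRegister + delta]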
XMMRegister src2 = dst; if (instr->InputCount() == 2) { @@ -3328,7 +3327,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction( break; } case kAVXS8x16UnzipLow: { - CpuFeatureScope avx_scope(tasm(), AVX); + CpuFeatureScope avx_scope(masm(), AVX); XMMRegister dst = i.OutputSimd128Register(); XMMRegister src2 = dst; if (instr->InputCount() == 2) { @@ -3357,7 +3356,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction( break; } case kAVXS8x16TransposeLow: { - CpuFeatureScope avx_scope(tasm(), AVX); + CpuFeatureScope avx_scope(masm(), AVX); XMMRegister dst = i.OutputSimd128Register(); if (instr->InputCount() == 1) { __ vpsllw(kScratchDoubleReg, i.InputSimd128Register(0), 8); @@ -3387,7 +3386,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction( break; } case kAVXS8x16TransposeHigh: { - CpuFeatureScope avx_scope(tasm(), AVX); + CpuFeatureScope avx_scope(masm(), AVX); XMMRegister dst = i.OutputSimd128Register(); if (instr->InputCount() == 1) { __ vpsrlw(dst, i.InputSimd128Register(0), 8); @@ -3423,7 +3422,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction( case kAVXS8x4Reverse: case kAVXS8x8Reverse: { DCHECK_EQ(1, instr->InputCount()); - CpuFeatureScope avx_scope(tasm(), AVX); + CpuFeatureScope avx_scope(masm(), AVX); XMMRegister dst = i.OutputSimd128Register(); XMMRegister src = dst; if (arch_opcode != kAVXS8x2Reverse) { @@ -4205,8 +4204,8 @@ void CodeGenerator::AssembleReturn(InstructionOperand* additional_pop_count) { __ j(greater, &mismatch_return, Label::kNear); __ Ret(parameter_slots * kSystemPointerSize, scratch_reg); __ bind(&mismatch_return); - __ DropArguments(argc_reg, scratch_reg, TurboAssembler::kCountIsInteger, - TurboAssembler::kCountIncludesReceiver); + __ DropArguments(argc_reg, scratch_reg, MacroAssembler::kCountIsInteger, + MacroAssembler::kCountIncludesReceiver); // We use a return instead of a jump for better return address prediction. __ Ret(); } else if (additional_pop_count->IsImmediate()) { diff --git a/src/compiler/backend/ia32/instruction-selector-ia32.cc b/src/compiler/backend/ia32/instruction-selector-ia32.cc index 0be9186615..44b24fa829 100644 --- a/src/compiler/backend/ia32/instruction-selector-ia32.cc +++ b/src/compiler/backend/ia32/instruction-selector-ia32.cc @@ -18,7 +18,7 @@ #include "src/codegen/ia32/assembler-ia32.h" #include "src/codegen/ia32/register-ia32.h" #include "src/codegen/machine-type.h" -#include "src/codegen/turbo-assembler.h" +#include "src/codegen/macro-assembler-base.h" #include "src/common/globals.h" #include "src/compiler/backend/instruction-codes.h" #include "src/compiler/backend/instruction-selector-impl.h" @@ -208,7 +208,7 @@ class IA32OperandGenerator final : public OperandGenerator { m.object().ResolvedValue())) { ptrdiff_t const delta = m.index().ResolvedValue() + - TurboAssemblerBase::RootRegisterOffsetForExternalReference( + MacroAssemblerBase::RootRegisterOffsetForExternalReference( selector()->isolate(), m.object().ResolvedValue()); if (is_int32(delta)) { inputs[(*input_count)++] = TempImmediate(static_cast<int32_t>(delta)); diff --git a/src/compiler/backend/instruction-selector.cc b/src/compiler/backend/instruction-selector.cc index 2aa074ba72..0a2c18dc5f 100644 --- a/src/compiler/backend/instruction-selector.cc +++ b/src/compiler/backend/instruction-selector.cc @@ -451,7 +451,7 @@ bool InstructionSelector::CanAddressRelativeToRootsRegister( // 3. IsAddressableThroughRootRegister: Is the target address guaranteed to // have a fixed root-relative offset?
If so, we can ignore 2. const bool this_root_relative_offset_is_constant = - TurboAssemblerBase::IsAddressableThroughRootRegister(isolate(), + MacroAssemblerBase::IsAddressableThroughRootRegister(isolate(), reference); return this_root_relative_offset_is_constant; } diff --git a/src/compiler/backend/loong64/code-generator-loong64.cc b/src/compiler/backend/loong64/code-generator-loong64.cc index cf08ad96d3..a9944cf866 100644 --- a/src/compiler/backend/loong64/code-generator-loong64.cc +++ b/src/compiler/backend/loong64/code-generator-loong64.cc @@ -23,7 +23,7 @@ namespace v8 { namespace internal { namespace compiler { -#define __ tasm()-> +#define __ masm()-> // TODO(LOONG_dev): consider renaming these macros. #define TRACE_MSG(msg) \ @@ -450,8 +450,8 @@ FPUCondition FlagsConditionToConditionCmpFPU(bool* predicate, #define ASSEMBLE_IEEE754_BINOP(name) \ do { \ - FrameScope scope(tasm(), StackFrame::MANUAL); \ - UseScratchRegisterScope temps(tasm()); \ + FrameScope scope(masm(), StackFrame::MANUAL); \ + UseScratchRegisterScope temps(masm()); \ Register scratch = temps.Acquire(); \ __ PrepareCallCFunction(0, 2, scratch); \ __ CallCFunction(ExternalReference::ieee754_##name##_function(), 0, 2); \ @@ -459,8 +459,8 @@ FPUCondition FlagsConditionToConditionCmpFPU(bool* predicate, #define ASSEMBLE_IEEE754_UNOP(name) \ do { \ - FrameScope scope(tasm(), StackFrame::MANUAL); \ - UseScratchRegisterScope temps(tasm()); \ + FrameScope scope(masm(), StackFrame::MANUAL); \ + UseScratchRegisterScope temps(masm()); \ Register scratch = temps.Acquire(); \ __ PrepareCallCFunction(0, 1, scratch); \ __ CallCFunction(ExternalReference::ieee754_##name##_function(), 0, 1); \ @@ -487,7 +487,7 @@ void CodeGenerator::AssemblePrepareTailCall() { namespace { -void AdjustStackPointerForTailCall(TurboAssembler* tasm, +void AdjustStackPointerForTailCall(MacroAssembler* masm, FrameAccessState* state, int new_slot_above_sp, bool allow_shrinkage = true) { @@ -495,10 +495,10 @@ void AdjustStackPointerForTailCall(TurboAssembler* tasm, StandardFrameConstants::kFixedSlotCountAboveFp; int stack_slot_delta = new_slot_above_sp - current_sp_offset; if (stack_slot_delta > 0) { - tasm->Sub_d(sp, sp, stack_slot_delta * kSystemPointerSize); + masm->Sub_d(sp, sp, stack_slot_delta * kSystemPointerSize); state->IncreaseSPDelta(stack_slot_delta); } else if (allow_shrinkage && stack_slot_delta < 0) { - tasm->Add_d(sp, sp, -stack_slot_delta * kSystemPointerSize); + masm->Add_d(sp, sp, -stack_slot_delta * kSystemPointerSize); state->IncreaseSPDelta(stack_slot_delta); } } @@ -507,19 +507,19 @@ void AdjustStackPointerForTailCall(TurboAssembler* tasm, void CodeGenerator::AssembleTailCallBeforeGap(Instruction* instr, int first_unused_slot_offset) { - AdjustStackPointerForTailCall(tasm(), frame_access_state(), + AdjustStackPointerForTailCall(masm(), frame_access_state(), first_unused_slot_offset, false); } void CodeGenerator::AssembleTailCallAfterGap(Instruction* instr, int first_unused_slot_offset) { - AdjustStackPointerForTailCall(tasm(), frame_access_state(), + AdjustStackPointerForTailCall(masm(), frame_access_state(), first_unused_slot_offset); } // Check that {kJavaScriptCallCodeStartRegister} is correct. void CodeGenerator::AssembleCodeStartRegisterCheck() { - UseScratchRegisterScope temps(tasm()); + UseScratchRegisterScope temps(masm()); Register scratch = temps.Acquire(); __ ComputeCodeStartAddress(scratch); __ Assert(eq, AbortReason::kWrongFunctionCodeStart, @@ -534,7 +534,7 @@ void CodeGenerator::AssembleCodeStartRegisterCheck() { // 2. 
test kMarkedForDeoptimizationBit in those flags; and // 3. if it is not zero then it jumps to the builtin. void CodeGenerator::BailoutIfDeoptimized() { - UseScratchRegisterScope temps(tasm()); + UseScratchRegisterScope temps(masm()); Register scratch = temps.Acquire(); int offset = InstructionStream::kCodeOffset - InstructionStream::kHeaderSize; __ Ld_d(scratch, MemOperand(kJavaScriptCallCodeStartRegister, offset)); @@ -628,7 +628,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction( case kArchCallJSFunction: { Register func = i.InputRegister(0); if (v8_flags.debug_code) { - UseScratchRegisterScope temps(tasm()); + UseScratchRegisterScope temps(masm()); Register scratch = temps.Acquire(); // Check the function's context matches the context argument. __ Ld_d(scratch, FieldMemOperand(func, JSFunction::kContextOffset)); @@ -642,7 +642,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction( break; } case kArchPrepareCallCFunction: { - UseScratchRegisterScope temps(tasm()); + UseScratchRegisterScope temps(masm()); Register scratch = temps.Acquire(); int const num_gp_parameters = ParamField::decode(instr->opcode()); int const num_fp_parameters = FPParamField::decode(instr->opcode()); @@ -749,7 +749,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction( { // We don't actually want to generate a pile of code for this, so just // claim there is a stack frame, without generating one. - FrameScope scope(tasm(), StackFrame::NO_FRAME_TYPE); + FrameScope scope(masm(), StackFrame::NO_FRAME_TYPE); __ Call(isolate()->builtins()->code_handle(Builtin::kAbortCSADcheck), RelocInfo::CODE_TARGET); } @@ -829,7 +829,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction( } else { DCHECK_EQ(kArchAtomicStoreWithWriteBarrier, arch_opcode); DCHECK_EQ(addressing_mode, kMode_MRI); - UseScratchRegisterScope temps(tasm()); + UseScratchRegisterScope temps(masm()); Register scratch = temps.Acquire(); __ Add_d(scratch, object, Operand(i.InputInt64(1))); __ amswap_db_d(zero_reg, value, scratch); @@ -843,7 +843,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction( break; } case kArchStackSlot: { - UseScratchRegisterScope temps(tasm()); + UseScratchRegisterScope temps(masm()); Register scratch = temps.Acquire(); FrameOffset offset = frame_access_state()->GetFrameOffset(i.InputInt32(0)); @@ -1225,8 +1225,8 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction( break; case kLoong64Float64Mod: { // TODO(turbofan): implement directly. - FrameScope scope(tasm(), StackFrame::MANUAL); - UseScratchRegisterScope temps(tasm()); + FrameScope scope(masm(), StackFrame::MANUAL); + UseScratchRegisterScope temps(masm()); Register scratch = temps.Acquire(); __ PrepareCallCFunction(0, 2, scratch); __ CallCFunction(ExternalReference::mod_two_doubles_operation(), 0, 2); @@ -1363,7 +1363,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction( __ ftintrz_w_s(scratch_d, i.InputDoubleRegister(0)); __ movfr2gr_s(i.OutputRegister(), scratch_d); if (set_overflow_to_min_i32) { - UseScratchRegisterScope temps(tasm()); + UseScratchRegisterScope temps(masm()); Register scratch = temps.Acquire(); // Avoid INT32_MAX as an overflow indicator and use INT32_MIN instead, // because INT32_MIN allows easier out-of-bounds detection. 
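// Conceptual sketch (not the emitted instructions) of the overflow
// normalization described above: the truncating conversion yields
// INT32_MAX as its out-of-range indicator, and the generated code maps
// that onto INT32_MIN so a single signed comparison can reject the result:
//
//   int32_t NormalizeOverflow(int32_t truncated) {
//     return truncated == std::numeric_limits<int32_t>::max()
//                ? std::numeric_limits<int32_t>::min()
//                : truncated;
//   }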
@@ -1392,7 +1392,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction( break; } case kLoong64Float64ToInt64: { - UseScratchRegisterScope temps(tasm()); + UseScratchRegisterScope temps(masm()); Register scratch = temps.Acquire(); FPURegister scratch_d = kScratchDoubleReg; @@ -1438,7 +1438,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction( bool set_overflow_to_min_i32 = MiscField::decode(instr->opcode()); __ Ftintrz_uw_s(i.OutputRegister(), i.InputDoubleRegister(0), scratch); if (set_overflow_to_min_i32) { - UseScratchRegisterScope temps(tasm()); + UseScratchRegisterScope temps(masm()); Register scratch = temps.Acquire(); // Avoid UINT32_MAX as an overflow indicator and use 0 instead, // because 0 allows easier out-of-bounds detection. @@ -1863,11 +1863,11 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction( << "\""; \ UNIMPLEMENTED(); -void AssembleBranchToLabels(CodeGenerator* gen, TurboAssembler* tasm, +void AssembleBranchToLabels(CodeGenerator* gen, MacroAssembler* masm, Instruction* instr, FlagsCondition condition, Label* tlabel, Label* flabel, bool fallthru) { #undef __ -#define __ tasm-> +#define __ masm-> Loong64OperandConverter i(gen, instr); // LOONG64 does not have condition code flags, so compare and branch are @@ -1882,7 +1882,7 @@ void AssembleBranchToLabels(CodeGenerator* gen, TurboAssembler* tasm, __ Branch(tlabel, cc, t8, Operand(zero_reg)); } else if (instr->arch_opcode() == kLoong64Add_d || instr->arch_opcode() == kLoong64Sub_d) { - UseScratchRegisterScope temps(tasm); + UseScratchRegisterScope temps(masm); Register scratch = temps.Acquire(); Register scratch2 = temps.Acquire(); Condition cc = FlagsConditionToConditionOvf(condition); @@ -1941,7 +1941,7 @@ void AssembleBranchToLabels(CodeGenerator* gen, TurboAssembler* tasm, } if (!fallthru) __ Branch(flabel); // no fallthru to flabel. #undef __ -#define __ tasm()-> +#define __ masm()-> } // Assembles branches after an instruction. @@ -1949,7 +1949,7 @@ void CodeGenerator::AssembleArchBranch(Instruction* instr, BranchInfo* branch) { Label* tlabel = branch->true_label; Label* flabel = branch->false_label; - AssembleBranchToLabels(this, tasm(), instr, branch->condition, tlabel, flabel, + AssembleBranchToLabels(this, masm(), instr, branch->condition, tlabel, flabel, branch->fallthru); } @@ -2014,7 +2014,7 @@ void CodeGenerator::AssembleArchTrap(Instruction* instr, }; auto ool = zone()->New<OutOfLineTrap>(this, instr); Label* tlabel = ool->entry(); - AssembleBranchToLabels(this, tasm(), instr, condition, tlabel, nullptr, true); + AssembleBranchToLabels(this, masm(), instr, condition, tlabel, nullptr, true); } #endif // V8_ENABLE_WEBASSEMBLY @@ -2041,7 +2041,7 @@ void CodeGenerator::AssembleArchBoolean(Instruction* instr, return; } else if (instr->arch_opcode() == kLoong64Add_d || instr->arch_opcode() == kLoong64Sub_d) { - UseScratchRegisterScope temps(tasm()); + UseScratchRegisterScope temps(masm()); Register scratch = temps.Acquire(); Condition cc = FlagsConditionToConditionOvf(condition); // Check for overflow creates 1 or 0 for result. @@ -2289,7 +2289,7 @@ void CodeGenerator::AssembleConstructFrame() { // exception unconditionally. Thereby we can avoid the integer overflow // check in the condition code.
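// Sketch (illustration only) of the stack check guarded below: a frame at
// least as large as the configured stack necessarily overflows, so it
// throws unconditionally; only smaller frames compare sp against the real
// stack limit, and then "limit + frame_size" cannot overflow:
//
//   bool FrameFits(uintptr_t sp, uintptr_t real_limit,
//                  size_t frame_size_bytes) {
//     if (frame_size_bytes >= v8_flags.stack_size * KB) return false;
//     return sp >= real_limit + frame_size_bytes;
//   }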
if (required_slots * kSystemPointerSize < v8_flags.stack_size * KB) { - UseScratchRegisterScope temps(tasm()); + UseScratchRegisterScope temps(masm()); Register scratch = temps.Acquire(); __ Ld_d(scratch, FieldMemOperand( kWasmInstanceRegister, @@ -2444,7 +2444,7 @@ AllocatedOperand CodeGenerator::Push(InstructionOperand* source) { __ Push(g.ToRegister(source)); frame_access_state()->IncreaseSPDelta(new_slots); } else if (source->IsStackSlot()) { - UseScratchRegisterScope temps(tasm()); + UseScratchRegisterScope temps(masm()); Register scratch = temps.Acquire(); __ Ld_d(scratch, g.ToMemOperand(source)); __ Push(scratch); @@ -2467,7 +2467,7 @@ void CodeGenerator::Pop(InstructionOperand* dest, MachineRepresentation rep) { if (dest->IsRegister()) { __ Pop(g.ToRegister(dest)); } else if (dest->IsStackSlot()) { - UseScratchRegisterScope temps(tasm()); + UseScratchRegisterScope temps(masm()); Register scratch = temps.Acquire(); __ Pop(scratch); __ St_d(scratch, g.ToMemOperand(dest)); @@ -2495,7 +2495,7 @@ void CodeGenerator::MoveToTempLocation(InstructionOperand* source, MachineRepresentation rep) { // Must be kept in sync with {MoveTempLocationTo}. DCHECK(!source->IsImmediate()); - move_cycle_.temps.emplace(tasm()); + move_cycle_.temps.emplace(masm()); auto& temps = *move_cycle_.temps; // Temporarily exclude the reserved scratch registers while we pick one to // resolve the move cycle. Re-include them immediately afterwards as they @@ -2585,7 +2585,7 @@ void CodeGenerator::MoveTempLocationTo(InstructionOperand* dest, void CodeGenerator::SetPendingMove(MoveOperands* move) { InstructionOperand* src = &move->source(); InstructionOperand* dst = &move->destination(); - UseScratchRegisterScope temps(tasm()); + UseScratchRegisterScope temps(masm()); if (src->IsConstant() || (src->IsStackSlot() && dst->IsStackSlot())) { Register temp = temps.Acquire(); move_cycle_.scratch_regs.set(temp); @@ -2642,7 +2642,7 @@ void CodeGenerator::AssembleMove(InstructionOperand* source, if (destination->IsRegister()) { __ Ld_d(g.ToRegister(destination), src); } else { - UseScratchRegisterScope temps(tasm()); + UseScratchRegisterScope temps(masm()); Register scratch = temps.Acquire(); __ Ld_d(scratch, src); __ St_d(scratch, g.ToMemOperand(destination)); @@ -2650,7 +2650,7 @@ void CodeGenerator::AssembleMove(InstructionOperand* source, } else if (source->IsConstant()) { Constant src = g.ToConstant(source); if (destination->IsRegister() || destination->IsStackSlot()) { - UseScratchRegisterScope temps(tasm()); + UseScratchRegisterScope temps(masm()); Register scratch = temps.Acquire(); Register dst = destination->IsRegister() ? g.ToRegister(destination) : scratch; @@ -2697,7 +2697,7 @@ void CodeGenerator::AssembleMove(InstructionOperand* source, if (base::bit_cast(src.ToFloat32()) == 0) { __ St_d(zero_reg, dst); } else { - UseScratchRegisterScope temps(tasm()); + UseScratchRegisterScope temps(masm()); Register scratch = temps.Acquire(); __ li(scratch, Operand(base::bit_cast(src.ToFloat32()))); __ St_d(scratch, dst); @@ -2748,7 +2748,7 @@ void CodeGenerator::AssembleSwap(InstructionOperand* source, // Dispatch on the source and destination operand kinds. Not all // combinations are possible. if (source->IsRegister()) { - UseScratchRegisterScope temps(tasm()); + UseScratchRegisterScope temps(masm()); Register scratch = temps.Acquire(); // Register-register. 
Register src = g.ToRegister(source); @@ -2770,7 +2770,7 @@ void CodeGenerator::AssembleSwap(InstructionOperand* source, // Since the Ld instruction may need a scratch reg, // we should not use both of the two scratch registers in // UseScratchRegisterScope here. - UseScratchRegisterScope temps(tasm()); + UseScratchRegisterScope temps(masm()); Register scratch = temps.Acquire(); FPURegister scratch_d = kScratchDoubleReg; MemOperand src = g.ToMemOperand(source); @@ -2796,7 +2796,7 @@ void CodeGenerator::AssembleSwap(InstructionOperand* source, } } else if (source->IsFPStackSlot()) { DCHECK(destination->IsFPStackSlot()); - UseScratchRegisterScope temps(tasm()); + UseScratchRegisterScope temps(masm()); Register scratch = temps.Acquire(); FPURegister scratch_d = kScratchDoubleReg; MemOperand src = g.ToMemOperand(source); diff --git a/src/compiler/backend/loong64/instruction-selector-loong64.cc b/src/compiler/backend/loong64/instruction-selector-loong64.cc index b43bc630e9..6f7841be97 100644 --- a/src/compiler/backend/loong64/instruction-selector-loong64.cc +++ b/src/compiler/backend/loong64/instruction-selector-loong64.cc @@ -360,7 +360,7 @@ void EmitLoad(InstructionSelector* selector, Node* node, InstructionCode opcode, selector->CanAddressRelativeToRootsRegister(m.ResolvedValue())) { ptrdiff_t const delta = g.GetIntegerConstantValue(index) + - TurboAssemblerBase::RootRegisterOffsetForExternalReference( + MacroAssemblerBase::RootRegisterOffsetForExternalReference( selector->isolate(), m.ResolvedValue()); // Check that the delta is a 32-bit integer due to the limitations of // immediate operands. @@ -560,7 +560,7 @@ void InstructionSelector::VisitStore(Node* node) { CanAddressRelativeToRootsRegister(m.ResolvedValue())) { ptrdiff_t const delta = g.GetIntegerConstantValue(index) + - TurboAssemblerBase::RootRegisterOffsetForExternalReference( + MacroAssemblerBase::RootRegisterOffsetForExternalReference( isolate(), m.ResolvedValue()); // Check that the delta is a 32-bit integer due to the limitations of // immediate operands. diff --git a/src/compiler/backend/mips64/code-generator-mips64.cc b/src/compiler/backend/mips64/code-generator-mips64.cc index 29d1777720..8757782513 100644 --- a/src/compiler/backend/mips64/code-generator-mips64.cc +++ b/src/compiler/backend/mips64/code-generator-mips64.cc @@ -23,7 +23,7 @@ namespace v8 { namespace internal { namespace compiler { -#define __ tasm()-> +#define __ masm()-> // TODO(plind): consider renaming these macros. 
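// Sketch (illustration only) of the convention behind the "#define __"
// hunks throughout this patch: each backend aliases __ to the code
// generator's assembler accessor so emission sites read like an assembly
// listing, and the accessor rename (tasm() -> masm()) is what forces every
// macro edit:
//
//   #define __ masm()->        // was: #define __ tasm()->
//   __ Ld_d(scratch, src);     // expands to masm()->Ld_d(scratch, src);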
#define TRACE_MSG(msg) \ @@ -464,7 +464,7 @@ FPUCondition FlagsConditionToConditionCmpFPU(bool* predicate, #define ASSEMBLE_IEEE754_BINOP(name) \ do { \ - FrameScope scope(tasm(), StackFrame::MANUAL); \ + FrameScope scope(masm(), StackFrame::MANUAL); \ __ PrepareCallCFunction(0, 2, kScratchReg); \ __ MovToFloatParameters(i.InputDoubleRegister(0), \ i.InputDoubleRegister(1)); \ @@ -475,7 +475,7 @@ FPUCondition FlagsConditionToConditionCmpFPU(bool* predicate, #define ASSEMBLE_IEEE754_UNOP(name) \ do { \ - FrameScope scope(tasm(), StackFrame::MANUAL); \ + FrameScope scope(masm(), StackFrame::MANUAL); \ __ PrepareCallCFunction(0, 1, kScratchReg); \ __ MovToFloatParameter(i.InputDoubleRegister(0)); \ __ CallCFunction(ExternalReference::ieee754_##name##_function(), 0, 1); \ @@ -504,7 +504,7 @@ void CodeGenerator::AssemblePrepareTailCall() { namespace { -void AdjustStackPointerForTailCall(TurboAssembler* tasm, +void AdjustStackPointerForTailCall(MacroAssembler* masm, FrameAccessState* state, int new_slot_above_sp, bool allow_shrinkage = true) { @@ -512,10 +512,10 @@ void AdjustStackPointerForTailCall(TurboAssembler* tasm, StandardFrameConstants::kFixedSlotCountAboveFp; int stack_slot_delta = new_slot_above_sp - current_sp_offset; if (stack_slot_delta > 0) { - tasm->Dsubu(sp, sp, stack_slot_delta * kSystemPointerSize); + masm->Dsubu(sp, sp, stack_slot_delta * kSystemPointerSize); state->IncreaseSPDelta(stack_slot_delta); } else if (allow_shrinkage && stack_slot_delta < 0) { - tasm->Daddu(sp, sp, -stack_slot_delta * kSystemPointerSize); + masm->Daddu(sp, sp, -stack_slot_delta * kSystemPointerSize); state->IncreaseSPDelta(stack_slot_delta); } } @@ -524,13 +524,13 @@ void AdjustStackPointerForTailCall(TurboAssembler* tasm, void CodeGenerator::AssembleTailCallBeforeGap(Instruction* instr, int first_unused_slot_offset) { - AdjustStackPointerForTailCall(tasm(), frame_access_state(), + AdjustStackPointerForTailCall(masm(), frame_access_state(), first_unused_slot_offset, false); } void CodeGenerator::AssembleTailCallAfterGap(Instruction* instr, int first_unused_slot_offset) { - AdjustStackPointerForTailCall(tasm(), frame_access_state(), + AdjustStackPointerForTailCall(masm(), frame_access_state(), first_unused_slot_offset); } @@ -766,7 +766,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction( { // We don't actually want to generate a pile of code for this, so just // claim there is a stack frame, without generating one. - FrameScope scope(tasm(), StackFrame::NO_FRAME_TYPE); + FrameScope scope(masm(), StackFrame::NO_FRAME_TYPE); __ Call(isolate()->builtins()->code_handle(Builtin::kAbortCSADcheck), RelocInfo::CODE_TARGET); } @@ -1290,7 +1290,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction( case kMips64ModD: { // TODO(bmeurer): We should really get rid of this special instruction, // and generate a CallAddress instruction instead. 
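// Worked example (sketch) for AdjustStackPointerForTailCall above: the
// helper compares where the stack pointer currently is (current_sp_offset,
// derived from the SP delta plus kFixedSlotCountAboveFp) with where the
// tail call needs it (new_slot_above_sp), then grows or shrinks the frame
// by the difference:
//
//   int delta = new_slot_above_sp - current_sp_offset;
//   if (delta > 0)      Dsubu(sp, sp, delta * kSystemPointerSize);   // grow
//   else if (delta < 0) Daddu(sp, sp, -delta * kSystemPointerSize);  // shrink
//
// e.g. current offset 3, needed 5 -> delta 2 -> allocate two more slots.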
- FrameScope scope(tasm(), StackFrame::MANUAL); + FrameScope scope(masm(), StackFrame::MANUAL); __ PrepareCallCFunction(0, 2, kScratchReg); __ MovToFloatParameters(i.InputDoubleRegister(0), i.InputDoubleRegister(1)); @@ -1771,7 +1771,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction( case kMips64StoreToStackSlot: { if (instr->InputAt(0)->IsFPRegister()) { if (instr->InputAt(0)->IsSimd128Register()) { - CpuFeatureScope msa_scope(tasm(), MIPS_SIMD); + CpuFeatureScope msa_scope(masm(), MIPS_SIMD); __ st_b(i.InputSimd128Register(0), MemOperand(sp, i.InputInt32(1))); } else { __ Sdc1(i.InputDoubleRegister(0), MemOperand(sp, i.InputInt32(1))); @@ -1790,13 +1790,13 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction( break; } case kMips64S128LoadSplat: { - CpuFeatureScope msa_scope(tasm(), MIPS_SIMD); + CpuFeatureScope msa_scope(masm(), MIPS_SIMD); auto sz = static_cast<MSASize>(MiscField::decode(instr->opcode())); __ LoadSplat(sz, i.OutputSimd128Register(), i.MemoryOperand()); break; } case kMips64S128Load8x8S: { - CpuFeatureScope msa_scope(tasm(), MIPS_SIMD); + CpuFeatureScope msa_scope(masm(), MIPS_SIMD); Simd128Register dst = i.OutputSimd128Register(); Simd128Register scratch = kSimd128ScratchReg; __ Ld(kScratchReg, i.MemoryOperand()); @@ -1806,7 +1806,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction( break; } case kMips64S128Load8x8U: { - CpuFeatureScope msa_scope(tasm(), MIPS_SIMD); + CpuFeatureScope msa_scope(masm(), MIPS_SIMD); Simd128Register dst = i.OutputSimd128Register(); __ xor_v(kSimd128RegZero, kSimd128RegZero, kSimd128RegZero); __ Ld(kScratchReg, i.MemoryOperand()); @@ -1815,7 +1815,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction( break; } case kMips64S128Load16x4S: { - CpuFeatureScope msa_scope(tasm(), MIPS_SIMD); + CpuFeatureScope msa_scope(masm(), MIPS_SIMD); Simd128Register dst = i.OutputSimd128Register(); Simd128Register scratch = kSimd128ScratchReg; __ Ld(kScratchReg, i.MemoryOperand()); @@ -1825,7 +1825,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction( break; } case kMips64S128Load16x4U: { - CpuFeatureScope msa_scope(tasm(), MIPS_SIMD); + CpuFeatureScope msa_scope(masm(), MIPS_SIMD); Simd128Register dst = i.OutputSimd128Register(); __ xor_v(kSimd128RegZero, kSimd128RegZero, kSimd128RegZero); __ Ld(kScratchReg, i.MemoryOperand()); @@ -1834,7 +1834,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction( break; } case kMips64S128Load32x2S: { - CpuFeatureScope msa_scope(tasm(), MIPS_SIMD); + CpuFeatureScope msa_scope(masm(), MIPS_SIMD); Simd128Register dst = i.OutputSimd128Register(); Simd128Register scratch = kSimd128ScratchReg; __ Ld(kScratchReg, i.MemoryOperand()); @@ -1844,7 +1844,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction( break; } case kMips64S128Load32x2U: { - CpuFeatureScope msa_scope(tasm(), MIPS_SIMD); + CpuFeatureScope msa_scope(masm(), MIPS_SIMD); Simd128Register dst = i.OutputSimd128Register(); __ xor_v(kSimd128RegZero, kSimd128RegZero, kSimd128RegZero); __ Ld(kScratchReg, i.MemoryOperand()); @@ -1853,7 +1853,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction( break; } case kMips64S128Load32Zero: { - CpuFeatureScope msa_scope(tasm(), MIPS_SIMD); + CpuFeatureScope msa_scope(masm(), MIPS_SIMD); Simd128Register dst = i.OutputSimd128Register(); __ xor_v(dst, dst, dst); __ Lwu(kScratchReg, i.MemoryOperand()); @@ -1861,7 +1861,7 @@ CodeGenerator::CodeGenResult
CodeGenerator::AssembleArchInstruction( break; } case kMips64S128Load64Zero: { - CpuFeatureScope msa_scope(tasm(), MIPS_SIMD); + CpuFeatureScope msa_scope(masm(), MIPS_SIMD); Simd128Register dst = i.OutputSimd128Register(); __ xor_v(dst, dst, dst); __ Ld(kScratchReg, i.MemoryOperand()); @@ -1869,7 +1869,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction( break; } case kMips64S128LoadLane: { - CpuFeatureScope msa_scope(tasm(), MIPS_SIMD); + CpuFeatureScope msa_scope(masm(), MIPS_SIMD); Simd128Register dst = i.OutputSimd128Register(); DCHECK_EQ(dst, i.InputSimd128Register(0)); auto sz = static_cast(MiscField::decode(instr->opcode())); @@ -1877,7 +1877,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction( break; } case kMips64S128StoreLane: { - CpuFeatureScope msa_scope(tasm(), MIPS_SIMD); + CpuFeatureScope msa_scope(masm(), MIPS_SIMD); Simd128Register src = i.InputSimd128Register(0); auto sz = static_cast(MiscField::decode(instr->opcode())); __ StoreLane(sz, src, i.InputUint8(1), i.MemoryOperand(2)); @@ -2055,7 +2055,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction( i.InputRegister(0), Operand(i.InputRegister(1))); break; case kMips64S128Const: { - CpuFeatureScope msa_scope(tasm(), MIPS_SIMD); + CpuFeatureScope msa_scope(masm(), MIPS_SIMD); Simd128Register dst = i.OutputSimd128Register(); uint64_t imm1 = make_uint64(i.InputUint32(1), i.InputUint32(0)); uint64_t imm2 = make_uint64(i.InputUint32(3), i.InputUint32(2)); @@ -2066,30 +2066,30 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction( break; } case kMips64S128Zero: { - CpuFeatureScope msa_scope(tasm(), MIPS_SIMD); + CpuFeatureScope msa_scope(masm(), MIPS_SIMD); Simd128Register dst = i.OutputSimd128Register(); __ xor_v(dst, dst, dst); break; } case kMips64S128AllOnes: { - CpuFeatureScope msa_scope(tasm(), MIPS_SIMD); + CpuFeatureScope msa_scope(masm(), MIPS_SIMD); Simd128Register dst = i.OutputSimd128Register(); __ ceq_d(dst, dst, dst); break; } case kMips64I32x4Splat: { - CpuFeatureScope msa_scope(tasm(), MIPS_SIMD); + CpuFeatureScope msa_scope(masm(), MIPS_SIMD); __ fill_w(i.OutputSimd128Register(), i.InputRegister(0)); break; } case kMips64I32x4ExtractLane: { - CpuFeatureScope msa_scope(tasm(), MIPS_SIMD); + CpuFeatureScope msa_scope(masm(), MIPS_SIMD); __ copy_s_w(i.OutputRegister(), i.InputSimd128Register(0), i.InputInt8(1)); break; } case kMips64I32x4ReplaceLane: { - CpuFeatureScope msa_scope(tasm(), MIPS_SIMD); + CpuFeatureScope msa_scope(masm(), MIPS_SIMD); Simd128Register src = i.InputSimd128Register(0); Simd128Register dst = i.OutputSimd128Register(); if (src != dst) { @@ -2099,54 +2099,54 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction( break; } case kMips64I32x4Add: { - CpuFeatureScope msa_scope(tasm(), MIPS_SIMD); + CpuFeatureScope msa_scope(masm(), MIPS_SIMD); __ addv_w(i.OutputSimd128Register(), i.InputSimd128Register(0), i.InputSimd128Register(1)); break; } case kMips64I32x4Sub: { - CpuFeatureScope msa_scope(tasm(), MIPS_SIMD); + CpuFeatureScope msa_scope(masm(), MIPS_SIMD); __ subv_w(i.OutputSimd128Register(), i.InputSimd128Register(0), i.InputSimd128Register(1)); break; } case kMips64F64x2Abs: { - CpuFeatureScope msa_scope(tasm(), MIPS_SIMD); + CpuFeatureScope msa_scope(masm(), MIPS_SIMD); __ bclri_d(i.OutputSimd128Register(), i.InputSimd128Register(0), 63); break; } case kMips64F64x2Neg: { - CpuFeatureScope msa_scope(tasm(), MIPS_SIMD); + CpuFeatureScope msa_scope(masm(), MIPS_SIMD); __ 
bnegi_d(i.OutputSimd128Register(), i.InputSimd128Register(0), 63); break; } case kMips64F64x2Sqrt: { - CpuFeatureScope msa_scope(tasm(), MIPS_SIMD); + CpuFeatureScope msa_scope(masm(), MIPS_SIMD); __ fsqrt_d(i.OutputSimd128Register(), i.InputSimd128Register(0)); break; } case kMips64F64x2Add: { - CpuFeatureScope msa_scope(tasm(), MIPS_SIMD); + CpuFeatureScope msa_scope(masm(), MIPS_SIMD); ASSEMBLE_F64X2_ARITHMETIC_BINOP(fadd_d); break; } case kMips64F64x2Sub: { - CpuFeatureScope msa_scope(tasm(), MIPS_SIMD); + CpuFeatureScope msa_scope(masm(), MIPS_SIMD); ASSEMBLE_F64X2_ARITHMETIC_BINOP(fsub_d); break; } case kMips64F64x2Mul: { - CpuFeatureScope msa_scope(tasm(), MIPS_SIMD); + CpuFeatureScope msa_scope(masm(), MIPS_SIMD); ASSEMBLE_F64X2_ARITHMETIC_BINOP(fmul_d); break; } case kMips64F64x2Div: { - CpuFeatureScope msa_scope(tasm(), MIPS_SIMD); + CpuFeatureScope msa_scope(masm(), MIPS_SIMD); ASSEMBLE_F64X2_ARITHMETIC_BINOP(fdiv_d); break; } case kMips64F64x2Min: { - CpuFeatureScope msa_scope(tasm(), MIPS_SIMD); + CpuFeatureScope msa_scope(masm(), MIPS_SIMD); Simd128Register dst = i.OutputSimd128Register(); Simd128Register src0 = i.InputSimd128Register(0); Simd128Register src1 = i.InputSimd128Register(1); @@ -2169,7 +2169,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction( break; } case kMips64F64x2Max: { - CpuFeatureScope msa_scope(tasm(), MIPS_SIMD); + CpuFeatureScope msa_scope(masm(), MIPS_SIMD); Simd128Register dst = i.OutputSimd128Register(); Simd128Register src0 = i.InputSimd128Register(0); Simd128Register src1 = i.InputSimd128Register(1); @@ -2192,43 +2192,43 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction( break; } case kMips64F64x2Eq: { - CpuFeatureScope msa_scope(tasm(), MIPS_SIMD); + CpuFeatureScope msa_scope(masm(), MIPS_SIMD); __ fceq_d(i.OutputSimd128Register(), i.InputSimd128Register(0), i.InputSimd128Register(1)); break; } case kMips64F64x2Ne: { - CpuFeatureScope msa_scope(tasm(), MIPS_SIMD); + CpuFeatureScope msa_scope(masm(), MIPS_SIMD); __ fcune_d(i.OutputSimd128Register(), i.InputSimd128Register(0), i.InputSimd128Register(1)); break; } case kMips64F64x2Lt: { - CpuFeatureScope msa_scope(tasm(), MIPS_SIMD); + CpuFeatureScope msa_scope(masm(), MIPS_SIMD); __ fclt_d(i.OutputSimd128Register(), i.InputSimd128Register(0), i.InputSimd128Register(1)); break; } case kMips64F64x2Le: { - CpuFeatureScope msa_scope(tasm(), MIPS_SIMD); + CpuFeatureScope msa_scope(masm(), MIPS_SIMD); __ fcle_d(i.OutputSimd128Register(), i.InputSimd128Register(0), i.InputSimd128Register(1)); break; } case kMips64F64x2Splat: { - CpuFeatureScope msa_scope(tasm(), MIPS_SIMD); + CpuFeatureScope msa_scope(masm(), MIPS_SIMD); __ Move(kScratchReg, i.InputDoubleRegister(0)); __ fill_d(i.OutputSimd128Register(), kScratchReg); break; } case kMips64F64x2ExtractLane: { - CpuFeatureScope msa_scope(tasm(), MIPS_SIMD); + CpuFeatureScope msa_scope(masm(), MIPS_SIMD); __ copy_s_d(kScratchReg, i.InputSimd128Register(0), i.InputInt8(1)); __ Move(i.OutputDoubleRegister(), kScratchReg); break; } case kMips64F64x2ReplaceLane: { - CpuFeatureScope msa_scope(tasm(), MIPS_SIMD); + CpuFeatureScope msa_scope(masm(), MIPS_SIMD); Simd128Register src = i.InputSimd128Register(0); Simd128Register dst = i.OutputSimd128Register(); __ Move(kScratchReg, i.InputDoubleRegister(2)); @@ -2239,18 +2239,18 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction( break; } case kMips64I64x2Splat: { - CpuFeatureScope msa_scope(tasm(), MIPS_SIMD); + CpuFeatureScope msa_scope(masm(), MIPS_SIMD); 
__ fill_d(i.OutputSimd128Register(), i.InputRegister(0)); break; } case kMips64I64x2ExtractLane: { - CpuFeatureScope msa_scope(tasm(), MIPS_SIMD); + CpuFeatureScope msa_scope(masm(), MIPS_SIMD); __ copy_s_d(i.OutputRegister(), i.InputSimd128Register(0), i.InputInt8(1)); break; } case kMips64F64x2Pmin: { - CpuFeatureScope msa_scope(tasm(), MIPS_SIMD); + CpuFeatureScope msa_scope(masm(), MIPS_SIMD); Simd128Register dst = i.OutputSimd128Register(); Simd128Register lhs = i.InputSimd128Register(0); Simd128Register rhs = i.InputSimd128Register(1); @@ -2260,7 +2260,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction( break; } case kMips64F64x2Pmax: { - CpuFeatureScope msa_scope(tasm(), MIPS_SIMD); + CpuFeatureScope msa_scope(masm(), MIPS_SIMD); Simd128Register dst = i.OutputSimd128Register(); Simd128Register lhs = i.InputSimd128Register(0); Simd128Register rhs = i.InputSimd128Register(1); @@ -2270,31 +2270,31 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction( break; } case kMips64F64x2Ceil: { - CpuFeatureScope msa_scope(tasm(), MIPS_SIMD); + CpuFeatureScope msa_scope(masm(), MIPS_SIMD); __ MSARoundD(i.OutputSimd128Register(), i.InputSimd128Register(0), kRoundToPlusInf); break; } case kMips64F64x2Floor: { - CpuFeatureScope msa_scope(tasm(), MIPS_SIMD); + CpuFeatureScope msa_scope(masm(), MIPS_SIMD); __ MSARoundD(i.OutputSimd128Register(), i.InputSimd128Register(0), kRoundToMinusInf); break; } case kMips64F64x2Trunc: { - CpuFeatureScope msa_scope(tasm(), MIPS_SIMD); + CpuFeatureScope msa_scope(masm(), MIPS_SIMD); __ MSARoundD(i.OutputSimd128Register(), i.InputSimd128Register(0), kRoundToZero); break; } case kMips64F64x2NearestInt: { - CpuFeatureScope msa_scope(tasm(), MIPS_SIMD); + CpuFeatureScope msa_scope(masm(), MIPS_SIMD); __ MSARoundD(i.OutputSimd128Register(), i.InputSimd128Register(0), kRoundToNearest); break; } case kMips64F64x2ConvertLowI32x4S: { - CpuFeatureScope msa_scope(tasm(), MIPS_SIMD); + CpuFeatureScope msa_scope(masm(), MIPS_SIMD); __ xor_v(kSimd128RegZero, kSimd128RegZero, kSimd128RegZero); __ ilvr_w(kSimd128RegZero, kSimd128RegZero, i.InputSimd128Register(0)); __ slli_d(kSimd128RegZero, kSimd128RegZero, 32); @@ -2303,19 +2303,19 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction( break; } case kMips64F64x2ConvertLowI32x4U: { - CpuFeatureScope msa_scope(tasm(), MIPS_SIMD); + CpuFeatureScope msa_scope(masm(), MIPS_SIMD); __ xor_v(kSimd128RegZero, kSimd128RegZero, kSimd128RegZero); __ ilvr_w(kSimd128RegZero, kSimd128RegZero, i.InputSimd128Register(0)); __ ffint_u_d(i.OutputSimd128Register(), kSimd128RegZero); break; } case kMips64F64x2PromoteLowF32x4: { - CpuFeatureScope msa_scope(tasm(), MIPS_SIMD); + CpuFeatureScope msa_scope(masm(), MIPS_SIMD); __ fexupr_d(i.OutputSimd128Register(), i.InputSimd128Register(0)); break; } case kMips64I64x2ReplaceLane: { - CpuFeatureScope msa_scope(tasm(), MIPS_SIMD); + CpuFeatureScope msa_scope(masm(), MIPS_SIMD); Simd128Register src = i.InputSimd128Register(0); Simd128Register dst = i.OutputSimd128Register(); if (src != dst) { @@ -2325,32 +2325,32 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction( break; } case kMips64I64x2Add: { - CpuFeatureScope msa_scope(tasm(), MIPS_SIMD); + CpuFeatureScope msa_scope(masm(), MIPS_SIMD); __ addv_d(i.OutputSimd128Register(), i.InputSimd128Register(0), i.InputSimd128Register(1)); break; } case kMips64I64x2Sub: { - CpuFeatureScope msa_scope(tasm(), MIPS_SIMD); + CpuFeatureScope msa_scope(masm(), MIPS_SIMD); __ 
subv_d(i.OutputSimd128Register(), i.InputSimd128Register(0), i.InputSimd128Register(1)); break; } case kMips64I64x2Mul: { - CpuFeatureScope msa_scope(tasm(), MIPS_SIMD); + CpuFeatureScope msa_scope(masm(), MIPS_SIMD); __ mulv_d(i.OutputSimd128Register(), i.InputSimd128Register(0), i.InputSimd128Register(1)); break; } case kMips64I64x2Neg: { - CpuFeatureScope msa_scope(tasm(), MIPS_SIMD); + CpuFeatureScope msa_scope(masm(), MIPS_SIMD); __ xor_v(kSimd128RegZero, kSimd128RegZero, kSimd128RegZero); __ subv_d(i.OutputSimd128Register(), kSimd128RegZero, i.InputSimd128Register(0)); break; } case kMips64I64x2Shl: { - CpuFeatureScope msa_scope(tasm(), MIPS_SIMD); + CpuFeatureScope msa_scope(masm(), MIPS_SIMD); if (instr->InputAt(1)->IsRegister()) { __ fill_d(kSimd128ScratchReg, i.InputRegister(1)); __ sll_d(i.OutputSimd128Register(), i.InputSimd128Register(0), @@ -2362,7 +2362,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction( break; } case kMips64I64x2ShrS: { - CpuFeatureScope msa_scope(tasm(), MIPS_SIMD); + CpuFeatureScope msa_scope(masm(), MIPS_SIMD); if (instr->InputAt(1)->IsRegister()) { __ fill_d(kSimd128ScratchReg, i.InputRegister(1)); __ sra_d(i.OutputSimd128Register(), i.InputSimd128Register(0), @@ -2374,7 +2374,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction( break; } case kMips64I64x2ShrU: { - CpuFeatureScope msa_scope(tasm(), MIPS_SIMD); + CpuFeatureScope msa_scope(masm(), MIPS_SIMD); if (instr->InputAt(1)->IsRegister()) { __ fill_d(kSimd128ScratchReg, i.InputRegister(1)); __ srl_d(i.OutputSimd128Register(), i.InputSimd128Register(0), @@ -2386,7 +2386,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction( break; } case kMips64I64x2BitMask: { - CpuFeatureScope msa_scope(tasm(), MIPS_SIMD); + CpuFeatureScope msa_scope(masm(), MIPS_SIMD); Register dst = i.OutputRegister(); Simd128Register src = i.InputSimd128Register(0); Simd128Register scratch0 = kSimd128RegZero; @@ -2399,13 +2399,13 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction( break; } case kMips64I64x2Eq: { - CpuFeatureScope msa_scope(tasm(), MIPS_SIMD); + CpuFeatureScope msa_scope(masm(), MIPS_SIMD); __ ceq_d(i.OutputSimd128Register(), i.InputSimd128Register(0), i.InputSimd128Register(1)); break; } case kMips64I64x2Ne: { - CpuFeatureScope msa_scope(tasm(), MIPS_SIMD); + CpuFeatureScope msa_scope(masm(), MIPS_SIMD); __ ceq_d(i.OutputSimd128Register(), i.InputSimd128Register(0), i.InputSimd128Register(1)); __ nor_v(i.OutputSimd128Register(), i.OutputSimd128Register(), @@ -2413,26 +2413,26 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction( break; } case kMips64I64x2GtS: { - CpuFeatureScope msa_scope(tasm(), MIPS_SIMD); + CpuFeatureScope msa_scope(masm(), MIPS_SIMD); __ clt_s_d(i.OutputSimd128Register(), i.InputSimd128Register(1), i.InputSimd128Register(0)); break; } case kMips64I64x2GeS: { - CpuFeatureScope msa_scope(tasm(), MIPS_SIMD); + CpuFeatureScope msa_scope(masm(), MIPS_SIMD); __ cle_s_d(i.OutputSimd128Register(), i.InputSimd128Register(1), i.InputSimd128Register(0)); break; } case kMips64I64x2Abs: { - CpuFeatureScope msa_scope(tasm(), MIPS_SIMD); + CpuFeatureScope msa_scope(masm(), MIPS_SIMD); __ xor_v(kSimd128RegZero, kSimd128RegZero, kSimd128RegZero); __ add_a_d(i.OutputSimd128Register(), i.InputSimd128Register(0), kSimd128RegZero); break; } case kMips64I64x2SConvertI32x4Low: { - CpuFeatureScope msa_scope(tasm(), MIPS_SIMD); + CpuFeatureScope msa_scope(masm(), MIPS_SIMD); Simd128Register dst = 
i.OutputSimd128Register(); Simd128Register src = i.InputSimd128Register(0); __ ilvr_w(kSimd128ScratchReg, src, src); @@ -2441,7 +2441,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction( break; } case kMips64I64x2SConvertI32x4High: { - CpuFeatureScope msa_scope(tasm(), MIPS_SIMD); + CpuFeatureScope msa_scope(masm(), MIPS_SIMD); Simd128Register dst = i.OutputSimd128Register(); Simd128Register src = i.InputSimd128Register(0); __ ilvl_w(kSimd128ScratchReg, src, src); @@ -2450,14 +2450,14 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction( break; } case kMips64I64x2UConvertI32x4Low: { - CpuFeatureScope msa_scope(tasm(), MIPS_SIMD); + CpuFeatureScope msa_scope(masm(), MIPS_SIMD); __ xor_v(kSimd128RegZero, kSimd128RegZero, kSimd128RegZero); __ ilvr_w(i.OutputSimd128Register(), kSimd128RegZero, i.InputSimd128Register(0)); break; } case kMips64I64x2UConvertI32x4High: { - CpuFeatureScope msa_scope(tasm(), MIPS_SIMD); + CpuFeatureScope msa_scope(masm(), MIPS_SIMD); __ xor_v(kSimd128RegZero, kSimd128RegZero, kSimd128RegZero); __ ilvl_w(i.OutputSimd128Register(), kSimd128RegZero, i.InputSimd128Register(0)); @@ -2482,19 +2482,19 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction( break; } case kMips64F32x4Splat: { - CpuFeatureScope msa_scope(tasm(), MIPS_SIMD); + CpuFeatureScope msa_scope(masm(), MIPS_SIMD); __ FmoveLow(kScratchReg, i.InputSingleRegister(0)); __ fill_w(i.OutputSimd128Register(), kScratchReg); break; } case kMips64F32x4ExtractLane: { - CpuFeatureScope msa_scope(tasm(), MIPS_SIMD); + CpuFeatureScope msa_scope(masm(), MIPS_SIMD); __ copy_u_w(kScratchReg, i.InputSimd128Register(0), i.InputInt8(1)); __ FmoveLow(i.OutputSingleRegister(), kScratchReg); break; } case kMips64F32x4ReplaceLane: { - CpuFeatureScope msa_scope(tasm(), MIPS_SIMD); + CpuFeatureScope msa_scope(masm(), MIPS_SIMD); Simd128Register src = i.InputSimd128Register(0); Simd128Register dst = i.OutputSimd128Register(); __ FmoveLow(kScratchReg, i.InputSingleRegister(2)); @@ -2505,48 +2505,48 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction( break; } case kMips64F32x4SConvertI32x4: { - CpuFeatureScope msa_scope(tasm(), MIPS_SIMD); + CpuFeatureScope msa_scope(masm(), MIPS_SIMD); __ ffint_s_w(i.OutputSimd128Register(), i.InputSimd128Register(0)); break; } case kMips64F32x4UConvertI32x4: { - CpuFeatureScope msa_scope(tasm(), MIPS_SIMD); + CpuFeatureScope msa_scope(masm(), MIPS_SIMD); __ ffint_u_w(i.OutputSimd128Register(), i.InputSimd128Register(0)); break; } case kMips64I32x4Mul: { - CpuFeatureScope msa_scope(tasm(), MIPS_SIMD); + CpuFeatureScope msa_scope(masm(), MIPS_SIMD); __ mulv_w(i.OutputSimd128Register(), i.InputSimd128Register(0), i.InputSimd128Register(1)); break; } case kMips64I32x4MaxS: { - CpuFeatureScope msa_scope(tasm(), MIPS_SIMD); + CpuFeatureScope msa_scope(masm(), MIPS_SIMD); __ max_s_w(i.OutputSimd128Register(), i.InputSimd128Register(0), i.InputSimd128Register(1)); break; } case kMips64I32x4MinS: { - CpuFeatureScope msa_scope(tasm(), MIPS_SIMD); + CpuFeatureScope msa_scope(masm(), MIPS_SIMD); __ min_s_w(i.OutputSimd128Register(), i.InputSimd128Register(0), i.InputSimd128Register(1)); break; } case kMips64I32x4Eq: { - CpuFeatureScope msa_scope(tasm(), MIPS_SIMD); + CpuFeatureScope msa_scope(masm(), MIPS_SIMD); __ ceq_w(i.OutputSimd128Register(), i.InputSimd128Register(0), i.InputSimd128Register(1)); break; } case kMips64I32x4Ne: { - CpuFeatureScope msa_scope(tasm(), MIPS_SIMD); + CpuFeatureScope msa_scope(masm(), MIPS_SIMD); 
Simd128Register dst = i.OutputSimd128Register(); __ ceq_w(dst, i.InputSimd128Register(0), i.InputSimd128Register(1)); __ nor_v(dst, dst, dst); break; } case kMips64I32x4Shl: { - CpuFeatureScope msa_scope(tasm(), MIPS_SIMD); + CpuFeatureScope msa_scope(masm(), MIPS_SIMD); if (instr->InputAt(1)->IsRegister()) { __ fill_w(kSimd128ScratchReg, i.InputRegister(1)); __ sll_w(i.OutputSimd128Register(), i.InputSimd128Register(0), @@ -2558,7 +2558,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction( break; } case kMips64I32x4ShrS: { - CpuFeatureScope msa_scope(tasm(), MIPS_SIMD); + CpuFeatureScope msa_scope(masm(), MIPS_SIMD); if (instr->InputAt(1)->IsRegister()) { __ fill_w(kSimd128ScratchReg, i.InputRegister(1)); __ sra_w(i.OutputSimd128Register(), i.InputSimd128Register(0), @@ -2570,7 +2570,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction( break; } case kMips64I32x4ShrU: { - CpuFeatureScope msa_scope(tasm(), MIPS_SIMD); + CpuFeatureScope msa_scope(masm(), MIPS_SIMD); if (instr->InputAt(1)->IsRegister()) { __ fill_w(kSimd128ScratchReg, i.InputRegister(1)); __ srl_w(i.OutputSimd128Register(), i.InputSimd128Register(0), @@ -2582,26 +2582,26 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction( break; } case kMips64I32x4MaxU: { - CpuFeatureScope msa_scope(tasm(), MIPS_SIMD); + CpuFeatureScope msa_scope(masm(), MIPS_SIMD); __ max_u_w(i.OutputSimd128Register(), i.InputSimd128Register(0), i.InputSimd128Register(1)); break; } case kMips64I32x4MinU: { - CpuFeatureScope msa_scope(tasm(), MIPS_SIMD); + CpuFeatureScope msa_scope(masm(), MIPS_SIMD); __ min_u_w(i.OutputSimd128Register(), i.InputSimd128Register(0), i.InputSimd128Register(1)); break; } case kMips64S128Select: { - CpuFeatureScope msa_scope(tasm(), MIPS_SIMD); + CpuFeatureScope msa_scope(masm(), MIPS_SIMD); DCHECK(i.OutputSimd128Register() == i.InputSimd128Register(0)); __ bsel_v(i.OutputSimd128Register(), i.InputSimd128Register(2), i.InputSimd128Register(1)); break; } case kMips64S128AndNot: { - CpuFeatureScope msa_scope(tasm(), MIPS_SIMD); + CpuFeatureScope msa_scope(masm(), MIPS_SIMD); Simd128Register scratch = kSimd128ScratchReg, dst = i.OutputSimd128Register(), src0 = i.InputSimd128Register(0), @@ -2611,41 +2611,41 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction( break; } case kMips64F32x4Abs: { - CpuFeatureScope msa_scope(tasm(), MIPS_SIMD); + CpuFeatureScope msa_scope(masm(), MIPS_SIMD); __ bclri_w(i.OutputSimd128Register(), i.InputSimd128Register(0), 31); break; } case kMips64F32x4Neg: { - CpuFeatureScope msa_scope(tasm(), MIPS_SIMD); + CpuFeatureScope msa_scope(masm(), MIPS_SIMD); __ bnegi_w(i.OutputSimd128Register(), i.InputSimd128Register(0), 31); break; } case kMips64F32x4Add: { - CpuFeatureScope msa_scope(tasm(), MIPS_SIMD); + CpuFeatureScope msa_scope(masm(), MIPS_SIMD); __ fadd_w(i.OutputSimd128Register(), i.InputSimd128Register(0), i.InputSimd128Register(1)); break; } case kMips64F32x4Sub: { - CpuFeatureScope msa_scope(tasm(), MIPS_SIMD); + CpuFeatureScope msa_scope(masm(), MIPS_SIMD); __ fsub_w(i.OutputSimd128Register(), i.InputSimd128Register(0), i.InputSimd128Register(1)); break; } case kMips64F32x4Mul: { - CpuFeatureScope msa_scope(tasm(), MIPS_SIMD); + CpuFeatureScope msa_scope(masm(), MIPS_SIMD); __ fmul_w(i.OutputSimd128Register(), i.InputSimd128Register(0), i.InputSimd128Register(1)); break; } case kMips64F32x4Div: { - CpuFeatureScope msa_scope(tasm(), MIPS_SIMD); + CpuFeatureScope msa_scope(masm(), MIPS_SIMD); __ 
fdiv_w(i.OutputSimd128Register(), i.InputSimd128Register(0), i.InputSimd128Register(1)); break; } case kMips64F32x4Max: { - CpuFeatureScope msa_scope(tasm(), MIPS_SIMD); + CpuFeatureScope msa_scope(masm(), MIPS_SIMD); Simd128Register dst = i.OutputSimd128Register(); Simd128Register src0 = i.InputSimd128Register(0); Simd128Register src1 = i.InputSimd128Register(1); @@ -2668,7 +2668,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction( break; } case kMips64F32x4Min: { - CpuFeatureScope msa_scope(tasm(), MIPS_SIMD); + CpuFeatureScope msa_scope(masm(), MIPS_SIMD); Simd128Register dst = i.OutputSimd128Register(); Simd128Register src0 = i.InputSimd128Register(0); Simd128Register src1 = i.InputSimd128Register(1); @@ -2691,31 +2691,31 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction( break; } case kMips64F32x4Eq: { - CpuFeatureScope msa_scope(tasm(), MIPS_SIMD); + CpuFeatureScope msa_scope(masm(), MIPS_SIMD); __ fceq_w(i.OutputSimd128Register(), i.InputSimd128Register(0), i.InputSimd128Register(1)); break; } case kMips64F32x4Ne: { - CpuFeatureScope msa_scope(tasm(), MIPS_SIMD); + CpuFeatureScope msa_scope(masm(), MIPS_SIMD); __ fcune_w(i.OutputSimd128Register(), i.InputSimd128Register(0), i.InputSimd128Register(1)); break; } case kMips64F32x4Lt: { - CpuFeatureScope msa_scope(tasm(), MIPS_SIMD); + CpuFeatureScope msa_scope(masm(), MIPS_SIMD); __ fclt_w(i.OutputSimd128Register(), i.InputSimd128Register(0), i.InputSimd128Register(1)); break; } case kMips64F32x4Le: { - CpuFeatureScope msa_scope(tasm(), MIPS_SIMD); + CpuFeatureScope msa_scope(masm(), MIPS_SIMD); __ fcle_w(i.OutputSimd128Register(), i.InputSimd128Register(0), i.InputSimd128Register(1)); break; } case kMips64F32x4Pmin: { - CpuFeatureScope msa_scope(tasm(), MIPS_SIMD); + CpuFeatureScope msa_scope(masm(), MIPS_SIMD); Simd128Register dst = i.OutputSimd128Register(); Simd128Register lhs = i.InputSimd128Register(0); Simd128Register rhs = i.InputSimd128Register(1); @@ -2725,7 +2725,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction( break; } case kMips64F32x4Pmax: { - CpuFeatureScope msa_scope(tasm(), MIPS_SIMD); + CpuFeatureScope msa_scope(masm(), MIPS_SIMD); Simd128Register dst = i.OutputSimd128Register(); Simd128Register lhs = i.InputSimd128Register(0); Simd128Register rhs = i.InputSimd128Register(1); @@ -2735,91 +2735,91 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction( break; } case kMips64F32x4Ceil: { - CpuFeatureScope msa_scope(tasm(), MIPS_SIMD); + CpuFeatureScope msa_scope(masm(), MIPS_SIMD); __ MSARoundW(i.OutputSimd128Register(), i.InputSimd128Register(0), kRoundToPlusInf); break; } case kMips64F32x4Floor: { - CpuFeatureScope msa_scope(tasm(), MIPS_SIMD); + CpuFeatureScope msa_scope(masm(), MIPS_SIMD); __ MSARoundW(i.OutputSimd128Register(), i.InputSimd128Register(0), kRoundToMinusInf); break; } case kMips64F32x4Trunc: { - CpuFeatureScope msa_scope(tasm(), MIPS_SIMD); + CpuFeatureScope msa_scope(masm(), MIPS_SIMD); __ MSARoundW(i.OutputSimd128Register(), i.InputSimd128Register(0), kRoundToZero); break; } case kMips64F32x4NearestInt: { - CpuFeatureScope msa_scope(tasm(), MIPS_SIMD); + CpuFeatureScope msa_scope(masm(), MIPS_SIMD); __ MSARoundW(i.OutputSimd128Register(), i.InputSimd128Register(0), kRoundToNearest); break; } case kMips64F32x4DemoteF64x2Zero: { - CpuFeatureScope msa_scope(tasm(), MIPS_SIMD); + CpuFeatureScope msa_scope(masm(), MIPS_SIMD); __ xor_v(kSimd128RegZero, kSimd128RegZero, kSimd128RegZero); __ fexdo_w(i.OutputSimd128Register(), 
kSimd128RegZero, i.InputSimd128Register(0)); break; } case kMips64I32x4SConvertF32x4: { - CpuFeatureScope msa_scope(tasm(), MIPS_SIMD); + CpuFeatureScope msa_scope(masm(), MIPS_SIMD); __ ftrunc_s_w(i.OutputSimd128Register(), i.InputSimd128Register(0)); break; } case kMips64I32x4UConvertF32x4: { - CpuFeatureScope msa_scope(tasm(), MIPS_SIMD); + CpuFeatureScope msa_scope(masm(), MIPS_SIMD); __ ftrunc_u_w(i.OutputSimd128Register(), i.InputSimd128Register(0)); break; } case kMips64F32x4Sqrt: { - CpuFeatureScope msa_scope(tasm(), MIPS_SIMD); + CpuFeatureScope msa_scope(masm(), MIPS_SIMD); __ fsqrt_w(i.OutputSimd128Register(), i.InputSimd128Register(0)); break; } case kMips64I32x4Neg: { - CpuFeatureScope msa_scope(tasm(), MIPS_SIMD); + CpuFeatureScope msa_scope(masm(), MIPS_SIMD); __ xor_v(kSimd128RegZero, kSimd128RegZero, kSimd128RegZero); __ subv_w(i.OutputSimd128Register(), kSimd128RegZero, i.InputSimd128Register(0)); break; } case kMips64I32x4GtS: { - CpuFeatureScope msa_scope(tasm(), MIPS_SIMD); + CpuFeatureScope msa_scope(masm(), MIPS_SIMD); __ clt_s_w(i.OutputSimd128Register(), i.InputSimd128Register(1), i.InputSimd128Register(0)); break; } case kMips64I32x4GeS: { - CpuFeatureScope msa_scope(tasm(), MIPS_SIMD); + CpuFeatureScope msa_scope(masm(), MIPS_SIMD); __ cle_s_w(i.OutputSimd128Register(), i.InputSimd128Register(1), i.InputSimd128Register(0)); break; } case kMips64I32x4GtU: { - CpuFeatureScope msa_scope(tasm(), MIPS_SIMD); + CpuFeatureScope msa_scope(masm(), MIPS_SIMD); __ clt_u_w(i.OutputSimd128Register(), i.InputSimd128Register(1), i.InputSimd128Register(0)); break; } case kMips64I32x4GeU: { - CpuFeatureScope msa_scope(tasm(), MIPS_SIMD); + CpuFeatureScope msa_scope(masm(), MIPS_SIMD); __ cle_u_w(i.OutputSimd128Register(), i.InputSimd128Register(1), i.InputSimd128Register(0)); break; } case kMips64I32x4Abs: { - CpuFeatureScope msa_scope(tasm(), MIPS_SIMD); + CpuFeatureScope msa_scope(masm(), MIPS_SIMD); __ xor_v(kSimd128RegZero, kSimd128RegZero, kSimd128RegZero); __ asub_s_w(i.OutputSimd128Register(), i.InputSimd128Register(0), kSimd128RegZero); break; } case kMips64I32x4BitMask: { - CpuFeatureScope msa_scope(tasm(), MIPS_SIMD); + CpuFeatureScope msa_scope(masm(), MIPS_SIMD); Register dst = i.OutputRegister(); Simd128Register src = i.InputSimd128Register(0); Simd128Register scratch0 = kSimd128RegZero; @@ -2834,13 +2834,13 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction( break; } case kMips64I32x4DotI16x8S: { - CpuFeatureScope msa_scope(tasm(), MIPS_SIMD); + CpuFeatureScope msa_scope(masm(), MIPS_SIMD); __ dotp_s_w(i.OutputSimd128Register(), i.InputSimd128Register(0), i.InputSimd128Register(1)); break; } case kMips64I32x4TruncSatF64x2SZero: { - CpuFeatureScope msa_scope(tasm(), MIPS_SIMD); + CpuFeatureScope msa_scope(masm(), MIPS_SIMD); __ xor_v(kSimd128RegZero, kSimd128RegZero, kSimd128RegZero); __ ftrunc_s_d(kSimd128ScratchReg, i.InputSimd128Register(0)); __ sat_s_d(kSimd128ScratchReg, kSimd128ScratchReg, 31); @@ -2849,7 +2849,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction( break; } case kMips64I32x4TruncSatF64x2UZero: { - CpuFeatureScope msa_scope(tasm(), MIPS_SIMD); + CpuFeatureScope msa_scope(masm(), MIPS_SIMD); __ xor_v(kSimd128RegZero, kSimd128RegZero, kSimd128RegZero); __ ftrunc_u_d(kSimd128ScratchReg, i.InputSimd128Register(0)); __ sat_u_d(kSimd128ScratchReg, kSimd128ScratchReg, 31); @@ -2858,24 +2858,24 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction( break; } case kMips64I16x8Splat: { - 
CpuFeatureScope msa_scope(tasm(), MIPS_SIMD); + CpuFeatureScope msa_scope(masm(), MIPS_SIMD); __ fill_h(i.OutputSimd128Register(), i.InputRegister(0)); break; } case kMips64I16x8ExtractLaneU: { - CpuFeatureScope msa_scope(tasm(), MIPS_SIMD); + CpuFeatureScope msa_scope(masm(), MIPS_SIMD); __ copy_u_h(i.OutputRegister(), i.InputSimd128Register(0), i.InputInt8(1)); break; } case kMips64I16x8ExtractLaneS: { - CpuFeatureScope msa_scope(tasm(), MIPS_SIMD); + CpuFeatureScope msa_scope(masm(), MIPS_SIMD); __ copy_s_h(i.OutputRegister(), i.InputSimd128Register(0), i.InputInt8(1)); break; } case kMips64I16x8ReplaceLane: { - CpuFeatureScope msa_scope(tasm(), MIPS_SIMD); + CpuFeatureScope msa_scope(masm(), MIPS_SIMD); Simd128Register src = i.InputSimd128Register(0); Simd128Register dst = i.OutputSimd128Register(); if (src != dst) { @@ -2885,14 +2885,14 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction( break; } case kMips64I16x8Neg: { - CpuFeatureScope msa_scope(tasm(), MIPS_SIMD); + CpuFeatureScope msa_scope(masm(), MIPS_SIMD); __ xor_v(kSimd128RegZero, kSimd128RegZero, kSimd128RegZero); __ subv_h(i.OutputSimd128Register(), kSimd128RegZero, i.InputSimd128Register(0)); break; } case kMips64I16x8Shl: { - CpuFeatureScope msa_scope(tasm(), MIPS_SIMD); + CpuFeatureScope msa_scope(masm(), MIPS_SIMD); if (instr->InputAt(1)->IsRegister()) { __ fill_h(kSimd128ScratchReg, i.InputRegister(1)); __ sll_h(i.OutputSimd128Register(), i.InputSimd128Register(0), @@ -2904,7 +2904,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction( break; } case kMips64I16x8ShrS: { - CpuFeatureScope msa_scope(tasm(), MIPS_SIMD); + CpuFeatureScope msa_scope(masm(), MIPS_SIMD); if (instr->InputAt(1)->IsRegister()) { __ fill_h(kSimd128ScratchReg, i.InputRegister(1)); __ sra_h(i.OutputSimd128Register(), i.InputSimd128Register(0), @@ -2916,7 +2916,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction( break; } case kMips64I16x8ShrU: { - CpuFeatureScope msa_scope(tasm(), MIPS_SIMD); + CpuFeatureScope msa_scope(masm(), MIPS_SIMD); if (instr->InputAt(1)->IsRegister()) { __ fill_h(kSimd128ScratchReg, i.InputRegister(1)); __ srl_h(i.OutputSimd128Register(), i.InputSimd128Register(0), @@ -2928,123 +2928,123 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction( break; } case kMips64I16x8Add: { - CpuFeatureScope msa_scope(tasm(), MIPS_SIMD); + CpuFeatureScope msa_scope(masm(), MIPS_SIMD); __ addv_h(i.OutputSimd128Register(), i.InputSimd128Register(0), i.InputSimd128Register(1)); break; } case kMips64I16x8AddSatS: { - CpuFeatureScope msa_scope(tasm(), MIPS_SIMD); + CpuFeatureScope msa_scope(masm(), MIPS_SIMD); __ adds_s_h(i.OutputSimd128Register(), i.InputSimd128Register(0), i.InputSimd128Register(1)); break; } case kMips64I16x8Sub: { - CpuFeatureScope msa_scope(tasm(), MIPS_SIMD); + CpuFeatureScope msa_scope(masm(), MIPS_SIMD); __ subv_h(i.OutputSimd128Register(), i.InputSimd128Register(0), i.InputSimd128Register(1)); break; } case kMips64I16x8SubSatS: { - CpuFeatureScope msa_scope(tasm(), MIPS_SIMD); + CpuFeatureScope msa_scope(masm(), MIPS_SIMD); __ subs_s_h(i.OutputSimd128Register(), i.InputSimd128Register(0), i.InputSimd128Register(1)); break; } case kMips64I16x8Mul: { - CpuFeatureScope msa_scope(tasm(), MIPS_SIMD); + CpuFeatureScope msa_scope(masm(), MIPS_SIMD); __ mulv_h(i.OutputSimd128Register(), i.InputSimd128Register(0), i.InputSimd128Register(1)); break; } case kMips64I16x8MaxS: { - CpuFeatureScope msa_scope(tasm(), MIPS_SIMD); + CpuFeatureScope 
msa_scope(masm(), MIPS_SIMD); __ max_s_h(i.OutputSimd128Register(), i.InputSimd128Register(0), i.InputSimd128Register(1)); break; } case kMips64I16x8MinS: { - CpuFeatureScope msa_scope(tasm(), MIPS_SIMD); + CpuFeatureScope msa_scope(masm(), MIPS_SIMD); __ min_s_h(i.OutputSimd128Register(), i.InputSimd128Register(0), i.InputSimd128Register(1)); break; } case kMips64I16x8Eq: { - CpuFeatureScope msa_scope(tasm(), MIPS_SIMD); + CpuFeatureScope msa_scope(masm(), MIPS_SIMD); __ ceq_h(i.OutputSimd128Register(), i.InputSimd128Register(0), i.InputSimd128Register(1)); break; } case kMips64I16x8Ne: { - CpuFeatureScope msa_scope(tasm(), MIPS_SIMD); + CpuFeatureScope msa_scope(masm(), MIPS_SIMD); Simd128Register dst = i.OutputSimd128Register(); __ ceq_h(dst, i.InputSimd128Register(0), i.InputSimd128Register(1)); __ nor_v(dst, dst, dst); break; } case kMips64I16x8GtS: { - CpuFeatureScope msa_scope(tasm(), MIPS_SIMD); + CpuFeatureScope msa_scope(masm(), MIPS_SIMD); __ clt_s_h(i.OutputSimd128Register(), i.InputSimd128Register(1), i.InputSimd128Register(0)); break; } case kMips64I16x8GeS: { - CpuFeatureScope msa_scope(tasm(), MIPS_SIMD); + CpuFeatureScope msa_scope(masm(), MIPS_SIMD); __ cle_s_h(i.OutputSimd128Register(), i.InputSimd128Register(1), i.InputSimd128Register(0)); break; } case kMips64I16x8AddSatU: { - CpuFeatureScope msa_scope(tasm(), MIPS_SIMD); + CpuFeatureScope msa_scope(masm(), MIPS_SIMD); __ adds_u_h(i.OutputSimd128Register(), i.InputSimd128Register(0), i.InputSimd128Register(1)); break; } case kMips64I16x8SubSatU: { - CpuFeatureScope msa_scope(tasm(), MIPS_SIMD); + CpuFeatureScope msa_scope(masm(), MIPS_SIMD); __ subs_u_h(i.OutputSimd128Register(), i.InputSimd128Register(0), i.InputSimd128Register(1)); break; } case kMips64I16x8MaxU: { - CpuFeatureScope msa_scope(tasm(), MIPS_SIMD); + CpuFeatureScope msa_scope(masm(), MIPS_SIMD); __ max_u_h(i.OutputSimd128Register(), i.InputSimd128Register(0), i.InputSimd128Register(1)); break; } case kMips64I16x8MinU: { - CpuFeatureScope msa_scope(tasm(), MIPS_SIMD); + CpuFeatureScope msa_scope(masm(), MIPS_SIMD); __ min_u_h(i.OutputSimd128Register(), i.InputSimd128Register(0), i.InputSimd128Register(1)); break; } case kMips64I16x8GtU: { - CpuFeatureScope msa_scope(tasm(), MIPS_SIMD); + CpuFeatureScope msa_scope(masm(), MIPS_SIMD); __ clt_u_h(i.OutputSimd128Register(), i.InputSimd128Register(1), i.InputSimd128Register(0)); break; } case kMips64I16x8GeU: { - CpuFeatureScope msa_scope(tasm(), MIPS_SIMD); + CpuFeatureScope msa_scope(masm(), MIPS_SIMD); __ cle_u_h(i.OutputSimd128Register(), i.InputSimd128Register(1), i.InputSimd128Register(0)); break; } case kMips64I16x8RoundingAverageU: { - CpuFeatureScope msa_scope(tasm(), MIPS_SIMD); + CpuFeatureScope msa_scope(masm(), MIPS_SIMD); __ aver_u_h(i.OutputSimd128Register(), i.InputSimd128Register(1), i.InputSimd128Register(0)); break; } case kMips64I16x8Abs: { - CpuFeatureScope msa_scope(tasm(), MIPS_SIMD); + CpuFeatureScope msa_scope(masm(), MIPS_SIMD); __ xor_v(kSimd128RegZero, kSimd128RegZero, kSimd128RegZero); __ asub_s_h(i.OutputSimd128Register(), i.InputSimd128Register(0), kSimd128RegZero); break; } case kMips64I16x8BitMask: { - CpuFeatureScope msa_scope(tasm(), MIPS_SIMD); + CpuFeatureScope msa_scope(masm(), MIPS_SIMD); Register dst = i.OutputRegister(); Simd128Register src = i.InputSimd128Register(0); Simd128Register scratch0 = kSimd128RegZero; @@ -3061,30 +3061,30 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction( break; } case kMips64I16x8Q15MulRSatS: { - CpuFeatureScope 
msa_scope(tasm(), MIPS_SIMD); + CpuFeatureScope msa_scope(masm(), MIPS_SIMD); __ mulr_q_h(i.OutputSimd128Register(), i.InputSimd128Register(0), i.InputSimd128Register(1)); break; } case kMips64I8x16Splat: { - CpuFeatureScope msa_scope(tasm(), MIPS_SIMD); + CpuFeatureScope msa_scope(masm(), MIPS_SIMD); __ fill_b(i.OutputSimd128Register(), i.InputRegister(0)); break; } case kMips64I8x16ExtractLaneU: { - CpuFeatureScope msa_scope(tasm(), MIPS_SIMD); + CpuFeatureScope msa_scope(masm(), MIPS_SIMD); __ copy_u_b(i.OutputRegister(), i.InputSimd128Register(0), i.InputInt8(1)); break; } case kMips64I8x16ExtractLaneS: { - CpuFeatureScope msa_scope(tasm(), MIPS_SIMD); + CpuFeatureScope msa_scope(masm(), MIPS_SIMD); __ copy_s_b(i.OutputRegister(), i.InputSimd128Register(0), i.InputInt8(1)); break; } case kMips64I8x16ReplaceLane: { - CpuFeatureScope msa_scope(tasm(), MIPS_SIMD); + CpuFeatureScope msa_scope(masm(), MIPS_SIMD); Simd128Register src = i.InputSimd128Register(0); Simd128Register dst = i.OutputSimd128Register(); if (src != dst) { @@ -3094,14 +3094,14 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction( break; } case kMips64I8x16Neg: { - CpuFeatureScope msa_scope(tasm(), MIPS_SIMD); + CpuFeatureScope msa_scope(masm(), MIPS_SIMD); __ xor_v(kSimd128RegZero, kSimd128RegZero, kSimd128RegZero); __ subv_b(i.OutputSimd128Register(), kSimd128RegZero, i.InputSimd128Register(0)); break; } case kMips64I8x16Shl: { - CpuFeatureScope msa_scope(tasm(), MIPS_SIMD); + CpuFeatureScope msa_scope(masm(), MIPS_SIMD); if (instr->InputAt(1)->IsRegister()) { __ fill_b(kSimd128ScratchReg, i.InputRegister(1)); __ sll_b(i.OutputSimd128Register(), i.InputSimd128Register(0), @@ -3113,7 +3113,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction( break; } case kMips64I8x16ShrS: { - CpuFeatureScope msa_scope(tasm(), MIPS_SIMD); + CpuFeatureScope msa_scope(masm(), MIPS_SIMD); if (instr->InputAt(1)->IsRegister()) { __ fill_b(kSimd128ScratchReg, i.InputRegister(1)); __ sra_b(i.OutputSimd128Register(), i.InputSimd128Register(0), @@ -3125,68 +3125,68 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction( break; } case kMips64I8x16Add: { - CpuFeatureScope msa_scope(tasm(), MIPS_SIMD); + CpuFeatureScope msa_scope(masm(), MIPS_SIMD); __ addv_b(i.OutputSimd128Register(), i.InputSimd128Register(0), i.InputSimd128Register(1)); break; } case kMips64I8x16AddSatS: { - CpuFeatureScope msa_scope(tasm(), MIPS_SIMD); + CpuFeatureScope msa_scope(masm(), MIPS_SIMD); __ adds_s_b(i.OutputSimd128Register(), i.InputSimd128Register(0), i.InputSimd128Register(1)); break; } case kMips64I8x16Sub: { - CpuFeatureScope msa_scope(tasm(), MIPS_SIMD); + CpuFeatureScope msa_scope(masm(), MIPS_SIMD); __ subv_b(i.OutputSimd128Register(), i.InputSimd128Register(0), i.InputSimd128Register(1)); break; } case kMips64I8x16SubSatS: { - CpuFeatureScope msa_scope(tasm(), MIPS_SIMD); + CpuFeatureScope msa_scope(masm(), MIPS_SIMD); __ subs_s_b(i.OutputSimd128Register(), i.InputSimd128Register(0), i.InputSimd128Register(1)); break; } case kMips64I8x16MaxS: { - CpuFeatureScope msa_scope(tasm(), MIPS_SIMD); + CpuFeatureScope msa_scope(masm(), MIPS_SIMD); __ max_s_b(i.OutputSimd128Register(), i.InputSimd128Register(0), i.InputSimd128Register(1)); break; } case kMips64I8x16MinS: { - CpuFeatureScope msa_scope(tasm(), MIPS_SIMD); + CpuFeatureScope msa_scope(masm(), MIPS_SIMD); __ min_s_b(i.OutputSimd128Register(), i.InputSimd128Register(0), i.InputSimd128Register(1)); break; } case kMips64I8x16Eq: { - CpuFeatureScope 
msa_scope(tasm(), MIPS_SIMD); + CpuFeatureScope msa_scope(masm(), MIPS_SIMD); __ ceq_b(i.OutputSimd128Register(), i.InputSimd128Register(0), i.InputSimd128Register(1)); break; } case kMips64I8x16Ne: { - CpuFeatureScope msa_scope(tasm(), MIPS_SIMD); + CpuFeatureScope msa_scope(masm(), MIPS_SIMD); Simd128Register dst = i.OutputSimd128Register(); __ ceq_b(dst, i.InputSimd128Register(0), i.InputSimd128Register(1)); __ nor_v(dst, dst, dst); break; } case kMips64I8x16GtS: { - CpuFeatureScope msa_scope(tasm(), MIPS_SIMD); + CpuFeatureScope msa_scope(masm(), MIPS_SIMD); __ clt_s_b(i.OutputSimd128Register(), i.InputSimd128Register(1), i.InputSimd128Register(0)); break; } case kMips64I8x16GeS: { - CpuFeatureScope msa_scope(tasm(), MIPS_SIMD); + CpuFeatureScope msa_scope(masm(), MIPS_SIMD); __ cle_s_b(i.OutputSimd128Register(), i.InputSimd128Register(1), i.InputSimd128Register(0)); break; } case kMips64I8x16ShrU: { - CpuFeatureScope msa_scope(tasm(), MIPS_SIMD); + CpuFeatureScope msa_scope(masm(), MIPS_SIMD); if (instr->InputAt(1)->IsRegister()) { __ fill_b(kSimd128ScratchReg, i.InputRegister(1)); __ srl_b(i.OutputSimd128Register(), i.InputSimd128Register(0), @@ -3198,61 +3198,61 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction( break; } case kMips64I8x16AddSatU: { - CpuFeatureScope msa_scope(tasm(), MIPS_SIMD); + CpuFeatureScope msa_scope(masm(), MIPS_SIMD); __ adds_u_b(i.OutputSimd128Register(), i.InputSimd128Register(0), i.InputSimd128Register(1)); break; } case kMips64I8x16SubSatU: { - CpuFeatureScope msa_scope(tasm(), MIPS_SIMD); + CpuFeatureScope msa_scope(masm(), MIPS_SIMD); __ subs_u_b(i.OutputSimd128Register(), i.InputSimd128Register(0), i.InputSimd128Register(1)); break; } case kMips64I8x16MaxU: { - CpuFeatureScope msa_scope(tasm(), MIPS_SIMD); + CpuFeatureScope msa_scope(masm(), MIPS_SIMD); __ max_u_b(i.OutputSimd128Register(), i.InputSimd128Register(0), i.InputSimd128Register(1)); break; } case kMips64I8x16MinU: { - CpuFeatureScope msa_scope(tasm(), MIPS_SIMD); + CpuFeatureScope msa_scope(masm(), MIPS_SIMD); __ min_u_b(i.OutputSimd128Register(), i.InputSimd128Register(0), i.InputSimd128Register(1)); break; } case kMips64I8x16GtU: { - CpuFeatureScope msa_scope(tasm(), MIPS_SIMD); + CpuFeatureScope msa_scope(masm(), MIPS_SIMD); __ clt_u_b(i.OutputSimd128Register(), i.InputSimd128Register(1), i.InputSimd128Register(0)); break; } case kMips64I8x16GeU: { - CpuFeatureScope msa_scope(tasm(), MIPS_SIMD); + CpuFeatureScope msa_scope(masm(), MIPS_SIMD); __ cle_u_b(i.OutputSimd128Register(), i.InputSimd128Register(1), i.InputSimd128Register(0)); break; } case kMips64I8x16RoundingAverageU: { - CpuFeatureScope msa_scope(tasm(), MIPS_SIMD); + CpuFeatureScope msa_scope(masm(), MIPS_SIMD); __ aver_u_b(i.OutputSimd128Register(), i.InputSimd128Register(1), i.InputSimd128Register(0)); break; } case kMips64I8x16Abs: { - CpuFeatureScope msa_scope(tasm(), MIPS_SIMD); + CpuFeatureScope msa_scope(masm(), MIPS_SIMD); __ xor_v(kSimd128RegZero, kSimd128RegZero, kSimd128RegZero); __ asub_s_b(i.OutputSimd128Register(), i.InputSimd128Register(0), kSimd128RegZero); break; } case kMips64I8x16Popcnt: { - CpuFeatureScope msa_scope(tasm(), MIPS_SIMD); + CpuFeatureScope msa_scope(masm(), MIPS_SIMD); __ pcnt_b(i.OutputSimd128Register(), i.InputSimd128Register(0)); break; } case kMips64I8x16BitMask: { - CpuFeatureScope msa_scope(tasm(), MIPS_SIMD); + CpuFeatureScope msa_scope(masm(), MIPS_SIMD); Register dst = i.OutputRegister(); Simd128Register src = i.InputSimd128Register(0); Simd128Register scratch0 = 
kSimd128RegZero; @@ -3270,31 +3270,31 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction( break; } case kMips64S128And: { - CpuFeatureScope msa_scope(tasm(), MIPS_SIMD); + CpuFeatureScope msa_scope(masm(), MIPS_SIMD); __ and_v(i.OutputSimd128Register(), i.InputSimd128Register(0), i.InputSimd128Register(1)); break; } case kMips64S128Or: { - CpuFeatureScope msa_scope(tasm(), MIPS_SIMD); + CpuFeatureScope msa_scope(masm(), MIPS_SIMD); __ or_v(i.OutputSimd128Register(), i.InputSimd128Register(0), i.InputSimd128Register(1)); break; } case kMips64S128Xor: { - CpuFeatureScope msa_scope(tasm(), MIPS_SIMD); + CpuFeatureScope msa_scope(masm(), MIPS_SIMD); __ xor_v(i.OutputSimd128Register(), i.InputSimd128Register(0), i.InputSimd128Register(1)); break; } case kMips64S128Not: { - CpuFeatureScope msa_scope(tasm(), MIPS_SIMD); + CpuFeatureScope msa_scope(masm(), MIPS_SIMD); __ nor_v(i.OutputSimd128Register(), i.InputSimd128Register(0), i.InputSimd128Register(0)); break; } case kMips64V128AnyTrue: { - CpuFeatureScope msa_scope(tasm(), MIPS_SIMD); + CpuFeatureScope msa_scope(masm(), MIPS_SIMD); Register dst = i.OutputRegister(); Label all_false; __ BranchMSA(&all_false, MSA_BRANCH_V, all_zero, @@ -3305,7 +3305,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction( break; } case kMips64I64x2AllTrue: { - CpuFeatureScope msa_scope(tasm(), MIPS_SIMD); + CpuFeatureScope msa_scope(masm(), MIPS_SIMD); Register dst = i.OutputRegister(); Label all_true; __ BranchMSA(&all_true, MSA_BRANCH_D, all_not_zero, @@ -3316,7 +3316,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction( break; } case kMips64I32x4AllTrue: { - CpuFeatureScope msa_scope(tasm(), MIPS_SIMD); + CpuFeatureScope msa_scope(masm(), MIPS_SIMD); Register dst = i.OutputRegister(); Label all_true; __ BranchMSA(&all_true, MSA_BRANCH_W, all_not_zero, @@ -3327,7 +3327,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction( break; } case kMips64I16x8AllTrue: { - CpuFeatureScope msa_scope(tasm(), MIPS_SIMD); + CpuFeatureScope msa_scope(masm(), MIPS_SIMD); Register dst = i.OutputRegister(); Label all_true; __ BranchMSA(&all_true, MSA_BRANCH_H, all_not_zero, @@ -3338,7 +3338,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction( break; } case kMips64I8x16AllTrue: { - CpuFeatureScope msa_scope(tasm(), MIPS_SIMD); + CpuFeatureScope msa_scope(masm(), MIPS_SIMD); Register dst = i.OutputRegister(); Label all_true; __ BranchMSA(&all_true, MSA_BRANCH_B, all_not_zero, @@ -3349,17 +3349,17 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction( break; } case kMips64MsaLd: { - CpuFeatureScope msa_scope(tasm(), MIPS_SIMD); + CpuFeatureScope msa_scope(masm(), MIPS_SIMD); __ ld_b(i.OutputSimd128Register(), i.MemoryOperand()); break; } case kMips64MsaSt: { - CpuFeatureScope msa_scope(tasm(), MIPS_SIMD); + CpuFeatureScope msa_scope(masm(), MIPS_SIMD); __ st_b(i.InputSimd128Register(2), i.MemoryOperand()); break; } case kMips64S32x4InterleaveRight: { - CpuFeatureScope msa_scope(tasm(), MIPS_SIMD); + CpuFeatureScope msa_scope(masm(), MIPS_SIMD); Simd128Register dst = i.OutputSimd128Register(), src0 = i.InputSimd128Register(0), src1 = i.InputSimd128Register(1); @@ -3369,7 +3369,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction( break; } case kMips64S32x4InterleaveLeft: { - CpuFeatureScope msa_scope(tasm(), MIPS_SIMD); + CpuFeatureScope msa_scope(masm(), MIPS_SIMD); Simd128Register dst = i.OutputSimd128Register(), src0 = i.InputSimd128Register(0), 
src1 = i.InputSimd128Register(1); @@ -3379,7 +3379,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction( break; } case kMips64S32x4PackEven: { - CpuFeatureScope msa_scope(tasm(), MIPS_SIMD); + CpuFeatureScope msa_scope(masm(), MIPS_SIMD); Simd128Register dst = i.OutputSimd128Register(), src0 = i.InputSimd128Register(0), src1 = i.InputSimd128Register(1); @@ -3389,7 +3389,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction( break; } case kMips64S32x4PackOdd: { - CpuFeatureScope msa_scope(tasm(), MIPS_SIMD); + CpuFeatureScope msa_scope(masm(), MIPS_SIMD); Simd128Register dst = i.OutputSimd128Register(), src0 = i.InputSimd128Register(0), src1 = i.InputSimd128Register(1); @@ -3399,7 +3399,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction( break; } case kMips64S32x4InterleaveEven: { - CpuFeatureScope msa_scope(tasm(), MIPS_SIMD); + CpuFeatureScope msa_scope(masm(), MIPS_SIMD); Simd128Register dst = i.OutputSimd128Register(), src0 = i.InputSimd128Register(0), src1 = i.InputSimd128Register(1); @@ -3409,7 +3409,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction( break; } case kMips64S32x4InterleaveOdd: { - CpuFeatureScope msa_scope(tasm(), MIPS_SIMD); + CpuFeatureScope msa_scope(masm(), MIPS_SIMD); Simd128Register dst = i.OutputSimd128Register(), src0 = i.InputSimd128Register(0), src1 = i.InputSimd128Register(1); @@ -3419,7 +3419,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction( break; } case kMips64S32x4Shuffle: { - CpuFeatureScope msa_scope(tasm(), MIPS_SIMD); + CpuFeatureScope msa_scope(masm(), MIPS_SIMD); Simd128Register dst = i.OutputSimd128Register(), src0 = i.InputSimd128Register(0), src1 = i.InputSimd128Register(1); @@ -3473,7 +3473,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction( break; } case kMips64S16x8InterleaveRight: { - CpuFeatureScope msa_scope(tasm(), MIPS_SIMD); + CpuFeatureScope msa_scope(masm(), MIPS_SIMD); Simd128Register dst = i.OutputSimd128Register(), src0 = i.InputSimd128Register(0), src1 = i.InputSimd128Register(1); @@ -3483,7 +3483,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction( break; } case kMips64S16x8InterleaveLeft: { - CpuFeatureScope msa_scope(tasm(), MIPS_SIMD); + CpuFeatureScope msa_scope(masm(), MIPS_SIMD); Simd128Register dst = i.OutputSimd128Register(), src0 = i.InputSimd128Register(0), src1 = i.InputSimd128Register(1); @@ -3493,7 +3493,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction( break; } case kMips64S16x8PackEven: { - CpuFeatureScope msa_scope(tasm(), MIPS_SIMD); + CpuFeatureScope msa_scope(masm(), MIPS_SIMD); Simd128Register dst = i.OutputSimd128Register(), src0 = i.InputSimd128Register(0), src1 = i.InputSimd128Register(1); @@ -3503,7 +3503,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction( break; } case kMips64S16x8PackOdd: { - CpuFeatureScope msa_scope(tasm(), MIPS_SIMD); + CpuFeatureScope msa_scope(masm(), MIPS_SIMD); Simd128Register dst = i.OutputSimd128Register(), src0 = i.InputSimd128Register(0), src1 = i.InputSimd128Register(1); @@ -3513,7 +3513,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction( break; } case kMips64S16x8InterleaveEven: { - CpuFeatureScope msa_scope(tasm(), MIPS_SIMD); + CpuFeatureScope msa_scope(masm(), MIPS_SIMD); Simd128Register dst = i.OutputSimd128Register(), src0 = i.InputSimd128Register(0), src1 = i.InputSimd128Register(1); @@ -3523,7 +3523,7 @@ CodeGenerator::CodeGenResult 
CodeGenerator::AssembleArchInstruction( break; } case kMips64S16x8InterleaveOdd: { - CpuFeatureScope msa_scope(tasm(), MIPS_SIMD); + CpuFeatureScope msa_scope(masm(), MIPS_SIMD); Simd128Register dst = i.OutputSimd128Register(), src0 = i.InputSimd128Register(0), src1 = i.InputSimd128Register(1); @@ -3533,21 +3533,21 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction( break; } case kMips64S16x4Reverse: { - CpuFeatureScope msa_scope(tasm(), MIPS_SIMD); + CpuFeatureScope msa_scope(masm(), MIPS_SIMD); // src = [7, 6, 5, 4, 3, 2, 1, 0], dst = [4, 5, 6, 7, 0, 1, 2, 3] // shf.df imm field: 0 1 2 3 = 00011011 = 0x1B __ shf_h(i.OutputSimd128Register(), i.InputSimd128Register(0), 0x1B); break; } case kMips64S16x2Reverse: { - CpuFeatureScope msa_scope(tasm(), MIPS_SIMD); + CpuFeatureScope msa_scope(masm(), MIPS_SIMD); // src = [7, 6, 5, 4, 3, 2, 1, 0], dst = [6, 7, 4, 5, 3, 2, 0, 1] // shf.df imm field: 2 3 0 1 = 10110001 = 0xB1 __ shf_h(i.OutputSimd128Register(), i.InputSimd128Register(0), 0xB1); break; } case kMips64S8x16InterleaveRight: { - CpuFeatureScope msa_scope(tasm(), MIPS_SIMD); + CpuFeatureScope msa_scope(masm(), MIPS_SIMD); Simd128Register dst = i.OutputSimd128Register(), src0 = i.InputSimd128Register(0), src1 = i.InputSimd128Register(1); @@ -3557,7 +3557,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction( break; } case kMips64S8x16InterleaveLeft: { - CpuFeatureScope msa_scope(tasm(), MIPS_SIMD); + CpuFeatureScope msa_scope(masm(), MIPS_SIMD); Simd128Register dst = i.OutputSimd128Register(), src0 = i.InputSimd128Register(0), src1 = i.InputSimd128Register(1); @@ -3567,7 +3567,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction( break; } case kMips64S8x16PackEven: { - CpuFeatureScope msa_scope(tasm(), MIPS_SIMD); + CpuFeatureScope msa_scope(masm(), MIPS_SIMD); Simd128Register dst = i.OutputSimd128Register(), src0 = i.InputSimd128Register(0), src1 = i.InputSimd128Register(1); @@ -3577,7 +3577,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction( break; } case kMips64S8x16PackOdd: { - CpuFeatureScope msa_scope(tasm(), MIPS_SIMD); + CpuFeatureScope msa_scope(masm(), MIPS_SIMD); Simd128Register dst = i.OutputSimd128Register(), src0 = i.InputSimd128Register(0), src1 = i.InputSimd128Register(1); @@ -3587,7 +3587,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction( break; } case kMips64S8x16InterleaveEven: { - CpuFeatureScope msa_scope(tasm(), MIPS_SIMD); + CpuFeatureScope msa_scope(masm(), MIPS_SIMD); Simd128Register dst = i.OutputSimd128Register(), src0 = i.InputSimd128Register(0), src1 = i.InputSimd128Register(1); @@ -3597,7 +3597,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction( break; } case kMips64S8x16InterleaveOdd: { - CpuFeatureScope msa_scope(tasm(), MIPS_SIMD); + CpuFeatureScope msa_scope(masm(), MIPS_SIMD); Simd128Register dst = i.OutputSimd128Register(), src0 = i.InputSimd128Register(0), src1 = i.InputSimd128Register(1); @@ -3607,14 +3607,14 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction( break; } case kMips64S8x16Concat: { - CpuFeatureScope msa_scope(tasm(), MIPS_SIMD); + CpuFeatureScope msa_scope(masm(), MIPS_SIMD); Simd128Register dst = i.OutputSimd128Register(); DCHECK(dst == i.InputSimd128Register(0)); __ sldi_b(dst, i.InputSimd128Register(1), i.InputInt4(2)); break; } case kMips64I8x16Shuffle: { - CpuFeatureScope msa_scope(tasm(), MIPS_SIMD); + CpuFeatureScope msa_scope(masm(), MIPS_SIMD); Simd128Register dst = 
i.OutputSimd128Register(), src0 = i.InputSimd128Register(0), src1 = i.InputSimd128Register(1); @@ -3650,7 +3650,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction( break; } case kMips64S8x8Reverse: { - CpuFeatureScope msa_scope(tasm(), MIPS_SIMD); + CpuFeatureScope msa_scope(masm(), MIPS_SIMD); // src = [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0] // dst = [8, 9, 10, 11, 12, 13, 14, 15, 0, 1, 2, 3, 4, 5, 6, 7] // [A B C D] => [B A D C]: shf.w imm: 2 3 0 1 = 10110001 = 0xB1 @@ -3660,21 +3660,21 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction( break; } case kMips64S8x4Reverse: { - CpuFeatureScope msa_scope(tasm(), MIPS_SIMD); + CpuFeatureScope msa_scope(masm(), MIPS_SIMD); // src = [15, 14, ... 3, 2, 1, 0], dst = [12, 13, 14, 15, ... 0, 1, 2, 3] // shf.df imm field: 0 1 2 3 = 00011011 = 0x1B __ shf_b(i.OutputSimd128Register(), i.InputSimd128Register(0), 0x1B); break; } case kMips64S8x2Reverse: { - CpuFeatureScope msa_scope(tasm(), MIPS_SIMD); + CpuFeatureScope msa_scope(masm(), MIPS_SIMD); // src = [15, 14, ... 3, 2, 1, 0], dst = [14, 15, 12, 13, ... 2, 3, 0, 1] // shf.df imm field: 2 3 0 1 = 10110001 = 0xB1 __ shf_b(i.OutputSimd128Register(), i.InputSimd128Register(0), 0xB1); break; } case kMips64I32x4SConvertI16x8Low: { - CpuFeatureScope msa_scope(tasm(), MIPS_SIMD); + CpuFeatureScope msa_scope(masm(), MIPS_SIMD); Simd128Register dst = i.OutputSimd128Register(); Simd128Register src = i.InputSimd128Register(0); __ ilvr_h(kSimd128ScratchReg, src, src); @@ -3683,7 +3683,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction( break; } case kMips64I32x4SConvertI16x8High: { - CpuFeatureScope msa_scope(tasm(), MIPS_SIMD); + CpuFeatureScope msa_scope(masm(), MIPS_SIMD); Simd128Register dst = i.OutputSimd128Register(); Simd128Register src = i.InputSimd128Register(0); __ ilvl_h(kSimd128ScratchReg, src, src); @@ -3692,21 +3692,21 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction( break; } case kMips64I32x4UConvertI16x8Low: { - CpuFeatureScope msa_scope(tasm(), MIPS_SIMD); + CpuFeatureScope msa_scope(masm(), MIPS_SIMD); __ xor_v(kSimd128RegZero, kSimd128RegZero, kSimd128RegZero); __ ilvr_h(i.OutputSimd128Register(), kSimd128RegZero, i.InputSimd128Register(0)); break; } case kMips64I32x4UConvertI16x8High: { - CpuFeatureScope msa_scope(tasm(), MIPS_SIMD); + CpuFeatureScope msa_scope(masm(), MIPS_SIMD); __ xor_v(kSimd128RegZero, kSimd128RegZero, kSimd128RegZero); __ ilvl_h(i.OutputSimd128Register(), kSimd128RegZero, i.InputSimd128Register(0)); break; } case kMips64I16x8SConvertI8x16Low: { - CpuFeatureScope msa_scope(tasm(), MIPS_SIMD); + CpuFeatureScope msa_scope(masm(), MIPS_SIMD); Simd128Register dst = i.OutputSimd128Register(); Simd128Register src = i.InputSimd128Register(0); __ ilvr_b(kSimd128ScratchReg, src, src); @@ -3715,7 +3715,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction( break; } case kMips64I16x8SConvertI8x16High: { - CpuFeatureScope msa_scope(tasm(), MIPS_SIMD); + CpuFeatureScope msa_scope(masm(), MIPS_SIMD); Simd128Register dst = i.OutputSimd128Register(); Simd128Register src = i.InputSimd128Register(0); __ ilvl_b(kSimd128ScratchReg, src, src); @@ -3724,7 +3724,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction( break; } case kMips64I16x8SConvertI32x4: { - CpuFeatureScope msa_scope(tasm(), MIPS_SIMD); + CpuFeatureScope msa_scope(masm(), MIPS_SIMD); Simd128Register dst = i.OutputSimd128Register(); Simd128Register src0 = i.InputSimd128Register(0); 
       Simd128Register src1 = i.InputSimd128Register(1);
@@ -3734,7 +3734,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
       break;
     }
     case kMips64I16x8UConvertI32x4: {
-      CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
+      CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
       Simd128Register dst = i.OutputSimd128Register();
       Simd128Register src0 = i.InputSimd128Register(0);
       Simd128Register src1 = i.InputSimd128Register(1);
@@ -3747,21 +3747,21 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
       break;
     }
     case kMips64I16x8UConvertI8x16Low: {
-      CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
+      CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
       __ xor_v(kSimd128RegZero, kSimd128RegZero, kSimd128RegZero);
       __ ilvr_b(i.OutputSimd128Register(), kSimd128RegZero,
                 i.InputSimd128Register(0));
       break;
     }
     case kMips64I16x8UConvertI8x16High: {
-      CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
+      CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
       __ xor_v(kSimd128RegZero, kSimd128RegZero, kSimd128RegZero);
       __ ilvl_b(i.OutputSimd128Register(), kSimd128RegZero,
                 i.InputSimd128Register(0));
       break;
     }
     case kMips64I8x16SConvertI16x8: {
-      CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
+      CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
       Simd128Register dst = i.OutputSimd128Register();
       Simd128Register src0 = i.InputSimd128Register(0);
       Simd128Register src1 = i.InputSimd128Register(1);
@@ -3771,7 +3771,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
       break;
     }
     case kMips64I8x16UConvertI16x8: {
-      CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
+      CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
       Simd128Register dst = i.OutputSimd128Register();
       Simd128Register src0 = i.InputSimd128Register(0);
       Simd128Register src1 = i.InputSimd128Register(1);
@@ -3792,11 +3792,11 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
                  << "\"";                                                  \
   UNIMPLEMENTED();
 
-void AssembleBranchToLabels(CodeGenerator* gen, TurboAssembler* tasm,
+void AssembleBranchToLabels(CodeGenerator* gen, MacroAssembler* masm,
                             Instruction* instr, FlagsCondition condition,
                             Label* tlabel, Label* flabel, bool fallthru) {
 #undef __
-#define __ tasm->
+#define __ masm->
   MipsOperandConverter i(gen, instr);
 
   // MIPS does not have condition code flags, so compare and branch are
@@ -3867,7 +3867,7 @@ void AssembleBranchToLabels(CodeGenerator* gen, TurboAssembler* tasm,
   }
   if (!fallthru) __ Branch(flabel);  // no fallthru to flabel.
 #undef __
-#define __ tasm()->
+#define __ masm()->
 }
 
 // Assembles branches after an instruction.
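
The two `#define __` hunks above are the only non-mechanical edits in this file: inside the free function AssembleBranchToLabels the `__` shorthand is rebound to the explicit `masm` parameter, and restored to the member accessor `masm()` when the function ends. A minimal, self-contained sketch of that convention follows; the stub MacroAssembler and the *Sketch names are illustrative stand-ins, not V8 code.

#include <iostream>

// Illustrative stand-in for the real v8::internal::MacroAssembler.
struct MacroAssembler {
  void Branch(const char* label) { std::cout << "b " << label << "\n"; }
};

struct CodeGeneratorSketch {
  MacroAssembler masm_;
  MacroAssembler* masm() { return &masm_; }  // member accessor, as in the CL
  void AssembleSketch();
};

// In a free helper, "__" is rebound to the explicit parameter...
#define __ masm->
void AssembleBranchToLabelsSketch(MacroAssembler* masm, bool fallthru) {
  if (!fallthru) __ Branch("flabel");  // expands to masm->Branch("flabel")
}
#undef __

// ...while member code binds it to the accessor instead.
#define __ masm()->
void CodeGeneratorSketch::AssembleSketch() {
  __ Branch("tlabel");  // expands to masm()->Branch("tlabel")
}
#undef __

int main() {
  CodeGeneratorSketch gen;
  gen.AssembleSketch();
  AssembleBranchToLabelsSketch(gen.masm(), /*fallthru=*/false);
}

The rename is therefore purely textual here: only the spelling of the parameter and accessor changes, while the `__` expansion trick is untouched.
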
@@ -3875,7 +3875,7 @@ void CodeGenerator::AssembleArchBranch(Instruction* instr, BranchInfo* branch) { Label* tlabel = branch->true_label; Label* flabel = branch->false_label; - AssembleBranchToLabels(this, tasm(), instr, branch->condition, tlabel, flabel, + AssembleBranchToLabels(this, masm(), instr, branch->condition, tlabel, flabel, branch->fallthru); } @@ -3940,7 +3940,7 @@ void CodeGenerator::AssembleArchTrap(Instruction* instr, }; auto ool = zone()->New(this, instr); Label* tlabel = ool->entry(); - AssembleBranchToLabels(this, tasm(), instr, condition, tlabel, nullptr, true); + AssembleBranchToLabels(this, masm(), instr, condition, tlabel, nullptr, true); } #endif // V8_ENABLE_WEBASSEMBLY @@ -4384,7 +4384,7 @@ AllocatedOperand CodeGenerator::Push(InstructionOperand* source) { __ Push(g.ToRegister(source)); frame_access_state()->IncreaseSPDelta(new_slots); } else if (source->IsStackSlot()) { - UseScratchRegisterScope temps(tasm()); + UseScratchRegisterScope temps(masm()); Register scratch = temps.Acquire(); __ Ld(scratch, g.ToMemOperand(source)); __ Push(scratch); @@ -4407,7 +4407,7 @@ void CodeGenerator::Pop(InstructionOperand* dest, MachineRepresentation rep) { if (dest->IsRegister()) { __ Pop(g.ToRegister(dest)); } else if (dest->IsStackSlot()) { - UseScratchRegisterScope temps(tasm()); + UseScratchRegisterScope temps(masm()); Register scratch = temps.Acquire(); __ Pop(scratch); __ Sd(scratch, g.ToMemOperand(dest)); @@ -4435,7 +4435,7 @@ void CodeGenerator::MoveToTempLocation(InstructionOperand* source, MachineRepresentation rep) { // Must be kept in sync with {MoveTempLocationTo}. DCHECK(!source->IsImmediate()); - move_cycle_.temps.emplace(tasm()); + move_cycle_.temps.emplace(masm()); auto& temps = *move_cycle_.temps; // Temporarily exclude the reserved scratch registers while we pick one to // resolve the move cycle. 
Re-include them immediately afterwards as they @@ -4481,7 +4481,7 @@ void CodeGenerator::MoveTempLocationTo(InstructionOperand* dest, void CodeGenerator::SetPendingMove(MoveOperands* move) { InstructionOperand* src = &move->source(); InstructionOperand* dst = &move->destination(); - UseScratchRegisterScope temps(tasm()); + UseScratchRegisterScope temps(masm()); if (src->IsConstant() && dst->IsFPLocationOperand()) { Register temp = temps.Acquire(); move_cycle_.scratch_regs.set(temp); @@ -4600,7 +4600,7 @@ void CodeGenerator::AssembleMove(InstructionOperand* source, } else if (source->IsFPRegister()) { MachineRepresentation rep = LocationOperand::cast(source)->representation(); if (rep == MachineRepresentation::kSimd128) { - CpuFeatureScope msa_scope(tasm(), MIPS_SIMD); + CpuFeatureScope msa_scope(masm(), MIPS_SIMD); MSARegister src = g.ToSimd128Register(source); if (destination->IsSimd128Register()) { MSARegister dst = g.ToSimd128Register(destination); @@ -4624,7 +4624,7 @@ void CodeGenerator::AssembleMove(InstructionOperand* source, MemOperand src = g.ToMemOperand(source); MachineRepresentation rep = LocationOperand::cast(source)->representation(); if (rep == MachineRepresentation::kSimd128) { - CpuFeatureScope msa_scope(tasm(), MIPS_SIMD); + CpuFeatureScope msa_scope(masm(), MIPS_SIMD); if (destination->IsSimd128Register()) { __ ld_b(g.ToSimd128Register(destination), src); } else { @@ -4682,7 +4682,7 @@ void CodeGenerator::AssembleSwap(InstructionOperand* source, } else if (source->IsFPRegister()) { MachineRepresentation rep = LocationOperand::cast(source)->representation(); if (rep == MachineRepresentation::kSimd128) { - CpuFeatureScope msa_scope(tasm(), MIPS_SIMD); + CpuFeatureScope msa_scope(masm(), MIPS_SIMD); MSARegister temp = kSimd128ScratchReg; MSARegister src = g.ToSimd128Register(source); if (destination->IsSimd128Register()) { @@ -4722,7 +4722,7 @@ void CodeGenerator::AssembleSwap(InstructionOperand* source, MemOperand dst1(dst0.rm(), dst0.offset() + kInt64Size); MachineRepresentation rep = LocationOperand::cast(source)->representation(); if (rep == MachineRepresentation::kSimd128) { - CpuFeatureScope msa_scope(tasm(), MIPS_SIMD); + CpuFeatureScope msa_scope(masm(), MIPS_SIMD); MSARegister temp_1 = kSimd128ScratchReg; __ ld_b(temp_1, dst0); // Save destination in temp_1. __ Ld(temp_0, src0); // Then use temp_0 to copy source to destination. 
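Note: many of the hunks above (Push, Pop, SetPendingMove, MoveToTempLocation) change nothing but the argument of UseScratchRegisterScope from tasm() to masm(). For readers unfamiliar with that class, here is a minimal self-contained sketch of the RAII idea, with toy names (the real implementations live in the per-architecture assembler headers): the scope snapshots the assembler's free scratch-register list and restores it on destruction, so registers acquired inside the scope are released automatically.

    #include <bitset>
    #include <cassert>

    // Toy stand-in for the assembler and its scratch-register list.
    struct ToyAssembler {
      std::bitset<32> available;  // one bit per scratch register
    };

    class ToyScratchScope {
     public:
      explicit ToyScratchScope(ToyAssembler* masm)
          : masm_(masm), saved_(masm->available) {}
      ~ToyScratchScope() { masm_->available = saved_; }  // auto-release

      int Acquire() {
        for (int i = 0; i < 32; ++i) {
          if (masm_->available[i]) {
            masm_->available[i] = false;  // mark register i as in use
            return i;                     // hand out its register code
          }
        }
        assert(false && "no scratch register available");
        return -1;
      }

     private:
      ToyAssembler* masm_;
      std::bitset<32> saved_;  // snapshot restored by the destructor
    };

This is why the move-cycle code above can Acquire() a scratch register for a stack-to-stack move without explicit bookkeeping: the scope hands the register back as soon as the move is assembled.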
diff --git a/src/compiler/backend/mips64/instruction-scheduler-mips64.cc b/src/compiler/backend/mips64/instruction-scheduler-mips64.cc index 1d17d4bd58..af0746622f 100644 --- a/src/compiler/backend/mips64/instruction-scheduler-mips64.cc +++ b/src/compiler/backend/mips64/instruction-scheduler-mips64.cc @@ -775,7 +775,7 @@ int PrepareForTailCallLatency() { int AssertLatency() { return 1; } int PrepareCallCFunctionLatency() { - int frame_alignment = TurboAssembler::ActivationFrameAlignment(); + int frame_alignment = MacroAssembler::ActivationFrameAlignment(); if (frame_alignment > kSystemPointerSize) { return 1 + DsubuLatency(false) + AndLatency(false) + 1; } else { diff --git a/src/compiler/backend/ppc/code-generator-ppc.cc b/src/compiler/backend/ppc/code-generator-ppc.cc index d0f90150f2..67d02cddbe 100644 --- a/src/compiler/backend/ppc/code-generator-ppc.cc +++ b/src/compiler/backend/ppc/code-generator-ppc.cc @@ -23,7 +23,7 @@ namespace v8 { namespace internal { namespace compiler { -#define __ tasm()-> +#define __ masm()-> #define kScratchReg r11 @@ -170,7 +170,7 @@ class OutOfLineRecordWrite final : public OutOfLineCode { } void Generate() final { - ConstantPoolUnavailableScope constant_pool_unavailable(tasm()); + ConstantPoolUnavailableScope constant_pool_unavailable(masm()); if (COMPRESS_POINTERS_BOOL) { __ DecompressTaggedPointer(value_, value_); } @@ -409,7 +409,7 @@ Condition FlagsConditionToCondition(FlagsCondition condition, ArchOpcode op) { #define ASSEMBLE_FLOAT_MODULO() \ do { \ - FrameScope scope(tasm(), StackFrame::MANUAL); \ + FrameScope scope(masm(), StackFrame::MANUAL); \ __ PrepareCallCFunction(0, 2, kScratchReg); \ __ MovToFloatParameters(i.InputDoubleRegister(0), \ i.InputDoubleRegister(1)); \ @@ -422,7 +422,7 @@ Condition FlagsConditionToCondition(FlagsCondition condition, ArchOpcode op) { do { \ /* TODO(bmeurer): We should really get rid of this special instruction, */ \ /* and generate a CallAddress instruction instead. */ \ - FrameScope scope(tasm(), StackFrame::MANUAL); \ + FrameScope scope(masm(), StackFrame::MANUAL); \ __ PrepareCallCFunction(0, 1, kScratchReg); \ __ MovToFloatParameter(i.InputDoubleRegister(0)); \ __ CallCFunction(ExternalReference::ieee754_##name##_function(), 0, 1); \ @@ -435,7 +435,7 @@ Condition FlagsConditionToCondition(FlagsCondition condition, ArchOpcode op) { do { \ /* TODO(bmeurer): We should really get rid of this special instruction, */ \ /* and generate a CallAddress instruction instead. 
*/ \ - FrameScope scope(tasm(), StackFrame::MANUAL); \ + FrameScope scope(masm(), StackFrame::MANUAL); \ __ PrepareCallCFunction(0, 2, kScratchReg); \ __ MovToFloatParameters(i.InputDoubleRegister(0), \ i.InputDoubleRegister(1)); \ @@ -680,20 +680,20 @@ void CodeGenerator::AssemblePrepareTailCall() { namespace { -void FlushPendingPushRegisters(TurboAssembler* tasm, +void FlushPendingPushRegisters(MacroAssembler* masm, FrameAccessState* frame_access_state, ZoneVector<Register>* pending_pushes) { switch (pending_pushes->size()) { case 0: break; case 1: - tasm->Push((*pending_pushes)[0]); + masm->Push((*pending_pushes)[0]); break; case 2: - tasm->Push((*pending_pushes)[0], (*pending_pushes)[1]); + masm->Push((*pending_pushes)[0], (*pending_pushes)[1]); break; case 3: - tasm->Push((*pending_pushes)[0], (*pending_pushes)[1], + masm->Push((*pending_pushes)[0], (*pending_pushes)[1], (*pending_pushes)[2]); break; default: @@ -704,7 +704,7 @@ void FlushPendingPushRegisters(TurboAssembler* tasm, } void AdjustStackPointerForTailCall( - TurboAssembler* tasm, FrameAccessState* state, int new_slot_above_sp, + MacroAssembler* masm, FrameAccessState* state, int new_slot_above_sp, ZoneVector<Register>* pending_pushes = nullptr, bool allow_shrinkage = true) { int current_sp_offset = state->GetSPToFPSlotCount() + @@ -712,15 +712,15 @@ void AdjustStackPointerForTailCall( int stack_slot_delta = new_slot_above_sp - current_sp_offset; if (stack_slot_delta > 0) { if (pending_pushes != nullptr) { - FlushPendingPushRegisters(tasm, state, pending_pushes); + FlushPendingPushRegisters(masm, state, pending_pushes); } - tasm->AddS64(sp, sp, Operand(-stack_slot_delta * kSystemPointerSize), r0); + masm->AddS64(sp, sp, Operand(-stack_slot_delta * kSystemPointerSize), r0); state->IncreaseSPDelta(stack_slot_delta); } else if (allow_shrinkage && stack_slot_delta < 0) { if (pending_pushes != nullptr) { - FlushPendingPushRegisters(tasm, state, pending_pushes); + FlushPendingPushRegisters(masm, state, pending_pushes); } - tasm->AddS64(sp, sp, Operand(-stack_slot_delta * kSystemPointerSize), r0); + masm->AddS64(sp, sp, Operand(-stack_slot_delta * kSystemPointerSize), r0); state->IncreaseSPDelta(stack_slot_delta); } } @@ -742,7 +742,7 @@ void CodeGenerator::AssembleTailCallBeforeGap(Instruction* instr, LocationOperand::cast(move->destination())); InstructionOperand source(move->source()); AdjustStackPointerForTailCall( - tasm(), frame_access_state(), + masm(), frame_access_state(), destination_location.index() - pending_pushes.size(), &pending_pushes); // Pushes of non-register data types are not supported. @@ -752,20 +752,20 @@ void CodeGenerator::AssembleTailCallBeforeGap(Instruction* instr, // TODO(arm): We can push more than 3 registers at once. Add support in // the macro-assembler for pushing a list of registers.
if (pending_pushes.size() == 3) { - FlushPendingPushRegisters(tasm(), frame_access_state(), + FlushPendingPushRegisters(masm(), frame_access_state(), &pending_pushes); } move->Eliminate(); } - FlushPendingPushRegisters(tasm(), frame_access_state(), &pending_pushes); + FlushPendingPushRegisters(masm(), frame_access_state(), &pending_pushes); } - AdjustStackPointerForTailCall(tasm(), frame_access_state(), + AdjustStackPointerForTailCall(masm(), frame_access_state(), first_unused_slot_offset, nullptr, false); } void CodeGenerator::AssembleTailCallAfterGap(Instruction* instr, int first_unused_slot_offset) { - AdjustStackPointerForTailCall(tasm(), frame_access_state(), + AdjustStackPointerForTailCall(masm(), frame_access_state(), first_unused_slot_offset); } @@ -810,7 +810,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction( switch (opcode) { case kArchCallCodeObject: { v8::internal::Assembler::BlockTrampolinePoolScope block_trampoline_pool( - tasm()); + masm()); if (HasRegisterInput(instr, 0)) { Register reg = i.InputRegister(0); DCHECK_IMPLIES( @@ -883,7 +883,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction( } else { // We cannot use the constant pool to load the target since // we've already restored the caller's frame. - ConstantPoolUnavailableScope constant_pool_unavailable(tasm()); + ConstantPoolUnavailableScope constant_pool_unavailable(masm()); __ Jump(i.InputCode(0), RelocInfo::CODE_TARGET); } DCHECK_EQ(LeaveRC, i.OutputRCBit()); @@ -904,7 +904,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction( } case kArchCallJSFunction: { v8::internal::Assembler::BlockTrampolinePoolScope block_trampoline_pool( - tasm()); + masm()); Register func = i.InputRegister(0); if (v8_flags.debug_code) { // Check the function's context matches the context argument. @@ -1058,7 +1058,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction( { // We don't actually want to generate a pile of code for this, so just // claim there is a stack frame, without generating one. - FrameScope scope(tasm(), StackFrame::NO_FRAME_TYPE); + FrameScope scope(masm(), StackFrame::NO_FRAME_TYPE); __ Call(isolate()->builtins()->code_handle(Builtin::kAbortCSADcheck), RelocInfo::CODE_TARGET); } @@ -3320,7 +3320,7 @@ void CodeGenerator::AssembleReturn(InstructionOperand* additional_pop_count) { AssembleDeconstructFrame(); } // Constant pool is unavailable since the frame has been destructed - ConstantPoolUnavailableScope constant_pool_unavailable(tasm()); + ConstantPoolUnavailableScope constant_pool_unavailable(masm()); if (drop_jsargs) { // We must pop all arguments from the stack (including the receiver). 
// The number of arguments without the receiver is @@ -3334,8 +3334,8 @@ void CodeGenerator::AssembleReturn(InstructionOperand* additional_pop_count) { __ mov(argc_reg, Operand(parameter_slots)); __ bind(&skip); } - __ DropArguments(argc_reg, TurboAssembler::kCountIsInteger, - TurboAssembler::kCountIncludesReceiver); + __ DropArguments(argc_reg, MacroAssembler::kCountIsInteger, + MacroAssembler::kCountIncludesReceiver); } else if (additional_pop_count->IsImmediate()) { int additional_count = g.ToConstant(additional_pop_count).ToInt32(); __ Drop(parameter_slots + additional_count); @@ -3391,7 +3391,7 @@ void CodeGenerator::Pop(InstructionOperand* dest, MachineRepresentation rep) { frame_access_state()->IncreaseSPDelta(-new_slots); PPCOperandConverter g(this, nullptr); if (dest->IsFloatStackSlot() || dest->IsDoubleStackSlot()) { - UseScratchRegisterScope temps(tasm()); + UseScratchRegisterScope temps(masm()); Register scratch = temps.Acquire(); __ Pop(scratch); __ StoreU64(scratch, g.ToMemOperand(dest), r0); diff --git a/src/compiler/backend/riscv/code-generator-riscv.cc b/src/compiler/backend/riscv/code-generator-riscv.cc index 624ef0ac81..ebf6b94101 100644 --- a/src/compiler/backend/riscv/code-generator-riscv.cc +++ b/src/compiler/backend/riscv/code-generator-riscv.cc @@ -19,7 +19,7 @@ namespace v8 { namespace internal { namespace compiler { -#define __ tasm()-> +#define __ masm()-> // TODO(plind): consider renaming these macros. #define TRACE_MSG(msg) \ @@ -334,7 +334,7 @@ FPUCondition FlagsConditionToConditionCmpFPU(bool* predicate, #define ASSEMBLE_ATOMIC64_LOGIC_BINOP(bin_instr, external) \ do { \ - FrameScope scope(tasm(), StackFrame::MANUAL); \ + FrameScope scope(masm(), StackFrame::MANUAL); \ __ AddWord(a0, i.InputRegister(0), i.InputRegister(1)); \ __ PushCallerSaved(SaveFPRegsMode::kIgnore, a0, a1); \ __ PrepareCallCFunction(3, 0, kScratchReg); \ @@ -344,7 +344,7 @@ FPUCondition FlagsConditionToConditionCmpFPU(bool* predicate, #define ASSEMBLE_ATOMIC64_ARITH_BINOP(bin_instr, external) \ do { \ - FrameScope scope(tasm(), StackFrame::MANUAL); \ + FrameScope scope(masm(), StackFrame::MANUAL); \ __ AddWord(a0, i.InputRegister(0), i.InputRegister(1)); \ __ PushCallerSaved(SaveFPRegsMode::kIgnore, a0, a1); \ __ PrepareCallCFunction(3, 0, kScratchReg); \ @@ -473,7 +473,7 @@ FPUCondition FlagsConditionToConditionCmpFPU(bool* predicate, #define ASSEMBLE_IEEE754_BINOP(name) \ do { \ - FrameScope scope(tasm(), StackFrame::MANUAL); \ + FrameScope scope(masm(), StackFrame::MANUAL); \ __ PrepareCallCFunction(0, 2, kScratchReg); \ __ MovToFloatParameters(i.InputDoubleRegister(0), \ i.InputDoubleRegister(1)); \ @@ -484,7 +484,7 @@ FPUCondition FlagsConditionToConditionCmpFPU(bool* predicate, #define ASSEMBLE_IEEE754_UNOP(name) \ do { \ - FrameScope scope(tasm(), StackFrame::MANUAL); \ + FrameScope scope(masm(), StackFrame::MANUAL); \ __ PrepareCallCFunction(0, 1, kScratchReg); \ __ MovToFloatParameter(i.InputDoubleRegister(0)); \ __ CallCFunction(ExternalReference::ieee754_##name##_function(), 0, 1); \ @@ -582,7 +582,7 @@ void CodeGenerator::AssembleArchSelect(Instruction* instr, namespace { -void AdjustStackPointerForTailCall(TurboAssembler* tasm, +void AdjustStackPointerForTailCall(MacroAssembler* masm, FrameAccessState* state, int new_slot_above_sp, bool allow_shrinkage = true) { @@ -590,10 +590,10 @@ void AdjustStackPointerForTailCall(TurboAssembler* tasm, StandardFrameConstants::kFixedSlotCountAboveFp; int stack_slot_delta = new_slot_above_sp - current_sp_offset; if (stack_slot_delta > 0) { 
- tasm->SubWord(sp, sp, stack_slot_delta * kSystemPointerSize); + masm->SubWord(sp, sp, stack_slot_delta * kSystemPointerSize); state->IncreaseSPDelta(stack_slot_delta); } else if (allow_shrinkage && stack_slot_delta < 0) { - tasm->AddWord(sp, sp, -stack_slot_delta * kSystemPointerSize); + masm->AddWord(sp, sp, -stack_slot_delta * kSystemPointerSize); state->IncreaseSPDelta(stack_slot_delta); } } @@ -602,13 +602,13 @@ void AdjustStackPointerForTailCall(TurboAssembler* tasm, void CodeGenerator::AssembleTailCallBeforeGap(Instruction* instr, int first_unused_slot_offset) { - AdjustStackPointerForTailCall(tasm(), frame_access_state(), + AdjustStackPointerForTailCall(masm(), frame_access_state(), first_unused_slot_offset, false); } void CodeGenerator::AssembleTailCallAfterGap(Instruction* instr, int first_unused_slot_offset) { - AdjustStackPointerForTailCall(tasm(), frame_access_state(), + AdjustStackPointerForTailCall(masm(), frame_access_state(), first_unused_slot_offset); } @@ -829,7 +829,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction( { // We don't actually want to generate a pile of code for this, so just // claim there is a stack frame, without generating one. - FrameScope scope(tasm(), StackFrame::NO_FRAME_TYPE); + FrameScope scope(masm(), StackFrame::NO_FRAME_TYPE); __ Call(isolate()->builtins()->code_handle(Builtin::kAbortCSADcheck), RelocInfo::CODE_TARGET); } @@ -1295,7 +1295,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction( case kRiscvModS: { // TODO(bmeurer): We should really get rid of this special instruction, // and generate a CallAddress instruction instead. - FrameScope scope(tasm(), StackFrame::MANUAL); + FrameScope scope(masm(), StackFrame::MANUAL); __ PrepareCallCFunction(0, 2, kScratchReg); __ MovToFloatParameters(i.InputDoubleRegister(0), i.InputDoubleRegister(1)); @@ -1425,7 +1425,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction( case kRiscvModD: { // TODO(bmeurer): We should really get rid of this special instruction, // and generate a CallAddress instruction instead. 
- FrameScope scope(tasm(), StackFrame::MANUAL); + FrameScope scope(masm(), StackFrame::MANUAL); __ PrepareCallCFunction(0, 2, kScratchReg); __ MovToFloatParameters(i.InputDoubleRegister(0), i.InputDoubleRegister(1)); @@ -1940,7 +1940,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction( break; #if V8_TARGET_ARCH_RISCV32 case kRiscvWord32AtomicPairLoad: { - FrameScope scope(tasm(), StackFrame::MANUAL); + FrameScope scope(masm(), StackFrame::MANUAL); __ AddWord(a0, i.InputRegister(0), i.InputRegister(1)); __ PushCallerSaved(SaveFPRegsMode::kIgnore, a0, a1); __ PrepareCallCFunction(1, 0, kScratchReg); @@ -1949,7 +1949,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction( break; } case kRiscvWord32AtomicPairStore: { - FrameScope scope(tasm(), StackFrame::MANUAL); + FrameScope scope(masm(), StackFrame::MANUAL); __ AddWord(a0, i.InputRegister(0), i.InputRegister(1)); __ PushCallerSaved(SaveFPRegsMode::kIgnore); __ PrepareCallCFunction(3, 0, kScratchReg); @@ -1972,7 +1972,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction( ATOMIC64_BINOP_LOGIC_CASE(Or, OrPair, atomic_pair_or_function) ATOMIC64_BINOP_LOGIC_CASE(Xor, XorPair, atomic_pair_xor_function) case kRiscvWord32AtomicPairExchange: { - FrameScope scope(tasm(), StackFrame::MANUAL); + FrameScope scope(masm(), StackFrame::MANUAL); __ PushCallerSaved(SaveFPRegsMode::kIgnore, a0, a1); __ PrepareCallCFunction(3, 0, kScratchReg); __ AddWord(a0, i.InputRegister(0), i.InputRegister(1)); @@ -1982,7 +1982,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction( break; } case kRiscvWord32AtomicPairCompareExchange: { - FrameScope scope(tasm(), StackFrame::MANUAL); + FrameScope scope(masm(), StackFrame::MANUAL); __ PushCallerSaved(SaveFPRegsMode::kIgnore, a0, a1); __ PrepareCallCFunction(5, 0, kScratchReg); __ add(a0, i.InputRegister(0), i.InputRegister(1)); @@ -3711,11 +3711,11 @@ bool IsInludeEqual(Condition cc) { } } -void AssembleBranchToLabels(CodeGenerator* gen, TurboAssembler* tasm, +void AssembleBranchToLabels(CodeGenerator* gen, MacroAssembler* masm, Instruction* instr, FlagsCondition condition, Label* tlabel, Label* flabel, bool fallthru) { #undef __ -#define __ tasm-> +#define __ masm-> RiscvOperandConverter i(gen, instr); // RISC-V does not have condition code flags, so compare and branch are @@ -3806,7 +3806,7 @@ void AssembleBranchToLabels(CodeGenerator* gen, TurboAssembler* tasm, } if (!fallthru) __ Branch(flabel); // no fallthru to flabel. #undef __ -#define __ tasm()-> +#define __ masm()-> } // Assembles branches after an instruction. @@ -3814,7 +3814,7 @@ void CodeGenerator::AssembleArchBranch(Instruction* instr, BranchInfo* branch) { Label* tlabel = branch->true_label; Label* flabel = branch->false_label; - AssembleBranchToLabels(this, tasm(), instr, branch->condition, tlabel, flabel, + AssembleBranchToLabels(this, masm(), instr, branch->condition, tlabel, flabel, branch->fallthru); } @@ -3878,7 +3878,7 @@ void CodeGenerator::AssembleArchTrap(Instruction* instr, }; auto ool = zone()->New(this, instr); Label* tlabel = ool->entry(); - AssembleBranchToLabels(this, tasm(), instr, condition, tlabel, nullptr, true); + AssembleBranchToLabels(this, masm(), instr, condition, tlabel, nullptr, true); } // Assembles boolean materializations after an instruction. @@ -4373,7 +4373,7 @@ void CodeGenerator::MoveToTempLocation(InstructionOperand* source, MachineRepresentation rep) { // Must be kept in sync with {MoveTempLocationTo}. 
DCHECK(!source->IsImmediate()); - move_cycle_.temps.emplace(tasm()); + move_cycle_.temps.emplace(masm()); auto& temps = *move_cycle_.temps; // Temporarily exclude the reserved scratch registers while we pick one to // resolve the move cycle. Re-include them immediately afterwards as they @@ -4419,7 +4419,7 @@ void CodeGenerator::MoveTempLocationTo(InstructionOperand* dest, void CodeGenerator::SetPendingMove(MoveOperands* move) { InstructionOperand* src = &move->source(); InstructionOperand* dst = &move->destination(); - UseScratchRegisterScope temps(tasm()); + UseScratchRegisterScope temps(masm()); if (src->IsConstant() && dst->IsFPLocationOperand()) { Register temp = temps.Acquire(); move_cycle_.scratch_regs.set(temp); @@ -4748,7 +4748,7 @@ void CodeGenerator::AssembleSwap(InstructionOperand* source, } } #endif - UseScratchRegisterScope scope(tasm()); + UseScratchRegisterScope scope(masm()); Register temp_0 = kScratchReg; Register temp_1 = kScratchReg2; __ LoadWord(temp_0, src); @@ -4775,7 +4775,7 @@ AllocatedOperand CodeGenerator::Push(InstructionOperand* source) { __ Push(g.ToRegister(source)); frame_access_state()->IncreaseSPDelta(new_slots); } else if (source->IsStackSlot()) { - UseScratchRegisterScope temps(tasm()); + UseScratchRegisterScope temps(masm()); Register scratch = temps.Acquire(); __ LoadWord(scratch, g.ToMemOperand(source)); __ Push(scratch); @@ -4798,7 +4798,7 @@ void CodeGenerator::Pop(InstructionOperand* dest, MachineRepresentation rep) { if (dest->IsRegister()) { __ Pop(g.ToRegister(dest)); } else if (dest->IsStackSlot()) { - UseScratchRegisterScope temps(tasm()); + UseScratchRegisterScope temps(masm()); Register scratch = temps.Acquire(); __ Pop(scratch); __ StoreWord(scratch, g.ToMemOperand(dest)); diff --git a/src/compiler/backend/riscv/instruction-scheduler-riscv.cc b/src/compiler/backend/riscv/instruction-scheduler-riscv.cc index ea9e603920..67c0c6fbce 100644 --- a/src/compiler/backend/riscv/instruction-scheduler-riscv.cc +++ b/src/compiler/backend/riscv/instruction-scheduler-riscv.cc @@ -744,7 +744,7 @@ int AssemblePopArgumentsAdoptFrameLatency() { int AssertLatency() { return 1; } int PrepareCallCFunctionLatency() { - int frame_alignment = TurboAssembler::ActivationFrameAlignment(); + int frame_alignment = MacroAssembler::ActivationFrameAlignment(); if (frame_alignment > kSystemPointerSize) { return 1 + Sub64Latency(false) + AndLatency(false) + 1; } else { diff --git a/src/compiler/backend/riscv/instruction-selector-riscv32.cc b/src/compiler/backend/riscv/instruction-selector-riscv32.cc index a8db8248b3..6f14d959e2 100644 --- a/src/compiler/backend/riscv/instruction-selector-riscv32.cc +++ b/src/compiler/backend/riscv/instruction-selector-riscv32.cc @@ -65,7 +65,7 @@ void EmitLoad(InstructionSelector* selector, Node* node, InstructionCode opcode, selector->CanAddressRelativeToRootsRegister(m.ResolvedValue())) { ptrdiff_t const delta = g.GetIntegerConstantValue(index) + - TurboAssemblerBase::RootRegisterOffsetForExternalReference( + MacroAssemblerBase::RootRegisterOffsetForExternalReference( selector->isolate(), m.ResolvedValue()); // Check that the delta is a 32-bit integer due to the limitations of // immediate operands. 
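Note: the EmitLoad hunk above (and its riscv64 and x64 twins below) all rename the same helper: RootRegisterOffsetForExternalReference computes the distance from the isolate root to an external reference, so the load can be encoded as a single root-register-relative memory operand, guarded by the 32-bit-immediate check mentioned in the comment. A minimal self-contained sketch of that guard (editor's toy model; the names are hypothetical):

    #include <cstdint>

    // A delta can be folded into a memory operand only if it fits the
    // 32-bit displacement field of the instruction encoding.
    static bool is_int32(intptr_t value) {
      return value == static_cast<int32_t>(value);
    }

    // Toy equivalent of RootRegisterOffsetForExternalReference():
    // the offset of `reference` relative to the isolate root.
    intptr_t RootRelativeOffset(uintptr_t isolate_root, uintptr_t reference) {
      return static_cast<intptr_t>(reference - isolate_root);
    }

    bool CanAddressRelativeToRoot(uintptr_t isolate_root, uintptr_t reference) {
      // Mirrors the "delta is a 32-bit integer" check in the selectors.
      return is_int32(RootRelativeOffset(isolate_root, reference));
    }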
diff --git a/src/compiler/backend/riscv/instruction-selector-riscv64.cc b/src/compiler/backend/riscv/instruction-selector-riscv64.cc index 83f5b5ecb4..cc4d863de0 100644 --- a/src/compiler/backend/riscv/instruction-selector-riscv64.cc +++ b/src/compiler/backend/riscv/instruction-selector-riscv64.cc @@ -168,7 +168,7 @@ void EmitLoad(InstructionSelector* selector, Node* node, InstructionCode opcode, selector->CanAddressRelativeToRootsRegister(m.ResolvedValue())) { ptrdiff_t const delta = g.GetIntegerConstantValue(index) + - TurboAssemblerBase::RootRegisterOffsetForExternalReference( + MacroAssemblerBase::RootRegisterOffsetForExternalReference( selector->isolate(), m.ResolvedValue()); // Check that the delta is a 32-bit integer due to the limitations of // immediate operands. diff --git a/src/compiler/backend/s390/code-generator-s390.cc b/src/compiler/backend/s390/code-generator-s390.cc index 93b240a863..80bd7ec9f8 100644 --- a/src/compiler/backend/s390/code-generator-s390.cc +++ b/src/compiler/backend/s390/code-generator-s390.cc @@ -22,7 +22,7 @@ namespace v8 { namespace internal { namespace compiler { -#define __ tasm()-> +#define __ masm()-> #define kScratchReg ip @@ -619,7 +619,7 @@ static inline int AssembleUnaryOp(Instruction* instr, _R _r, _M _m, _I _i) { #define ASSEMBLE_FLOAT_MODULO() \ do { \ - FrameScope scope(tasm(), StackFrame::MANUAL); \ + FrameScope scope(masm(), StackFrame::MANUAL); \ __ PrepareCallCFunction(0, 2, kScratchReg); \ __ MovToFloatParameters(i.InputDoubleRegister(0), \ i.InputDoubleRegister(1)); \ @@ -631,7 +631,7 @@ static inline int AssembleUnaryOp(Instruction* instr, _R _r, _M _m, _I _i) { do { \ /* TODO(bmeurer): We should really get rid of this special instruction, */ \ /* and generate a CallAddress instruction instead. */ \ - FrameScope scope(tasm(), StackFrame::MANUAL); \ + FrameScope scope(masm(), StackFrame::MANUAL); \ __ PrepareCallCFunction(0, 1, kScratchReg); \ __ MovToFloatParameter(i.InputDoubleRegister(0)); \ __ CallCFunction(ExternalReference::ieee754_##name##_function(), 0, 1); \ @@ -643,7 +643,7 @@ static inline int AssembleUnaryOp(Instruction* instr, _R _r, _M _m, _I _i) { do { \ /* TODO(bmeurer): We should really get rid of this special instruction, */ \ /* and generate a CallAddress instruction instead. 
*/ \ - FrameScope scope(tasm(), StackFrame::MANUAL); \ + FrameScope scope(masm(), StackFrame::MANUAL); \ __ PrepareCallCFunction(0, 2, kScratchReg); \ __ MovToFloatParameters(i.InputDoubleRegister(0), \ i.InputDoubleRegister(1)); \ @@ -1021,20 +1021,20 @@ void CodeGenerator::AssemblePrepareTailCall() { namespace { -void FlushPendingPushRegisters(TurboAssembler* tasm, +void FlushPendingPushRegisters(MacroAssembler* masm, FrameAccessState* frame_access_state, ZoneVector<Register>* pending_pushes) { switch (pending_pushes->size()) { case 0: break; case 1: - tasm->Push((*pending_pushes)[0]); + masm->Push((*pending_pushes)[0]); break; case 2: - tasm->Push((*pending_pushes)[0], (*pending_pushes)[1]); + masm->Push((*pending_pushes)[0], (*pending_pushes)[1]); break; case 3: - tasm->Push((*pending_pushes)[0], (*pending_pushes)[1], + masm->Push((*pending_pushes)[0], (*pending_pushes)[1], (*pending_pushes)[2]); break; default: @@ -1045,7 +1045,7 @@ void FlushPendingPushRegisters(TurboAssembler* tasm, } void AdjustStackPointerForTailCall( - TurboAssembler* tasm, FrameAccessState* state, int new_slot_above_sp, + MacroAssembler* masm, FrameAccessState* state, int new_slot_above_sp, ZoneVector<Register>* pending_pushes = nullptr, bool allow_shrinkage = true) { int current_sp_offset = state->GetSPToFPSlotCount() + @@ -1053,15 +1053,15 @@ void AdjustStackPointerForTailCall( int stack_slot_delta = new_slot_above_sp - current_sp_offset; if (stack_slot_delta > 0) { if (pending_pushes != nullptr) { - FlushPendingPushRegisters(tasm, state, pending_pushes); + FlushPendingPushRegisters(masm, state, pending_pushes); } - tasm->AddS64(sp, sp, Operand(-stack_slot_delta * kSystemPointerSize)); + masm->AddS64(sp, sp, Operand(-stack_slot_delta * kSystemPointerSize)); state->IncreaseSPDelta(stack_slot_delta); } else if (allow_shrinkage && stack_slot_delta < 0) { if (pending_pushes != nullptr) { - FlushPendingPushRegisters(tasm, state, pending_pushes); + FlushPendingPushRegisters(masm, state, pending_pushes); } - tasm->AddS64(sp, sp, Operand(-stack_slot_delta * kSystemPointerSize)); + masm->AddS64(sp, sp, Operand(-stack_slot_delta * kSystemPointerSize)); state->IncreaseSPDelta(stack_slot_delta); } } @@ -1083,7 +1083,7 @@ void CodeGenerator::AssembleTailCallBeforeGap(Instruction* instr, LocationOperand::cast(move->destination())); InstructionOperand source(move->source()); AdjustStackPointerForTailCall( - tasm(), frame_access_state(), + masm(), frame_access_state(), destination_location.index() - pending_pushes.size(), &pending_pushes); // Pushes of non-register data types are not supported. @@ -1093,20 +1093,20 @@ void CodeGenerator::AssembleTailCallBeforeGap(Instruction* instr, // TODO(arm): We can push more than 3 registers at once. Add support in // the macro-assembler for pushing a list of registers.
if (pending_pushes.size() == 3) { - FlushPendingPushRegisters(tasm(), frame_access_state(), + FlushPendingPushRegisters(masm(), frame_access_state(), &pending_pushes); } move->Eliminate(); } - FlushPendingPushRegisters(tasm(), frame_access_state(), &pending_pushes); + FlushPendingPushRegisters(masm(), frame_access_state(), &pending_pushes); } - AdjustStackPointerForTailCall(tasm(), frame_access_state(), + AdjustStackPointerForTailCall(masm(), frame_access_state(), first_unused_slot_offset, nullptr, false); } void CodeGenerator::AssembleTailCallAfterGap(Instruction* instr, int first_unused_slot_offset) { - AdjustStackPointerForTailCall(tasm(), frame_access_state(), + AdjustStackPointerForTailCall(masm(), frame_access_state(), first_unused_slot_offset); } @@ -1218,7 +1218,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction( } else { // We cannot use the constant pool to load the target since // we've already restored the caller's frame. - ConstantPoolUnavailableScope constant_pool_unavailable(tasm()); + ConstantPoolUnavailableScope constant_pool_unavailable(masm()); __ Jump(i.InputCode(0), RelocInfo::CODE_TARGET); } frame_access_state()->ClearSPDelta(); @@ -1351,7 +1351,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction( { // We don't actually want to generate a pile of code for this, so just // claim there is a stack frame, without generating one. - FrameScope scope(tasm(), StackFrame::NO_FRAME_TYPE); + FrameScope scope(masm(), StackFrame::NO_FRAME_TYPE); __ Call(isolate()->builtins()->code_handle(Builtin::kAbortCSADcheck), RelocInfo::CODE_TARGET); } @@ -3580,9 +3580,9 @@ void CodeGenerator::AssembleReturn(InstructionOperand* additional_pop_count) { __ mov(argc_reg, Operand(parameter_slots)); __ bind(&skip); } - __ DropArguments(argc_reg, TurboAssembler::kCountIsInteger, - TurboAssembler::kCountIncludesReceiver); + __ DropArguments(argc_reg, MacroAssembler::kCountIsInteger, + MacroAssembler::kCountIncludesReceiver); } else if (additional_pop_count->IsImmediate()) { int additional_count = g.ToConstant(additional_pop_count).ToInt32(); __ Drop(parameter_slots + additional_count); diff --git a/src/compiler/backend/x64/code-generator-x64.cc b/src/compiler/backend/x64/code-generator-x64.cc index 7e8a211e5f..d457c24029 100644 --- a/src/compiler/backend/x64/code-generator-x64.cc +++ b/src/compiler/backend/x64/code-generator-x64.cc @@ -33,7 +33,7 @@ namespace v8 { namespace internal { namespace compiler { -#define __ tasm()-> +#define __ masm()-> // Adds X64 specific methods for decoding operands.
class X64OperandConverter : public InstructionOperandConverter { @@ -334,29 +334,29 @@ class OutOfLineRecordWrite final : public OutOfLineCode { }; template -int EmitStore(TurboAssembler* tasm, Operand operand, Register value, - MachineRepresentation rep) { +int EmitStore(MacroAssembler* masm, Operand operand, Register value, + MachineRepresentation rep) { int store_instr_offset; if (order == std::memory_order_relaxed) { - store_instr_offset = tasm->pc_offset(); + store_instr_offset = masm->pc_offset(); switch (rep) { case MachineRepresentation::kWord8: - tasm->movb(operand, value); + masm->movb(operand, value); break; case MachineRepresentation::kWord16: - tasm->movw(operand, value); + masm->movw(operand, value); break; case MachineRepresentation::kWord32: - tasm->movl(operand, value); + masm->movl(operand, value); break; case MachineRepresentation::kWord64: - tasm->movq(operand, value); + masm->movq(operand, value); break; case MachineRepresentation::kTagged: - tasm->StoreTaggedField(operand, value); + masm->StoreTaggedField(operand, value); break; case MachineRepresentation::kSandboxedPointer: - tasm->StoreSandboxedPointerField(operand, value); + masm->StoreSandboxedPointerField(operand, value); break; default: UNREACHABLE(); @@ -367,28 +367,28 @@ int EmitStore(TurboAssembler* tasm, Operand operand, Register value, DCHECK_EQ(order, std::memory_order_seq_cst); switch (rep) { case MachineRepresentation::kWord8: - tasm->movq(kScratchRegister, value); - store_instr_offset = tasm->pc_offset(); - tasm->xchgb(kScratchRegister, operand); + masm->movq(kScratchRegister, value); + store_instr_offset = masm->pc_offset(); + masm->xchgb(kScratchRegister, operand); break; case MachineRepresentation::kWord16: - tasm->movq(kScratchRegister, value); - store_instr_offset = tasm->pc_offset(); - tasm->xchgw(kScratchRegister, operand); + masm->movq(kScratchRegister, value); + store_instr_offset = masm->pc_offset(); + masm->xchgw(kScratchRegister, operand); break; case MachineRepresentation::kWord32: - tasm->movq(kScratchRegister, value); - store_instr_offset = tasm->pc_offset(); - tasm->xchgl(kScratchRegister, operand); + masm->movq(kScratchRegister, value); + store_instr_offset = masm->pc_offset(); + masm->xchgl(kScratchRegister, operand); break; case MachineRepresentation::kWord64: - tasm->movq(kScratchRegister, value); - store_instr_offset = tasm->pc_offset(); - tasm->xchgq(kScratchRegister, operand); + masm->movq(kScratchRegister, value); + store_instr_offset = masm->pc_offset(); + masm->xchgq(kScratchRegister, operand); break; case MachineRepresentation::kTagged: - store_instr_offset = tasm->pc_offset(); - tasm->AtomicStoreTaggedField(operand, value); + store_instr_offset = masm->pc_offset(); + masm->AtomicStoreTaggedField(operand, value); break; default: UNREACHABLE(); @@ -397,29 +397,29 @@ int EmitStore(TurboAssembler* tasm, Operand operand, Register value, } template -int EmitStore(TurboAssembler* tasm, Operand operand, Immediate value, - MachineRepresentation rep); +int EmitStore(MacroAssembler* masm, Operand operand, Immediate value, + MachineRepresentation rep); template <> -int EmitStore(TurboAssembler* tasm, Operand operand, - Immediate value, - MachineRepresentation rep) { - int store_instr_offset = tasm->pc_offset(); +int EmitStore(MacroAssembler* masm, Operand operand, + Immediate value, + MachineRepresentation rep) { + int store_instr_offset = masm->pc_offset(); switch (rep) { case MachineRepresentation::kWord8: - tasm->movb(operand, value); + masm->movb(operand, value); break; case 
MachineRepresentation::kWord16: - tasm->movw(operand, value); + masm->movw(operand, value); break; case MachineRepresentation::kWord32: - tasm->movl(operand, value); + masm->movl(operand, value); break; case MachineRepresentation::kWord64: - tasm->movq(operand, value); + masm->movq(operand, value); break; case MachineRepresentation::kTagged: - tasm->StoreTaggedField(operand, value); + masm->StoreTaggedField(operand, value); break; default: UNREACHABLE(); @@ -509,7 +509,7 @@ void EmitOOLTrapIfNeeded(Zone* zone, CodeGenerator* codegen, #endif // V8_ENABLE_WEBASSEMBLY #ifdef V8_IS_TSAN -void EmitMemoryProbeForTrapHandlerIfNeeded(TurboAssembler* tasm, +void EmitMemoryProbeForTrapHandlerIfNeeded(MacroAssembler* masm, Register scratch, Operand operand, StubCallMode mode, int size) { #if V8_ENABLE_WEBASSEMBLY && V8_TRAP_HANDLER_SUPPORTED @@ -522,16 +522,16 @@ void EmitMemoryProbeForTrapHandlerIfNeeded(TurboAssembler* tasm, mode == StubCallMode::kCallWasmRuntimeStub) { switch (size) { case kInt8Size: - tasm->movb(scratch, operand); + masm->movb(scratch, operand); break; case kInt16Size: - tasm->movw(scratch, operand); + masm->movw(scratch, operand); break; case kInt32Size: - tasm->movl(scratch, operand); + masm->movl(scratch, operand); break; case kInt64Size: - tasm->movq(scratch, operand); + masm->movq(scratch, operand); break; default: UNREACHABLE(); @@ -569,14 +569,14 @@ class OutOfLineTSANStore : public OutOfLineCode { // A direct call to a wasm runtime stub defined in this module. // Just encode the stub index. This will be patched when the code // is added to the native module and copied into wasm code space. - tasm()->CallTSANStoreStub(scratch0_, value_, save_fp_mode, size_, + masm()->CallTSANStoreStub(scratch0_, value_, save_fp_mode, size_, StubCallMode::kCallWasmRuntimeStub, memory_order_); return; } #endif // V8_ENABLE_WEBASSEMBLY - tasm()->CallTSANStoreStub(scratch0_, value_, save_fp_mode, size_, + masm()->CallTSANStoreStub(scratch0_, value_, save_fp_mode, size_, StubCallMode::kCallBuiltinPointer, memory_order_); } @@ -592,7 +592,7 @@ class OutOfLineTSANStore : public OutOfLineCode { Zone* zone_; }; -void EmitTSANStoreOOL(Zone* zone, CodeGenerator* codegen, TurboAssembler* tasm, +void EmitTSANStoreOOL(Zone* zone, CodeGenerator* codegen, MacroAssembler* masm, Operand operand, Register value_reg, X64OperandConverter& i, StubCallMode mode, int size, std::memory_order order) { @@ -606,45 +606,45 @@ void EmitTSANStoreOOL(Zone* zone, CodeGenerator* codegen, TurboAssembler* tasm, Register scratch0 = i.TempRegister(0); auto tsan_ool = zone->New(codegen, operand, value_reg, scratch0, mode, size, order); - tasm->jmp(tsan_ool->entry()); - tasm->bind(tsan_ool->exit()); + masm->jmp(tsan_ool->entry()); + masm->bind(tsan_ool->exit()); } template -Register GetTSANValueRegister(TurboAssembler* tasm, Register value, +Register GetTSANValueRegister(MacroAssembler* masm, Register value, X64OperandConverter& i, MachineRepresentation rep) { if (rep == MachineRepresentation::kSandboxedPointer) { // SandboxedPointers need to be encoded. 
Register value_reg = i.TempRegister(1); - tasm->movq(value_reg, value); - tasm->EncodeSandboxedPointer(value_reg); + masm->movq(value_reg, value); + masm->EncodeSandboxedPointer(value_reg); return value_reg; } return value; } template -Register GetTSANValueRegister(TurboAssembler* tasm, Immediate value, +Register GetTSANValueRegister(MacroAssembler* masm, Immediate value, X64OperandConverter& i, MachineRepresentation rep); template <> Register GetTSANValueRegister( - TurboAssembler* tasm, Immediate value, X64OperandConverter& i, + MacroAssembler* masm, Immediate value, X64OperandConverter& i, MachineRepresentation rep) { Register value_reg = i.TempRegister(1); - tasm->movq(value_reg, value); + masm->movq(value_reg, value); if (rep == MachineRepresentation::kSandboxedPointer) { // SandboxedPointers need to be encoded. - tasm->EncodeSandboxedPointer(value_reg); + masm->EncodeSandboxedPointer(value_reg); } return value_reg; } template void EmitTSANAwareStore(Zone* zone, CodeGenerator* codegen, - TurboAssembler* tasm, Operand operand, ValueT value, + MacroAssembler* masm, Operand operand, ValueT value, X64OperandConverter& i, StubCallMode stub_call_mode, MachineRepresentation rep, Instruction* instr) { // The FOR_TESTING code doesn't initialize the root register. We can't call @@ -654,17 +654,17 @@ void EmitTSANAwareStore(Zone* zone, CodeGenerator* codegen, // path. It is not crucial, but it would be nice to remove this restriction. if (codegen->code_kind() != CodeKind::FOR_TESTING) { if (instr->HasMemoryAccessMode()) { - EmitOOLTrapIfNeeded(zone, codegen, instr->opcode(), - instr, tasm->pc_offset()); + EmitOOLTrapIfNeeded(zone, codegen, instr->opcode(), instr, + masm->pc_offset()); } int size = ElementSizeInBytes(rep); - EmitMemoryProbeForTrapHandlerIfNeeded(tasm, i.TempRegister(0), operand, + EmitMemoryProbeForTrapHandlerIfNeeded(masm, i.TempRegister(0), operand, stub_call_mode, size); - Register value_reg = GetTSANValueRegister(tasm, value, i, rep); - EmitTSANStoreOOL(zone, codegen, tasm, operand, value_reg, i, stub_call_mode, + Register value_reg = GetTSANValueRegister(masm, value, i, rep); + EmitTSANStoreOOL(zone, codegen, masm, operand, value_reg, i, stub_call_mode, size, order); } else { - int store_instr_offset = EmitStore(tasm, operand, value, rep); + int store_instr_offset = EmitStore(masm, operand, value, rep); if (instr->HasMemoryAccessMode()) { EmitOOLTrapIfNeeded(zone, codegen, instr->opcode(), instr, store_instr_offset); @@ -718,7 +718,7 @@ class OutOfLineTSANRelaxedLoad final : public OutOfLineCode { }; void EmitTSANRelaxedLoadOOLIfNeeded(Zone* zone, CodeGenerator* codegen, - TurboAssembler* tasm, Operand operand, + MacroAssembler* masm, Operand operand, X64OperandConverter& i, StubCallMode mode, int size) { // The FOR_TESTING code doesn't initialize the root register. 
We can't call @@ -731,26 +731,26 @@ void EmitTSANRelaxedLoadOOLIfNeeded(Zone* zone, CodeGenerator* codegen, Register scratch0 = i.TempRegister(0); auto tsan_ool = zone->New(codegen, operand, scratch0, mode, size); - tasm->jmp(tsan_ool->entry()); - tasm->bind(tsan_ool->exit()); + masm->jmp(tsan_ool->entry()); + masm->bind(tsan_ool->exit()); } #else template void EmitTSANAwareStore(Zone* zone, CodeGenerator* codegen, - TurboAssembler* tasm, Operand operand, ValueT value, + MacroAssembler* masm, Operand operand, ValueT value, X64OperandConverter& i, StubCallMode stub_call_mode, MachineRepresentation rep, Instruction* instr) { DCHECK(order == std::memory_order_relaxed || order == std::memory_order_seq_cst); - int store_instr_off = EmitStore(tasm, operand, value, rep); + int store_instr_off = EmitStore(masm, operand, value, rep); if (instr->HasMemoryAccessMode()) { EmitOOLTrapIfNeeded(zone, codegen, instr->opcode(), instr, store_instr_off); } } void EmitTSANRelaxedLoadOOLIfNeeded(Zone* zone, CodeGenerator* codegen, - TurboAssembler* tasm, Operand operand, + MacroAssembler* masm, Operand operand, X64OperandConverter& i, StubCallMode mode, int size) {} #endif // V8_IS_TSAN @@ -923,7 +923,7 @@ void EmitTSANRelaxedLoadOOLIfNeeded(Zone* zone, CodeGenerator* codegen, #define ASSEMBLE_AVX_BINOP(asm_instr) \ do { \ - CpuFeatureScope avx_scope(tasm(), AVX); \ + CpuFeatureScope avx_scope(masm(), AVX); \ if (HasAddressingMode(instr)) { \ size_t index = 1; \ Operand right = i.MemoryOperand(&index); \ @@ -983,7 +983,7 @@ void EmitTSANRelaxedLoadOOLIfNeeded(Zone* zone, CodeGenerator* codegen, #define ASSEMBLE_SIMD_BINOP(opcode) \ do { \ if (CpuFeatures::IsSupported(AVX)) { \ - CpuFeatureScope avx_scope(tasm(), AVX); \ + CpuFeatureScope avx_scope(masm(), AVX); \ __ v##opcode(i.OutputSimd128Register(), i.InputSimd128Register(0), \ i.InputSimd128Register(1)); \ } else { \ @@ -1015,7 +1015,7 @@ void EmitTSANRelaxedLoadOOLIfNeeded(Zone* zone, CodeGenerator* codegen, XMMRegister dst = i.OutputSimd128Register(); \ byte input_index = instr->InputCount() == 2 ? 
1 : 0; \ if (CpuFeatures::IsSupported(AVX)) { \ - CpuFeatureScope avx_scope(tasm(), AVX); \ + CpuFeatureScope avx_scope(masm(), AVX); \ DCHECK(instr->InputAt(input_index)->IsSimd128Register()); \ __ v##opcode(dst, i.InputSimd128Register(0), \ i.InputSimd128Register(input_index)); \ @@ -1030,7 +1030,7 @@ void EmitTSANRelaxedLoadOOLIfNeeded(Zone* zone, CodeGenerator* codegen, XMMRegister dst = i.OutputSimd128Register(); \ XMMRegister src = i.InputSimd128Register(0); \ if (CpuFeatures::IsSupported(AVX)) { \ - CpuFeatureScope avx_scope(tasm(), AVX); \ + CpuFeatureScope avx_scope(masm(), AVX); \ DCHECK(instr->InputAt(1)->IsSimd128Register()); \ __ v##opcode(dst, src, i.InputSimd128Register(1), imm); \ } else { \ @@ -1061,7 +1061,7 @@ void EmitTSANRelaxedLoadOOLIfNeeded(Zone* zone, CodeGenerator* codegen, XMMRegister dst = i.OutputSimd128Register(); \ if (HasImmediateInput(instr, 1)) { \ if (CpuFeatures::IsSupported(AVX)) { \ - CpuFeatureScope avx_scope(tasm(), AVX); \ + CpuFeatureScope avx_scope(masm(), AVX); \ __ v##opcode(dst, i.InputSimd128Register(0), \ byte{i.InputInt##width(1)}); \ } else { \ @@ -1074,7 +1074,7 @@ void EmitTSANRelaxedLoadOOLIfNeeded(Zone* zone, CodeGenerator* codegen, __ andq(kScratchRegister, Immediate(mask)); \ __ Movq(kScratchDoubleReg, kScratchRegister); \ if (CpuFeatures::IsSupported(AVX)) { \ - CpuFeatureScope avx_scope(tasm(), AVX); \ + CpuFeatureScope avx_scope(masm(), AVX); \ __ v##opcode(dst, i.InputSimd128Register(0), kScratchDoubleReg); \ } else { \ DCHECK_EQ(dst, i.InputSimd128Register(0)); \ @@ -1102,13 +1102,13 @@ void EmitTSANRelaxedLoadOOLIfNeeded(Zone* zone, CodeGenerator* codegen, EmitOOLTrapIfNeeded(zone(), this, opcode, instr, load_offset); \ } while (false) -#define ASSEMBLE_SEQ_CST_STORE(rep) \ - do { \ - Register value = i.InputRegister(0); \ - Operand operand = i.MemoryOperand(1); \ - EmitTSANAwareStore( \ - zone(), this, tasm(), operand, value, i, DetermineStubCallMode(), \ - rep, instr); \ +#define ASSEMBLE_SEQ_CST_STORE(rep) \ + do { \ + Register value = i.InputRegister(0); \ + Operand operand = i.MemoryOperand(1); \ + EmitTSANAwareStore( \ + zone(), this, masm(), operand, value, i, DetermineStubCallMode(), rep, \ + instr); \ } while (false) void CodeGenerator::AssembleDeconstructFrame() { @@ -1127,7 +1127,7 @@ void CodeGenerator::AssemblePrepareTailCall() { namespace { void AdjustStackPointerForTailCall(Instruction* instr, - TurboAssembler* assembler, Linkage* linkage, + MacroAssembler* assembler, Linkage* linkage, OptimizedCompilationInfo* info, FrameAccessState* state, int new_slot_above_sp, @@ -1163,7 +1163,7 @@ void AdjustStackPointerForTailCall(Instruction* instr, } } -void SetupSimdImmediateInRegister(TurboAssembler* assembler, uint32_t* imms, +void SetupSimdImmediateInRegister(MacroAssembler* assembler, uint32_t* imms, XMMRegister reg) { assembler->Move(reg, make_uint64(imms[3], imms[2]), make_uint64(imms[1], imms[0])); @@ -1186,7 +1186,7 @@ void CodeGenerator::AssembleTailCallBeforeGap(Instruction* instr, LocationOperand destination_location( LocationOperand::cast(move->destination())); InstructionOperand source(move->source()); - AdjustStackPointerForTailCall(instr, tasm(), linkage(), info(), + AdjustStackPointerForTailCall(instr, masm(), linkage(), info(), frame_access_state(), destination_location.index()); if (source.IsStackSlot()) { @@ -1205,14 +1205,14 @@ void CodeGenerator::AssembleTailCallBeforeGap(Instruction* instr, move->Eliminate(); } } - AdjustStackPointerForTailCall(instr, tasm(), linkage(), info(), + 
AdjustStackPointerForTailCall(instr, masm(), linkage(), info(), frame_access_state(), first_unused_slot_offset, false); } void CodeGenerator::AssembleTailCallAfterGap(Instruction* instr, int first_unused_slot_offset) { - AdjustStackPointerForTailCall(instr, tasm(), linkage(), info(), + AdjustStackPointerForTailCall(instr, masm(), linkage(), info(), frame_access_state(), first_unused_slot_offset); } @@ -1464,7 +1464,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction( { // We don't actually want to generate a pile of code for this, so just // claim there is a stack frame, without generating one. - FrameScope scope(tasm(), StackFrame::NO_FRAME_TYPE); + FrameScope scope(masm(), StackFrame::NO_FRAME_TYPE); __ Call(BUILTIN_CODE(isolate(), AbortCSADcheck), RelocInfo::CODE_TARGET); } @@ -1561,12 +1561,12 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction( DetermineStubCallMode()); if (arch_opcode == kArchStoreWithWriteBarrier) { EmitTSANAwareStore( - zone(), this, tasm(), operand, value, i, DetermineStubCallMode(), + zone(), this, masm(), operand, value, i, DetermineStubCallMode(), MachineRepresentation::kTagged, instr); } else { DCHECK_EQ(arch_opcode, kArchAtomicStoreWithWriteBarrier); EmitTSANAwareStore( - zone(), this, tasm(), operand, value, i, DetermineStubCallMode(), + zone(), this, masm(), operand, value, i, DetermineStubCallMode(), MachineRepresentation::kTagged, instr); } if (mode > RecordWriteMode::kValueIsPointer) { @@ -1873,7 +1873,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction( ASSEMBLE_SSE_UNOP(Cvtss2sd); break; case kSSEFloat32Round: { - CpuFeatureScope sse_scope(tasm(), SSE4_1); + CpuFeatureScope sse_scope(masm(), SSE4_1); RoundingMode const mode = static_cast(MiscField::decode(instr->opcode())); __ Roundss(i.OutputDoubleRegister(), i.InputDoubleRegister(0), mode); @@ -1930,7 +1930,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction( // The following 2 instruction implicitly use rax. 
__ fnstsw_ax(); if (CpuFeatures::IsSupported(SAHF)) { - CpuFeatureScope sahf_scope(tasm(), SAHF); + CpuFeatureScope sahf_scope(masm(), SAHF); __ sahf(); } else { __ shrl(rax, Immediate(8)); @@ -2066,7 +2066,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction( ASSEMBLE_SSE_UNOP(Sqrtsd); break; case kSSEFloat64Round: { - CpuFeatureScope sse_scope(tasm(), SSE4_1); + CpuFeatureScope sse_scope(masm(), SSE4_1); RoundingMode const mode = static_cast(MiscField::decode(instr->opcode())); __ Roundsd(i.OutputDoubleRegister(), i.InputDoubleRegister(0), mode); @@ -2389,7 +2389,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction( } break; case kAVXFloat32Cmp: { - CpuFeatureScope avx_scope(tasm(), AVX); + CpuFeatureScope avx_scope(masm(), AVX); if (instr->InputAt(1)->IsFPRegister()) { __ vucomiss(i.InputDoubleRegister(0), i.InputDoubleRegister(1)); } else { @@ -2413,7 +2413,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction( __ Movaps(i.OutputDoubleRegister(), i.OutputDoubleRegister()); break; case kAVXFloat64Cmp: { - CpuFeatureScope avx_scope(tasm(), AVX); + CpuFeatureScope avx_scope(masm(), AVX); if (instr->InputAt(1)->IsFPRegister()) { __ vucomisd(i.InputDoubleRegister(0), i.InputDoubleRegister(1)); } else { @@ -2487,12 +2487,12 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction( if (HasImmediateInput(instr, index)) { Immediate value(Immediate(i.InputInt8(index))); EmitTSANAwareStore( - zone(), this, tasm(), operand, value, i, DetermineStubCallMode(), + zone(), this, masm(), operand, value, i, DetermineStubCallMode(), MachineRepresentation::kWord8, instr); } else { Register value(i.InputRegister(index)); EmitTSANAwareStore( - zone(), this, tasm(), operand, value, i, DetermineStubCallMode(), + zone(), this, masm(), operand, value, i, DetermineStubCallMode(), MachineRepresentation::kWord8, instr); } break; @@ -2522,12 +2522,12 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction( if (HasImmediateInput(instr, index)) { Immediate value(Immediate(i.InputInt16(index))); EmitTSANAwareStore( - zone(), this, tasm(), operand, value, i, DetermineStubCallMode(), + zone(), this, masm(), operand, value, i, DetermineStubCallMode(), MachineRepresentation::kWord16, instr); } else { Register value(i.InputRegister(index)); EmitTSANAwareStore( - zone(), this, tasm(), operand, value, i, DetermineStubCallMode(), + zone(), this, masm(), operand, value, i, DetermineStubCallMode(), MachineRepresentation::kWord16, instr); } break; @@ -2538,7 +2538,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction( if (HasAddressingMode(instr)) { Operand address(i.MemoryOperand()); __ movl(i.OutputRegister(), address); - EmitTSANRelaxedLoadOOLIfNeeded(zone(), this, tasm(), address, i, + EmitTSANRelaxedLoadOOLIfNeeded(zone(), this, masm(), address, i, DetermineStubCallMode(), kInt32Size); } else { if (HasRegisterInput(instr, 0)) { @@ -2554,12 +2554,12 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction( if (HasImmediateInput(instr, index)) { Immediate value(i.InputImmediate(index)); EmitTSANAwareStore( - zone(), this, tasm(), operand, value, i, DetermineStubCallMode(), + zone(), this, masm(), operand, value, i, DetermineStubCallMode(), MachineRepresentation::kWord32, instr); } else { Register value(i.InputRegister(index)); EmitTSANAwareStore( - zone(), this, tasm(), operand, value, i, DetermineStubCallMode(), + zone(), this, masm(), operand, value, i, DetermineStubCallMode(), 
MachineRepresentation::kWord32, instr); } } @@ -2572,7 +2572,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction( CHECK(instr->HasOutput()); Operand address(i.MemoryOperand()); __ DecompressTaggedSigned(i.OutputRegister(), address); - EmitTSANRelaxedLoadOOLIfNeeded(zone(), this, tasm(), address, i, + EmitTSANRelaxedLoadOOLIfNeeded(zone(), this, masm(), address, i, DetermineStubCallMode(), kTaggedSize); break; } @@ -2580,7 +2580,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction( CHECK(instr->HasOutput()); Operand address(i.MemoryOperand()); __ DecompressTaggedPointer(i.OutputRegister(), address); - EmitTSANRelaxedLoadOOLIfNeeded(zone(), this, tasm(), address, i, + EmitTSANRelaxedLoadOOLIfNeeded(zone(), this, masm(), address, i, DetermineStubCallMode(), kTaggedSize); break; } @@ -2588,7 +2588,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction( CHECK(instr->HasOutput()); Operand address(i.MemoryOperand()); __ DecompressAnyTagged(i.OutputRegister(), address); - EmitTSANRelaxedLoadOOLIfNeeded(zone(), this, tasm(), address, i, + EmitTSANRelaxedLoadOOLIfNeeded(zone(), this, masm(), address, i, DetermineStubCallMode(), kTaggedSize); break; } @@ -2599,12 +2599,12 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction( if (HasImmediateInput(instr, index)) { Immediate value(i.InputImmediate(index)); EmitTSANAwareStore( - zone(), this, tasm(), operand, value, i, DetermineStubCallMode(), + zone(), this, masm(), operand, value, i, DetermineStubCallMode(), MachineRepresentation::kTagged, instr); } else { Register value(i.InputRegister(index)); EmitTSANAwareStore( - zone(), this, tasm(), operand, value, i, DetermineStubCallMode(), + zone(), this, masm(), operand, value, i, DetermineStubCallMode(), MachineRepresentation::kTagged, instr); } break; @@ -2615,7 +2615,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction( Register dst = i.OutputRegister(); __ movq(dst, address); __ DecodeSandboxedPointer(dst); - EmitTSANRelaxedLoadOOLIfNeeded(zone(), this, tasm(), address, i, + EmitTSANRelaxedLoadOOLIfNeeded(zone(), this, masm(), address, i, DetermineStubCallMode(), kSystemPointerSize); break; @@ -2627,7 +2627,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction( CHECK(!HasImmediateInput(instr, index)); Register value(i.InputRegister(index)); EmitTSANAwareStore( - zone(), this, tasm(), operand, value, i, DetermineStubCallMode(), + zone(), this, masm(), operand, value, i, DetermineStubCallMode(), MachineRepresentation::kSandboxedPointer, instr); break; } @@ -2636,7 +2636,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction( EmitOOLTrapIfNeeded(zone(), this, opcode, instr, __ pc_offset()); Operand address(i.MemoryOperand()); __ movq(i.OutputRegister(), address); - EmitTSANRelaxedLoadOOLIfNeeded(zone(), this, tasm(), address, i, + EmitTSANRelaxedLoadOOLIfNeeded(zone(), this, masm(), address, i, DetermineStubCallMode(), kInt64Size); } else { size_t index = 0; @@ -2644,12 +2644,12 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction( if (HasImmediateInput(instr, index)) { Immediate value(i.InputImmediate(index)); EmitTSANAwareStore( - zone(), this, tasm(), operand, value, i, DetermineStubCallMode(), + zone(), this, masm(), operand, value, i, DetermineStubCallMode(), MachineRepresentation::kWord64, instr); } else { Register value(i.InputRegister(index)); EmitTSANAwareStore( - zone(), this, tasm(), operand, value, i, DetermineStubCallMode(), + zone(), this, 
masm(), operand, value, i, DetermineStubCallMode(), MachineRepresentation::kWord64, instr); } } @@ -3206,7 +3206,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction( break; } case kX64I64x2Eq: { - CpuFeatureScope sse_scope(tasm(), SSE4_1); + CpuFeatureScope sse_scope(masm(), SSE4_1); ASSEMBLE_SIMD_BINOP(pcmpeqq); break; } @@ -3486,7 +3486,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction( for (int j = 0; j < 4; j++) { imm[j] = i.InputUint32(j); } - SetupSimdImmediateInRegister(tasm(), imm, dst); + SetupSimdImmediateInRegister(masm(), imm, dst); break; } case kX64S128Zero: { @@ -3994,7 +3994,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction( mask[j - 1] = i.InputUint32(j); } - SetupSimdImmediateInRegister(tasm(), mask, tmp_simd); + SetupSimdImmediateInRegister(masm(), mask, tmp_simd); __ Pshufb(dst, tmp_simd); } else { // two input operands DCHECK_NE(tmp_simd, i.InputSimd128Register(1)); @@ -4008,7 +4008,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction( mask1[j - 2] |= (lane < kSimd128Size ? lane : 0x80) << k; } } - SetupSimdImmediateInRegister(tasm(), mask1, tmp_simd); + SetupSimdImmediateInRegister(masm(), mask1, tmp_simd); __ Pshufb(kScratchDoubleReg, tmp_simd); uint32_t mask2[4] = {}; if (instr->InputAt(1)->IsSimd128Register()) { @@ -4024,7 +4024,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction( mask2[j - 2] |= (lane >= kSimd128Size ? (lane & 0x0F) : 0x80) << k; } } - SetupSimdImmediateInRegister(tasm(), mask2, tmp_simd); + SetupSimdImmediateInRegister(masm(), mask2, tmp_simd); __ Pshufb(dst, tmp_simd); __ Por(dst, kScratchDoubleReg); } @@ -5057,8 +5057,8 @@ void CodeGenerator::AssembleReturn(InstructionOperand* additional_pop_count) { __ j(greater, &mismatch_return, Label::kNear); __ Ret(parameter_slots * kSystemPointerSize, scratch_reg); __ bind(&mismatch_return); - __ DropArguments(argc_reg, scratch_reg, TurboAssembler::kCountIsInteger, - TurboAssembler::kCountIncludesReceiver); + __ DropArguments(argc_reg, scratch_reg, MacroAssembler::kCountIsInteger, + MacroAssembler::kCountIncludesReceiver); // We use a return instead of a jump for better return address prediction. 
__ Ret(); } else if (additional_pop_count->IsImmediate()) { @@ -5082,7 +5082,7 @@ void CodeGenerator::AssembleReturn(InstructionOperand* additional_pop_count) { } } -void CodeGenerator::FinishCode() { tasm()->PatchConstPool(); } +void CodeGenerator::FinishCode() { masm()->PatchConstPool(); } void CodeGenerator::PrepareForDeoptimizationExits( ZoneDeque<DeoptimizationExit*>* exits) {} diff --git a/src/compiler/backend/x64/instruction-selector-x64.cc b/src/compiler/backend/x64/instruction-selector-x64.cc index a5fdb3529b..ce70d49e60 100644 --- a/src/compiler/backend/x64/instruction-selector-x64.cc +++ b/src/compiler/backend/x64/instruction-selector-x64.cc @@ -224,7 +224,7 @@ class X64OperandGenerator final : public OperandGenerator { m.object().ResolvedValue())) { ptrdiff_t const delta = m.index().ResolvedValue() + - TurboAssemblerBase::RootRegisterOffsetForExternalReference( + MacroAssemblerBase::RootRegisterOffsetForExternalReference( selector()->isolate(), m.object().ResolvedValue()); if (is_int32(delta)) { inputs[(*input_count)++] = TempImmediate(static_cast<int32_t>(delta)); @@ -2538,7 +2538,7 @@ void VisitWord64EqualImpl(InstructionSelector* selector, Node* node, return VisitCompare( selector, opcode, g.TempImmediate( - TurboAssemblerBase::RootRegisterOffsetForRootIndex(root_index)), + MacroAssemblerBase::RootRegisterOffsetForRootIndex(root_index)), g.UseRegister(m.left().node()), cont); } } @@ -2576,7 +2576,7 @@ void VisitWord32EqualImpl(InstructionSelector* selector, Node* node, return VisitCompare( selector, opcode, g.TempImmediate( - TurboAssemblerBase::RootRegisterOffsetForRootIndex(root_index)), + MacroAssemblerBase::RootRegisterOffsetForRootIndex(root_index)), g.UseRegister(left), cont); } } diff --git a/src/compiler/basic-block-instrumentor.cc b/src/compiler/basic-block-instrumentor.cc index aa97ce705c..448bed84c9 100644 --- a/src/compiler/basic-block-instrumentor.cc +++ b/src/compiler/basic-block-instrumentor.cc @@ -84,7 +84,7 @@ BasicBlockProfilerData* BasicBlockInstrumentor::Instrument( // PatchBasicBlockCountersReference). An important and subtle point: we // cannot use the root handle basic_block_counters_marker_handle() and must // create a new separate handle. Otherwise - // TurboAssemblerBase::IndirectLoadConstant would helpfully emit a + // MacroAssemblerBase::IndirectLoadConstant would helpfully emit a // root-relative load rather than putting this value in the constants table // where we expect it to be for patching.
counters_array = graph->NewNode(common.HeapConstant(Handle<HeapObject>::New( diff --git a/src/compiler/pipeline.cc b/src/compiler/pipeline.cc index d1c5e27779..c5db4bbaa2 100644 --- a/src/compiler/pipeline.cc +++ b/src/compiler/pipeline.cc @@ -3473,10 +3473,10 @@ wasm::WasmCompilationResult Pipeline::GenerateCodeForWasmNativeStub( CodeGenerator* code_generator = pipeline.code_generator(); wasm::WasmCompilationResult result; - code_generator->tasm()->GetCode( + code_generator->masm()->GetCode( nullptr, &result.code_desc, code_generator->safepoint_table_builder(), static_cast<int>(code_generator->handler_table_offset())); - result.instr_buffer = code_generator->tasm()->ReleaseBuffer(); + result.instr_buffer = code_generator->masm()->ReleaseBuffer(); result.source_positions = code_generator->GetSourcePositionTable(); result.protected_instructions_data = code_generator->GetProtectedInstructionsData(); @@ -3702,11 +3702,11 @@ void Pipeline::GenerateCodeForWasmFunction( auto result = std::make_unique<wasm::WasmCompilationResult>(); CodeGenerator* code_generator = pipeline.code_generator(); - code_generator->tasm()->GetCode( + code_generator->masm()->GetCode( nullptr, &result->code_desc, code_generator->safepoint_table_builder(), static_cast<int>(code_generator->handler_table_offset())); - result->instr_buffer = code_generator->tasm()->ReleaseBuffer(); + result->instr_buffer = code_generator->masm()->ReleaseBuffer(); result->frame_slot_count = code_generator->frame()->GetTotalFrameSlotCount(); result->tagged_parameter_slots = call_descriptor->GetTaggedParameterSlots(); result->source_positions = code_generator->GetSourcePositionTable(); diff --git a/src/diagnostics/unwinding-info-win64.cc b/src/diagnostics/unwinding-info-win64.cc index 767eb015ab..a71b866135 100644 --- a/src/diagnostics/unwinding-info-win64.cc +++ b/src/diagnostics/unwinding-info-win64.cc @@ -447,7 +447,7 @@ void InitUnwindingRecord(Record* record, size_t code_size_in_bytes) { // Hardcoded thunk. AssemblerOptions options; options.record_reloc_info_for_serialization = false; - TurboAssembler masm(nullptr, options, CodeObjectRequired::kNo, + MacroAssembler masm(nullptr, options, CodeObjectRequired::kNo, NewAssemblerBuffer(64)); masm.Mov(x16, Operand(reinterpret_cast<uint64_t>(&CRASH_HANDLER_FUNCTION_NAME))); diff --git a/src/execution/isolate-data.h b/src/execution/isolate-data.h index 8323cf6a7c..14a5036215 100644 --- a/src/execution/isolate-data.h +++ b/src/execution/isolate-data.h @@ -215,12 +215,12 @@ class IsolateData final { // runtime checks. void* embedder_data_[Internals::kNumIsolateDataSlots] = {}; - // Stores the state of the caller for TurboAssembler::CallCFunction so that + // Stores the state of the caller for MacroAssembler::CallCFunction so that // the sampling CPU profiler can iterate the stack during such calls. These // are stored on IsolateData so that they can be stored to with only one move // instruction in compiled code. // - // The FP and PC that are saved right before TurboAssembler::CallCFunction. + // The FP and PC that are saved right before MacroAssembler::CallCFunction.
Address fast_c_call_caller_fp_ = kNullAddress; Address fast_c_call_caller_pc_ = kNullAddress; // The address of the fast API callback right before it's executed from diff --git a/src/maglev/arm64/maglev-ir-arm64.cc b/src/maglev/arm64/maglev-ir-arm64.cc index b320aed150..7b9029ba84 100644 --- a/src/maglev/arm64/maglev-ir-arm64.cc +++ b/src/maglev/arm64/maglev-ir-arm64.cc @@ -2302,7 +2302,7 @@ void Return::GenerateCode(MaglevAssembler* masm, const ProcessingState& state) { __ LeaveFrame(StackFrame::MAGLEV); // Drop receiver + arguments according to dynamic arguments size. - __ DropArguments(params_size, TurboAssembler::kCountIncludesReceiver); + __ DropArguments(params_size, MacroAssembler::kCountIncludesReceiver); __ Ret(); } diff --git a/src/maglev/x64/maglev-ir-x64.cc b/src/maglev/x64/maglev-ir-x64.cc index 27a720a73f..84b32d19cc 100644 --- a/src/maglev/x64/maglev-ir-x64.cc +++ b/src/maglev/x64/maglev-ir-x64.cc @@ -2342,8 +2342,8 @@ void Return::GenerateCode(MaglevAssembler* masm, const ProcessingState& state) { __ bind(&drop_dynamic_arg_size); // Drop receiver + arguments according to dynamic arguments size. - __ DropArguments(actual_params_size, r9, TurboAssembler::kCountIsInteger, - TurboAssembler::kCountIncludesReceiver); + __ DropArguments(actual_params_size, r9, MacroAssembler::kCountIsInteger, + MacroAssembler::kCountIncludesReceiver); __ Ret(); } diff --git a/src/regexp/arm64/regexp-macro-assembler-arm64.cc b/src/regexp/arm64/regexp-macro-assembler-arm64.cc index a265797e24..1739586736 100644 --- a/src/regexp/arm64/regexp-macro-assembler-arm64.cc +++ b/src/regexp/arm64/regexp-macro-assembler-arm64.cc @@ -814,7 +814,7 @@ Handle<HeapObject> RegExpMacroAssemblerARM64::GetCode(Handle<String> source) { DCHECK_EQ(registers_to_retain.Count(), kNumCalleeSavedRegisters); __ PushCPURegList(registers_to_retain); - __ Push<TurboAssembler::kSignLR>(lr, fp); + __ Push<MacroAssembler::kSignLR>(lr, fp); __ PushCPURegList(argument_registers); // Set frame pointer in place. @@ -1125,7 +1125,7 @@ Handle<HeapObject> RegExpMacroAssemblerARM64::GetCode(Handle<String> source) { // Set stack pointer back to first register to retain. __ Mov(sp, fp); - __ Pop<TurboAssembler::kAuthLR>(fp, lr); + __ Pop<MacroAssembler::kAuthLR>(fp, lr); // Restore registers. __ PopCPURegList(registers_to_retain); @@ -1656,14 +1656,14 @@ void RegExpMacroAssemblerARM64::CallIf(Label* to, Condition condition) { void RegExpMacroAssemblerARM64::RestoreLinkRegister() { // TODO(v8:10026): Remove when we stop compacting for code objects that are // active on the call stack. - __ Pop<TurboAssembler::kAuthLR>(padreg, lr); + __ Pop<MacroAssembler::kAuthLR>(padreg, lr); __ Add(lr, lr, Operand(masm_->CodeObject())); } void RegExpMacroAssemblerARM64::SaveLinkRegister() { __ Sub(lr, lr, Operand(masm_->CodeObject())); - __ Push<TurboAssembler::kSignLR>(lr, padreg); + __ Push<MacroAssembler::kSignLR>(lr, padreg); } diff --git a/src/wasm/baseline/arm/liftoff-assembler-arm.h b/src/wasm/baseline/arm/liftoff-assembler-arm.h index c4727f8134..adf797dcaa 100644 --- a/src/wasm/baseline/arm/liftoff-assembler-arm.h +++ b/src/wasm/baseline/arm/liftoff-assembler-arm.h @@ -162,7 +162,7 @@ inline void I64BinopI(LiftoffAssembler* assm, LiftoffRegister dst, LeaveCC, al); } -template <void (TurboAssembler::*op)(Register, Register, Register, Register, Register), bool is_left_shift> +template <void (MacroAssembler::*op)(Register, Register, Register, Register, Register), bool is_left_shift> inline void I64Shiftop(LiftoffAssembler* assm, LiftoffRegister dst, @@ -184,7 +184,7 @@ inline void I64Shiftop(LiftoffAssembler* assm, LiftoffRegister dst, Register* later_src_reg = is_left_shift ?
&src_low : &src_high; if (*later_src_reg == clobbered_dst_reg) { *later_src_reg = assm->GetUnusedRegister(kGpReg, pinned).gp(); - assm->TurboAssembler::Move(*later_src_reg, clobbered_dst_reg); + assm->MacroAssembler::Move(*later_src_reg, clobbered_dst_reg); } (assm->*op)(dst_low, dst_high, src_low, src_high, amount_capped); @@ -210,14 +210,14 @@ inline void EmitFloatMinOrMax(LiftoffAssembler* assm, RegisterType dst, MinOrMax min_or_max) { DCHECK(RegisterType::kSizeInBytes == 4 || RegisterType::kSizeInBytes == 8); if (lhs == rhs) { - assm->TurboAssembler::Move(dst, lhs); + assm->MacroAssembler::Move(dst, lhs); return; } Label done, is_nan; if (min_or_max == MinOrMax::kMin) { - assm->TurboAssembler::FloatMin(dst, lhs, rhs, &is_nan); + assm->MacroAssembler::FloatMin(dst, lhs, rhs, &is_nan); } else { - assm->TurboAssembler::FloatMax(dst, lhs, rhs, &is_nan); + assm->MacroAssembler::FloatMax(dst, lhs, rhs, &is_nan); } assm->b(&done); assm->bind(&is_nan); @@ -547,7 +547,7 @@ void LiftoffAssembler::PatchPrepareStackFrame( bind(&continuation); // Now allocate the stack space. Note that this might do more than just - // decrementing the SP; consult {TurboAssembler::AllocateStackSpace}. + // decrementing the SP; consult {MacroAssembler::AllocateStackSpace}. AllocateStackSpace(frame_size); // Jump back to the start of the function, from {pc_offset()} to @@ -584,14 +584,14 @@ void LiftoffAssembler::LoadConstant(LiftoffRegister reg, WasmValue value, RelocInfo::Mode rmode) { switch (value.type().kind()) { case kI32: - TurboAssembler::Move(reg.gp(), Operand(value.to_i32(), rmode)); + MacroAssembler::Move(reg.gp(), Operand(value.to_i32(), rmode)); break; case kI64: { DCHECK(RelocInfo::IsNoInfo(rmode)); int32_t low_word = value.to_i64(); int32_t high_word = value.to_i64() >> 32; - TurboAssembler::Move(reg.low_gp(), Operand(low_word)); - TurboAssembler::Move(reg.high_gp(), Operand(high_word)); + MacroAssembler::Move(reg.low_gp(), Operand(low_word)); + MacroAssembler::Move(reg.high_gp(), Operand(high_word)); break; } case kF32: @@ -1450,7 +1450,7 @@ void LiftoffAssembler::MoveStackValue(uint32_t dst_offset, uint32_t src_offset, void LiftoffAssembler::Move(Register dst, Register src, ValueKind kind) { DCHECK_NE(dst, src); DCHECK(kind == kI32 || is_reference(kind)); - TurboAssembler::Move(dst, src); + MacroAssembler::Move(dst, src); } void LiftoffAssembler::Move(DoubleRegister dst, DoubleRegister src, @@ -1828,7 +1828,7 @@ bool LiftoffAssembler::emit_i64_remu(LiftoffRegister dst, LiftoffRegister lhs, void LiftoffAssembler::emit_i64_shl(LiftoffRegister dst, LiftoffRegister src, Register amount) { - liftoff::I64Shiftop<&TurboAssembler::LslPair, true>(this, dst, src, amount); + liftoff::I64Shiftop<&MacroAssembler::LslPair, true>(this, dst, src, amount); } void LiftoffAssembler::emit_i64_shli(LiftoffRegister dst, LiftoffRegister src, @@ -1843,7 +1843,7 @@ void LiftoffAssembler::emit_i64_shli(LiftoffRegister dst, LiftoffRegister src, void LiftoffAssembler::emit_i64_sar(LiftoffRegister dst, LiftoffRegister src, Register amount) { - liftoff::I64Shiftop<&TurboAssembler::AsrPair, false>(this, dst, src, amount); + liftoff::I64Shiftop<&MacroAssembler::AsrPair, false>(this, dst, src, amount); } void LiftoffAssembler::emit_i64_sari(LiftoffRegister dst, LiftoffRegister src, @@ -1858,7 +1858,7 @@ void LiftoffAssembler::emit_i64_sari(LiftoffRegister dst, LiftoffRegister src, void LiftoffAssembler::emit_i64_shr(LiftoffRegister dst, LiftoffRegister src, Register amount) { - liftoff::I64Shiftop<&TurboAssembler::LsrPair, 
false>(this, dst, src, amount); + liftoff::I64Shiftop<&MacroAssembler::LsrPair, false>(this, dst, src, amount); } void LiftoffAssembler::emit_i64_shri(LiftoffRegister dst, LiftoffRegister src, @@ -2085,7 +2085,7 @@ bool LiftoffAssembler::emit_type_conversion(WasmOpcode opcode, LiftoffRegister src, Label* trap) { switch (opcode) { case kExprI32ConvertI64: - TurboAssembler::Move(dst.gp(), src.low_gp()); + MacroAssembler::Move(dst.gp(), src.low_gp()); return true; case kExprI32SConvertF32: { UseScratchRegisterScope temps(this); @@ -2272,7 +2272,7 @@ void LiftoffAssembler::emit_i64_signextend_i16(LiftoffRegister dst, void LiftoffAssembler::emit_i64_signextend_i32(LiftoffRegister dst, LiftoffRegister src) { - TurboAssembler::Move(dst.low_gp(), src.low_gp()); + MacroAssembler::Move(dst.low_gp(), src.low_gp()); mov(dst.high_gp(), Operand(src.low_gp(), ASR, 31)); } @@ -2472,7 +2472,7 @@ void LiftoffAssembler::LoadTransform(LiftoffRegister dst, Register src_addr, } else if (memtype == MachineType::Int64()) { vld1(Neon32, NeonListOperand(dst.low_fp()), NeonMemOperand(actual_src_addr)); - TurboAssembler::Move(dst.high_fp(), dst.low_fp()); + MacroAssembler::Move(dst.high_fp(), dst.low_fp()); } } } @@ -2484,13 +2484,13 @@ void LiftoffAssembler::LoadLane(LiftoffRegister dst, LiftoffRegister src, UseScratchRegisterScope temps(this); Register actual_src_addr = liftoff::CalculateActualAddress( this, &temps, addr, offset_reg, offset_imm); - TurboAssembler::Move(liftoff::GetSimd128Register(dst), + MacroAssembler::Move(liftoff::GetSimd128Register(dst), liftoff::GetSimd128Register(src)); *protected_load_pc = pc_offset(); LoadStoreLaneParams load_params(type.mem_type().representation(), laneidx); NeonListOperand dst_op = NeonListOperand(load_params.low_op ? dst.low_fp() : dst.high_fp()); - TurboAssembler::LoadLane(load_params.sz, dst_op, load_params.laneidx, + MacroAssembler::LoadLane(load_params.sz, dst_op, load_params.laneidx, NeonMemOperand(actual_src_addr)); } @@ -2506,7 +2506,7 @@ void LiftoffAssembler::StoreLane(Register dst, Register offset, LoadStoreLaneParams store_params(type.mem_rep(), laneidx); NeonListOperand src_op = NeonListOperand(store_params.low_op ? src.low_fp() : src.high_fp()); - TurboAssembler::StoreLane(store_params.sz, src_op, store_params.laneidx, + MacroAssembler::StoreLane(store_params.sz, src_op, store_params.laneidx, NeonMemOperand(actual_dst_addr)); } @@ -2519,7 +2519,7 @@ void LiftoffAssembler::emit_i8x16_swizzle(LiftoffRegister dst, if (dst == lhs) { // dst will be overwritten, so keep the table somewhere else. QwNeonRegister tbl = temps.AcquireQ(); - TurboAssembler::Move(tbl, liftoff::GetSimd128Register(lhs)); + MacroAssembler::Move(tbl, liftoff::GetSimd128Register(lhs)); table = NeonListOperand(tbl); } @@ -2564,8 +2564,8 @@ void LiftoffAssembler::emit_s128_relaxed_laneselect(LiftoffRegister dst, void LiftoffAssembler::emit_f64x2_splat(LiftoffRegister dst, LiftoffRegister src) { - TurboAssembler::Move(dst.low_fp(), src.fp()); - TurboAssembler::Move(dst.high_fp(), src.fp()); + MacroAssembler::Move(dst.low_fp(), src.fp()); + MacroAssembler::Move(dst.high_fp(), src.fp()); } void LiftoffAssembler::emit_f64x2_extract_lane(LiftoffRegister dst, @@ -4243,7 +4243,7 @@ void LiftoffAssembler::CallTrapCallbackForTesting() { void LiftoffAssembler::AssertUnreachable(AbortReason reason) { // Asserts unreachable within the wasm code. 
- TurboAssembler::AssertUnreachable(reason); + MacroAssembler::AssertUnreachable(reason); } void LiftoffAssembler::PushRegisters(LiftoffRegList regs) { diff --git a/src/wasm/baseline/arm64/liftoff-assembler-arm64.h b/src/wasm/baseline/arm64/liftoff-assembler-arm64.h index 1bc42ed72a..7ab34f5b47 100644 --- a/src/wasm/baseline/arm64/liftoff-assembler-arm64.h +++ b/src/wasm/baseline/arm64/liftoff-assembler-arm64.h @@ -357,7 +357,7 @@ void LiftoffAssembler::PatchPrepareStackFrame( bind(&continuation); // Now allocate the stack space. Note that this might do more than just - // decrementing the SP; consult {TurboAssembler::Claim}. + // decrementing the SP; consult {MacroAssembler::Claim}. Claim(frame_size, 1); // Jump back to the start of the function, from {pc_offset()} to @@ -3252,7 +3252,7 @@ void LiftoffAssembler::CallTrapCallbackForTesting() { } void LiftoffAssembler::AssertUnreachable(AbortReason reason) { - TurboAssembler::AssertUnreachable(reason); + MacroAssembler::AssertUnreachable(reason); } void LiftoffAssembler::PushRegisters(LiftoffRegList regs) { diff --git a/src/wasm/baseline/ia32/liftoff-assembler-ia32.h b/src/wasm/baseline/ia32/liftoff-assembler-ia32.h index 0f28d7157a..1db3255a96 100644 --- a/src/wasm/baseline/ia32/liftoff-assembler-ia32.h +++ b/src/wasm/baseline/ia32/liftoff-assembler-ia32.h @@ -288,7 +288,7 @@ void LiftoffAssembler::PatchPrepareStackFrame( bind(&continuation); // Now allocate the stack space. Note that this might do more than just - // decrementing the SP; consult {TurboAssembler::AllocateStackSpace}. + // decrementing the SP; consult {MacroAssembler::AllocateStackSpace}. AllocateStackSpace(frame_size); // Jump back to the start of the function, from {pc_offset()} to @@ -319,21 +319,21 @@ void LiftoffAssembler::LoadConstant(LiftoffRegister reg, WasmValue value, RelocInfo::Mode rmode) { switch (value.type().kind()) { case kI32: - TurboAssembler::Move(reg.gp(), Immediate(value.to_i32(), rmode)); + MacroAssembler::Move(reg.gp(), Immediate(value.to_i32(), rmode)); break; case kI64: { DCHECK(RelocInfo::IsNoInfo(rmode)); int32_t low_word = value.to_i64(); int32_t high_word = value.to_i64() >> 32; - TurboAssembler::Move(reg.low_gp(), Immediate(low_word)); - TurboAssembler::Move(reg.high_gp(), Immediate(high_word)); + MacroAssembler::Move(reg.low_gp(), Immediate(low_word)); + MacroAssembler::Move(reg.high_gp(), Immediate(high_word)); break; } case kF32: - TurboAssembler::Move(reg.fp(), value.to_f32_boxed().get_bits()); + MacroAssembler::Move(reg.fp(), value.to_f32_boxed().get_bits()); break; case kF64: - TurboAssembler::Move(reg.fp(), value.to_f64_boxed().get_bits()); + MacroAssembler::Move(reg.fp(), value.to_f64_boxed().get_bits()); break; default: UNREACHABLE(); @@ -1704,7 +1704,7 @@ inline LiftoffRegister ReplaceInPair(LiftoffRegister pair, Register old_reg, inline void Emit64BitShiftOperation( LiftoffAssembler* assm, LiftoffRegister dst, LiftoffRegister src, - Register amount, void (TurboAssembler::*emit_shift)(Register, Register)) { + Register amount, void (MacroAssembler::*emit_shift)(Register, Register)) { // Temporary registers cannot overlap with {dst}. 
LiftoffRegList pinned{dst}; @@ -1743,7 +1743,7 @@ inline void Emit64BitShiftOperation( void LiftoffAssembler::emit_i64_shl(LiftoffRegister dst, LiftoffRegister src, Register amount) { liftoff::Emit64BitShiftOperation(this, dst, src, amount, - &TurboAssembler::ShlPair_cl); + &MacroAssembler::ShlPair_cl); } void LiftoffAssembler::emit_i64_shli(LiftoffRegister dst, LiftoffRegister src, @@ -1762,7 +1762,7 @@ void LiftoffAssembler::emit_i64_shli(LiftoffRegister dst, LiftoffRegister src, void LiftoffAssembler::emit_i64_sar(LiftoffRegister dst, LiftoffRegister src, Register amount) { liftoff::Emit64BitShiftOperation(this, dst, src, amount, - &TurboAssembler::SarPair_cl); + &MacroAssembler::SarPair_cl); } void LiftoffAssembler::emit_i64_sari(LiftoffRegister dst, LiftoffRegister src, @@ -1781,7 +1781,7 @@ void LiftoffAssembler::emit_i64_sari(LiftoffRegister dst, LiftoffRegister src, void LiftoffAssembler::emit_i64_shr(LiftoffRegister dst, LiftoffRegister src, Register amount) { liftoff::Emit64BitShiftOperation(this, dst, src, amount, - &TurboAssembler::ShrPair_cl); + &MacroAssembler::ShrPair_cl); } void LiftoffAssembler::emit_i64_shri(LiftoffRegister dst, LiftoffRegister src, @@ -2025,10 +2025,10 @@ void LiftoffAssembler::emit_f32_copysign(DoubleRegister dst, DoubleRegister lhs, void LiftoffAssembler::emit_f32_abs(DoubleRegister dst, DoubleRegister src) { static constexpr uint32_t kSignBit = uint32_t{1} << 31; if (dst == src) { - TurboAssembler::Move(liftoff::kScratchDoubleReg, kSignBit - 1); + MacroAssembler::Move(liftoff::kScratchDoubleReg, kSignBit - 1); Andps(dst, liftoff::kScratchDoubleReg); } else { - TurboAssembler::Move(dst, kSignBit - 1); + MacroAssembler::Move(dst, kSignBit - 1); Andps(dst, src); } } @@ -2036,10 +2036,10 @@ void LiftoffAssembler::emit_f32_abs(DoubleRegister dst, DoubleRegister src) { void LiftoffAssembler::emit_f32_neg(DoubleRegister dst, DoubleRegister src) { static constexpr uint32_t kSignBit = uint32_t{1} << 31; if (dst == src) { - TurboAssembler::Move(liftoff::kScratchDoubleReg, kSignBit); + MacroAssembler::Move(liftoff::kScratchDoubleReg, kSignBit); Xorps(dst, liftoff::kScratchDoubleReg); } else { - TurboAssembler::Move(dst, kSignBit); + MacroAssembler::Move(dst, kSignBit); Xorps(dst, src); } } @@ -2162,10 +2162,10 @@ void LiftoffAssembler::emit_f64_max(DoubleRegister dst, DoubleRegister lhs, void LiftoffAssembler::emit_f64_abs(DoubleRegister dst, DoubleRegister src) { static constexpr uint64_t kSignBit = uint64_t{1} << 63; if (dst == src) { - TurboAssembler::Move(liftoff::kScratchDoubleReg, kSignBit - 1); + MacroAssembler::Move(liftoff::kScratchDoubleReg, kSignBit - 1); Andpd(dst, liftoff::kScratchDoubleReg); } else { - TurboAssembler::Move(dst, kSignBit - 1); + MacroAssembler::Move(dst, kSignBit - 1); Andpd(dst, src); } } @@ -2173,10 +2173,10 @@ void LiftoffAssembler::emit_f64_abs(DoubleRegister dst, DoubleRegister src) { void LiftoffAssembler::emit_f64_neg(DoubleRegister dst, DoubleRegister src) { static constexpr uint64_t kSignBit = uint64_t{1} << 63; if (dst == src) { - TurboAssembler::Move(liftoff::kScratchDoubleReg, kSignBit); + MacroAssembler::Move(liftoff::kScratchDoubleReg, kSignBit); Xorpd(dst, liftoff::kScratchDoubleReg); } else { - TurboAssembler::Move(dst, kSignBit); + MacroAssembler::Move(dst, kSignBit); Xorpd(dst, src); } } @@ -2739,7 +2739,7 @@ inline void EmitAnyTrue(LiftoffAssembler* assm, LiftoffRegister dst, assm->cmov(zero, dst.gp(), tmp); } -template <void (TurboAssembler::*pcmp)(XMMRegister, XMMRegister)> +template <void (MacroAssembler::*pcmp)(XMMRegister, XMMRegister)> inline void EmitAllTrue(LiftoffAssembler* assm, LiftoffRegister dst, LiftoffRegister
src, base::Optional<CpuFeature> feature = base::nullopt) { @@ -3279,14 +3279,14 @@ void LiftoffAssembler::emit_s128_const(LiftoffRegister dst, const uint8_t imms[16]) { uint64_t vals[2]; memcpy(vals, imms, sizeof(vals)); - TurboAssembler::Move(dst.fp(), vals[0]); + MacroAssembler::Move(dst.fp(), vals[0]); uint64_t high = vals[1]; Register tmp = GetUnusedRegister(RegClass::kGpReg, {}).gp(); - TurboAssembler::Move(tmp, Immediate(high & 0xffff'ffff)); + MacroAssembler::Move(tmp, Immediate(high & 0xffff'ffff)); Pinsrd(dst.fp(), tmp, 2); - TurboAssembler::Move(tmp, Immediate(high >> 32)); + MacroAssembler::Move(tmp, Immediate(high >> 32)); Pinsrd(dst.fp(), tmp, 3); } @@ -3347,7 +3347,7 @@ void LiftoffAssembler::emit_v128_anytrue(LiftoffRegister dst, void LiftoffAssembler::emit_i8x16_alltrue(LiftoffRegister dst, LiftoffRegister src) { - liftoff::EmitAllTrue<&TurboAssembler::Pcmpeqb>(this, dst, src); + liftoff::EmitAllTrue<&MacroAssembler::Pcmpeqb>(this, dst, src); } void LiftoffAssembler::emit_i8x16_bitmask(LiftoffRegister dst, @@ -3483,7 +3483,7 @@ void LiftoffAssembler::emit_i16x8_neg(LiftoffRegister dst, void LiftoffAssembler::emit_i16x8_alltrue(LiftoffRegister dst, LiftoffRegister src) { - liftoff::EmitAllTrue<&TurboAssembler::Pcmpeqw>(this, dst, src); + liftoff::EmitAllTrue<&MacroAssembler::Pcmpeqw>(this, dst, src); } void LiftoffAssembler::emit_i16x8_bitmask(LiftoffRegister dst, @@ -3694,7 +3694,7 @@ void LiftoffAssembler::emit_i32x4_neg(LiftoffRegister dst, void LiftoffAssembler::emit_i32x4_alltrue(LiftoffRegister dst, LiftoffRegister src) { - liftoff::EmitAllTrue<&TurboAssembler::Pcmpeqd>(this, dst, src); + liftoff::EmitAllTrue<&MacroAssembler::Pcmpeqd>(this, dst, src); } void LiftoffAssembler::emit_i32x4_bitmask(LiftoffRegister dst, @@ -3866,7 +3866,7 @@ void LiftoffAssembler::emit_i64x2_neg(LiftoffRegister dst, void LiftoffAssembler::emit_i64x2_alltrue(LiftoffRegister dst, LiftoffRegister src) { - liftoff::EmitAllTrue<&TurboAssembler::Pcmpeqq>(this, dst, src, SSE4_1); + liftoff::EmitAllTrue<&MacroAssembler::Pcmpeqq>(this, dst, src, SSE4_1); } void LiftoffAssembler::emit_i64x2_shl(LiftoffRegister dst, LiftoffRegister lhs, @@ -4591,7 +4591,7 @@ void LiftoffAssembler::CallTrapCallbackForTesting() { } void LiftoffAssembler::AssertUnreachable(AbortReason reason) { - TurboAssembler::AssertUnreachable(reason); + MacroAssembler::AssertUnreachable(reason); } void LiftoffAssembler::PushRegisters(LiftoffRegList regs) { diff --git a/src/wasm/baseline/liftoff-assembler.cc b/src/wasm/baseline/liftoff-assembler.cc index 1e3a861df2..b4e73d0b3a 100644 --- a/src/wasm/baseline/liftoff-assembler.cc +++ b/src/wasm/baseline/liftoff-assembler.cc @@ -610,7 +610,7 @@ AssemblerOptions DefaultLiftoffOptions() { return AssemblerOptions{}; } } // namespace LiftoffAssembler::LiftoffAssembler(std::unique_ptr<AssemblerBuffer> buffer) - : TurboAssembler(nullptr, DefaultLiftoffOptions(), CodeObjectRequired::kNo, + : MacroAssembler(nullptr, DefaultLiftoffOptions(), CodeObjectRequired::kNo, std::move(buffer)) { set_abort_hard(true); // Avoid calls to Abort. } diff --git a/src/wasm/baseline/liftoff-assembler.h b/src/wasm/baseline/liftoff-assembler.h index e0e9501bcd..15db82e900 100644 --- a/src/wasm/baseline/liftoff-assembler.h +++ b/src/wasm/baseline/liftoff-assembler.h @@ -98,7 +98,7 @@ class FreezeCacheState { #endif }; -class LiftoffAssembler : public TurboAssembler { +class LiftoffAssembler : public MacroAssembler { public: // Each slot in our stack frame currently has exactly 8 bytes.
static constexpr int kStackSlotSize = 8; diff --git a/src/wasm/baseline/loong64/liftoff-assembler-loong64.h b/src/wasm/baseline/loong64/liftoff-assembler-loong64.h index c439cb5b8a..3ff5c2bace 100644 --- a/src/wasm/baseline/loong64/liftoff-assembler-loong64.h +++ b/src/wasm/baseline/loong64/liftoff-assembler-loong64.h @@ -222,7 +222,7 @@ void LiftoffAssembler::PatchPrepareStackFrame( // We can't run out of space, just pass anything big enough to not cause the // assembler to try to grow the buffer. constexpr int kAvailableSpace = 256; - TurboAssembler patching_assembler( + MacroAssembler patching_assembler( nullptr, AssemblerOptions{}, CodeObjectRequired::kNo, ExternalAssemblerBuffer(buffer_start_ + offset, kAvailableSpace)); @@ -313,16 +313,16 @@ void LiftoffAssembler::LoadConstant(LiftoffRegister reg, WasmValue value, RelocInfo::Mode rmode) { switch (value.type().kind()) { case kI32: - TurboAssembler::li(reg.gp(), Operand(value.to_i32(), rmode)); + MacroAssembler::li(reg.gp(), Operand(value.to_i32(), rmode)); break; case kI64: - TurboAssembler::li(reg.gp(), Operand(value.to_i64(), rmode)); + MacroAssembler::li(reg.gp(), Operand(value.to_i64(), rmode)); break; case kF32: - TurboAssembler::Move(reg.fp(), value.to_f32_boxed().get_bits()); + MacroAssembler::Move(reg.fp(), value.to_f32_boxed().get_bits()); break; case kF64: - TurboAssembler::Move(reg.fp(), value.to_f64_boxed().get_bits()); + MacroAssembler::Move(reg.fp(), value.to_f64_boxed().get_bits()); break; default: UNREACHABLE(); @@ -441,27 +441,27 @@ void LiftoffAssembler::Load(LiftoffRegister dst, Register src_addr, break; case LoadType::kI32Load16U: case LoadType::kI64Load16U: - TurboAssembler::Ld_hu(dst.gp(), src_op); + MacroAssembler::Ld_hu(dst.gp(), src_op); break; case LoadType::kI32Load16S: case LoadType::kI64Load16S: - TurboAssembler::Ld_h(dst.gp(), src_op); + MacroAssembler::Ld_h(dst.gp(), src_op); break; case LoadType::kI64Load32U: - TurboAssembler::Ld_wu(dst.gp(), src_op); + MacroAssembler::Ld_wu(dst.gp(), src_op); break; case LoadType::kI32Load: case LoadType::kI64Load32S: - TurboAssembler::Ld_w(dst.gp(), src_op); + MacroAssembler::Ld_w(dst.gp(), src_op); break; case LoadType::kI64Load: - TurboAssembler::Ld_d(dst.gp(), src_op); + MacroAssembler::Ld_d(dst.gp(), src_op); break; case LoadType::kF32Load: - TurboAssembler::Fld_s(dst.fp(), src_op); + MacroAssembler::Fld_s(dst.fp(), src_op); break; case LoadType::kF64Load: - TurboAssembler::Fld_d(dst.fp(), src_op); + MacroAssembler::Fld_d(dst.fp(), src_op); break; case LoadType::kS128Load: UNREACHABLE(); @@ -487,20 +487,20 @@ void LiftoffAssembler::Store(Register dst_addr, Register offset_reg, break; case StoreType::kI32Store16: case StoreType::kI64Store16: - TurboAssembler::St_h(src.gp(), dst_op); + MacroAssembler::St_h(src.gp(), dst_op); break; case StoreType::kI32Store: case StoreType::kI64Store32: - TurboAssembler::St_w(src.gp(), dst_op); + MacroAssembler::St_w(src.gp(), dst_op); break; case StoreType::kI64Store: - TurboAssembler::St_d(src.gp(), dst_op); + MacroAssembler::St_d(src.gp(), dst_op); break; case StoreType::kF32Store: - TurboAssembler::Fst_s(src.fp(), dst_op); + MacroAssembler::Fst_s(src.fp(), dst_op); break; case StoreType::kF64Store: - TurboAssembler::Fst_d(src.fp(), dst_op); + MacroAssembler::Fst_d(src.fp(), dst_op); break; case StoreType::kS128Store: UNREACHABLE(); @@ -887,14 +887,14 @@ void LiftoffAssembler::MoveStackValue(uint32_t dst_offset, uint32_t src_offset, void LiftoffAssembler::Move(Register dst, Register src, ValueKind kind) { DCHECK_NE(dst, src); // 
TODO(ksreten): Handle different sizes here. - TurboAssembler::Move(dst, src); + MacroAssembler::Move(dst, src); } void LiftoffAssembler::Move(DoubleRegister dst, DoubleRegister src, ValueKind kind) { DCHECK_NE(dst, src); if (kind != kS128) { - TurboAssembler::Move(dst, src); + MacroAssembler::Move(dst, src); } else { UNREACHABLE(); } @@ -917,7 +917,7 @@ void LiftoffAssembler::Spill(int offset, LiftoffRegister reg, ValueKind kind) { Fst_s(reg.fp(), dst); break; case kF64: - TurboAssembler::Fst_d(reg.fp(), dst); + MacroAssembler::Fst_d(reg.fp(), dst); break; case kS128: UNREACHABLE(); @@ -934,7 +934,7 @@ void LiftoffAssembler::Spill(int offset, WasmValue value) { case kI32: { UseScratchRegisterScope temps(this); Register scratch = temps.Acquire(); - TurboAssembler::li(scratch, Operand(value.to_i32())); + MacroAssembler::li(scratch, Operand(value.to_i32())); St_w(scratch, dst); break; } @@ -943,7 +943,7 @@ void LiftoffAssembler::Spill(int offset, WasmValue value) { case kRefNull: { UseScratchRegisterScope temps(this); Register scratch = temps.Acquire(); - TurboAssembler::li(scratch, value.to_i64()); + MacroAssembler::li(scratch, value.to_i64()); St_d(scratch, dst); break; } @@ -971,7 +971,7 @@ void LiftoffAssembler::Fill(LiftoffRegister reg, int offset, ValueKind kind) { Fld_s(reg.fp(), src); break; case kF64: - TurboAssembler::Fld_d(reg.fp(), src); + MacroAssembler::Fld_d(reg.fp(), src); break; case kS128: UNREACHABLE(); @@ -1023,16 +1023,16 @@ void LiftoffAssembler::LoadSpillAddress(Register dst, int offset, } void LiftoffAssembler::emit_i64_clz(LiftoffRegister dst, LiftoffRegister src) { - TurboAssembler::Clz_d(dst.gp(), src.gp()); + MacroAssembler::Clz_d(dst.gp(), src.gp()); } void LiftoffAssembler::emit_i64_ctz(LiftoffRegister dst, LiftoffRegister src) { - TurboAssembler::Ctz_d(dst.gp(), src.gp()); + MacroAssembler::Ctz_d(dst.gp(), src.gp()); } bool LiftoffAssembler::emit_i64_popcnt(LiftoffRegister dst, LiftoffRegister src) { - TurboAssembler::Popcnt_d(dst.gp(), src.gp()); + MacroAssembler::Popcnt_d(dst.gp(), src.gp()); return true; } @@ -1046,42 +1046,42 @@ void LiftoffAssembler::IncrementSmi(LiftoffRegister dst, int offset) { } void LiftoffAssembler::emit_i32_mul(Register dst, Register lhs, Register rhs) { - TurboAssembler::Mul_w(dst, lhs, rhs); + MacroAssembler::Mul_w(dst, lhs, rhs); } void LiftoffAssembler::emit_i32_divs(Register dst, Register lhs, Register rhs, Label* trap_div_by_zero, Label* trap_div_unrepresentable) { - TurboAssembler::Branch(trap_div_by_zero, eq, rhs, Operand(zero_reg)); + MacroAssembler::Branch(trap_div_by_zero, eq, rhs, Operand(zero_reg)); // Check if lhs == kMinInt and rhs == -1, since this case is unrepresentable. 
- TurboAssembler::li(kScratchReg, 1); - TurboAssembler::li(kScratchReg2, 1); - TurboAssembler::LoadZeroOnCondition(kScratchReg, lhs, Operand(kMinInt), eq); - TurboAssembler::LoadZeroOnCondition(kScratchReg2, rhs, Operand(-1), eq); + MacroAssembler::li(kScratchReg, 1); + MacroAssembler::li(kScratchReg2, 1); + MacroAssembler::LoadZeroOnCondition(kScratchReg, lhs, Operand(kMinInt), eq); + MacroAssembler::LoadZeroOnCondition(kScratchReg2, rhs, Operand(-1), eq); add_d(kScratchReg, kScratchReg, kScratchReg2); - TurboAssembler::Branch(trap_div_unrepresentable, eq, kScratchReg, + MacroAssembler::Branch(trap_div_unrepresentable, eq, kScratchReg, Operand(zero_reg)); - TurboAssembler::Div_w(dst, lhs, rhs); + MacroAssembler::Div_w(dst, lhs, rhs); } void LiftoffAssembler::emit_i32_divu(Register dst, Register lhs, Register rhs, Label* trap_div_by_zero) { - TurboAssembler::Branch(trap_div_by_zero, eq, rhs, Operand(zero_reg)); - TurboAssembler::Div_wu(dst, lhs, rhs); + MacroAssembler::Branch(trap_div_by_zero, eq, rhs, Operand(zero_reg)); + MacroAssembler::Div_wu(dst, lhs, rhs); } void LiftoffAssembler::emit_i32_rems(Register dst, Register lhs, Register rhs, Label* trap_div_by_zero) { - TurboAssembler::Branch(trap_div_by_zero, eq, rhs, Operand(zero_reg)); - TurboAssembler::Mod_w(dst, lhs, rhs); + MacroAssembler::Branch(trap_div_by_zero, eq, rhs, Operand(zero_reg)); + MacroAssembler::Mod_w(dst, lhs, rhs); } void LiftoffAssembler::emit_i32_remu(Register dst, Register lhs, Register rhs, Label* trap_div_by_zero) { - TurboAssembler::Branch(trap_div_by_zero, eq, rhs, Operand(zero_reg)); - TurboAssembler::Mod_wu(dst, lhs, rhs); + MacroAssembler::Branch(trap_div_by_zero, eq, rhs, Operand(zero_reg)); + MacroAssembler::Mod_wu(dst, lhs, rhs); } #define I32_BINOP(name, instruction) \ @@ -1117,15 +1117,15 @@ I32_BINOP_I(xor, Xor) #undef I32_BINOP_I void LiftoffAssembler::emit_i32_clz(Register dst, Register src) { - TurboAssembler::Clz_w(dst, src); + MacroAssembler::Clz_w(dst, src); } void LiftoffAssembler::emit_i32_ctz(Register dst, Register src) { - TurboAssembler::Ctz_w(dst, src); + MacroAssembler::Ctz_w(dst, src); } bool LiftoffAssembler::emit_i32_popcnt(Register dst, Register src) { - TurboAssembler::Popcnt_w(dst, src); + MacroAssembler::Popcnt_w(dst, src); return true; } @@ -1150,55 +1150,55 @@ I32_SHIFTOP_I(shr, srl_w, srli_w) void LiftoffAssembler::emit_i64_addi(LiftoffRegister dst, LiftoffRegister lhs, int64_t imm) { - TurboAssembler::Add_d(dst.gp(), lhs.gp(), Operand(imm)); + MacroAssembler::Add_d(dst.gp(), lhs.gp(), Operand(imm)); } void LiftoffAssembler::emit_i64_mul(LiftoffRegister dst, LiftoffRegister lhs, LiftoffRegister rhs) { - TurboAssembler::Mul_d(dst.gp(), lhs.gp(), rhs.gp()); + MacroAssembler::Mul_d(dst.gp(), lhs.gp(), rhs.gp()); } bool LiftoffAssembler::emit_i64_divs(LiftoffRegister dst, LiftoffRegister lhs, LiftoffRegister rhs, Label* trap_div_by_zero, Label* trap_div_unrepresentable) { - TurboAssembler::Branch(trap_div_by_zero, eq, rhs.gp(), Operand(zero_reg)); + MacroAssembler::Branch(trap_div_by_zero, eq, rhs.gp(), Operand(zero_reg)); // Check if lhs == MinInt64 and rhs == -1, since this case is unrepresentable. 
- TurboAssembler::li(kScratchReg, 1); - TurboAssembler::li(kScratchReg2, 1); - TurboAssembler::LoadZeroOnCondition( + MacroAssembler::li(kScratchReg, 1); + MacroAssembler::li(kScratchReg2, 1); + MacroAssembler::LoadZeroOnCondition( kScratchReg, lhs.gp(), Operand(std::numeric_limits<int64_t>::min()), eq); - TurboAssembler::LoadZeroOnCondition(kScratchReg2, rhs.gp(), Operand(-1), eq); + MacroAssembler::LoadZeroOnCondition(kScratchReg2, rhs.gp(), Operand(-1), eq); add_d(kScratchReg, kScratchReg, kScratchReg2); - TurboAssembler::Branch(trap_div_unrepresentable, eq, kScratchReg, + MacroAssembler::Branch(trap_div_unrepresentable, eq, kScratchReg, Operand(zero_reg)); - TurboAssembler::Div_d(dst.gp(), lhs.gp(), rhs.gp()); + MacroAssembler::Div_d(dst.gp(), lhs.gp(), rhs.gp()); return true; } bool LiftoffAssembler::emit_i64_divu(LiftoffRegister dst, LiftoffRegister lhs, LiftoffRegister rhs, Label* trap_div_by_zero) { - TurboAssembler::Branch(trap_div_by_zero, eq, rhs.gp(), Operand(zero_reg)); - TurboAssembler::Div_du(dst.gp(), lhs.gp(), rhs.gp()); + MacroAssembler::Branch(trap_div_by_zero, eq, rhs.gp(), Operand(zero_reg)); + MacroAssembler::Div_du(dst.gp(), lhs.gp(), rhs.gp()); return true; } bool LiftoffAssembler::emit_i64_rems(LiftoffRegister dst, LiftoffRegister lhs, LiftoffRegister rhs, Label* trap_div_by_zero) { - TurboAssembler::Branch(trap_div_by_zero, eq, rhs.gp(), Operand(zero_reg)); - TurboAssembler::Mod_d(dst.gp(), lhs.gp(), rhs.gp()); + MacroAssembler::Branch(trap_div_by_zero, eq, rhs.gp(), Operand(zero_reg)); + MacroAssembler::Mod_d(dst.gp(), lhs.gp(), rhs.gp()); return true; } bool LiftoffAssembler::emit_i64_remu(LiftoffRegister dst, LiftoffRegister lhs, LiftoffRegister rhs, Label* trap_div_by_zero) { - TurboAssembler::Branch(trap_div_by_zero, eq, rhs.gp(), Operand(zero_reg)); - TurboAssembler::Mod_du(dst.gp(), lhs.gp(), rhs.gp()); + MacroAssembler::Branch(trap_div_by_zero, eq, rhs.gp(), Operand(zero_reg)); + MacroAssembler::Mod_du(dst.gp(), lhs.gp(), rhs.gp()); return true; } @@ -1256,32 +1256,32 @@ void LiftoffAssembler::emit_u32_to_uintptr(Register dst, Register src) { } void LiftoffAssembler::emit_f32_neg(DoubleRegister dst, DoubleRegister src) { - TurboAssembler::Neg_s(dst, src); + MacroAssembler::Neg_s(dst, src); } void LiftoffAssembler::emit_f64_neg(DoubleRegister dst, DoubleRegister src) { - TurboAssembler::Neg_d(dst, src); + MacroAssembler::Neg_d(dst, src); } void LiftoffAssembler::emit_f32_min(DoubleRegister dst, DoubleRegister lhs, DoubleRegister rhs) { Label ool, done; - TurboAssembler::Float32Min(dst, lhs, rhs, &ool); + MacroAssembler::Float32Min(dst, lhs, rhs, &ool); Branch(&done); bind(&ool); - TurboAssembler::Float32MinOutOfLine(dst, lhs, rhs); + MacroAssembler::Float32MinOutOfLine(dst, lhs, rhs); bind(&done); } void LiftoffAssembler::emit_f32_max(DoubleRegister dst, DoubleRegister lhs, DoubleRegister rhs) { Label ool, done; - TurboAssembler::Float32Max(dst, lhs, rhs, &ool); + MacroAssembler::Float32Max(dst, lhs, rhs, &ool); Branch(&done); bind(&ool); - TurboAssembler::Float32MaxOutOfLine(dst, lhs, rhs); + MacroAssembler::Float32MaxOutOfLine(dst, lhs, rhs); bind(&done); } @@ -1293,22 +1293,22 @@ void LiftoffAssembler::emit_f32_copysign(DoubleRegister dst, DoubleRegister lhs, void LiftoffAssembler::emit_f64_min(DoubleRegister dst, DoubleRegister lhs, DoubleRegister rhs) { Label ool, done; - TurboAssembler::Float64Min(dst, lhs, rhs, &ool); + MacroAssembler::Float64Min(dst, lhs, rhs, &ool); Branch(&done); bind(&ool); - TurboAssembler::Float64MinOutOfLine(dst, lhs, rhs); +
MacroAssembler::Float64MinOutOfLine(dst, lhs, rhs); bind(&done); } void LiftoffAssembler::emit_f64_max(DoubleRegister dst, DoubleRegister lhs, DoubleRegister rhs) { Label ool, done; - TurboAssembler::Float64Max(dst, lhs, rhs, &ool); + MacroAssembler::Float64Max(dst, lhs, rhs, &ool); Branch(&done); bind(&ool); - TurboAssembler::Float64MaxOutOfLine(dst, lhs, rhs); + MacroAssembler::Float64MaxOutOfLine(dst, lhs, rhs); bind(&done); } @@ -1362,7 +1362,7 @@ bool LiftoffAssembler::emit_type_conversion(WasmOpcode opcode, LiftoffRegister src, Label* trap) { switch (opcode) { case kExprI32ConvertI64: - TurboAssembler::bstrpick_w(dst.gp(), src.gp(), 31, 0); + MacroAssembler::bstrpick_w(dst.gp(), src.gp(), 31, 0); return true; case kExprI32SConvertF32: { LiftoffRegister rounded = GetUnusedRegister(kFpReg, LiftoffRegList{src}); @@ -1370,20 +1370,20 @@ bool LiftoffAssembler::emit_type_conversion(WasmOpcode opcode, GetUnusedRegister(kFpReg, LiftoffRegList{src, rounded}); // Real conversion. - TurboAssembler::Trunc_s(rounded.fp(), src.fp()); + MacroAssembler::Trunc_s(rounded.fp(), src.fp()); ftintrz_w_s(kScratchDoubleReg, rounded.fp()); movfr2gr_s(dst.gp(), kScratchDoubleReg); // Avoid INT32_MAX as an overflow indicator and use INT32_MIN instead, // because INT32_MIN allows easier out-of-bounds detection. - TurboAssembler::Add_w(kScratchReg, dst.gp(), 1); - TurboAssembler::Slt(kScratchReg2, kScratchReg, dst.gp()); - TurboAssembler::Movn(dst.gp(), kScratchReg, kScratchReg2); + MacroAssembler::Add_w(kScratchReg, dst.gp(), 1); + MacroAssembler::Slt(kScratchReg2, kScratchReg, dst.gp()); + MacroAssembler::Movn(dst.gp(), kScratchReg, kScratchReg2); // Checking if trap. movgr2fr_w(kScratchDoubleReg, dst.gp()); ffint_s_w(converted_back.fp(), kScratchDoubleReg); - TurboAssembler::CompareF32(rounded.fp(), converted_back.fp(), CEQ); - TurboAssembler::BranchFalseF(trap); + MacroAssembler::CompareF32(rounded.fp(), converted_back.fp(), CEQ); + MacroAssembler::BranchFalseF(trap); return true; } case kExprI32UConvertF32: { @@ -1392,18 +1392,18 @@ bool LiftoffAssembler::emit_type_conversion(WasmOpcode opcode, GetUnusedRegister(kFpReg, LiftoffRegList{src, rounded}); // Real conversion. - TurboAssembler::Trunc_s(rounded.fp(), src.fp()); - TurboAssembler::Ftintrz_uw_s(dst.gp(), rounded.fp(), kScratchDoubleReg); + MacroAssembler::Trunc_s(rounded.fp(), src.fp()); + MacroAssembler::Ftintrz_uw_s(dst.gp(), rounded.fp(), kScratchDoubleReg); // Avoid UINT32_MAX as an overflow indicator and use 0 instead, // because 0 allows easier out-of-bounds detection. - TurboAssembler::Add_w(kScratchReg, dst.gp(), 1); - TurboAssembler::Movz(dst.gp(), zero_reg, kScratchReg); + MacroAssembler::Add_w(kScratchReg, dst.gp(), 1); + MacroAssembler::Movz(dst.gp(), zero_reg, kScratchReg); // Checking if trap. - TurboAssembler::Ffint_d_uw(converted_back.fp(), dst.gp()); + MacroAssembler::Ffint_d_uw(converted_back.fp(), dst.gp()); fcvt_s_d(converted_back.fp(), converted_back.fp()); - TurboAssembler::CompareF32(rounded.fp(), converted_back.fp(), CEQ); - TurboAssembler::BranchFalseF(trap); + MacroAssembler::CompareF32(rounded.fp(), converted_back.fp(), CEQ); + MacroAssembler::BranchFalseF(trap); return true; } case kExprI32SConvertF64: { @@ -1412,14 +1412,14 @@ bool LiftoffAssembler::emit_type_conversion(WasmOpcode opcode, GetUnusedRegister(kFpReg, LiftoffRegList{src, rounded}); // Real conversion. 
- TurboAssembler::Trunc_d(rounded.fp(), src.fp()); + MacroAssembler::Trunc_d(rounded.fp(), src.fp()); ftintrz_w_d(kScratchDoubleReg, rounded.fp()); movfr2gr_s(dst.gp(), kScratchDoubleReg); // Checking if trap. ffint_d_w(converted_back.fp(), kScratchDoubleReg); - TurboAssembler::CompareF64(rounded.fp(), converted_back.fp(), CEQ); - TurboAssembler::BranchFalseF(trap); + MacroAssembler::CompareF64(rounded.fp(), converted_back.fp(), CEQ); + MacroAssembler::BranchFalseF(trap); return true; } case kExprI32UConvertF64: { @@ -1428,23 +1428,23 @@ bool LiftoffAssembler::emit_type_conversion(WasmOpcode opcode, GetUnusedRegister(kFpReg, LiftoffRegList{src, rounded}); // Real conversion. - TurboAssembler::Trunc_d(rounded.fp(), src.fp()); - TurboAssembler::Ftintrz_uw_d(dst.gp(), rounded.fp(), kScratchDoubleReg); + MacroAssembler::Trunc_d(rounded.fp(), src.fp()); + MacroAssembler::Ftintrz_uw_d(dst.gp(), rounded.fp(), kScratchDoubleReg); // Checking if trap. - TurboAssembler::Ffint_d_uw(converted_back.fp(), dst.gp()); - TurboAssembler::CompareF64(rounded.fp(), converted_back.fp(), CEQ); - TurboAssembler::BranchFalseF(trap); + MacroAssembler::Ffint_d_uw(converted_back.fp(), dst.gp()); + MacroAssembler::CompareF64(rounded.fp(), converted_back.fp(), CEQ); + MacroAssembler::BranchFalseF(trap); return true; } case kExprI32ReinterpretF32: - TurboAssembler::FmoveLow(dst.gp(), src.fp()); + MacroAssembler::FmoveLow(dst.gp(), src.fp()); return true; case kExprI64SConvertI32: slli_w(dst.gp(), src.gp(), 0); return true; case kExprI64UConvertI32: - TurboAssembler::bstrpick_d(dst.gp(), src.gp(), 31, 0); + MacroAssembler::bstrpick_d(dst.gp(), src.gp(), 31, 0); return true; case kExprI64SConvertF32: { LiftoffRegister rounded = GetUnusedRegister(kFpReg, LiftoffRegList{src}); @@ -1452,29 +1452,29 @@ bool LiftoffAssembler::emit_type_conversion(WasmOpcode opcode, GetUnusedRegister(kFpReg, LiftoffRegList{src, rounded}); // Real conversion. - TurboAssembler::Trunc_s(rounded.fp(), src.fp()); + MacroAssembler::Trunc_s(rounded.fp(), src.fp()); ftintrz_l_s(kScratchDoubleReg, rounded.fp()); movfr2gr_d(dst.gp(), kScratchDoubleReg); // Avoid INT64_MAX as an overflow indicator and use INT64_MIN instead, // because INT64_MIN allows easier out-of-bounds detection. - TurboAssembler::Add_d(kScratchReg, dst.gp(), 1); - TurboAssembler::Slt(kScratchReg2, kScratchReg, dst.gp()); - TurboAssembler::Movn(dst.gp(), kScratchReg, kScratchReg2); + MacroAssembler::Add_d(kScratchReg, dst.gp(), 1); + MacroAssembler::Slt(kScratchReg2, kScratchReg, dst.gp()); + MacroAssembler::Movn(dst.gp(), kScratchReg, kScratchReg2); // Checking if trap. movgr2fr_d(kScratchDoubleReg, dst.gp()); ffint_s_l(converted_back.fp(), kScratchDoubleReg); - TurboAssembler::CompareF32(rounded.fp(), converted_back.fp(), CEQ); - TurboAssembler::BranchFalseF(trap); + MacroAssembler::CompareF32(rounded.fp(), converted_back.fp(), CEQ); + MacroAssembler::BranchFalseF(trap); return true; } case kExprI64UConvertF32: { // Real conversion. - TurboAssembler::Ftintrz_ul_s(dst.gp(), src.fp(), kScratchDoubleReg, + MacroAssembler::Ftintrz_ul_s(dst.gp(), src.fp(), kScratchDoubleReg, kScratchReg); // Checking if trap. - TurboAssembler::Branch(trap, eq, kScratchReg, Operand(zero_reg)); + MacroAssembler::Branch(trap, eq, kScratchReg, Operand(zero_reg)); return true; } case kExprI64SConvertF64: { @@ -1483,29 +1483,29 @@ bool LiftoffAssembler::emit_type_conversion(WasmOpcode opcode, GetUnusedRegister(kFpReg, LiftoffRegList{src, rounded}); // Real conversion. 
- TurboAssembler::Trunc_d(rounded.fp(), src.fp()); + MacroAssembler::Trunc_d(rounded.fp(), src.fp()); ftintrz_l_d(kScratchDoubleReg, rounded.fp()); movfr2gr_d(dst.gp(), kScratchDoubleReg); // Avoid INT64_MAX as an overflow indicator and use INT64_MIN instead, // because INT64_MIN allows easier out-of-bounds detection. - TurboAssembler::Add_d(kScratchReg, dst.gp(), 1); - TurboAssembler::Slt(kScratchReg2, kScratchReg, dst.gp()); - TurboAssembler::Movn(dst.gp(), kScratchReg, kScratchReg2); + MacroAssembler::Add_d(kScratchReg, dst.gp(), 1); + MacroAssembler::Slt(kScratchReg2, kScratchReg, dst.gp()); + MacroAssembler::Movn(dst.gp(), kScratchReg, kScratchReg2); // Checking if trap. movgr2fr_d(kScratchDoubleReg, dst.gp()); ffint_d_l(converted_back.fp(), kScratchDoubleReg); - TurboAssembler::CompareF64(rounded.fp(), converted_back.fp(), CEQ); - TurboAssembler::BranchFalseF(trap); + MacroAssembler::CompareF64(rounded.fp(), converted_back.fp(), CEQ); + MacroAssembler::BranchFalseF(trap); return true; } case kExprI64UConvertF64: { // Real conversion. - TurboAssembler::Ftintrz_ul_d(dst.gp(), src.fp(), kScratchDoubleReg, + MacroAssembler::Ftintrz_ul_d(dst.gp(), src.fp(), kScratchDoubleReg, kScratchReg); // Checking if trap. - TurboAssembler::Branch(trap, eq, kScratchReg, Operand(zero_reg)); + MacroAssembler::Branch(trap, eq, kScratchReg, Operand(zero_reg)); return true; } case kExprI64ReinterpretF64: @@ -1518,13 +1518,13 @@ bool LiftoffAssembler::emit_type_conversion(WasmOpcode opcode, return true; } case kExprF32UConvertI32: - TurboAssembler::Ffint_s_uw(dst.fp(), src.gp()); + MacroAssembler::Ffint_s_uw(dst.fp(), src.gp()); return true; case kExprF32ConvertF64: fcvt_s_d(dst.fp(), src.fp()); return true; case kExprF32ReinterpretI32: - TurboAssembler::FmoveLow(dst.fp(), src.gp()); + MacroAssembler::FmoveLow(dst.fp(), src.gp()); return true; case kExprF64SConvertI32: { LiftoffRegister scratch = GetUnusedRegister(kFpReg, LiftoffRegList{dst}); @@ -1533,7 +1533,7 @@ bool LiftoffAssembler::emit_type_conversion(WasmOpcode opcode, return true; } case kExprF64UConvertI32: - TurboAssembler::Ffint_d_uw(dst.fp(), src.gp()); + MacroAssembler::Ffint_d_uw(dst.fp(), src.gp()); return true; case kExprF64ConvertF32: fcvt_d_s(dst.fp(), src.fp()); @@ -1548,7 +1548,7 @@ bool LiftoffAssembler::emit_type_conversion(WasmOpcode opcode, case kExprI32UConvertSatF32: { Label isnan_or_lessthan_or_equal_zero; mov(dst.gp(), zero_reg); - TurboAssembler::Move(kScratchDoubleReg, static_cast<float>(0.0)); + MacroAssembler::Move(kScratchDoubleReg, static_cast<float>(0.0)); CompareF32(src.fp(), kScratchDoubleReg, CULE); BranchTrueShortF(&isnan_or_lessthan_or_equal_zero); Ftintrz_uw_s(dst.gp(), src.fp(), kScratchDoubleReg); @@ -1562,7 +1562,7 @@ bool LiftoffAssembler::emit_type_conversion(WasmOpcode opcode, case kExprI32UConvertSatF64: { Label isnan_or_lessthan_or_equal_zero; mov(dst.gp(), zero_reg); - TurboAssembler::Move(kScratchDoubleReg, static_cast<double>(0.0)); + MacroAssembler::Move(kScratchDoubleReg, static_cast<double>(0.0)); CompareF64(src.fp(), kScratchDoubleReg, CULE); BranchTrueShortF(&isnan_or_lessthan_or_equal_zero); Ftintrz_uw_d(dst.gp(), src.fp(), kScratchDoubleReg); @@ -1576,7 +1576,7 @@ bool LiftoffAssembler::emit_type_conversion(WasmOpcode opcode, case kExprI64UConvertSatF32: { Label isnan_or_lessthan_or_equal_zero; mov(dst.gp(), zero_reg); - TurboAssembler::Move(kScratchDoubleReg, static_cast<float>(0.0)); + MacroAssembler::Move(kScratchDoubleReg, static_cast<float>(0.0)); CompareF32(src.fp(), kScratchDoubleReg, CULE);
BranchTrueShortF(&isnan_or_lessthan_or_equal_zero); Ftintrz_ul_s(dst.gp(), src.fp(), kScratchDoubleReg); @@ -1590,7 +1590,7 @@ bool LiftoffAssembler::emit_type_conversion(WasmOpcode opcode, case kExprI64UConvertSatF64: { Label isnan_or_lessthan_or_equal_zero; mov(dst.gp(), zero_reg); - TurboAssembler::Move(kScratchDoubleReg, static_cast<double>(0.0)); + MacroAssembler::Move(kScratchDoubleReg, static_cast<double>(0.0)); CompareF64(src.fp(), kScratchDoubleReg, CULE); BranchTrueShortF(&isnan_or_lessthan_or_equal_zero); Ftintrz_ul_d(dst.gp(), src.fp(), kScratchDoubleReg); @@ -1626,11 +1626,11 @@ void LiftoffAssembler::emit_i64_signextend_i32(LiftoffRegister dst, } void LiftoffAssembler::emit_jump(Label* label) { - TurboAssembler::Branch(label); + MacroAssembler::Branch(label); } void LiftoffAssembler::emit_jump(Register target) { - TurboAssembler::Jump(target); + MacroAssembler::Jump(target); } void LiftoffAssembler::emit_cond_jump(Condition cond, Label* label, @@ -1639,25 +1639,25 @@ void LiftoffAssembler::emit_cond_jump(Condition cond, Label* label, const FreezeCacheState& frozen) { if (rhs == no_reg) { DCHECK(kind == kI32 || kind == kI64); - TurboAssembler::Branch(label, cond, lhs, Operand(zero_reg)); + MacroAssembler::Branch(label, cond, lhs, Operand(zero_reg)); } else { DCHECK((kind == kI32 || kind == kI64) || (is_reference(kind) && (cond == kEqual || cond == kNotEqual))); - TurboAssembler::Branch(label, cond, lhs, Operand(rhs)); + MacroAssembler::Branch(label, cond, lhs, Operand(rhs)); } } void LiftoffAssembler::emit_i32_cond_jumpi(Condition cond, Label* label, Register lhs, int32_t imm, const FreezeCacheState& frozen) { - TurboAssembler::Branch(label, cond, lhs, Operand(imm)); + MacroAssembler::Branch(label, cond, lhs, Operand(imm)); } void LiftoffAssembler::emit_i32_subi_jump_negative( Register value, int subtrahend, Label* result_negative, const FreezeCacheState& frozen) { - TurboAssembler::Sub_d(value, value, Operand(subtrahend)); - TurboAssembler::Branch(result_negative, less, value, Operand(zero_reg)); + MacroAssembler::Sub_d(value, value, Operand(subtrahend)); + MacroAssembler::Branch(result_negative, less, value, Operand(zero_reg)); } void LiftoffAssembler::emit_i32_eqz(Register dst, Register src) { @@ -1671,14 +1671,14 @@ void LiftoffAssembler::emit_i32_set_cond(Condition cond, Register dst, tmp = GetUnusedRegister(kGpReg, LiftoffRegList{lhs, rhs}).gp(); } // Write 1 as result. - TurboAssembler::li(tmp, 1); + MacroAssembler::li(tmp, 1); // If negative condition is true, write 0 as result. Condition neg_cond = NegateCondition(cond); - TurboAssembler::LoadZeroOnCondition(tmp, lhs, Operand(rhs), neg_cond); + MacroAssembler::LoadZeroOnCondition(tmp, lhs, Operand(rhs), neg_cond); // If tmp != dst, result will be moved. - TurboAssembler::Move(dst, tmp); + MacroAssembler::Move(dst, tmp); } void LiftoffAssembler::emit_i64_eqz(Register dst, LiftoffRegister src) { @@ -1693,15 +1693,15 @@ void LiftoffAssembler::emit_i64_set_cond(Condition cond, Register dst, tmp = GetUnusedRegister(kGpReg, LiftoffRegList{lhs, rhs}).gp(); } // Write 1 as result. - TurboAssembler::li(tmp, 1); + MacroAssembler::li(tmp, 1); // If negative condition is true, write 0 as result. Condition neg_cond = NegateCondition(cond); - TurboAssembler::LoadZeroOnCondition(tmp, lhs.gp(), Operand(rhs.gp()), + MacroAssembler::LoadZeroOnCondition(tmp, lhs.gp(), Operand(rhs.gp()), neg_cond); // If tmp != dst, result will be moved.
- TurboAssembler::Move(dst, tmp); + MacroAssembler::Move(dst, tmp); } namespace liftoff { @@ -1740,26 +1740,26 @@ void LiftoffAssembler::emit_f32_set_cond(Condition cond, Register dst, DoubleRegister lhs, DoubleRegister rhs) { Label not_nan, cont; - TurboAssembler::CompareIsNanF32(lhs, rhs); - TurboAssembler::BranchFalseF(&not_nan); + MacroAssembler::CompareIsNanF32(lhs, rhs); + MacroAssembler::BranchFalseF(&not_nan); // If one of the operands is NaN, return 1 for f32.ne, else 0. if (cond == ne) { - TurboAssembler::li(dst, 1); + MacroAssembler::li(dst, 1); } else { - TurboAssembler::Move(dst, zero_reg); + MacroAssembler::Move(dst, zero_reg); } - TurboAssembler::Branch(&cont); + MacroAssembler::Branch(&cont); bind(&not_nan); - TurboAssembler::li(dst, 1); + MacroAssembler::li(dst, 1); bool predicate; FPUCondition fcond = liftoff::ConditionToConditionCmpFPU(cond, &predicate); - TurboAssembler::CompareF32(lhs, rhs, fcond); + MacroAssembler::CompareF32(lhs, rhs, fcond); if (predicate) { - TurboAssembler::LoadZeroIfNotFPUCondition(dst); + MacroAssembler::LoadZeroIfNotFPUCondition(dst); } else { - TurboAssembler::LoadZeroIfFPUCondition(dst); + MacroAssembler::LoadZeroIfFPUCondition(dst); } bind(&cont); @@ -1769,26 +1769,26 @@ void LiftoffAssembler::emit_f64_set_cond(Condition cond, Register dst, DoubleRegister lhs, DoubleRegister rhs) { Label not_nan, cont; - TurboAssembler::CompareIsNanF64(lhs, rhs); - TurboAssembler::BranchFalseF(&not_nan); + MacroAssembler::CompareIsNanF64(lhs, rhs); + MacroAssembler::BranchFalseF(&not_nan); // If one of the operands is NaN, return 1 for f64.ne, else 0. if (cond == ne) { - TurboAssembler::li(dst, 1); + MacroAssembler::li(dst, 1); } else { - TurboAssembler::Move(dst, zero_reg); + MacroAssembler::Move(dst, zero_reg); } - TurboAssembler::Branch(&cont); + MacroAssembler::Branch(&cont); bind(&not_nan); - TurboAssembler::li(dst, 1); + MacroAssembler::li(dst, 1); bool predicate; FPUCondition fcond = liftoff::ConditionToConditionCmpFPU(cond, &predicate); - TurboAssembler::CompareF64(lhs, rhs, fcond); + MacroAssembler::CompareF64(lhs, rhs, fcond); if (predicate) { - TurboAssembler::LoadZeroIfNotFPUCondition(dst); + MacroAssembler::LoadZeroIfNotFPUCondition(dst); } else { - TurboAssembler::LoadZeroIfFPUCondition(dst); + MacroAssembler::LoadZeroIfFPUCondition(dst); } bind(&cont); @@ -3001,8 +3001,8 @@ void LiftoffAssembler::emit_f64x2_qfms(LiftoffRegister dst, } void LiftoffAssembler::StackCheck(Label* ool_code, Register limit_address) { - TurboAssembler::Ld_d(limit_address, MemOperand(limit_address, 0)); - TurboAssembler::Branch(ool_code, ule, sp, Operand(limit_address)); + MacroAssembler::Ld_d(limit_address, MemOperand(limit_address, 0)); + MacroAssembler::Branch(ool_code, ule, sp, Operand(limit_address)); } void LiftoffAssembler::CallTrapCallbackForTesting() { @@ -3036,7 +3036,7 @@ void LiftoffAssembler::PushRegisters(LiftoffRegList regs) { unsigned offset = 0; while (!fp_regs.is_empty()) { LiftoffRegister reg = fp_regs.GetFirstRegSet(); - TurboAssembler::Fst_d(reg.fp(), MemOperand(sp, offset)); + MacroAssembler::Fst_d(reg.fp(), MemOperand(sp, offset)); fp_regs.clear(reg); offset += slot_size; } @@ -3049,7 +3049,7 @@ void LiftoffAssembler::PopRegisters(LiftoffRegList regs) { unsigned fp_offset = 0; while (!fp_regs.is_empty()) { LiftoffRegister reg = fp_regs.GetFirstRegSet(); - TurboAssembler::Fld_d(reg.fp(), MemOperand(sp, fp_offset)); + MacroAssembler::Fld_d(reg.fp(), MemOperand(sp, fp_offset)); fp_regs.clear(reg); fp_offset += 8; } @@ -3168,7 +3168,7 @@ void
LiftoffAssembler::CallRuntimeStub(WasmCode::RuntimeStubId sid) { void LiftoffAssembler::AllocateStackSlot(Register addr, uint32_t size) { addi_d(sp, sp, -size); - TurboAssembler::Move(addr, sp); + MacroAssembler::Move(addr, sp); } void LiftoffAssembler::DeallocateStackSlot(uint32_t size) { diff --git a/src/wasm/baseline/mips64/liftoff-assembler-mips64.h b/src/wasm/baseline/mips64/liftoff-assembler-mips64.h index df772ab554..22eda69815 100644 --- a/src/wasm/baseline/mips64/liftoff-assembler-mips64.h +++ b/src/wasm/baseline/mips64/liftoff-assembler-mips64.h @@ -176,19 +176,19 @@ inline void ChangeEndiannessLoad(LiftoffAssembler* assm, LiftoffRegister dst, assm->emit_type_conversion(kExprI32ReinterpretF32, tmp, dst); V8_FALLTHROUGH; case LoadType::kI64Load32U: - assm->TurboAssembler::ByteSwapUnsigned(tmp.gp(), tmp.gp(), 4); + assm->MacroAssembler::ByteSwapUnsigned(tmp.gp(), tmp.gp(), 4); break; case LoadType::kI32Load: case LoadType::kI64Load32S: - assm->TurboAssembler::ByteSwapSigned(tmp.gp(), tmp.gp(), 4); + assm->MacroAssembler::ByteSwapSigned(tmp.gp(), tmp.gp(), 4); break; case LoadType::kI32Load16S: case LoadType::kI64Load16S: - assm->TurboAssembler::ByteSwapSigned(tmp.gp(), tmp.gp(), 2); + assm->MacroAssembler::ByteSwapSigned(tmp.gp(), tmp.gp(), 2); break; case LoadType::kI32Load16U: case LoadType::kI64Load16U: - assm->TurboAssembler::ByteSwapUnsigned(tmp.gp(), tmp.gp(), 2); + assm->MacroAssembler::ByteSwapUnsigned(tmp.gp(), tmp.gp(), 2); break; case LoadType::kF64Load: is_float = true; @@ -196,7 +196,7 @@ inline void ChangeEndiannessLoad(LiftoffAssembler* assm, LiftoffRegister dst, assm->emit_type_conversion(kExprI64ReinterpretF64, tmp, dst); V8_FALLTHROUGH; case LoadType::kI64Load: - assm->TurboAssembler::ByteSwapSigned(tmp.gp(), tmp.gp(), 8); + assm->MacroAssembler::ByteSwapSigned(tmp.gp(), tmp.gp(), 8); break; default: UNREACHABLE(); @@ -231,10 +231,10 @@ inline void ChangeEndiannessStore(LiftoffAssembler* assm, LiftoffRegister src, assm->emit_type_conversion(kExprI32ReinterpretF32, tmp, src); V8_FALLTHROUGH; case StoreType::kI32Store: - assm->TurboAssembler::ByteSwapSigned(tmp.gp(), tmp.gp(), 4); + assm->MacroAssembler::ByteSwapSigned(tmp.gp(), tmp.gp(), 4); break; case StoreType::kI32Store16: - assm->TurboAssembler::ByteSwapSigned(tmp.gp(), tmp.gp(), 2); + assm->MacroAssembler::ByteSwapSigned(tmp.gp(), tmp.gp(), 2); break; case StoreType::kF64Store: is_float = true; @@ -242,13 +242,13 @@ inline void ChangeEndiannessStore(LiftoffAssembler* assm, LiftoffRegister src, assm->emit_type_conversion(kExprI64ReinterpretF64, tmp, src); V8_FALLTHROUGH; case StoreType::kI64Store: - assm->TurboAssembler::ByteSwapSigned(tmp.gp(), tmp.gp(), 8); + assm->MacroAssembler::ByteSwapSigned(tmp.gp(), tmp.gp(), 8); break; case StoreType::kI64Store32: - assm->TurboAssembler::ByteSwapSigned(tmp.gp(), tmp.gp(), 4); + assm->MacroAssembler::ByteSwapSigned(tmp.gp(), tmp.gp(), 4); break; case StoreType::kI64Store16: - assm->TurboAssembler::ByteSwapSigned(tmp.gp(), tmp.gp(), 2); + assm->MacroAssembler::ByteSwapSigned(tmp.gp(), tmp.gp(), 2); break; default: UNREACHABLE(); @@ -340,7 +340,7 @@ void LiftoffAssembler::PatchPrepareStackFrame( // We can't run out of space, just pass anything big enough to not cause the // assembler to try to grow the buffer. 
constexpr int kAvailableSpace = 256; - TurboAssembler patching_assembler( + MacroAssembler patching_assembler( nullptr, AssemblerOptions{}, CodeObjectRequired::kNo, ExternalAssemblerBuffer(buffer_start_ + offset, kAvailableSpace)); @@ -429,16 +429,16 @@ void LiftoffAssembler::LoadConstant(LiftoffRegister reg, WasmValue value, RelocInfo::Mode rmode) { switch (value.type().kind()) { case kI32: - TurboAssembler::li(reg.gp(), Operand(value.to_i32(), rmode)); + MacroAssembler::li(reg.gp(), Operand(value.to_i32(), rmode)); break; case kI64: - TurboAssembler::li(reg.gp(), Operand(value.to_i64(), rmode)); + MacroAssembler::li(reg.gp(), Operand(value.to_i64(), rmode)); break; case kF32: - TurboAssembler::Move(reg.fp(), value.to_f32_boxed().get_bits()); + MacroAssembler::Move(reg.fp(), value.to_f32_boxed().get_bits()); break; case kF64: - TurboAssembler::Move(reg.fp(), value.to_f64_boxed().get_bits()); + MacroAssembler::Move(reg.fp(), value.to_f64_boxed().get_bits()); break; default: UNREACHABLE(); @@ -547,30 +547,30 @@ void LiftoffAssembler::Load(LiftoffRegister dst, Register src_addr, break; case LoadType::kI32Load16U: case LoadType::kI64Load16U: - TurboAssembler::Ulhu(dst.gp(), src_op); + MacroAssembler::Ulhu(dst.gp(), src_op); break; case LoadType::kI32Load16S: case LoadType::kI64Load16S: - TurboAssembler::Ulh(dst.gp(), src_op); + MacroAssembler::Ulh(dst.gp(), src_op); break; case LoadType::kI64Load32U: - TurboAssembler::Ulwu(dst.gp(), src_op); + MacroAssembler::Ulwu(dst.gp(), src_op); break; case LoadType::kI32Load: case LoadType::kI64Load32S: - TurboAssembler::Ulw(dst.gp(), src_op); + MacroAssembler::Ulw(dst.gp(), src_op); break; case LoadType::kI64Load: - TurboAssembler::Uld(dst.gp(), src_op); + MacroAssembler::Uld(dst.gp(), src_op); break; case LoadType::kF32Load: - TurboAssembler::Ulwc1(dst.fp(), src_op, t8); + MacroAssembler::Ulwc1(dst.fp(), src_op, t8); break; case LoadType::kF64Load: - TurboAssembler::Uldc1(dst.fp(), src_op, t8); + MacroAssembler::Uldc1(dst.fp(), src_op, t8); break; case LoadType::kS128Load: - TurboAssembler::ld_b(dst.fp().toW(), src_op); + MacroAssembler::ld_b(dst.fp().toW(), src_op); break; default: UNREACHABLE(); @@ -613,23 +613,23 @@ void LiftoffAssembler::Store(Register dst_addr, Register offset_reg, break; case StoreType::kI32Store16: case StoreType::kI64Store16: - TurboAssembler::Ush(src.gp(), dst_op, t8); + MacroAssembler::Ush(src.gp(), dst_op, t8); break; case StoreType::kI32Store: case StoreType::kI64Store32: - TurboAssembler::Usw(src.gp(), dst_op); + MacroAssembler::Usw(src.gp(), dst_op); break; case StoreType::kI64Store: - TurboAssembler::Usd(src.gp(), dst_op); + MacroAssembler::Usd(src.gp(), dst_op); break; case StoreType::kF32Store: - TurboAssembler::Uswc1(src.fp(), dst_op, t8); + MacroAssembler::Uswc1(src.fp(), dst_op, t8); break; case StoreType::kF64Store: - TurboAssembler::Usdc1(src.fp(), dst_op, t8); + MacroAssembler::Usdc1(src.fp(), dst_op, t8); break; case StoreType::kS128Store: - TurboAssembler::st_b(src.fp().toW(), dst_op); + MacroAssembler::st_b(src.fp().toW(), dst_op); break; default: UNREACHABLE(); @@ -987,16 +987,16 @@ void LiftoffAssembler::MoveStackValue(uint32_t dst_offset, uint32_t src_offset, void LiftoffAssembler::Move(Register dst, Register src, ValueKind kind) { DCHECK_NE(dst, src); // TODO(ksreten): Handle different sizes here. 
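// Editor's note (not in the original patch): the full-width register move
// is presumably safe for kI32 values as well because the MIPS64 ABI keeps
// 32-bit integers sign-extended in 64-bit registers; the TODO above reads
// as being about avoiding an unnecessarily wide copy, not correctness.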
- TurboAssembler::Move(dst, src); + MacroAssembler::Move(dst, src); } void LiftoffAssembler::Move(DoubleRegister dst, DoubleRegister src, ValueKind kind) { DCHECK_NE(dst, src); if (kind != kS128) { - TurboAssembler::Move(dst, src); + MacroAssembler::Move(dst, src); } else { - TurboAssembler::move_v(dst.toW(), src.toW()); + MacroAssembler::move_v(dst.toW(), src.toW()); } } @@ -1017,10 +1017,10 @@ void LiftoffAssembler::Spill(int offset, LiftoffRegister reg, ValueKind kind) { Swc1(reg.fp(), dst); break; case kF64: - TurboAssembler::Sdc1(reg.fp(), dst); + MacroAssembler::Sdc1(reg.fp(), dst); break; case kS128: - TurboAssembler::st_b(reg.fp().toW(), dst); + MacroAssembler::st_b(reg.fp().toW(), dst); break; default: UNREACHABLE(); @@ -1032,14 +1032,14 @@ void LiftoffAssembler::Spill(int offset, WasmValue value) { MemOperand dst = liftoff::GetStackSlot(offset); switch (value.type().kind()) { case kI32: { - TurboAssembler::li(kScratchReg, Operand(value.to_i32())); + MacroAssembler::li(kScratchReg, Operand(value.to_i32())); Sw(kScratchReg, dst); break; } case kI64: case kRef: case kRefNull: { - TurboAssembler::li(kScratchReg, value.to_i64()); + MacroAssembler::li(kScratchReg, value.to_i64()); Sd(kScratchReg, dst); break; } @@ -1065,10 +1065,10 @@ void LiftoffAssembler::Fill(LiftoffRegister reg, int offset, ValueKind kind) { Lwc1(reg.fp(), src); break; case kF64: - TurboAssembler::Ldc1(reg.fp(), src); + MacroAssembler::Ldc1(reg.fp(), src); break; case kS128: - TurboAssembler::ld_b(reg.fp().toW(), src); + MacroAssembler::ld_b(reg.fp().toW(), src); break; default: UNREACHABLE(); @@ -1117,16 +1117,16 @@ void LiftoffAssembler::LoadSpillAddress(Register dst, int offset, } void LiftoffAssembler::emit_i64_clz(LiftoffRegister dst, LiftoffRegister src) { - TurboAssembler::Dclz(dst.gp(), src.gp()); + MacroAssembler::Dclz(dst.gp(), src.gp()); } void LiftoffAssembler::emit_i64_ctz(LiftoffRegister dst, LiftoffRegister src) { - TurboAssembler::Dctz(dst.gp(), src.gp()); + MacroAssembler::Dctz(dst.gp(), src.gp()); } bool LiftoffAssembler::emit_i64_popcnt(LiftoffRegister dst, LiftoffRegister src) { - TurboAssembler::Dpopcnt(dst.gp(), src.gp()); + MacroAssembler::Dpopcnt(dst.gp(), src.gp()); return true; } @@ -1140,42 +1140,42 @@ void LiftoffAssembler::IncrementSmi(LiftoffRegister dst, int offset) { } void LiftoffAssembler::emit_i32_mul(Register dst, Register lhs, Register rhs) { - TurboAssembler::Mul(dst, lhs, rhs); + MacroAssembler::Mul(dst, lhs, rhs); } void LiftoffAssembler::emit_i32_divs(Register dst, Register lhs, Register rhs, Label* trap_div_by_zero, Label* trap_div_unrepresentable) { - TurboAssembler::Branch(trap_div_by_zero, eq, rhs, Operand(zero_reg)); + MacroAssembler::Branch(trap_div_by_zero, eq, rhs, Operand(zero_reg)); // Check if lhs == kMinInt and rhs == -1, since this case is unrepresentable. 
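// Editor's note (not in the original patch): kMinInt is -2^31. Two's
// complement division overflows on exactly this one input pair, since
//   -2147483648 / -1 = +2147483648 > INT32_MAX = 2147483647,
// so the quotient is unrepresentable in 32 bits and wasm mandates a trap
// here instead of hardware-defined behavior.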
- TurboAssembler::li(kScratchReg, 1); - TurboAssembler::li(kScratchReg2, 1); - TurboAssembler::LoadZeroOnCondition(kScratchReg, lhs, Operand(kMinInt), eq); - TurboAssembler::LoadZeroOnCondition(kScratchReg2, rhs, Operand(-1), eq); + MacroAssembler::li(kScratchReg, 1); + MacroAssembler::li(kScratchReg2, 1); + MacroAssembler::LoadZeroOnCondition(kScratchReg, lhs, Operand(kMinInt), eq); + MacroAssembler::LoadZeroOnCondition(kScratchReg2, rhs, Operand(-1), eq); daddu(kScratchReg, kScratchReg, kScratchReg2); - TurboAssembler::Branch(trap_div_unrepresentable, eq, kScratchReg, + MacroAssembler::Branch(trap_div_unrepresentable, eq, kScratchReg, Operand(zero_reg)); - TurboAssembler::Div(dst, lhs, rhs); + MacroAssembler::Div(dst, lhs, rhs); } void LiftoffAssembler::emit_i32_divu(Register dst, Register lhs, Register rhs, Label* trap_div_by_zero) { - TurboAssembler::Branch(trap_div_by_zero, eq, rhs, Operand(zero_reg)); - TurboAssembler::Divu(dst, lhs, rhs); + MacroAssembler::Branch(trap_div_by_zero, eq, rhs, Operand(zero_reg)); + MacroAssembler::Divu(dst, lhs, rhs); } void LiftoffAssembler::emit_i32_rems(Register dst, Register lhs, Register rhs, Label* trap_div_by_zero) { - TurboAssembler::Branch(trap_div_by_zero, eq, rhs, Operand(zero_reg)); - TurboAssembler::Mod(dst, lhs, rhs); + MacroAssembler::Branch(trap_div_by_zero, eq, rhs, Operand(zero_reg)); + MacroAssembler::Mod(dst, lhs, rhs); } void LiftoffAssembler::emit_i32_remu(Register dst, Register lhs, Register rhs, Label* trap_div_by_zero) { - TurboAssembler::Branch(trap_div_by_zero, eq, rhs, Operand(zero_reg)); - TurboAssembler::Modu(dst, lhs, rhs); + MacroAssembler::Branch(trap_div_by_zero, eq, rhs, Operand(zero_reg)); + MacroAssembler::Modu(dst, lhs, rhs); } #define I32_BINOP(name, instruction) \ @@ -1211,15 +1211,15 @@ I32_BINOP_I(xor, Xor) #undef I32_BINOP_I void LiftoffAssembler::emit_i32_clz(Register dst, Register src) { - TurboAssembler::Clz(dst, src); + MacroAssembler::Clz(dst, src); } void LiftoffAssembler::emit_i32_ctz(Register dst, Register src) { - TurboAssembler::Ctz(dst, src); + MacroAssembler::Ctz(dst, src); } bool LiftoffAssembler::emit_i32_popcnt(Register dst, Register src) { - TurboAssembler::Popcnt(dst, src); + MacroAssembler::Popcnt(dst, src); return true; } @@ -1244,55 +1244,55 @@ I32_SHIFTOP_I(shr, srl) void LiftoffAssembler::emit_i64_addi(LiftoffRegister dst, LiftoffRegister lhs, int64_t imm) { - TurboAssembler::Daddu(dst.gp(), lhs.gp(), Operand(imm)); + MacroAssembler::Daddu(dst.gp(), lhs.gp(), Operand(imm)); } void LiftoffAssembler::emit_i64_mul(LiftoffRegister dst, LiftoffRegister lhs, LiftoffRegister rhs) { - TurboAssembler::Dmul(dst.gp(), lhs.gp(), rhs.gp()); + MacroAssembler::Dmul(dst.gp(), lhs.gp(), rhs.gp()); } bool LiftoffAssembler::emit_i64_divs(LiftoffRegister dst, LiftoffRegister lhs, LiftoffRegister rhs, Label* trap_div_by_zero, Label* trap_div_unrepresentable) { - TurboAssembler::Branch(trap_div_by_zero, eq, rhs.gp(), Operand(zero_reg)); + MacroAssembler::Branch(trap_div_by_zero, eq, rhs.gp(), Operand(zero_reg)); // Check if lhs == MinInt64 and rhs == -1, since this case is unrepresentable. 
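// Editor's note (not in the original patch): the check below is
// branchless. Both scratch registers are preloaded with 1, each
// LoadZeroOnCondition clears its register only when its half of the
// condition holds, and the sum is therefore 0 iff lhs == INT64_MIN and
// rhs == -1 simultaneously -- the single unrepresentable case.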
- TurboAssembler::li(kScratchReg, 1); - TurboAssembler::li(kScratchReg2, 1); - TurboAssembler::LoadZeroOnCondition( + MacroAssembler::li(kScratchReg, 1); + MacroAssembler::li(kScratchReg2, 1); + MacroAssembler::LoadZeroOnCondition( kScratchReg, lhs.gp(), Operand(std::numeric_limits<int64_t>::min()), eq); - TurboAssembler::LoadZeroOnCondition(kScratchReg2, rhs.gp(), Operand(-1), eq); + MacroAssembler::LoadZeroOnCondition(kScratchReg2, rhs.gp(), Operand(-1), eq); daddu(kScratchReg, kScratchReg, kScratchReg2); - TurboAssembler::Branch(trap_div_unrepresentable, eq, kScratchReg, + MacroAssembler::Branch(trap_div_unrepresentable, eq, kScratchReg, Operand(zero_reg)); - TurboAssembler::Ddiv(dst.gp(), lhs.gp(), rhs.gp()); + MacroAssembler::Ddiv(dst.gp(), lhs.gp(), rhs.gp()); return true; } bool LiftoffAssembler::emit_i64_divu(LiftoffRegister dst, LiftoffRegister lhs, LiftoffRegister rhs, Label* trap_div_by_zero) { - TurboAssembler::Branch(trap_div_by_zero, eq, rhs.gp(), Operand(zero_reg)); - TurboAssembler::Ddivu(dst.gp(), lhs.gp(), rhs.gp()); + MacroAssembler::Branch(trap_div_by_zero, eq, rhs.gp(), Operand(zero_reg)); + MacroAssembler::Ddivu(dst.gp(), lhs.gp(), rhs.gp()); return true; } bool LiftoffAssembler::emit_i64_rems(LiftoffRegister dst, LiftoffRegister lhs, LiftoffRegister rhs, Label* trap_div_by_zero) { - TurboAssembler::Branch(trap_div_by_zero, eq, rhs.gp(), Operand(zero_reg)); - TurboAssembler::Dmod(dst.gp(), lhs.gp(), rhs.gp()); + MacroAssembler::Branch(trap_div_by_zero, eq, rhs.gp(), Operand(zero_reg)); + MacroAssembler::Dmod(dst.gp(), lhs.gp(), rhs.gp()); return true; } bool LiftoffAssembler::emit_i64_remu(LiftoffRegister dst, LiftoffRegister lhs, LiftoffRegister rhs, Label* trap_div_by_zero) { - TurboAssembler::Branch(trap_div_by_zero, eq, rhs.gp(), Operand(zero_reg)); - TurboAssembler::Dmodu(dst.gp(), lhs.gp(), rhs.gp()); + MacroAssembler::Branch(trap_div_by_zero, eq, rhs.gp(), Operand(zero_reg)); + MacroAssembler::Dmodu(dst.gp(), lhs.gp(), rhs.gp()); return true; } @@ -1354,32 +1354,32 @@ void LiftoffAssembler::emit_u32_to_uintptr(Register dst, Register src) { } void LiftoffAssembler::emit_f32_neg(DoubleRegister dst, DoubleRegister src) { - TurboAssembler::Neg_s(dst, src); + MacroAssembler::Neg_s(dst, src); } void LiftoffAssembler::emit_f64_neg(DoubleRegister dst, DoubleRegister src) { - TurboAssembler::Neg_d(dst, src); + MacroAssembler::Neg_d(dst, src); } void LiftoffAssembler::emit_f32_min(DoubleRegister dst, DoubleRegister lhs, DoubleRegister rhs) { Label ool, done; - TurboAssembler::Float32Min(dst, lhs, rhs, &ool); + MacroAssembler::Float32Min(dst, lhs, rhs, &ool); Branch(&done); bind(&ool); - TurboAssembler::Float32MinOutOfLine(dst, lhs, rhs); + MacroAssembler::Float32MinOutOfLine(dst, lhs, rhs); bind(&done); } void LiftoffAssembler::emit_f32_max(DoubleRegister dst, DoubleRegister lhs, DoubleRegister rhs) { Label ool, done; - TurboAssembler::Float32Max(dst, lhs, rhs, &ool); + MacroAssembler::Float32Max(dst, lhs, rhs, &ool); Branch(&done); bind(&ool); - TurboAssembler::Float32MaxOutOfLine(dst, lhs, rhs); + MacroAssembler::Float32MaxOutOfLine(dst, lhs, rhs); bind(&done); } @@ -1410,22 +1410,22 @@ void LiftoffAssembler::emit_f32_copysign(DoubleRegister dst, DoubleRegister lhs, void LiftoffAssembler::emit_f64_min(DoubleRegister dst, DoubleRegister lhs, DoubleRegister rhs) { Label ool, done; - TurboAssembler::Float64Min(dst, lhs, rhs, &ool); + MacroAssembler::Float64Min(dst, lhs, rhs, &ool); Branch(&done); bind(&ool); - TurboAssembler::Float64MinOutOfLine(dst, lhs, rhs); +
MacroAssembler::Float64MinOutOfLine(dst, lhs, rhs); bind(&done); } void LiftoffAssembler::emit_f64_max(DoubleRegister dst, DoubleRegister lhs, DoubleRegister rhs) { Label ool, done; - TurboAssembler::Float64Max(dst, lhs, rhs, &ool); + MacroAssembler::Float64Max(dst, lhs, rhs, &ool); Branch(&done); bind(&ool); - TurboAssembler::Float64MaxOutOfLine(dst, lhs, rhs); + MacroAssembler::Float64MaxOutOfLine(dst, lhs, rhs); bind(&done); } @@ -1498,7 +1498,7 @@ bool LiftoffAssembler::emit_type_conversion(WasmOpcode opcode, LiftoffRegister src, Label* trap) { switch (opcode) { case kExprI32ConvertI64: - TurboAssembler::Ext(dst.gp(), src.gp(), 0, 32); + MacroAssembler::Ext(dst.gp(), src.gp(), 0, 32); return true; case kExprI32SConvertF32: { LiftoffRegister rounded = GetUnusedRegister(kFpReg, LiftoffRegList{src}); @@ -1506,20 +1506,20 @@ bool LiftoffAssembler::emit_type_conversion(WasmOpcode opcode, GetUnusedRegister(kFpReg, LiftoffRegList{src, rounded}); // Real conversion. - TurboAssembler::Trunc_s_s(rounded.fp(), src.fp()); + MacroAssembler::Trunc_s_s(rounded.fp(), src.fp()); trunc_w_s(kScratchDoubleReg, rounded.fp()); mfc1(dst.gp(), kScratchDoubleReg); // Avoid INT32_MAX as an overflow indicator and use INT32_MIN instead, // because INT32_MIN allows easier out-of-bounds detection. - TurboAssembler::Addu(kScratchReg, dst.gp(), 1); - TurboAssembler::Slt(kScratchReg2, kScratchReg, dst.gp()); - TurboAssembler::Movn(dst.gp(), kScratchReg, kScratchReg2); + MacroAssembler::Addu(kScratchReg, dst.gp(), 1); + MacroAssembler::Slt(kScratchReg2, kScratchReg, dst.gp()); + MacroAssembler::Movn(dst.gp(), kScratchReg, kScratchReg2); // Checking if trap. mtc1(dst.gp(), kScratchDoubleReg); cvt_s_w(converted_back.fp(), kScratchDoubleReg); - TurboAssembler::CompareF32(EQ, rounded.fp(), converted_back.fp()); - TurboAssembler::BranchFalseF(trap); + MacroAssembler::CompareF32(EQ, rounded.fp(), converted_back.fp()); + MacroAssembler::BranchFalseF(trap); return true; } case kExprI32UConvertF32: { @@ -1528,18 +1528,18 @@ bool LiftoffAssembler::emit_type_conversion(WasmOpcode opcode, GetUnusedRegister(kFpReg, LiftoffRegList{src, rounded}); // Real conversion. - TurboAssembler::Trunc_s_s(rounded.fp(), src.fp()); - TurboAssembler::Trunc_uw_s(dst.gp(), rounded.fp(), kScratchDoubleReg); + MacroAssembler::Trunc_s_s(rounded.fp(), src.fp()); + MacroAssembler::Trunc_uw_s(dst.gp(), rounded.fp(), kScratchDoubleReg); // Avoid UINT32_MAX as an overflow indicator and use 0 instead, // because 0 allows easier out-of-bounds detection. - TurboAssembler::Addu(kScratchReg, dst.gp(), 1); - TurboAssembler::Movz(dst.gp(), zero_reg, kScratchReg); + MacroAssembler::Addu(kScratchReg, dst.gp(), 1); + MacroAssembler::Movz(dst.gp(), zero_reg, kScratchReg); // Checking if trap. - TurboAssembler::Cvt_d_uw(converted_back.fp(), dst.gp()); + MacroAssembler::Cvt_d_uw(converted_back.fp(), dst.gp()); cvt_s_d(converted_back.fp(), converted_back.fp()); - TurboAssembler::CompareF32(EQ, rounded.fp(), converted_back.fp()); - TurboAssembler::BranchFalseF(trap); + MacroAssembler::CompareF32(EQ, rounded.fp(), converted_back.fp()); + MacroAssembler::BranchFalseF(trap); return true; } case kExprI32SConvertF64: { @@ -1548,14 +1548,14 @@ bool LiftoffAssembler::emit_type_conversion(WasmOpcode opcode, GetUnusedRegister(kFpReg, LiftoffRegList{src, rounded}); // Real conversion. 
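// Editor's note (not in the original patch): the trap check that follows
// works by round-tripping. The truncated integer is converted back to
// floating point and compared against the rounded input with an ordered
// EQ; any out-of-range source -- and any NaN, for which the comparison is
// unordered -- fails the compare, and BranchFalseF then jumps to the trap
// label.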
- TurboAssembler::Trunc_d_d(rounded.fp(), src.fp()); + MacroAssembler::Trunc_d_d(rounded.fp(), src.fp()); trunc_w_d(kScratchDoubleReg, rounded.fp()); mfc1(dst.gp(), kScratchDoubleReg); // Checking if trap. cvt_d_w(converted_back.fp(), kScratchDoubleReg); - TurboAssembler::CompareF64(EQ, rounded.fp(), converted_back.fp()); - TurboAssembler::BranchFalseF(trap); + MacroAssembler::CompareF64(EQ, rounded.fp(), converted_back.fp()); + MacroAssembler::BranchFalseF(trap); return true; } case kExprI32UConvertF64: { @@ -1564,23 +1564,23 @@ bool LiftoffAssembler::emit_type_conversion(WasmOpcode opcode, GetUnusedRegister(kFpReg, LiftoffRegList{src, rounded}); // Real conversion. - TurboAssembler::Trunc_d_d(rounded.fp(), src.fp()); - TurboAssembler::Trunc_uw_d(dst.gp(), rounded.fp(), kScratchDoubleReg); + MacroAssembler::Trunc_d_d(rounded.fp(), src.fp()); + MacroAssembler::Trunc_uw_d(dst.gp(), rounded.fp(), kScratchDoubleReg); // Checking if trap. - TurboAssembler::Cvt_d_uw(converted_back.fp(), dst.gp()); - TurboAssembler::CompareF64(EQ, rounded.fp(), converted_back.fp()); - TurboAssembler::BranchFalseF(trap); + MacroAssembler::Cvt_d_uw(converted_back.fp(), dst.gp()); + MacroAssembler::CompareF64(EQ, rounded.fp(), converted_back.fp()); + MacroAssembler::BranchFalseF(trap); return true; } case kExprI32ReinterpretF32: - TurboAssembler::FmoveLow(dst.gp(), src.fp()); + MacroAssembler::FmoveLow(dst.gp(), src.fp()); return true; case kExprI64SConvertI32: sll(dst.gp(), src.gp(), 0); return true; case kExprI64UConvertI32: - TurboAssembler::Dext(dst.gp(), src.gp(), 0, 32); + MacroAssembler::Dext(dst.gp(), src.gp(), 0, 32); return true; case kExprI64SConvertF32: { LiftoffRegister rounded = GetUnusedRegister(kFpReg, LiftoffRegList{src}); @@ -1588,29 +1588,29 @@ bool LiftoffAssembler::emit_type_conversion(WasmOpcode opcode, GetUnusedRegister(kFpReg, LiftoffRegList{src, rounded}); // Real conversion. - TurboAssembler::Trunc_s_s(rounded.fp(), src.fp()); + MacroAssembler::Trunc_s_s(rounded.fp(), src.fp()); trunc_l_s(kScratchDoubleReg, rounded.fp()); dmfc1(dst.gp(), kScratchDoubleReg); // Avoid INT64_MAX as an overflow indicator and use INT64_MIN instead, // because INT64_MIN allows easier out-of-bounds detection. - TurboAssembler::Daddu(kScratchReg, dst.gp(), 1); - TurboAssembler::Slt(kScratchReg2, kScratchReg, dst.gp()); - TurboAssembler::Movn(dst.gp(), kScratchReg, kScratchReg2); + MacroAssembler::Daddu(kScratchReg, dst.gp(), 1); + MacroAssembler::Slt(kScratchReg2, kScratchReg, dst.gp()); + MacroAssembler::Movn(dst.gp(), kScratchReg, kScratchReg2); // Checking if trap. dmtc1(dst.gp(), kScratchDoubleReg); cvt_s_l(converted_back.fp(), kScratchDoubleReg); - TurboAssembler::CompareF32(EQ, rounded.fp(), converted_back.fp()); - TurboAssembler::BranchFalseF(trap); + MacroAssembler::CompareF32(EQ, rounded.fp(), converted_back.fp()); + MacroAssembler::BranchFalseF(trap); return true; } case kExprI64UConvertF32: { // Real conversion. - TurboAssembler::Trunc_ul_s(dst.gp(), src.fp(), kScratchDoubleReg, + MacroAssembler::Trunc_ul_s(dst.gp(), src.fp(), kScratchDoubleReg, kScratchReg); // Checking if trap. - TurboAssembler::Branch(trap, eq, kScratchReg, Operand(zero_reg)); + MacroAssembler::Branch(trap, eq, kScratchReg, Operand(zero_reg)); return true; } case kExprI64SConvertF64: { @@ -1619,29 +1619,29 @@ bool LiftoffAssembler::emit_type_conversion(WasmOpcode opcode, GetUnusedRegister(kFpReg, LiftoffRegList{src, rounded}); // Real conversion. 
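// Editor's note (not in the original patch): the LiftoffRegList passed to
// GetUnusedRegister is a pin set. Listing {src, rounded} guarantees that
// converted_back lands in a third FP register, so the original operand and
// the rounded value both survive for the round-trip comparison below.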
- TurboAssembler::Trunc_d_d(rounded.fp(), src.fp()); + MacroAssembler::Trunc_d_d(rounded.fp(), src.fp()); trunc_l_d(kScratchDoubleReg, rounded.fp()); dmfc1(dst.gp(), kScratchDoubleReg); // Avoid INT64_MAX as an overflow indicator and use INT64_MIN instead, // because INT64_MIN allows easier out-of-bounds detection. - TurboAssembler::Daddu(kScratchReg, dst.gp(), 1); - TurboAssembler::Slt(kScratchReg2, kScratchReg, dst.gp()); - TurboAssembler::Movn(dst.gp(), kScratchReg, kScratchReg2); + MacroAssembler::Daddu(kScratchReg, dst.gp(), 1); + MacroAssembler::Slt(kScratchReg2, kScratchReg, dst.gp()); + MacroAssembler::Movn(dst.gp(), kScratchReg, kScratchReg2); // Checking if trap. dmtc1(dst.gp(), kScratchDoubleReg); cvt_d_l(converted_back.fp(), kScratchDoubleReg); - TurboAssembler::CompareF64(EQ, rounded.fp(), converted_back.fp()); - TurboAssembler::BranchFalseF(trap); + MacroAssembler::CompareF64(EQ, rounded.fp(), converted_back.fp()); + MacroAssembler::BranchFalseF(trap); return true; } case kExprI64UConvertF64: { // Real conversion. - TurboAssembler::Trunc_ul_d(dst.gp(), src.fp(), kScratchDoubleReg, + MacroAssembler::Trunc_ul_d(dst.gp(), src.fp(), kScratchDoubleReg, kScratchReg); // Checking if trap. - TurboAssembler::Branch(trap, eq, kScratchReg, Operand(zero_reg)); + MacroAssembler::Branch(trap, eq, kScratchReg, Operand(zero_reg)); return true; } case kExprI64ReinterpretF64: @@ -1654,13 +1654,13 @@ bool LiftoffAssembler::emit_type_conversion(WasmOpcode opcode, return true; } case kExprF32UConvertI32: - TurboAssembler::Cvt_s_uw(dst.fp(), src.gp()); + MacroAssembler::Cvt_s_uw(dst.fp(), src.gp()); return true; case kExprF32ConvertF64: cvt_s_d(dst.fp(), src.fp()); return true; case kExprF32ReinterpretI32: - TurboAssembler::FmoveLow(dst.fp(), src.gp()); + MacroAssembler::FmoveLow(dst.fp(), src.gp()); return true; case kExprF64SConvertI32: { LiftoffRegister scratch = GetUnusedRegister(kFpReg, LiftoffRegList{dst}); @@ -1669,7 +1669,7 @@ bool LiftoffAssembler::emit_type_conversion(WasmOpcode opcode, return true; } case kExprF64UConvertI32: - TurboAssembler::Cvt_d_uw(dst.fp(), src.gp()); + MacroAssembler::Cvt_d_uw(dst.fp(), src.gp()); return true; case kExprF64ConvertF32: cvt_d_s(dst.fp(), src.fp()); @@ -1688,7 +1688,7 @@ bool LiftoffAssembler::emit_type_conversion(WasmOpcode opcode, CompareIsNanF32(src.fp(), src.fp()); BranchTrueShortF(&done); li(dst.gp(), static_cast<int32_t>(std::numeric_limits<int32_t>::min())); - TurboAssembler::Move( + MacroAssembler::Move( kScratchDoubleReg, static_cast<float>(std::numeric_limits<int32_t>::min())); CompareF32(OLT, src.fp(), kScratchDoubleReg); @@ -1702,7 +1702,7 @@ bool LiftoffAssembler::emit_type_conversion(WasmOpcode opcode, case kExprI32UConvertSatF32: { Label isnan_or_lessthan_or_equal_zero; mov(dst.gp(), zero_reg); - TurboAssembler::Move(kScratchDoubleReg, static_cast<float>(0.0)); + MacroAssembler::Move(kScratchDoubleReg, static_cast<float>(0.0)); CompareF32(ULE, src.fp(), kScratchDoubleReg); BranchTrueShortF(&isnan_or_lessthan_or_equal_zero); Trunc_uw_s(dst.gp(), src.fp(), kScratchDoubleReg); @@ -1719,7 +1719,7 @@ bool LiftoffAssembler::emit_type_conversion(WasmOpcode opcode, CompareIsNanF64(src.fp(), src.fp()); BranchTrueShortF(&done); li(dst.gp(), static_cast<int32_t>(std::numeric_limits<int32_t>::min())); - TurboAssembler::Move( + MacroAssembler::Move( kScratchDoubleReg, static_cast<double>(std::numeric_limits<int32_t>::min())); CompareF64(OLT, src.fp(), kScratchDoubleReg); @@ -1733,7 +1733,7 @@ bool LiftoffAssembler::emit_type_conversion(WasmOpcode opcode, case kExprI32UConvertSatF64: { Label isnan_or_lessthan_or_equal_zero;
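// Editor's note (not in the original patch): unlike the trapping
// conversions above, the *Sat* opcodes never branch to a trap label. dst
// is preloaded with 0 below; the ULE compare against 0.0 is true for any
// NaN (unordered) and for all non-positive inputs, so those cases keep the
// 0 while everything else falls through to the actual truncation.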
mov(dst.gp(), zero_reg); - TurboAssembler::Move(kScratchDoubleReg, static_cast<double>(0.0)); + MacroAssembler::Move(kScratchDoubleReg, static_cast<double>(0.0)); CompareF64(ULE, src.fp(), kScratchDoubleReg); BranchTrueShortF(&isnan_or_lessthan_or_equal_zero); Trunc_uw_d(dst.gp(), src.fp(), kScratchDoubleReg); @@ -1750,7 +1750,7 @@ bool LiftoffAssembler::emit_type_conversion(WasmOpcode opcode, CompareIsNanF32(src.fp(), src.fp()); BranchTrueShortF(&done); li(dst.gp(), static_cast<int64_t>(std::numeric_limits<int64_t>::min())); - TurboAssembler::Move( + MacroAssembler::Move( kScratchDoubleReg, static_cast<float>(std::numeric_limits<int64_t>::min())); CompareF32(OLT, src.fp(), kScratchDoubleReg); @@ -1764,7 +1764,7 @@ bool LiftoffAssembler::emit_type_conversion(WasmOpcode opcode, case kExprI64UConvertSatF32: { Label isnan_or_lessthan_or_equal_zero; mov(dst.gp(), zero_reg); - TurboAssembler::Move(kScratchDoubleReg, static_cast<float>(0.0)); + MacroAssembler::Move(kScratchDoubleReg, static_cast<float>(0.0)); CompareF32(ULE, src.fp(), kScratchDoubleReg); BranchTrueShortF(&isnan_or_lessthan_or_equal_zero); Trunc_ul_s(dst.gp(), src.fp(), kScratchDoubleReg, no_reg); @@ -1781,7 +1781,7 @@ bool LiftoffAssembler::emit_type_conversion(WasmOpcode opcode, CompareIsNanF64(src.fp(), src.fp()); BranchTrueShortF(&done); li(dst.gp(), static_cast<int64_t>(std::numeric_limits<int64_t>::min())); - TurboAssembler::Move( + MacroAssembler::Move( kScratchDoubleReg, static_cast<double>(std::numeric_limits<int64_t>::min())); CompareF64(OLT, src.fp(), kScratchDoubleReg); @@ -1795,7 +1795,7 @@ bool LiftoffAssembler::emit_type_conversion(WasmOpcode opcode, case kExprI64UConvertSatF64: { Label isnan_or_lessthan_or_equal_zero; mov(dst.gp(), zero_reg); - TurboAssembler::Move(kScratchDoubleReg, static_cast<double>(0.0)); + MacroAssembler::Move(kScratchDoubleReg, static_cast<double>(0.0)); CompareF64(ULE, src.fp(), kScratchDoubleReg); BranchTrueShortF(&isnan_or_lessthan_or_equal_zero); Trunc_ul_d(dst.gp(), src.fp(), kScratchDoubleReg, no_reg); @@ -1831,11 +1831,11 @@ void LiftoffAssembler::emit_i64_signextend_i32(LiftoffRegister dst, } void LiftoffAssembler::emit_jump(Label* label) { - TurboAssembler::Branch(label); + MacroAssembler::Branch(label); } void LiftoffAssembler::emit_jump(Register target) { - TurboAssembler::Jump(target); + MacroAssembler::Jump(target); } void LiftoffAssembler::emit_cond_jump(Condition cond, Label* label, @@ -1844,25 +1844,25 @@ void LiftoffAssembler::emit_cond_jump(Condition cond, Label* label, const FreezeCacheState& frozen) { if (rhs == no_reg) { DCHECK(kind == kI32 || kind == kI64); - TurboAssembler::Branch(label, cond, lhs, Operand(zero_reg)); + MacroAssembler::Branch(label, cond, lhs, Operand(zero_reg)); } else { DCHECK((kind == kI32 || kind == kI64) || (is_reference(kind) && (cond == kEqual || cond == kNotEqual))); - TurboAssembler::Branch(label, cond, lhs, Operand(rhs)); + MacroAssembler::Branch(label, cond, lhs, Operand(rhs)); } } void LiftoffAssembler::emit_i32_cond_jumpi(Condition cond, Label* label, Register lhs, int32_t imm, const FreezeCacheState& frozen) { - TurboAssembler::Branch(label, cond, lhs, Operand(imm)); + MacroAssembler::Branch(label, cond, lhs, Operand(imm)); } void LiftoffAssembler::emit_i32_subi_jump_negative( Register value, int subtrahend, Label* result_negative, const FreezeCacheState& frozen) { - TurboAssembler::Dsubu(value, value, Operand(subtrahend)); - TurboAssembler::Branch(result_negative, less, value, Operand(zero_reg)); + MacroAssembler::Dsubu(value, value, Operand(subtrahend)); + MacroAssembler::Branch(result_negative, less, value, Operand(zero_reg)); } void
LiftoffAssembler::emit_i32_eqz(Register dst, Register src) { @@ -1876,14 +1876,14 @@ void LiftoffAssembler::emit_i32_set_cond(Condition cond, Register dst, tmp = GetUnusedRegister(kGpReg, LiftoffRegList{lhs, rhs}).gp(); } // Write 1 as result. - TurboAssembler::li(tmp, 1); + MacroAssembler::li(tmp, 1); // If negative condition is true, write 0 as result. Condition neg_cond = NegateCondition(cond); - TurboAssembler::LoadZeroOnCondition(tmp, lhs, Operand(rhs), neg_cond); + MacroAssembler::LoadZeroOnCondition(tmp, lhs, Operand(rhs), neg_cond); // If tmp != dst, result will be moved. - TurboAssembler::Move(dst, tmp); + MacroAssembler::Move(dst, tmp); } void LiftoffAssembler::emit_i64_eqz(Register dst, LiftoffRegister src) { @@ -1898,15 +1898,15 @@ void LiftoffAssembler::emit_i64_set_cond(Condition cond, Register dst, tmp = GetUnusedRegister(kGpReg, LiftoffRegList{lhs, rhs}).gp(); } // Write 1 as result. - TurboAssembler::li(tmp, 1); + MacroAssembler::li(tmp, 1); // If negative condition is true, write 0 as result. Condition neg_cond = NegateCondition(cond); - TurboAssembler::LoadZeroOnCondition(tmp, lhs.gp(), Operand(rhs.gp()), + MacroAssembler::LoadZeroOnCondition(tmp, lhs.gp(), Operand(rhs.gp()), neg_cond); // If tmp != dst, result will be moved. - TurboAssembler::Move(dst, tmp); + MacroAssembler::Move(dst, tmp); } namespace liftoff { @@ -1965,26 +1965,26 @@ void LiftoffAssembler::emit_f32_set_cond(Condition cond, Register dst, DoubleRegister lhs, DoubleRegister rhs) { Label not_nan, cont; - TurboAssembler::CompareIsNanF32(lhs, rhs); - TurboAssembler::BranchFalseF(&not_nan); + MacroAssembler::CompareIsNanF32(lhs, rhs); + MacroAssembler::BranchFalseF(&not_nan); // If one of the operands is NaN, return 1 for f32.ne, else 0. if (cond == ne) { - TurboAssembler::li(dst, 1); + MacroAssembler::li(dst, 1); } else { - TurboAssembler::Move(dst, zero_reg); + MacroAssembler::Move(dst, zero_reg); } - TurboAssembler::Branch(&cont); + MacroAssembler::Branch(&cont); bind(&not_nan); - TurboAssembler::li(dst, 1); + MacroAssembler::li(dst, 1); bool predicate; FPUCondition fcond = liftoff::ConditionToConditionCmpFPU(cond, &predicate); - TurboAssembler::CompareF32(fcond, lhs, rhs); + MacroAssembler::CompareF32(fcond, lhs, rhs); if (predicate) { - TurboAssembler::LoadZeroIfNotFPUCondition(dst); + MacroAssembler::LoadZeroIfNotFPUCondition(dst); } else { - TurboAssembler::LoadZeroIfFPUCondition(dst); + MacroAssembler::LoadZeroIfFPUCondition(dst); } bind(&cont); @@ -1994,26 +1994,26 @@ void LiftoffAssembler::emit_f64_set_cond(Condition cond, Register dst, DoubleRegister lhs, DoubleRegister rhs) { Label not_nan, cont; - TurboAssembler::CompareIsNanF64(lhs, rhs); - TurboAssembler::BranchFalseF(&not_nan); + MacroAssembler::CompareIsNanF64(lhs, rhs); + MacroAssembler::BranchFalseF(&not_nan); // If one of the operands is NaN, return 1 for f64.ne, else 0.
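// Editor's note (not in the original patch): under IEEE 754 a NaN operand
// makes every comparison unordered, so eq/lt/le/gt/ge must all yield 0
// while ne -- defined as the negation of eq -- must yield 1. That is why
// the NaN path cannot share the LoadZeroIf*FPUCondition code below and is
// special-cased up front.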
if (cond == ne) { - TurboAssembler::li(dst, 1); + MacroAssembler::li(dst, 1); } else { - TurboAssembler::Move(dst, zero_reg); + MacroAssembler::Move(dst, zero_reg); } - TurboAssembler::Branch(&cont); + MacroAssembler::Branch(&cont); bind(&not_nan); - TurboAssembler::li(dst, 1); + MacroAssembler::li(dst, 1); bool predicate; FPUCondition fcond = liftoff::ConditionToConditionCmpFPU(cond, &predicate); - TurboAssembler::CompareF64(fcond, lhs, rhs); + MacroAssembler::CompareF64(fcond, lhs, rhs); if (predicate) { - TurboAssembler::LoadZeroIfNotFPUCondition(dst); + MacroAssembler::LoadZeroIfNotFPUCondition(dst); } else { - TurboAssembler::LoadZeroIfFPUCondition(dst); + MacroAssembler::LoadZeroIfFPUCondition(dst); } bind(&cont); @@ -2111,7 +2111,7 @@ void LiftoffAssembler::LoadLane(LiftoffRegister dst, LiftoffRegister src, MemOperand src_op = liftoff::GetMemOp(this, addr, offset_reg, offset_imm); *protected_load_pc = pc_offset(); LoadStoreLaneParams load_params(type.mem_type().representation(), laneidx); - TurboAssembler::LoadLane(load_params.sz, dst.fp().toW(), laneidx, src_op); + MacroAssembler::LoadLane(load_params.sz, dst.fp().toW(), laneidx, src_op); } void LiftoffAssembler::StoreLane(Register dst, Register offset, @@ -2121,7 +2121,7 @@ void LiftoffAssembler::StoreLane(Register dst, Register offset, MemOperand dst_op = liftoff::GetMemOp(this, dst, offset, offset_imm); if (protected_store_pc) *protected_store_pc = pc_offset(); LoadStoreLaneParams store_params(type.mem_rep(), lane); - TurboAssembler::StoreLane(store_params.sz, src.fp().toW(), lane, dst_op); + MacroAssembler::StoreLane(store_params.sz, src.fp().toW(), lane, dst_op); } void LiftoffAssembler::emit_i8x16_shuffle(LiftoffRegister dst, @@ -2228,25 +2228,25 @@ void LiftoffAssembler::emit_i64x2_splat(LiftoffRegister dst, void LiftoffAssembler::emit_f32x4_splat(LiftoffRegister dst, LiftoffRegister src) { - TurboAssembler::FmoveLow(kScratchReg, src.fp()); + MacroAssembler::FmoveLow(kScratchReg, src.fp()); fill_w(dst.fp().toW(), kScratchReg); } void LiftoffAssembler::emit_f64x2_splat(LiftoffRegister dst, LiftoffRegister src) { - TurboAssembler::Move(kScratchReg, src.fp()); + MacroAssembler::Move(kScratchReg, src.fp()); fill_d(dst.fp().toW(), kScratchReg); } #define SIMD_BINOP(name1, name2, type) \ void LiftoffAssembler::emit_##name1##_extmul_low_##name2( \ LiftoffRegister dst, LiftoffRegister src1, LiftoffRegister src2) { \ - TurboAssembler::ExtMulLow(type, dst.fp().toW(), src1.fp().toW(), \ + MacroAssembler::ExtMulLow(type, dst.fp().toW(), src1.fp().toW(), \ src2.fp().toW()); \ } \ void LiftoffAssembler::emit_##name1##_extmul_high_##name2( \ LiftoffRegister dst, LiftoffRegister src1, LiftoffRegister src2) { \ - TurboAssembler::ExtMulHigh(type, dst.fp().toW(), src1.fp().toW(), \ + MacroAssembler::ExtMulHigh(type, dst.fp().toW(), src1.fp().toW(), \ src2.fp().toW()); \ } @@ -2264,7 +2264,7 @@ SIMD_BINOP(i64x2, i32x4_u, MSAU32) #define SIMD_BINOP(name1, name2, type) \ void LiftoffAssembler::emit_##name1##_extadd_pairwise_##name2( \ LiftoffRegister dst, LiftoffRegister src) { \ - TurboAssembler::ExtAddPairwise(type, dst.fp().toW(), src.fp().toW()); \ + MacroAssembler::ExtAddPairwise(type, dst.fp().toW(), src.fp().toW()); \ } SIMD_BINOP(i16x8, i8x16_s, MSAS8) @@ -3455,14 +3455,14 @@ void LiftoffAssembler::emit_f32x4_extract_lane(LiftoffRegister dst, LiftoffRegister lhs, uint8_t imm_lane_idx) { copy_u_w(kScratchReg, lhs.fp().toW(), imm_lane_idx); - TurboAssembler::FmoveLow(dst.fp(), kScratchReg); + MacroAssembler::FmoveLow(dst.fp(), kScratchReg); }
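// Editor's note (not in the original patch): sketch of how a scalar float
// leaves an MSA vector on mips64 -- copy_u_w moves the lane's raw bits
// into a GPR and FmoveLow deposits them in the low word of an FPU
// register, so no memory round-trip is needed:
//
//   void ExtractF32Lane(MacroAssembler* masm, DoubleRegister dst,
//                       MSARegister src, int lane) {
//     masm->copy_u_w(kScratchReg, src, lane);  // lane bits -> GPR
//     masm->FmoveLow(dst, kScratchReg);        // GPR -> low 32 bits of FPR
//   }
//
// ExtractF32Lane is a hypothetical helper named here purely for
// illustration.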
void LiftoffAssembler::emit_f64x2_extract_lane(LiftoffRegister dst, LiftoffRegister lhs, uint8_t imm_lane_idx) { copy_s_d(kScratchReg, lhs.fp().toW(), imm_lane_idx); - TurboAssembler::Move(dst.fp(), kScratchReg); + MacroAssembler::Move(dst.fp(), kScratchReg); } void LiftoffAssembler::emit_i8x16_replace_lane(LiftoffRegister dst, @@ -3509,7 +3509,7 @@ void LiftoffAssembler::emit_f32x4_replace_lane(LiftoffRegister dst, LiftoffRegister src1, LiftoffRegister src2, uint8_t imm_lane_idx) { - TurboAssembler::FmoveLow(kScratchReg, src2.fp()); + MacroAssembler::FmoveLow(kScratchReg, src2.fp()); if (dst != src1) { move_v(dst.fp().toW(), src1.fp().toW()); } @@ -3520,7 +3520,7 @@ void LiftoffAssembler::emit_f64x2_replace_lane(LiftoffRegister dst, LiftoffRegister src1, LiftoffRegister src2, uint8_t imm_lane_idx) { - TurboAssembler::Move(kScratchReg, src2.fp()); + MacroAssembler::Move(kScratchReg, src2.fp()); if (dst != src1) { move_v(dst.fp().toW(), src1.fp().toW()); } @@ -3556,8 +3556,8 @@ void LiftoffAssembler::emit_f64x2_qfms(LiftoffRegister dst, } void LiftoffAssembler::StackCheck(Label* ool_code, Register limit_address) { - TurboAssembler::Uld(limit_address, MemOperand(limit_address)); - TurboAssembler::Branch(ool_code, ule, sp, Operand(limit_address)); + MacroAssembler::Uld(limit_address, MemOperand(limit_address)); + MacroAssembler::Branch(ool_code, ule, sp, Operand(limit_address)); } void LiftoffAssembler::CallTrapCallbackForTesting() { @@ -3592,9 +3592,9 @@ void LiftoffAssembler::PushRegisters(LiftoffRegList regs) { while (!fp_regs.is_empty()) { LiftoffRegister reg = fp_regs.GetFirstRegSet(); if (IsEnabled(MIPS_SIMD)) { - TurboAssembler::st_d(reg.fp().toW(), MemOperand(sp, offset)); + MacroAssembler::st_d(reg.fp().toW(), MemOperand(sp, offset)); } else { - TurboAssembler::Sdc1(reg.fp(), MemOperand(sp, offset)); + MacroAssembler::Sdc1(reg.fp(), MemOperand(sp, offset)); } fp_regs.clear(reg); offset += slot_size; @@ -3609,9 +3609,9 @@ void LiftoffAssembler::PopRegisters(LiftoffRegList regs) { while (!fp_regs.is_empty()) { LiftoffRegister reg = fp_regs.GetFirstRegSet(); if (IsEnabled(MIPS_SIMD)) { - TurboAssembler::ld_d(reg.fp().toW(), MemOperand(sp, fp_offset)); + MacroAssembler::ld_d(reg.fp().toW(), MemOperand(sp, fp_offset)); } else { - TurboAssembler::Ldc1(reg.fp(), MemOperand(sp, fp_offset)); + MacroAssembler::Ldc1(reg.fp(), MemOperand(sp, fp_offset)); } fp_regs.clear(reg); fp_offset += (IsEnabled(MIPS_SIMD) ? 16 : 8); @@ -3648,7 +3648,7 @@ void LiftoffAssembler::RecordSpillsInSafepoint( void LiftoffAssembler::DropStackSlotsAndRet(uint32_t num_stack_slots) { DCHECK_LT(num_stack_slots, (1 << 16) / kSystemPointerSize); // 16 bit immediate - TurboAssembler::DropAndRet(static_cast<int>(num_stack_slots)); + MacroAssembler::DropAndRet(static_cast<int>(num_stack_slots)); } void LiftoffAssembler::CallC(const ValueKindSig* sig, @@ -3730,7 +3730,7 @@ void LiftoffAssembler::CallRuntimeStub(WasmCode::RuntimeStubId sid) { void LiftoffAssembler::AllocateStackSlot(Register addr, uint32_t size) { Daddu(sp, sp, -size); - TurboAssembler::Move(addr, sp); + MacroAssembler::Move(addr, sp); } void LiftoffAssembler::DeallocateStackSlot(uint32_t size) { diff --git a/src/wasm/baseline/ppc/liftoff-assembler-ppc.h b/src/wasm/baseline/ppc/liftoff-assembler-ppc.h index e376c75b23..d9888a9b4f 100644 --- a/src/wasm/baseline/ppc/liftoff-assembler-ppc.h +++ b/src/wasm/baseline/ppc/liftoff-assembler-ppc.h @@ -197,7 +197,7 @@ void LiftoffAssembler::PatchPrepareStackFrame( bind(&continuation); // Now allocate the stack space.
Note that this might do more than just - // decrementing the SP; consult {TurboAssembler::AllocateStackSpace}. + // decrementing the SP; consult {MacroAssembler::AllocateStackSpace}. SubS64(sp, sp, Operand(frame_size), r0); // Jump back to the start of the function, from {pc_offset()} to @@ -692,7 +692,7 @@ void LiftoffAssembler::AtomicExchange(Register dst_addr, Register offset_reg, switch (type.value()) { case StoreType::kI32Store8: case StoreType::kI64Store8: { - TurboAssembler::AtomicExchange<uint8_t>(dst, value.gp(), result.gp()); + MacroAssembler::AtomicExchange<uint8_t>(dst, value.gp(), result.gp()); break; } case StoreType::kI32Store16: @@ -702,10 +702,10 @@ void LiftoffAssembler::AtomicExchange(Register dst_addr, Register offset_reg, push(scratch); ByteReverseU16(r0, value.gp(), scratch); pop(scratch); - TurboAssembler::AtomicExchange<uint16_t>(dst, r0, result.gp()); + MacroAssembler::AtomicExchange<uint16_t>(dst, r0, result.gp()); ByteReverseU16(result.gp(), result.gp(), ip); } else { - TurboAssembler::AtomicExchange<uint16_t>(dst, value.gp(), result.gp()); + MacroAssembler::AtomicExchange<uint16_t>(dst, value.gp(), result.gp()); } break; } @@ -716,20 +716,20 @@ void LiftoffAssembler::AtomicExchange(Register dst_addr, Register offset_reg, push(scratch); ByteReverseU32(r0, value.gp(), scratch); pop(scratch); - TurboAssembler::AtomicExchange<uint32_t>(dst, r0, result.gp()); + MacroAssembler::AtomicExchange<uint32_t>(dst, r0, result.gp()); ByteReverseU32(result.gp(), result.gp(), ip); } else { - TurboAssembler::AtomicExchange<uint32_t>(dst, value.gp(), result.gp()); + MacroAssembler::AtomicExchange<uint32_t>(dst, value.gp(), result.gp()); } break; } case StoreType::kI64Store: { if (is_be) { ByteReverseU64(r0, value.gp()); - TurboAssembler::AtomicExchange<uint64_t>(dst, r0, result.gp()); + MacroAssembler::AtomicExchange<uint64_t>(dst, r0, result.gp()); ByteReverseU64(result.gp(), result.gp()); } else { - TurboAssembler::AtomicExchange<uint64_t>(dst, value.gp(), result.gp()); + MacroAssembler::AtomicExchange<uint64_t>(dst, value.gp(), result.gp()); } break; } @@ -760,7 +760,7 @@ void LiftoffAssembler::AtomicCompareExchange( switch (type.value()) { case StoreType::kI32Store8: case StoreType::kI64Store8: { - TurboAssembler::AtomicCompareExchange<uint8_t>( + MacroAssembler::AtomicCompareExchange<uint8_t>( dst, expected.gp(), new_value.gp(), result.gp(), r0); break; } @@ -774,12 +774,12 @@ void LiftoffAssembler::AtomicCompareExchange( ByteReverseU16(new_value.gp(), new_value.gp(), scratch); ByteReverseU16(expected.gp(), expected.gp(), scratch); pop(scratch); - TurboAssembler::AtomicCompareExchange<uint16_t>( + MacroAssembler::AtomicCompareExchange<uint16_t>( dst, expected.gp(), new_value.gp(), result.gp(), r0); ByteReverseU16(result.gp(), result.gp(), r0); Pop(new_value.gp(), expected.gp()); } else { - TurboAssembler::AtomicCompareExchange<uint16_t>( + MacroAssembler::AtomicCompareExchange<uint16_t>( dst, expected.gp(), new_value.gp(), result.gp(), r0); } break; @@ -794,12 +794,12 @@ void LiftoffAssembler::AtomicCompareExchange( ByteReverseU32(new_value.gp(), new_value.gp(), scratch); ByteReverseU32(expected.gp(), expected.gp(), scratch); pop(scratch); - TurboAssembler::AtomicCompareExchange<uint32_t>( + MacroAssembler::AtomicCompareExchange<uint32_t>( dst, expected.gp(), new_value.gp(), result.gp(), r0); ByteReverseU32(result.gp(), result.gp(), r0); Pop(new_value.gp(), expected.gp()); } else { - TurboAssembler::AtomicCompareExchange<uint32_t>( + MacroAssembler::AtomicCompareExchange<uint32_t>( dst, expected.gp(), new_value.gp(), result.gp(), r0); } break; @@ -809,12 +809,12 @@ void LiftoffAssembler::AtomicCompareExchange( Push(new_value.gp(), expected.gp()); ByteReverseU64(new_value.gp(), new_value.gp());
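// Editor's note (not in the original patch): wasm memory is always
// little-endian, so on big-endian PPC both inputs of the compare-exchange
// must be byte-reversed before hitting memory and the loaded result
// reversed back afterwards; otherwise the equality check inside the atomic
// sequence would compare values in the wrong byte order.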
ByteReverseU64(expected.gp(), expected.gp()); - TurboAssembler::AtomicCompareExchange<uint64_t>( + MacroAssembler::AtomicCompareExchange<uint64_t>( dst, expected.gp(), new_value.gp(), result.gp(), r0); ByteReverseU64(result.gp(), result.gp()); Pop(new_value.gp(), expected.gp()); } else { - TurboAssembler::AtomicCompareExchange<uint64_t>( + MacroAssembler::AtomicCompareExchange<uint64_t>( dst, expected.gp(), new_value.gp(), result.gp(), r0); } break; diff --git a/src/wasm/baseline/riscv/liftoff-assembler-riscv.h b/src/wasm/baseline/riscv/liftoff-assembler-riscv.h index 4611e01382..a103fa1865 100644 --- a/src/wasm/baseline/riscv/liftoff-assembler-riscv.h +++ b/src/wasm/baseline/riscv/liftoff-assembler-riscv.h @@ -73,7 +73,7 @@ void LiftoffAssembler::PatchPrepareStackFrame( // We can't run out of space, just pass anything big enough to not cause the // assembler to try to grow the buffer. constexpr int kAvailableSpace = 256; - TurboAssembler patching_assembler( + MacroAssembler patching_assembler( nullptr, AssemblerOptions{}, CodeObjectRequired::kNo, ExternalAssemblerBuffer(buffer_start_ + offset, kAvailableSpace)); @@ -206,21 +206,21 @@ void LiftoffAssembler::SpillInstance(Register instance) { void LiftoffAssembler::ResetOSRTarget() {} void LiftoffAssembler::emit_f32_neg(DoubleRegister dst, DoubleRegister src) { - TurboAssembler::Neg_s(dst, src); + MacroAssembler::Neg_s(dst, src); } void LiftoffAssembler::emit_f64_neg(DoubleRegister dst, DoubleRegister src) { - TurboAssembler::Neg_d(dst, src); + MacroAssembler::Neg_d(dst, src); } void LiftoffAssembler::emit_f32_min(DoubleRegister dst, DoubleRegister lhs, DoubleRegister rhs) { - TurboAssembler::Float32Min(dst, lhs, rhs); + MacroAssembler::Float32Min(dst, lhs, rhs); } void LiftoffAssembler::emit_f32_max(DoubleRegister dst, DoubleRegister lhs, DoubleRegister rhs) { - TurboAssembler::Float32Max(dst, lhs, rhs); + MacroAssembler::Float32Max(dst, lhs, rhs); } void LiftoffAssembler::emit_f32_copysign(DoubleRegister dst, DoubleRegister lhs, @@ -230,12 +230,12 @@ void LiftoffAssembler::emit_f32_copysign(DoubleRegister dst, DoubleRegister lhs, void LiftoffAssembler::emit_f64_min(DoubleRegister dst, DoubleRegister lhs, DoubleRegister rhs) { - TurboAssembler::Float64Min(dst, lhs, rhs); + MacroAssembler::Float64Min(dst, lhs, rhs); } void LiftoffAssembler::emit_f64_max(DoubleRegister dst, DoubleRegister lhs, DoubleRegister rhs) { - TurboAssembler::Float64Max(dst, lhs, rhs); + MacroAssembler::Float64Max(dst, lhs, rhs); } void LiftoffAssembler::emit_f64_copysign(DoubleRegister dst, DoubleRegister lhs, @@ -302,14 +302,14 @@ void LiftoffAssembler::emit_f32_set_cond(Condition cond, Register dst, DoubleRegister lhs, DoubleRegister rhs) { FPUCondition fcond = ConditionToConditionCmpFPU(cond); - TurboAssembler::CompareF32(dst, fcond, lhs, rhs); + MacroAssembler::CompareF32(dst, fcond, lhs, rhs); } void LiftoffAssembler::emit_f64_set_cond(Condition cond, Register dst, DoubleRegister lhs, DoubleRegister rhs) { FPUCondition fcond = ConditionToConditionCmpFPU(cond); - TurboAssembler::CompareF64(dst, fcond, lhs, rhs); + MacroAssembler::CompareF64(dst, fcond, lhs, rhs); } bool LiftoffAssembler::emit_select(LiftoffRegister dst, Register condition, @@ -2070,8 +2070,8 @@ void LiftoffAssembler::emit_f64x2_qfms(LiftoffRegister dst, } void LiftoffAssembler::StackCheck(Label* ool_code, Register limit_address) { - TurboAssembler::LoadWord(limit_address, MemOperand(limit_address)); - TurboAssembler::Branch(ool_code, ule, sp, Operand(limit_address)); + MacroAssembler::LoadWord(limit_address, MemOperand(limit_address))
+ MacroAssembler::Branch(ool_code, ule, sp, Operand(limit_address)); } void LiftoffAssembler::CallTrapCallbackForTesting() { @@ -2104,7 +2104,7 @@ void LiftoffAssembler::PushRegisters(LiftoffRegList regs) { int32_t offset = 0; while (!fp_regs.is_empty()) { LiftoffRegister reg = fp_regs.GetFirstRegSet(); - TurboAssembler::StoreDouble(reg.fp(), MemOperand(sp, offset)); + MacroAssembler::StoreDouble(reg.fp(), MemOperand(sp, offset)); fp_regs.clear(reg); offset += sizeof(double); } @@ -2117,7 +2117,7 @@ void LiftoffAssembler::PopRegisters(LiftoffRegList regs) { int32_t fp_offset = 0; while (!fp_regs.is_empty()) { LiftoffRegister reg = fp_regs.GetFirstRegSet(); - TurboAssembler::LoadDouble(reg.fp(), MemOperand(sp, fp_offset)); + MacroAssembler::LoadDouble(reg.fp(), MemOperand(sp, fp_offset)); fp_regs.clear(reg); fp_offset += sizeof(double); } @@ -2151,7 +2151,7 @@ void LiftoffAssembler::RecordSpillsInSafepoint( } void LiftoffAssembler::DropStackSlotsAndRet(uint32_t num_stack_slots) { - TurboAssembler::DropAndRet(static_cast<int>(num_stack_slots)); + MacroAssembler::DropAndRet(static_cast<int>(num_stack_slots)); } void LiftoffAssembler::CallNativeWasmCode(Address addr) { @@ -2190,7 +2190,7 @@ void LiftoffAssembler::CallRuntimeStub(WasmCode::RuntimeStubId sid) { void LiftoffAssembler::AllocateStackSlot(Register addr, uint32_t size) { AddWord(sp, sp, Operand(-size)); - TurboAssembler::Move(addr, sp); + MacroAssembler::Move(addr, sp); } void LiftoffAssembler::DeallocateStackSlot(uint32_t size) { diff --git a/src/wasm/baseline/riscv/liftoff-assembler-riscv32.h b/src/wasm/baseline/riscv/liftoff-assembler-riscv32.h index 8b44f8e962..63def4f714 100644 --- a/src/wasm/baseline/riscv/liftoff-assembler-riscv32.h +++ b/src/wasm/baseline/riscv/liftoff-assembler-riscv32.h @@ -178,22 +178,22 @@ void LiftoffAssembler::LoadConstant(LiftoffRegister reg, WasmValue value, RelocInfo::Mode rmode) { switch (value.type().kind()) { case kI32: - TurboAssembler::li(reg.gp(), Operand(value.to_i32(), rmode)); + MacroAssembler::li(reg.gp(), Operand(value.to_i32(), rmode)); break; case kI64: { DCHECK(RelocInfo::IsNoInfo(rmode)); int32_t low_word = value.to_i64(); int32_t high_word = value.to_i64() >> 32; - TurboAssembler::li(reg.low_gp(), Operand(low_word)); - TurboAssembler::li(reg.high_gp(), Operand(high_word)); + MacroAssembler::li(reg.low_gp(), Operand(low_word)); + MacroAssembler::li(reg.high_gp(), Operand(high_word)); break; } case kF32: - TurboAssembler::LoadFPRImmediate(reg.fp(), + MacroAssembler::LoadFPRImmediate(reg.fp(), value.to_f32_boxed().get_bits()); break; case kF64: - TurboAssembler::LoadFPRImmediate(reg.fp(), + MacroAssembler::LoadFPRImmediate(reg.fp(), value.to_f64_boxed().get_bits()); break; default: @@ -262,39 +262,39 @@ void LiftoffAssembler::Load(LiftoffRegister dst, Register src_addr, break; case LoadType::kI64Load8U: Lbu(dst.low_gp(), src_op); - TurboAssembler::mv(dst.high_gp(), zero_reg); + MacroAssembler::mv(dst.high_gp(), zero_reg); break; case LoadType::kI32Load8S: Lb(dst.gp(), src_op); break; case LoadType::kI64Load8S: Lb(dst.low_gp(), src_op); - TurboAssembler::srai(dst.high_gp(), dst.low_gp(), 31); + MacroAssembler::srai(dst.high_gp(), dst.low_gp(), 31); break; case LoadType::kI32Load16U: - TurboAssembler::Lhu(dst.gp(), src_op); + MacroAssembler::Lhu(dst.gp(), src_op); break; case LoadType::kI64Load16U: - TurboAssembler::Lhu(dst.low_gp(), src_op); - TurboAssembler::mv(dst.high_gp(), zero_reg); + MacroAssembler::Lhu(dst.low_gp(), src_op); + MacroAssembler::mv(dst.high_gp(), zero_reg); break; case
LoadType::kI32Load16S: - TurboAssembler::Lh(dst.gp(), src_op); + MacroAssembler::Lh(dst.gp(), src_op); break; case LoadType::kI64Load16S: - TurboAssembler::Lh(dst.low_gp(), src_op); - TurboAssembler::srai(dst.high_gp(), dst.low_gp(), 31); + MacroAssembler::Lh(dst.low_gp(), src_op); + MacroAssembler::srai(dst.high_gp(), dst.low_gp(), 31); break; case LoadType::kI64Load32U: - TurboAssembler::Lw(dst.low_gp(), src_op); - TurboAssembler::mv(dst.high_gp(), zero_reg); + MacroAssembler::Lw(dst.low_gp(), src_op); + MacroAssembler::mv(dst.high_gp(), zero_reg); break; case LoadType::kI64Load32S: - TurboAssembler::Lw(dst.low_gp(), src_op); - TurboAssembler::srai(dst.high_gp(), dst.low_gp(), 31); + MacroAssembler::Lw(dst.low_gp(), src_op); + MacroAssembler::srai(dst.high_gp(), dst.low_gp(), 31); break; case LoadType::kI32Load: - TurboAssembler::Lw(dst.gp(), src_op); + MacroAssembler::Lw(dst.gp(), src_op); break; case LoadType::kI64Load: { Lw(dst.low_gp(), src_op); @@ -303,16 +303,16 @@ void LiftoffAssembler::Load(LiftoffRegister dst, Register src_addr, Lw(dst.high_gp(), src_op); } break; case LoadType::kF32Load: - TurboAssembler::LoadFloat(dst.fp(), src_op); + MacroAssembler::LoadFloat(dst.fp(), src_op); break; case LoadType::kF64Load: - TurboAssembler::LoadDouble(dst.fp(), src_op); + MacroAssembler::LoadDouble(dst.fp(), src_op); break; case LoadType::kS128Load: { VU.set(kScratchReg, E8, m1); Register src_reg = src_op.offset() == 0 ? src_op.rm() : kScratchReg; if (src_op.offset() != 0) { - TurboAssembler::AddWord(src_reg, src_op.rm(), src_op.offset()); + MacroAssembler::AddWord(src_reg, src_op.rm(), src_op.offset()); } vl(dst.fp().toV(), src_reg, 0, E8); break; @@ -362,29 +362,29 @@ void LiftoffAssembler::Store(Register dst_addr, Register offset_reg, Sb(src.low_gp(), dst_op); break; case StoreType::kI32Store16: - TurboAssembler::Sh(src.gp(), dst_op); + MacroAssembler::Sh(src.gp(), dst_op); break; case StoreType::kI64Store16: - TurboAssembler::Sh(src.low_gp(), dst_op); + MacroAssembler::Sh(src.low_gp(), dst_op); break; case StoreType::kI32Store: - TurboAssembler::Sw(src.gp(), dst_op); + MacroAssembler::Sw(src.gp(), dst_op); break; case StoreType::kI64Store32: - TurboAssembler::Sw(src.low_gp(), dst_op); + MacroAssembler::Sw(src.low_gp(), dst_op); break; case StoreType::kI64Store: { - TurboAssembler::Sw(src.low_gp(), dst_op); + MacroAssembler::Sw(src.low_gp(), dst_op); dst_op = liftoff::GetMemOp(this, dst_addr, offset_reg, offset_imm + kSystemPointerSize, scratch); - TurboAssembler::Sw(src.high_gp(), dst_op); + MacroAssembler::Sw(src.high_gp(), dst_op); break; } case StoreType::kF32Store: - TurboAssembler::StoreFloat(src.fp(), dst_op); + MacroAssembler::StoreFloat(src.fp(), dst_op); break; case StoreType::kF64Store: - TurboAssembler::StoreDouble(src.fp(), dst_op); + MacroAssembler::StoreDouble(src.fp(), dst_op); break; case StoreType::kS128Store: { VU.set(kScratchReg, E8, m1); @@ -926,14 +926,14 @@ void LiftoffAssembler::MoveStackValue(uint32_t dst_offset, uint32_t src_offset, StoreFloat(kScratchDoubleReg, dst); break; case kF64: - TurboAssembler::LoadDouble(kScratchDoubleReg, src); - TurboAssembler::StoreDouble(kScratchDoubleReg, dst); + MacroAssembler::LoadDouble(kScratchDoubleReg, src); + MacroAssembler::StoreDouble(kScratchDoubleReg, dst); break; case kS128: { VU.set(kScratchReg, E8, m1); Register src_reg = src.offset() == 0 ? 
src.rm() : kScratchReg; if (src.offset() != 0) { - TurboAssembler::AddWord(src_reg, src.rm(), src.offset()); + MacroAssembler::AddWord(src_reg, src.rm(), src.offset()); } vl(kSimd128ScratchReg, src_reg, 0, E8); Register dst_reg = dst.offset() == 0 ? dst.rm() : kScratchReg; @@ -951,16 +951,16 @@ void LiftoffAssembler::MoveStackValue(uint32_t dst_offset, uint32_t src_offset, void LiftoffAssembler::Move(Register dst, Register src, ValueKind kind) { DCHECK_NE(dst, src); // TODO(ksreten): Handle different sizes here. - TurboAssembler::Move(dst, src); + MacroAssembler::Move(dst, src); } void LiftoffAssembler::Move(DoubleRegister dst, DoubleRegister src, ValueKind kind) { DCHECK_NE(dst, src); if (kind != kS128) { - TurboAssembler::Move(dst, src); + MacroAssembler::Move(dst, src); } else { - TurboAssembler::vmv_vv(dst.toV(), src.toV()); + MacroAssembler::vmv_vv(dst.toV(), src.toV()); } } @@ -982,7 +982,7 @@ void LiftoffAssembler::Spill(int offset, LiftoffRegister reg, ValueKind kind) { StoreFloat(reg.fp(), dst); break; case kF64: - TurboAssembler::StoreDouble(reg.fp(), dst); + MacroAssembler::StoreDouble(reg.fp(), dst); break; case kS128: { VU.set(kScratchReg, E8, m1); @@ -1006,7 +1006,7 @@ void LiftoffAssembler::Spill(int offset, WasmValue value) { case kRef: case kRefNull: { LiftoffRegister tmp = GetUnusedRegister(kGpReg, {}); - TurboAssembler::li(tmp.gp(), Operand(value.to_i32())); + MacroAssembler::li(tmp.gp(), Operand(value.to_i32())); Sw(tmp.gp(), dst); break; } @@ -1015,8 +1015,8 @@ void LiftoffAssembler::Spill(int offset, WasmValue value) { int32_t low_word = value.to_i64(); int32_t high_word = value.to_i64() >> 32; - TurboAssembler::li(tmp.low_gp(), Operand(low_word)); - TurboAssembler::li(tmp.high_gp(), Operand(high_word)); + MacroAssembler::li(tmp.low_gp(), Operand(low_word)); + MacroAssembler::li(tmp.high_gp(), Operand(high_word)); Sw(tmp.low_gp(), liftoff::GetHalfStackSlot(offset, kLowWord)); Sw(tmp.high_gp(), liftoff::GetHalfStackSlot(offset, kHighWord)); @@ -1046,13 +1046,13 @@ void LiftoffAssembler::Fill(LiftoffRegister reg, int offset, ValueKind kind) { LoadFloat(reg.fp(), src); break; case kF64: - TurboAssembler::LoadDouble(reg.fp(), src); + MacroAssembler::LoadDouble(reg.fp(), src); break; case kS128: { VU.set(kScratchReg, E8, m1); Register src_reg = src.offset() == 0 ? src.rm() : kScratchReg; if (src.offset() != 0) { - TurboAssembler::AddWord(src_reg, src.rm(), src.offset()); + MacroAssembler::AddWord(src_reg, src.rm(), src.offset()); } vl(reg.fp().toV(), src_reg, 0, E8); break; @@ -1140,8 +1140,8 @@ bool LiftoffAssembler::emit_i64_popcnt(LiftoffRegister dst, // Produce partial popcnts in the two dst registers. Register src1 = src.high_gp() == dst.low_gp() ? src.high_gp() : src.low_gp(); Register src2 = src.high_gp() == dst.low_gp() ? src.low_gp() : src.high_gp(); - TurboAssembler::Popcnt32(dst.low_gp(), src1, kScratchReg); - TurboAssembler::Popcnt32(dst.high_gp(), src2, kScratchReg); + MacroAssembler::Popcnt32(dst.low_gp(), src1, kScratchReg); + MacroAssembler::Popcnt32(dst.high_gp(), src2, kScratchReg); // Now add the two into the lower dst reg and clear the higher dst reg.
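// Editor's note (not in the original patch): popcnt of an i64 on a 32-bit
// target is just popcnt(low half) + popcnt(high half), and the sum always
// fits in the low register since it is at most 64. The src1/src2 shuffle
// above exists so that when src.high aliases dst.low, the first partial
// popcnt does not clobber an input that is still needed.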
AddWord(dst.low_gp(), dst.low_gp(), dst.high_gp()); mv(dst.high_gp(), zero_reg); @@ -1149,40 +1149,40 @@ bool LiftoffAssembler::emit_i64_popcnt(LiftoffRegister dst, } void LiftoffAssembler::emit_i32_mul(Register dst, Register lhs, Register rhs) { - TurboAssembler::Mul(dst, lhs, rhs); + MacroAssembler::Mul(dst, lhs, rhs); } void LiftoffAssembler::emit_i32_divs(Register dst, Register lhs, Register rhs, Label* trap_div_by_zero, Label* trap_div_unrepresentable) { - TurboAssembler::Branch(trap_div_by_zero, eq, rhs, Operand(zero_reg)); + MacroAssembler::Branch(trap_div_by_zero, eq, rhs, Operand(zero_reg)); // Check if lhs == kMinInt and rhs == -1, since this case is unrepresentable. - TurboAssembler::CompareI(kScratchReg, lhs, Operand(kMinInt), ne); - TurboAssembler::CompareI(kScratchReg2, rhs, Operand(-1), ne); + MacroAssembler::CompareI(kScratchReg, lhs, Operand(kMinInt), ne); + MacroAssembler::CompareI(kScratchReg2, rhs, Operand(-1), ne); add(kScratchReg, kScratchReg, kScratchReg2); - TurboAssembler::Branch(trap_div_unrepresentable, eq, kScratchReg, + MacroAssembler::Branch(trap_div_unrepresentable, eq, kScratchReg, Operand(zero_reg)); - TurboAssembler::Div(dst, lhs, rhs); + MacroAssembler::Div(dst, lhs, rhs); } void LiftoffAssembler::emit_i32_divu(Register dst, Register lhs, Register rhs, Label* trap_div_by_zero) { - TurboAssembler::Branch(trap_div_by_zero, eq, rhs, Operand(zero_reg)); - TurboAssembler::Divu(dst, lhs, rhs); + MacroAssembler::Branch(trap_div_by_zero, eq, rhs, Operand(zero_reg)); + MacroAssembler::Divu(dst, lhs, rhs); } void LiftoffAssembler::emit_i32_rems(Register dst, Register lhs, Register rhs, Label* trap_div_by_zero) { - TurboAssembler::Branch(trap_div_by_zero, eq, rhs, Operand(zero_reg)); - TurboAssembler::Mod(dst, lhs, rhs); + MacroAssembler::Branch(trap_div_by_zero, eq, rhs, Operand(zero_reg)); + MacroAssembler::Mod(dst, lhs, rhs); } void LiftoffAssembler::emit_i32_remu(Register dst, Register lhs, Register rhs, Label* trap_div_by_zero) { - TurboAssembler::Branch(trap_div_by_zero, eq, rhs, Operand(zero_reg)); - TurboAssembler::Modu(dst, lhs, rhs); + MacroAssembler::Branch(trap_div_by_zero, eq, rhs, Operand(zero_reg)); + MacroAssembler::Modu(dst, lhs, rhs); } #define I32_BINOP(name, instruction) \ @@ -1218,15 +1218,15 @@ I32_BINOP_I(xor, Xor) #undef I32_BINOP_I void LiftoffAssembler::emit_i32_clz(Register dst, Register src) { - TurboAssembler::Clz32(dst, src); + MacroAssembler::Clz32(dst, src); } void LiftoffAssembler::emit_i32_ctz(Register dst, Register src) { - TurboAssembler::Ctz32(dst, src); + MacroAssembler::Ctz32(dst, src); } bool LiftoffAssembler::emit_i32_popcnt(Register dst, Register src) { - TurboAssembler::Popcnt32(dst, src, kScratchReg); + MacroAssembler::Popcnt32(dst, src, kScratchReg); return true; } @@ -1254,7 +1254,7 @@ I32_SHIFTOP_I(shr, srli) void LiftoffAssembler::emit_i64_mul(LiftoffRegister dst, LiftoffRegister lhs, LiftoffRegister rhs) { - TurboAssembler::MulPair(dst.low_gp(), dst.high_gp(), lhs.low_gp(), + MacroAssembler::MulPair(dst.low_gp(), dst.high_gp(), lhs.low_gp(), lhs.high_gp(), rhs.low_gp(), rhs.high_gp(), kScratchReg, kScratchReg2); } @@ -1294,7 +1294,7 @@ inline bool IsRegInRegPair(LiftoffRegister pair, Register reg) { inline void Emit64BitShiftOperation( LiftoffAssembler* assm, LiftoffRegister dst, LiftoffRegister src, Register amount, - void (TurboAssembler::*emit_shift)(Register, Register, Register, Register, + void (MacroAssembler::*emit_shift)(Register, Register, Register, Register, Register, Register, Register)) { LiftoffRegList 
pinned{dst, src, amount}; @@ -1313,8 +1313,8 @@ inline void Emit64BitShiftOperation( kScratchReg2); // Place result in destination register. - assm->TurboAssembler::Move(dst.high_gp(), tmp.high_gp()); - assm->TurboAssembler::Move(dst.low_gp(), tmp.low_gp()); + assm->MacroAssembler::Move(dst.high_gp(), tmp.high_gp()); + assm->MacroAssembler::Move(dst.low_gp(), tmp.low_gp()); } else { (assm->*emit_shift)(dst.low_gp(), dst.high_gp(), src.low_gp(), src.high_gp(), amount_capped, kScratchReg, @@ -1325,7 +1325,7 @@ inline void Emit64BitShiftOperation( void LiftoffAssembler::emit_i64_add(LiftoffRegister dst, LiftoffRegister lhs, LiftoffRegister rhs) { - TurboAssembler::AddPair(dst.low_gp(), dst.high_gp(), lhs.low_gp(), + MacroAssembler::AddPair(dst.low_gp(), dst.high_gp(), lhs.low_gp(), lhs.high_gp(), rhs.low_gp(), rhs.high_gp(), kScratchReg, kScratchReg2); } @@ -1339,16 +1339,16 @@ void LiftoffAssembler::emit_i64_addi(LiftoffRegister dst, LiftoffRegister lhs, // TODO(riscv32): are there some optimization we can make without // materializing? - TurboAssembler::li(imm_reg.low_gp(), imm_low_word); - TurboAssembler::li(imm_reg.high_gp(), imm_high_word); - TurboAssembler::AddPair(dst.low_gp(), dst.high_gp(), lhs.low_gp(), + MacroAssembler::li(imm_reg.low_gp(), imm_low_word); + MacroAssembler::li(imm_reg.high_gp(), imm_high_word); + MacroAssembler::AddPair(dst.low_gp(), dst.high_gp(), lhs.low_gp(), lhs.high_gp(), imm_reg.low_gp(), imm_reg.high_gp(), kScratchReg, kScratchReg2); } void LiftoffAssembler::emit_i64_sub(LiftoffRegister dst, LiftoffRegister lhs, LiftoffRegister rhs) { - TurboAssembler::SubPair(dst.low_gp(), dst.high_gp(), lhs.low_gp(), + MacroAssembler::SubPair(dst.low_gp(), dst.high_gp(), lhs.low_gp(), lhs.high_gp(), rhs.low_gp(), rhs.high_gp(), kScratchReg, kScratchReg2); } @@ -1357,7 +1357,7 @@ void LiftoffAssembler::emit_i64_shl(LiftoffRegister dst, LiftoffRegister src, Register amount) { ASM_CODE_COMMENT(this); liftoff::Emit64BitShiftOperation(this, dst, src, amount, - &TurboAssembler::ShlPair); + &MacroAssembler::ShlPair); } void LiftoffAssembler::emit_i64_shli(LiftoffRegister dst, LiftoffRegister src, @@ -1374,14 +1374,14 @@ void LiftoffAssembler::emit_i64_shli(LiftoffRegister dst, LiftoffRegister src, DCHECK_NE(dst.low_gp(), kScratchReg); DCHECK_NE(dst.high_gp(), kScratchReg); - TurboAssembler::ShlPair(dst.low_gp(), dst.high_gp(), src_low, src_high, + MacroAssembler::ShlPair(dst.low_gp(), dst.high_gp(), src_low, src_high, amount, kScratchReg, kScratchReg2); } void LiftoffAssembler::emit_i64_sar(LiftoffRegister dst, LiftoffRegister src, Register amount) { liftoff::Emit64BitShiftOperation(this, dst, src, amount, - &TurboAssembler::SarPair); + &MacroAssembler::SarPair); } void LiftoffAssembler::emit_i64_sari(LiftoffRegister dst, LiftoffRegister src, @@ -1397,14 +1397,14 @@ void LiftoffAssembler::emit_i64_sari(LiftoffRegister dst, LiftoffRegister src, DCHECK_NE(dst.low_gp(), kScratchReg); DCHECK_NE(dst.high_gp(), kScratchReg); - TurboAssembler::SarPair(dst.low_gp(), dst.high_gp(), src_low, src_high, + MacroAssembler::SarPair(dst.low_gp(), dst.high_gp(), src_low, src_high, amount, kScratchReg, kScratchReg2); } void LiftoffAssembler::emit_i64_shr(LiftoffRegister dst, LiftoffRegister src, Register amount) { liftoff::Emit64BitShiftOperation(this, dst, src, amount, - &TurboAssembler::ShrPair); + &MacroAssembler::ShrPair); } void LiftoffAssembler::emit_i64_shri(LiftoffRegister dst, LiftoffRegister src, @@ -1420,7 +1420,7 @@ void LiftoffAssembler::emit_i64_shri(LiftoffRegister dst, 
LiftoffRegister src, DCHECK_NE(dst.low_gp(), kScratchReg); DCHECK_NE(dst.high_gp(), kScratchReg); - TurboAssembler::ShrPair(dst.low_gp(), dst.high_gp(), src_low, src_high, + MacroAssembler::ShrPair(dst.low_gp(), dst.high_gp(), src_low, src_high, amount, kScratchReg, kScratchReg2); } @@ -1441,7 +1441,7 @@ bool LiftoffAssembler::emit_type_conversion(WasmOpcode opcode, LiftoffRegister src, Label* trap) { switch (opcode) { case kExprI32ConvertI64: - TurboAssembler::Move(dst.gp(), src.low_gp()); + MacroAssembler::Move(dst.gp(), src.low_gp()); return true; case kExprI32SConvertF32: case kExprI32UConvertF32: @@ -1481,22 +1481,22 @@ bool LiftoffAssembler::emit_type_conversion(WasmOpcode opcode, // Checking if trap. if (trap != nullptr) { - TurboAssembler::Branch(trap, eq, kScratchReg, Operand(zero_reg)); + MacroAssembler::Branch(trap, eq, kScratchReg, Operand(zero_reg)); } return true; } case kExprI32ReinterpretF32: - TurboAssembler::ExtractLowWordFromF64(dst.gp(), src.fp()); + MacroAssembler::ExtractLowWordFromF64(dst.gp(), src.fp()); return true; case kExprI64SConvertI32: - TurboAssembler::Move(dst.low_gp(), src.gp()); - TurboAssembler::Move(dst.high_gp(), src.gp()); + MacroAssembler::Move(dst.low_gp(), src.gp()); + MacroAssembler::Move(dst.high_gp(), src.gp()); srai(dst.high_gp(), dst.high_gp(), 31); return true; case kExprI64UConvertI32: - TurboAssembler::Move(dst.low_gp(), src.gp()); - TurboAssembler::Move(dst.high_gp(), zero_reg); + MacroAssembler::Move(dst.low_gp(), src.gp()); + MacroAssembler::Move(dst.high_gp(), zero_reg); return true; case kExprI64ReinterpretF64: SubWord(sp, sp, kDoubleSize); @@ -1506,21 +1506,21 @@ bool LiftoffAssembler::emit_type_conversion(WasmOpcode opcode, AddWord(sp, sp, kDoubleSize); return true; case kExprF32SConvertI32: { - TurboAssembler::Cvt_s_w(dst.fp(), src.gp()); + MacroAssembler::Cvt_s_w(dst.fp(), src.gp()); return true; } case kExprF32UConvertI32: - TurboAssembler::Cvt_s_uw(dst.fp(), src.gp()); + MacroAssembler::Cvt_s_uw(dst.fp(), src.gp()); return true; case kExprF32ReinterpretI32: fmv_w_x(dst.fp(), src.gp()); return true; case kExprF64SConvertI32: { - TurboAssembler::Cvt_d_w(dst.fp(), src.gp()); + MacroAssembler::Cvt_d_w(dst.fp(), src.gp()); return true; } case kExprF64UConvertI32: - TurboAssembler::Cvt_d_uw(dst.fp(), src.gp()); + MacroAssembler::Cvt_d_uw(dst.fp(), src.gp()); return true; case kExprF64ConvertF32: fcvt_d_s(dst.fp(), src.fp()); @@ -1591,11 +1591,11 @@ void LiftoffAssembler::emit_i64_signextend_i32(LiftoffRegister dst, } void LiftoffAssembler::emit_jump(Label* label) { - TurboAssembler::Branch(label); + MacroAssembler::Branch(label); } void LiftoffAssembler::emit_jump(Register target) { - TurboAssembler::Jump(target); + MacroAssembler::Jump(target); } void LiftoffAssembler::emit_cond_jump(Condition cond, Label* label, @@ -1604,34 +1604,34 @@ void LiftoffAssembler::emit_cond_jump(Condition cond, Label* label, const FreezeCacheState& frozen) { if (rhs == no_reg) { DCHECK(kind == kI32); - TurboAssembler::Branch(label, cond, lhs, Operand(zero_reg)); + MacroAssembler::Branch(label, cond, lhs, Operand(zero_reg)); } else { DCHECK((kind == kI32) || (is_reference(kind) && (cond == kEqual || cond == kNotEqual))); - TurboAssembler::Branch(label, cond, lhs, Operand(rhs)); + MacroAssembler::Branch(label, cond, lhs, Operand(rhs)); } } void LiftoffAssembler::emit_i32_cond_jumpi(Condition cond, Label* label, Register lhs, int32_t imm, const FreezeCacheState& frozen) { - TurboAssembler::Branch(label, cond, lhs, Operand(imm)); + 
MacroAssembler::Branch(label, cond, lhs, Operand(imm)); } void LiftoffAssembler::emit_i32_subi_jump_negative( Register value, int subtrahend, Label* result_negative, const FreezeCacheState& frozen) { SubWord(value, value, Operand(subtrahend)); - TurboAssembler::Branch(result_negative, lt, value, Operand(zero_reg)); + MacroAssembler::Branch(result_negative, lt, value, Operand(zero_reg)); } void LiftoffAssembler::emit_i32_eqz(Register dst, Register src) { - TurboAssembler::Sltu(dst, src, 1); + MacroAssembler::Sltu(dst, src, 1); } void LiftoffAssembler::emit_i32_set_cond(Condition cond, Register dst, Register lhs, Register rhs) { - TurboAssembler::CompareI(dst, lhs, Operand(rhs), cond); + MacroAssembler::CompareI(dst, lhs, Operand(rhs), cond); } void LiftoffAssembler::emit_i64_eqz(Register dst, LiftoffRegister src) { @@ -1675,7 +1675,7 @@ void LiftoffAssembler::emit_i64_set_cond(Condition cond, Register dst, } // Write 1 initially in tmp register. - TurboAssembler::li(tmp, 1); + MacroAssembler::li(tmp, 1); // If high words are equal, then compare low words, else compare high. Branch(&low, eq, lhs.high_gp(), Operand(rhs.high_gp())); @@ -1701,7 +1701,7 @@ void LiftoffAssembler::emit_i64_set_cond(Condition cond, Register dst, } bind(&cont); // Move result to dst register if needed. - TurboAssembler::Move(dst, tmp); + MacroAssembler::Move(dst, tmp); } void LiftoffAssembler::IncrementSmi(LiftoffRegister dst, int offset) { diff --git a/src/wasm/baseline/riscv/liftoff-assembler-riscv64.h b/src/wasm/baseline/riscv/liftoff-assembler-riscv64.h index c5cdebcbee..afdc3e6a1c 100644 --- a/src/wasm/baseline/riscv/liftoff-assembler-riscv64.h +++ b/src/wasm/baseline/riscv/liftoff-assembler-riscv64.h @@ -153,17 +153,17 @@ void LiftoffAssembler::LoadConstant(LiftoffRegister reg, WasmValue value, RelocInfo::Mode rmode) { switch (value.type().kind()) { case kI32: - TurboAssembler::li(reg.gp(), Operand(value.to_i32(), rmode)); + MacroAssembler::li(reg.gp(), Operand(value.to_i32(), rmode)); break; case kI64: - TurboAssembler::li(reg.gp(), Operand(value.to_i64(), rmode)); + MacroAssembler::li(reg.gp(), Operand(value.to_i64(), rmode)); break; case kF32: - TurboAssembler::LoadFPRImmediate(reg.fp(), + MacroAssembler::LoadFPRImmediate(reg.fp(), value.to_f32_boxed().get_bits()); break; case kF64: - TurboAssembler::LoadFPRImmediate(reg.fp(), + MacroAssembler::LoadFPRImmediate(reg.fp(), value.to_f64_boxed().get_bits()); break; default: @@ -237,33 +237,33 @@ void LiftoffAssembler::Load(LiftoffRegister dst, Register src_addr, break; case LoadType::kI32Load16U: case LoadType::kI64Load16U: - TurboAssembler::Lhu(dst.gp(), src_op); + MacroAssembler::Lhu(dst.gp(), src_op); break; case LoadType::kI32Load16S: case LoadType::kI64Load16S: - TurboAssembler::Lh(dst.gp(), src_op); + MacroAssembler::Lh(dst.gp(), src_op); break; case LoadType::kI64Load32U: - TurboAssembler::Lwu(dst.gp(), src_op); + MacroAssembler::Lwu(dst.gp(), src_op); break; case LoadType::kI32Load: case LoadType::kI64Load32S: - TurboAssembler::Lw(dst.gp(), src_op); + MacroAssembler::Lw(dst.gp(), src_op); break; case LoadType::kI64Load: - TurboAssembler::Ld(dst.gp(), src_op); + MacroAssembler::Ld(dst.gp(), src_op); break; case LoadType::kF32Load: - TurboAssembler::LoadFloat(dst.fp(), src_op); + MacroAssembler::LoadFloat(dst.fp(), src_op); break; case LoadType::kF64Load: - TurboAssembler::LoadDouble(dst.fp(), src_op); + MacroAssembler::LoadDouble(dst.fp(), src_op); break; case LoadType::kS128Load: { VU.set(kScratchReg, E8, m1); Register src_reg = src_op.offset() == 0 ? 
src_op.rm() : kScratchReg; if (src_op.offset() != 0) { - TurboAssembler::AddWord(src_reg, src_op.rm(), src_op.offset()); + MacroAssembler::AddWord(src_reg, src_op.rm(), src_op.offset()); } vl(dst.fp().toV(), src_reg, 0, E8); break; @@ -310,20 +310,20 @@ void LiftoffAssembler::Store(Register dst_addr, Register offset_reg, break; case StoreType::kI32Store16: case StoreType::kI64Store16: - TurboAssembler::Sh(src.gp(), dst_op); + MacroAssembler::Sh(src.gp(), dst_op); break; case StoreType::kI32Store: case StoreType::kI64Store32: - TurboAssembler::Sw(src.gp(), dst_op); + MacroAssembler::Sw(src.gp(), dst_op); break; case StoreType::kI64Store: - TurboAssembler::Sd(src.gp(), dst_op); + MacroAssembler::Sd(src.gp(), dst_op); break; case StoreType::kF32Store: - TurboAssembler::StoreFloat(src.fp(), dst_op); + MacroAssembler::StoreFloat(src.fp(), dst_op); break; case StoreType::kF64Store: - TurboAssembler::StoreDouble(src.fp(), dst_op); + MacroAssembler::StoreDouble(src.fp(), dst_op); break; case StoreType::kS128Store: { VU.set(kScratchReg, E8, m1); @@ -692,14 +692,14 @@ void LiftoffAssembler::MoveStackValue(uint32_t dst_offset, uint32_t src_offset, StoreFloat(kScratchDoubleReg, dst); break; case kF64: - TurboAssembler::LoadDouble(kScratchDoubleReg, src); - TurboAssembler::StoreDouble(kScratchDoubleReg, dst); + MacroAssembler::LoadDouble(kScratchDoubleReg, src); + MacroAssembler::StoreDouble(kScratchDoubleReg, dst); break; case kS128: { VU.set(kScratchReg, E8, m1); Register src_reg = src.offset() == 0 ? src.rm() : kScratchReg; if (src.offset() != 0) { - TurboAssembler::Add64(src_reg, src.rm(), src.offset()); + MacroAssembler::Add64(src_reg, src.rm(), src.offset()); } vl(kSimd128ScratchReg, src_reg, 0, E8); Register dst_reg = dst.offset() == 0 ? dst.rm() : kScratchReg; @@ -720,16 +720,16 @@ void LiftoffAssembler::MoveStackValue(uint32_t dst_offset, uint32_t src_offset, void LiftoffAssembler::Move(Register dst, Register src, ValueKind kind) { DCHECK_NE(dst, src); // TODO(ksreten): Handle different sizes here. 
- TurboAssembler::Move(dst, src); + MacroAssembler::Move(dst, src); } void LiftoffAssembler::Move(DoubleRegister dst, DoubleRegister src, ValueKind kind) { DCHECK_NE(dst, src); if (kind != kS128) { - TurboAssembler::Move(dst, src); + MacroAssembler::Move(dst, src); } else { - TurboAssembler::vmv_vv(dst.toV(), src.toV()); + MacroAssembler::vmv_vv(dst.toV(), src.toV()); } } @@ -750,7 +750,7 @@ void LiftoffAssembler::Spill(int offset, LiftoffRegister reg, ValueKind kind) { StoreFloat(reg.fp(), dst); break; case kF64: - TurboAssembler::StoreDouble(reg.fp(), dst); + MacroAssembler::StoreDouble(reg.fp(), dst); break; case kS128: { VU.set(kScratchReg, E8, m1); @@ -773,7 +773,7 @@ void LiftoffAssembler::Spill(int offset, WasmValue value) { case kI32: { UseScratchRegisterScope temps(this); Register tmp = temps.Acquire(); - TurboAssembler::li(tmp, Operand(value.to_i32())); + MacroAssembler::li(tmp, Operand(value.to_i32())); Sw(tmp, dst); break; } @@ -782,7 +782,7 @@ void LiftoffAssembler::Spill(int offset, WasmValue value) { case kRefNull: { UseScratchRegisterScope temps(this); Register tmp = temps.Acquire(); - TurboAssembler::li(tmp, value.to_i64()); + MacroAssembler::li(tmp, value.to_i64()); Sd(tmp, dst); break; } @@ -808,13 +808,13 @@ void LiftoffAssembler::Fill(LiftoffRegister reg, int offset, ValueKind kind) { LoadFloat(reg.fp(), src); break; case kF64: - TurboAssembler::LoadDouble(reg.fp(), src); + MacroAssembler::LoadDouble(reg.fp(), src); break; case kS128: { VU.set(kScratchReg, E8, m1); Register src_reg = src.offset() == 0 ? src.rm() : kScratchReg; if (src.offset() != 0) { - TurboAssembler::Add64(src_reg, src.rm(), src.offset()); + MacroAssembler::Add64(src_reg, src.rm(), src.offset()); } vl(reg.fp().toV(), src_reg, 0, E8); break; @@ -861,54 +861,54 @@ void LiftoffAssembler::FillStackSlotsWithZero(int start, int size) { } void LiftoffAssembler::emit_i64_clz(LiftoffRegister dst, LiftoffRegister src) { - TurboAssembler::Clz64(dst.gp(), src.gp()); + MacroAssembler::Clz64(dst.gp(), src.gp()); } void LiftoffAssembler::emit_i64_ctz(LiftoffRegister dst, LiftoffRegister src) { - TurboAssembler::Ctz64(dst.gp(), src.gp()); + MacroAssembler::Ctz64(dst.gp(), src.gp()); } bool LiftoffAssembler::emit_i64_popcnt(LiftoffRegister dst, LiftoffRegister src) { - TurboAssembler::Popcnt64(dst.gp(), src.gp(), kScratchReg); + MacroAssembler::Popcnt64(dst.gp(), src.gp(), kScratchReg); return true; } void LiftoffAssembler::emit_i32_mul(Register dst, Register lhs, Register rhs) { - TurboAssembler::Mul32(dst, lhs, rhs); + MacroAssembler::Mul32(dst, lhs, rhs); } void LiftoffAssembler::emit_i32_divs(Register dst, Register lhs, Register rhs, Label* trap_div_by_zero, Label* trap_div_unrepresentable) { - TurboAssembler::Branch(trap_div_by_zero, eq, rhs, Operand(zero_reg)); + MacroAssembler::Branch(trap_div_by_zero, eq, rhs, Operand(zero_reg)); // Check if lhs == kMinInt and rhs == -1, since this case is unrepresentable. 
- TurboAssembler::CompareI(kScratchReg, lhs, Operand(kMinInt), ne); - TurboAssembler::CompareI(kScratchReg2, rhs, Operand(-1), ne); + MacroAssembler::CompareI(kScratchReg, lhs, Operand(kMinInt), ne); + MacroAssembler::CompareI(kScratchReg2, rhs, Operand(-1), ne); add(kScratchReg, kScratchReg, kScratchReg2); - TurboAssembler::Branch(trap_div_unrepresentable, eq, kScratchReg, + MacroAssembler::Branch(trap_div_unrepresentable, eq, kScratchReg, Operand(zero_reg)); - TurboAssembler::Div32(dst, lhs, rhs); + MacroAssembler::Div32(dst, lhs, rhs); } void LiftoffAssembler::emit_i32_divu(Register dst, Register lhs, Register rhs, Label* trap_div_by_zero) { - TurboAssembler::Branch(trap_div_by_zero, eq, rhs, Operand(zero_reg)); - TurboAssembler::Divu32(dst, lhs, rhs); + MacroAssembler::Branch(trap_div_by_zero, eq, rhs, Operand(zero_reg)); + MacroAssembler::Divu32(dst, lhs, rhs); } void LiftoffAssembler::emit_i32_rems(Register dst, Register lhs, Register rhs, Label* trap_div_by_zero) { - TurboAssembler::Branch(trap_div_by_zero, eq, rhs, Operand(zero_reg)); - TurboAssembler::Mod32(dst, lhs, rhs); + MacroAssembler::Branch(trap_div_by_zero, eq, rhs, Operand(zero_reg)); + MacroAssembler::Mod32(dst, lhs, rhs); } void LiftoffAssembler::emit_i32_remu(Register dst, Register lhs, Register rhs, Label* trap_div_by_zero) { - TurboAssembler::Branch(trap_div_by_zero, eq, rhs, Operand(zero_reg)); - TurboAssembler::Modu32(dst, lhs, rhs); + MacroAssembler::Branch(trap_div_by_zero, eq, rhs, Operand(zero_reg)); + MacroAssembler::Modu32(dst, lhs, rhs); } #define I32_BINOP(name, instruction) \ @@ -944,15 +944,15 @@ I32_BINOP_I(xor, Xor) #undef I32_BINOP_I void LiftoffAssembler::emit_i32_clz(Register dst, Register src) { - TurboAssembler::Clz32(dst, src); + MacroAssembler::Clz32(dst, src); } void LiftoffAssembler::emit_i32_ctz(Register dst, Register src) { - TurboAssembler::Ctz32(dst, src); + MacroAssembler::Ctz32(dst, src); } bool LiftoffAssembler::emit_i32_popcnt(Register dst, Register src) { - TurboAssembler::Popcnt32(dst, src, kScratchReg); + MacroAssembler::Popcnt32(dst, src, kScratchReg); return true; } @@ -980,48 +980,48 @@ I32_SHIFTOP_I(shr, srliw) void LiftoffAssembler::emit_i64_mul(LiftoffRegister dst, LiftoffRegister lhs, LiftoffRegister rhs) { - TurboAssembler::Mul64(dst.gp(), lhs.gp(), rhs.gp()); + MacroAssembler::Mul64(dst.gp(), lhs.gp(), rhs.gp()); } bool LiftoffAssembler::emit_i64_divs(LiftoffRegister dst, LiftoffRegister lhs, LiftoffRegister rhs, Label* trap_div_by_zero, Label* trap_div_unrepresentable) { - TurboAssembler::Branch(trap_div_by_zero, eq, rhs.gp(), Operand(zero_reg)); + MacroAssembler::Branch(trap_div_by_zero, eq, rhs.gp(), Operand(zero_reg)); // Check if lhs == MinInt64 and rhs == -1, since this case is unrepresentable. 
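The guard being renamed in these hunks protects the single overflowing case of signed division: kMinInt / -1 (MinInt64 / -1 on the 64-bit path) has no representable result. The emitted code compares each operand against its sentinel and traps only when both compares fail. A branch-free sketch of the same predicate, assuming two's-complement int32_t (names are illustrative):

    #include <cstdint>
    #include <limits>

    // Mirrors the CompareI/CompareI/add/Branch sequence: each compare-not-equal
    // yields 1 when the operand differs from its sentinel, so the sum is zero
    // only for the lone overflowing pair (lhs == INT32_MIN && rhs == -1).
    bool divs_would_trap_unrepresentable(int32_t lhs, int32_t rhs) {
      int lhs_not_min = (lhs != std::numeric_limits<int32_t>::min());
      int rhs_not_neg1 = (rhs != -1);
      return (lhs_not_min + rhs_not_neg1) == 0;
    }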
- TurboAssembler::CompareI(kScratchReg, lhs.gp(), + MacroAssembler::CompareI(kScratchReg, lhs.gp(), Operand(std::numeric_limits<int64_t>::min()), ne); - TurboAssembler::CompareI(kScratchReg2, rhs.gp(), Operand(-1), ne); + MacroAssembler::CompareI(kScratchReg2, rhs.gp(), Operand(-1), ne); add(kScratchReg, kScratchReg, kScratchReg2); - TurboAssembler::Branch(trap_div_unrepresentable, eq, kScratchReg, + MacroAssembler::Branch(trap_div_unrepresentable, eq, kScratchReg, Operand(zero_reg)); - TurboAssembler::Div64(dst.gp(), lhs.gp(), rhs.gp()); + MacroAssembler::Div64(dst.gp(), lhs.gp(), rhs.gp()); return true; } bool LiftoffAssembler::emit_i64_divu(LiftoffRegister dst, LiftoffRegister lhs, LiftoffRegister rhs, Label* trap_div_by_zero) { - TurboAssembler::Branch(trap_div_by_zero, eq, rhs.gp(), Operand(zero_reg)); - TurboAssembler::Divu64(dst.gp(), lhs.gp(), rhs.gp()); + MacroAssembler::Branch(trap_div_by_zero, eq, rhs.gp(), Operand(zero_reg)); + MacroAssembler::Divu64(dst.gp(), lhs.gp(), rhs.gp()); return true; } bool LiftoffAssembler::emit_i64_rems(LiftoffRegister dst, LiftoffRegister lhs, LiftoffRegister rhs, Label* trap_div_by_zero) { - TurboAssembler::Branch(trap_div_by_zero, eq, rhs.gp(), Operand(zero_reg)); - TurboAssembler::Mod64(dst.gp(), lhs.gp(), rhs.gp()); + MacroAssembler::Branch(trap_div_by_zero, eq, rhs.gp(), Operand(zero_reg)); + MacroAssembler::Mod64(dst.gp(), lhs.gp(), rhs.gp()); return true; } bool LiftoffAssembler::emit_i64_remu(LiftoffRegister dst, LiftoffRegister lhs, LiftoffRegister rhs, Label* trap_div_by_zero) { - TurboAssembler::Branch(trap_div_by_zero, eq, rhs.gp(), Operand(zero_reg)); - TurboAssembler::Modu64(dst.gp(), lhs.gp(), rhs.gp()); + MacroAssembler::Branch(trap_div_by_zero, eq, rhs.gp(), Operand(zero_reg)); + MacroAssembler::Modu64(dst.gp(), lhs.gp(), rhs.gp()); return true; } @@ -1098,7 +1098,7 @@ void LiftoffAssembler::emit_i64_shri(LiftoffRegister dst, LiftoffRegister src, void LiftoffAssembler::emit_i64_addi(LiftoffRegister dst, LiftoffRegister lhs, int64_t imm) { - TurboAssembler::Add64(dst.gp(), lhs.gp(), Operand(imm)); + MacroAssembler::Add64(dst.gp(), lhs.gp(), Operand(imm)); } void LiftoffAssembler::emit_u32_to_uintptr(Register dst, Register src) { ZeroExtendWord(dst, src); @@ -1125,7 +1125,7 @@ bool LiftoffAssembler::emit_type_conversion(WasmOpcode opcode, // According to WebAssembly spec, if I64 value does not fit the range of // I32, the value is undefined. Therefore, we use sign extension to // implement I64 to I32 truncation - TurboAssembler::SignExtendWord(dst.gp(), src.gp()); + MacroAssembler::SignExtendWord(dst.gp(), src.gp()); return true; case kExprI32SConvertF32: case kExprI32UConvertF32: @@ -1172,39 +1172,39 @@ bool LiftoffAssembler::emit_type_conversion(WasmOpcode opcode, // Checking if trap.
if (trap != nullptr) { - TurboAssembler::Branch(trap, eq, kScratchReg, Operand(zero_reg)); + MacroAssembler::Branch(trap, eq, kScratchReg, Operand(zero_reg)); } return true; } case kExprI32ReinterpretF32: - TurboAssembler::ExtractLowWordFromF64(dst.gp(), src.fp()); + MacroAssembler::ExtractLowWordFromF64(dst.gp(), src.fp()); return true; case kExprI64SConvertI32: - TurboAssembler::SignExtendWord(dst.gp(), src.gp()); + MacroAssembler::SignExtendWord(dst.gp(), src.gp()); return true; case kExprI64UConvertI32: - TurboAssembler::ZeroExtendWord(dst.gp(), src.gp()); + MacroAssembler::ZeroExtendWord(dst.gp(), src.gp()); return true; case kExprI64ReinterpretF64: fmv_x_d(dst.gp(), src.fp()); return true; case kExprF32SConvertI32: { - TurboAssembler::Cvt_s_w(dst.fp(), src.gp()); + MacroAssembler::Cvt_s_w(dst.fp(), src.gp()); return true; } case kExprF32UConvertI32: - TurboAssembler::Cvt_s_uw(dst.fp(), src.gp()); + MacroAssembler::Cvt_s_uw(dst.fp(), src.gp()); return true; case kExprF32ReinterpretI32: fmv_w_x(dst.fp(), src.gp()); return true; case kExprF64SConvertI32: { - TurboAssembler::Cvt_d_w(dst.fp(), src.gp()); + MacroAssembler::Cvt_d_w(dst.fp(), src.gp()); return true; } case kExprF64UConvertI32: - TurboAssembler::Cvt_d_uw(dst.fp(), src.gp()); + MacroAssembler::Cvt_d_uw(dst.fp(), src.gp()); return true; case kExprF64ConvertF32: fcvt_d_s(dst.fp(), src.fp()); @@ -1286,11 +1286,11 @@ void LiftoffAssembler::emit_i64_signextend_i32(LiftoffRegister dst, } void LiftoffAssembler::emit_jump(Label* label) { - TurboAssembler::Branch(label); + MacroAssembler::Branch(label); } void LiftoffAssembler::emit_jump(Register target) { - TurboAssembler::Jump(target); + MacroAssembler::Jump(target); } void LiftoffAssembler::emit_cond_jump(Condition cond, Label* label, @@ -1299,44 +1299,44 @@ void LiftoffAssembler::emit_cond_jump(Condition cond, Label* label, const FreezeCacheState& frozen) { if (rhs == no_reg) { DCHECK(kind == kI32 || kind == kI64); - TurboAssembler::Branch(label, cond, lhs, Operand(zero_reg)); + MacroAssembler::Branch(label, cond, lhs, Operand(zero_reg)); } else { DCHECK((kind == kI32 || kind == kI64) || (is_reference(kind) && (cond == kEqual || cond == kNotEqual))); - TurboAssembler::Branch(label, cond, lhs, Operand(rhs)); + MacroAssembler::Branch(label, cond, lhs, Operand(rhs)); } } void LiftoffAssembler::emit_i32_cond_jumpi(Condition cond, Label* label, Register lhs, int32_t imm, const FreezeCacheState& frozen) { - TurboAssembler::Branch(label, cond, lhs, Operand(imm)); + MacroAssembler::Branch(label, cond, lhs, Operand(imm)); } void LiftoffAssembler::emit_i32_subi_jump_negative( Register value, int subtrahend, Label* result_negative, const FreezeCacheState& frozen) { Sub64(value, value, Operand(subtrahend)); - TurboAssembler::Branch(result_negative, lt, value, Operand(zero_reg)); + MacroAssembler::Branch(result_negative, lt, value, Operand(zero_reg)); } void LiftoffAssembler::emit_i32_eqz(Register dst, Register src) { - TurboAssembler::Sltu(dst, src, 1); + MacroAssembler::Sltu(dst, src, 1); } void LiftoffAssembler::emit_i32_set_cond(Condition cond, Register dst, Register lhs, Register rhs) { - TurboAssembler::CompareI(dst, lhs, Operand(rhs), cond); + MacroAssembler::CompareI(dst, lhs, Operand(rhs), cond); } void LiftoffAssembler::emit_i64_eqz(Register dst, LiftoffRegister src) { - TurboAssembler::Sltu(dst, src.gp(), 1); + MacroAssembler::Sltu(dst, src.gp(), 1); } void LiftoffAssembler::emit_i64_set_cond(Condition cond, Register dst, LiftoffRegister lhs, LiftoffRegister rhs) { - 
TurboAssembler::CompareI(dst, lhs.gp(), Operand(rhs.gp()), cond); + MacroAssembler::CompareI(dst, lhs.gp(), Operand(rhs.gp()), cond); } void LiftoffAssembler::IncrementSmi(LiftoffRegister dst, int offset) { diff --git a/src/wasm/baseline/s390/liftoff-assembler-s390.h b/src/wasm/baseline/s390/liftoff-assembler-s390.h index d4f92e2031..0b3312eb0b 100644 --- a/src/wasm/baseline/s390/liftoff-assembler-s390.h +++ b/src/wasm/baseline/s390/liftoff-assembler-s390.h @@ -182,7 +182,7 @@ void LiftoffAssembler::PatchPrepareStackFrame( bind(&continuation); // Now allocate the stack space. Note that this might do more than just - // decrementing the SP; consult {TurboAssembler::AllocateStackSpace}. + // decrementing the SP; consult {MacroAssembler::AllocateStackSpace}. lay(sp, MemOperand(sp, -frame_size)); // Jump back to the start of the function, from {pc_offset()} to @@ -2966,7 +2966,7 @@ void LiftoffAssembler::CallTrapCallbackForTesting() { void LiftoffAssembler::AssertUnreachable(AbortReason reason) { // Asserts unreachable within the wasm code. - TurboAssembler::AssertUnreachable(reason); + MacroAssembler::AssertUnreachable(reason); } void LiftoffAssembler::PushRegisters(LiftoffRegList regs) { @@ -3120,7 +3120,7 @@ void LiftoffAssembler::CallRuntimeStub(WasmCode::RuntimeStubId sid) { void LiftoffAssembler::AllocateStackSlot(Register addr, uint32_t size) { lay(sp, MemOperand(sp, -size)); - TurboAssembler::Move(addr, sp); + MacroAssembler::Move(addr, sp); } void LiftoffAssembler::DeallocateStackSlot(uint32_t size) { diff --git a/src/wasm/baseline/x64/liftoff-assembler-x64.h b/src/wasm/baseline/x64/liftoff-assembler-x64.h index e1a98a890a..3947f65ce7 100644 --- a/src/wasm/baseline/x64/liftoff-assembler-x64.h +++ b/src/wasm/baseline/x64/liftoff-assembler-x64.h @@ -66,7 +66,7 @@ inline Operand GetMemOp(LiftoffAssembler* assm, Register addr, } // Offset immediate does not fit in 31 bits. Register scratch = kScratchRegister; - assm->TurboAssembler::Move(scratch, offset_imm); + assm->MacroAssembler::Move(scratch, offset_imm); if (offset_reg != no_reg) assm->addq(scratch, offset_reg); return Operand(addr, scratch, scale_factor, 0); } @@ -270,7 +270,7 @@ void LiftoffAssembler::PatchPrepareStackFrame( bind(&continuation); // Now allocate the stack space. Note that this might do more than just - // decrementing the SP; consult {TurboAssembler::AllocateStackSpace}. + // decrementing the SP; consult {MacroAssembler::AllocateStackSpace}. 
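The comment kept alive by this rename points at a real subtlety: AllocateStackSpace is not necessarily a single SP decrement, since some targets (notably Windows on x64) require large frames to be committed one page at a time so the guard page is touched in order. A sketch of such a probing loop under that assumption; the emit callbacks are stand-ins, not V8 API:

    // Allocate `bytes` of stack one page at a time, probing each new page.
    constexpr int kStackPageSize = 4096;  // assumed page size

    void AllocateStackSpaceSketch(int bytes, void (*emit_sub_sp)(int),
                                  void (*emit_probe_at_sp)()) {
      while (bytes > kStackPageSize) {
        emit_sub_sp(kStackPageSize);
        emit_probe_at_sp();  // e.g. store one byte at [sp]
        bytes -= kStackPageSize;
      }
      if (bytes > 0) emit_sub_sp(bytes);
    }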
AllocateStackSpace(frame_size); // Jump back to the start of the function, from {pc_offset()} to @@ -309,16 +309,16 @@ void LiftoffAssembler::LoadConstant(LiftoffRegister reg, WasmValue value, break; case kI64: if (RelocInfo::IsNoInfo(rmode)) { - TurboAssembler::Move(reg.gp(), value.to_i64()); + MacroAssembler::Move(reg.gp(), value.to_i64()); } else { movq(reg.gp(), Immediate64(value.to_i64(), rmode)); } break; case kF32: - TurboAssembler::Move(reg.fp(), value.to_f32_boxed().get_bits()); + MacroAssembler::Move(reg.fp(), value.to_f32_boxed().get_bits()); break; case kF64: - TurboAssembler::Move(reg.fp(), value.to_f64_boxed().get_bits()); + MacroAssembler::Move(reg.fp(), value.to_f64_boxed().get_bits()); break; default: UNREACHABLE(); @@ -1339,7 +1339,7 @@ void LiftoffAssembler::emit_i64_add(LiftoffRegister dst, LiftoffRegister lhs, void LiftoffAssembler::emit_i64_addi(LiftoffRegister dst, LiftoffRegister lhs, int64_t imm) { if (!is_int32(imm)) { - TurboAssembler::Move(kScratchRegister, imm); + MacroAssembler::Move(kScratchRegister, imm); if (lhs.gp() == dst.gp()) { addq(dst.gp(), kScratchRegister); } else { @@ -1640,10 +1640,10 @@ void LiftoffAssembler::emit_f32_copysign(DoubleRegister dst, DoubleRegister lhs, void LiftoffAssembler::emit_f32_abs(DoubleRegister dst, DoubleRegister src) { static constexpr uint32_t kSignBit = uint32_t{1} << 31; if (dst == src) { - TurboAssembler::Move(kScratchDoubleReg, kSignBit - 1); + MacroAssembler::Move(kScratchDoubleReg, kSignBit - 1); Andps(dst, kScratchDoubleReg); } else { - TurboAssembler::Move(dst, kSignBit - 1); + MacroAssembler::Move(dst, kSignBit - 1); Andps(dst, src); } } @@ -1651,10 +1651,10 @@ void LiftoffAssembler::emit_f32_abs(DoubleRegister dst, DoubleRegister src) { void LiftoffAssembler::emit_f32_neg(DoubleRegister dst, DoubleRegister src) { static constexpr uint32_t kSignBit = uint32_t{1} << 31; if (dst == src) { - TurboAssembler::Move(kScratchDoubleReg, kSignBit); + MacroAssembler::Move(kScratchDoubleReg, kSignBit); Xorps(dst, kScratchDoubleReg); } else { - TurboAssembler::Move(dst, kSignBit); + MacroAssembler::Move(dst, kSignBit); Xorps(dst, src); } } @@ -1773,10 +1773,10 @@ void LiftoffAssembler::emit_f64_max(DoubleRegister dst, DoubleRegister lhs, void LiftoffAssembler::emit_f64_abs(DoubleRegister dst, DoubleRegister src) { static constexpr uint64_t kSignBit = uint64_t{1} << 63; if (dst == src) { - TurboAssembler::Move(kScratchDoubleReg, kSignBit - 1); + MacroAssembler::Move(kScratchDoubleReg, kSignBit - 1); Andpd(dst, kScratchDoubleReg); } else { - TurboAssembler::Move(dst, kSignBit - 1); + MacroAssembler::Move(dst, kSignBit - 1); Andpd(dst, src); } } @@ -1784,10 +1784,10 @@ void LiftoffAssembler::emit_f64_abs(DoubleRegister dst, DoubleRegister src) { void LiftoffAssembler::emit_f64_neg(DoubleRegister dst, DoubleRegister src) { static constexpr uint64_t kSignBit = uint64_t{1} << 63; if (dst == src) { - TurboAssembler::Move(kScratchDoubleReg, kSignBit); + MacroAssembler::Move(kScratchDoubleReg, kSignBit); Xorpd(dst, kScratchDoubleReg); } else { - TurboAssembler::Move(dst, kSignBit); + MacroAssembler::Move(dst, kSignBit); Xorpd(dst, src); } } @@ -2234,7 +2234,8 @@ void LiftoffAssembler::emit_i64_set_cond(Condition cond, Register dst, } namespace liftoff { -template <void (TurboAssembler::*cmp_op)(DoubleRegister, DoubleRegister)> +template <void (MacroAssembler::*cmp_op)(DoubleRegister, DoubleRegister)> void EmitFloatSetCond(LiftoffAssembler* assm, Condition cond, Register dst, DoubleRegister lhs, DoubleRegister rhs) { Label cont; @@ -2261,14 +2262,14 @@ void EmitFloatSetCond(LiftoffAssembler* assm, Condition cond, Register dst, void
LiftoffAssembler::emit_f32_set_cond(Condition cond, Register dst, DoubleRegister lhs, DoubleRegister rhs) { - liftoff::EmitFloatSetCond<&TurboAssembler::Ucomiss>(this, cond, dst, lhs, + liftoff::EmitFloatSetCond<&MacroAssembler::Ucomiss>(this, cond, dst, lhs, rhs); } void LiftoffAssembler::emit_f64_set_cond(Condition cond, Register dst, DoubleRegister lhs, DoubleRegister rhs) { - liftoff::EmitFloatSetCond<&TurboAssembler::Ucomisd>(this, cond, dst, lhs, + liftoff::EmitFloatSetCond<&MacroAssembler::Ucomisd>(this, cond, dst, lhs, rhs); } @@ -2394,7 +2395,7 @@ inline void EmitAnyTrue(LiftoffAssembler* assm, LiftoffRegister dst, assm->setcc(not_equal, dst.gp()); } -template <void (TurboAssembler::*pcmp)(XMMRegister, XMMRegister)> +template <void (MacroAssembler::*pcmp)(XMMRegister, XMMRegister)> inline void EmitAllTrue(LiftoffAssembler* assm, LiftoffRegister dst, LiftoffRegister src, base::Optional<CpuFeature> feature = base::nullopt) { @@ -2501,7 +2502,7 @@ void LiftoffAssembler::emit_i8x16_shuffle(LiftoffRegister dst, uint32_t imms[4]; // Shuffles that use just 1 operand are called swizzles, rhs can be ignored. wasm::SimdShuffle::Pack16Lanes(imms, shuffle); - TurboAssembler::Move(kScratchDoubleReg, make_uint64(imms[3], imms[2]), + MacroAssembler::Move(kScratchDoubleReg, make_uint64(imms[3], imms[2]), make_uint64(imms[1], imms[0])); Pshufb(dst.fp(), lhs.fp(), kScratchDoubleReg); return; @@ -2514,7 +2515,7 @@ mask1[j] <<= 8; mask1[j] |= lane < kSimd128Size ? lane : 0x80; } - TurboAssembler::Move(liftoff::kScratchDoubleReg2, mask1[1], mask1[0]); + MacroAssembler::Move(liftoff::kScratchDoubleReg2, mask1[1], mask1[0]); Pshufb(kScratchDoubleReg, lhs.fp(), liftoff::kScratchDoubleReg2); uint64_t mask2[2] = {}; @@ -2524,7 +2525,7 @@ mask2[j] <<= 8; mask2[j] |= lane >= kSimd128Size ?
(lane & 0x0F) : 0x80; } - TurboAssembler::Move(liftoff::kScratchDoubleReg2, mask2[1], mask2[0]); + MacroAssembler::Move(liftoff::kScratchDoubleReg2, mask2[1], mask2[0]); Pshufb(dst.fp(), rhs.fp(), liftoff::kScratchDoubleReg2); Por(dst.fp(), kScratchDoubleReg); @@ -2901,7 +2902,7 @@ void LiftoffAssembler::emit_s128_const(LiftoffRegister dst, const uint8_t imms[16]) { uint64_t vals[2]; memcpy(vals, imms, sizeof(vals)); - TurboAssembler::Move(dst.fp(), vals[1], vals[0]); + MacroAssembler::Move(dst.fp(), vals[1], vals[0]); } void LiftoffAssembler::emit_s128_not(LiftoffRegister dst, LiftoffRegister src) { @@ -2959,7 +2960,7 @@ void LiftoffAssembler::emit_v128_anytrue(LiftoffRegister dst, void LiftoffAssembler::emit_i8x16_alltrue(LiftoffRegister dst, LiftoffRegister src) { - liftoff::EmitAllTrue<&TurboAssembler::Pcmpeqb>(this, dst, src); + liftoff::EmitAllTrue<&MacroAssembler::Pcmpeqb>(this, dst, src); } void LiftoffAssembler::emit_i8x16_bitmask(LiftoffRegister dst, @@ -3084,7 +3085,7 @@ void LiftoffAssembler::emit_i16x8_neg(LiftoffRegister dst, void LiftoffAssembler::emit_i16x8_alltrue(LiftoffRegister dst, LiftoffRegister src) { - liftoff::EmitAllTrue<&TurboAssembler::Pcmpeqw>(this, dst, src); + liftoff::EmitAllTrue<&MacroAssembler::Pcmpeqw>(this, dst, src); } void LiftoffAssembler::emit_i16x8_bitmask(LiftoffRegister dst, @@ -3294,7 +3295,7 @@ void LiftoffAssembler::emit_i32x4_neg(LiftoffRegister dst, void LiftoffAssembler::emit_i32x4_alltrue(LiftoffRegister dst, LiftoffRegister src) { - liftoff::EmitAllTrue<&TurboAssembler::Pcmpeqd>(this, dst, src); + liftoff::EmitAllTrue<&MacroAssembler::Pcmpeqd>(this, dst, src); } void LiftoffAssembler::emit_i32x4_bitmask(LiftoffRegister dst, @@ -3462,7 +3463,7 @@ void LiftoffAssembler::emit_i64x2_neg(LiftoffRegister dst, void LiftoffAssembler::emit_i64x2_alltrue(LiftoffRegister dst, LiftoffRegister src) { - liftoff::EmitAllTrue<&TurboAssembler::Pcmpeqq>(this, dst, src, SSE4_1); + liftoff::EmitAllTrue<&MacroAssembler::Pcmpeqq>(this, dst, src, SSE4_1); } void LiftoffAssembler::emit_i64x2_shl(LiftoffRegister dst, LiftoffRegister lhs, @@ -4161,7 +4162,7 @@ void LiftoffAssembler::CallTrapCallbackForTesting() { } void LiftoffAssembler::AssertUnreachable(AbortReason reason) { - TurboAssembler::AssertUnreachable(reason); + MacroAssembler::AssertUnreachable(reason); } void LiftoffAssembler::PushRegisters(LiftoffRegList regs) { diff --git a/src/wasm/jump-table-assembler.cc b/src/wasm/jump-table-assembler.cc index e2f5e2b85b..79c5e50c73 100644 --- a/src/wasm/jump-table-assembler.cc +++ b/src/wasm/jump-table-assembler.cc @@ -203,7 +203,7 @@ bool JumpTableAssembler::EmitJumpSlot(Address target) { ptrdiff_t jump_distance = reinterpret_cast<byte*>(target) - jump_pc; DCHECK_EQ(0, jump_distance % kInstrSize); int64_t instr_offset = jump_distance / kInstrSize; - if (!TurboAssembler::IsNearCallOffset(instr_offset)) { + if (!MacroAssembler::IsNearCallOffset(instr_offset)) { return false; } diff --git a/src/wasm/jump-table-assembler.h b/src/wasm/jump-table-assembler.h index b545d51a28..eeb399996b 100644 --- a/src/wasm/jump-table-assembler.h +++ b/src/wasm/jump-table-assembler.h @@ -57,7 +57,7 @@ namespace wasm { // execute the old code afterwards, which is no problem, since that code remains // available until it is garbage collected. Garbage collection itself is a // synchronization barrier though.
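IsNearCallOffset, used in EmitJumpSlot above, asks whether the jump-table slot can reach the target with one direct branch. On arm64 a B/BL instruction encodes a signed 26-bit instruction offset (roughly +/-128 MiB of code distance), so a sketch of the check under that encoding assumption:

    #include <cstdint>

    // True when instr_offset fits in the signed 26-bit immediate of an
    // arm64 direct branch; otherwise the slot needs a far-jump sequence.
    bool is_near_call_offset(int64_t instr_offset) {
      const int64_t kLimit = int64_t{1} << 25;
      return instr_offset >= -kLimit && instr_offset < kLimit;
    }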
-class V8_EXPORT_PRIVATE JumpTableAssembler : public TurboAssembler { +class V8_EXPORT_PRIVATE JumpTableAssembler : public MacroAssembler { public: // Translate an offset into the continuous jump table to a jump table index. static uint32_t SlotOffsetToIndex(uint32_t slot_offset) { @@ -175,7 +175,7 @@ class V8_EXPORT_PRIVATE JumpTableAssembler : public TurboAssembler { private: // Instantiate a {JumpTableAssembler} for patching. explicit JumpTableAssembler(Address slot_addr, int size = 256) - : TurboAssembler(nullptr, JumpTableAssemblerOptions(), + : MacroAssembler(nullptr, JumpTableAssemblerOptions(), CodeObjectRequired::kNo, ExternalAssemblerBuffer( reinterpret_cast<void*>(slot_addr), size)) {} diff --git a/test/cctest/compiler/test-code-generator.cc b/test/cctest/compiler/test-code-generator.cc index d52574c303..ebe055992d 100644 --- a/test/cctest/compiler/test-code-generator.cc +++ b/test/cctest/compiler/test-code-generator.cc @@ -1147,7 +1147,7 @@ class CodeGeneratorTester { Builtin::kNoBuiltinId, kMaxUnoptimizedFrameHeight, kMaxPushedArgumentCount); - generator_->tasm()->CodeEntry(); + generator_->masm()->CodeEntry(); // Force a frame to be created. generator_->frame_access_state()->MarkHasFrame(true); @@ -1239,10 +1239,10 @@ class CodeGeneratorTester { void CheckAssembleMove(InstructionOperand* source, InstructionOperand* destination) { - int start = generator_->tasm()->pc_offset(); + int start = generator_->masm()->pc_offset(); generator_->AssembleMove(MaybeTranslateSlot(source), MaybeTranslateSlot(destination)); - CHECK(generator_->tasm()->pc_offset() > start); + CHECK(generator_->masm()->pc_offset() > start); } void CheckAssembleMoves(ParallelMove* moves) { @@ -1255,15 +1255,15 @@ class CodeGeneratorTester { void CheckAssembleSwap(InstructionOperand* source, InstructionOperand* destination) { - int start = generator_->tasm()->pc_offset(); + int start = generator_->masm()->pc_offset(); generator_->AssembleSwap(MaybeTranslateSlot(source), MaybeTranslateSlot(destination)); - CHECK(generator_->tasm()->pc_offset() > start); + CHECK(generator_->masm()->pc_offset() > start); } Handle<Code> Finalize() { generator_->FinishCode(); - generator_->safepoints()->Emit(generator_->tasm(), + generator_->safepoints()->Emit(generator_->masm(), frame_.GetTotalFrameSlotCount()); generator_->MaybeEmitOutOfLineConstantPool(); diff --git a/test/cctest/test-assembler-arm64.cc b/test/cctest/test-assembler-arm64.cc index 27b06a14ee..282488b0d5 100644 --- a/test/cctest/test-assembler-arm64.cc +++ b/test/cctest/test-assembler-arm64.cc @@ -12408,7 +12408,7 @@ static void PushPopSimpleHelper(int reg_count, int reg_size, case PushPopByFour: // Push high-numbered registers first (to the highest addresses). for (i = reg_count; i >= 4; i -= 4) { - __ Push<TurboAssembler::kSignLR>(r[i - 1], r[i - 2], r[i - 3], + __ Push<MacroAssembler::kSignLR>(r[i - 1], r[i - 2], r[i - 3], r[i - 4]); } // Finish off the leftovers. @@ -12433,7 +12433,7 @@ case PushPopByFour: // Pop low-numbered registers first (from the lowest addresses). for (i = 0; i <= (reg_count-4); i += 4) { - __ Pop<TurboAssembler::kAuthLR>(r[i], r[i + 1], r[i + 2], + __ Pop<MacroAssembler::kAuthLR>(r[i], r[i + 1], r[i + 2], r[i + 3]); } // Finish off the leftovers.
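The CopyDoubleWords tests just below exercise overlapping copies, where the direction flag (kSrcLessThanDst vs. kDstLessThanSrc) picks the iteration order that keeps source words from being clobbered before they are read, the same reasoning memmove applies. A direction-aware sketch, assuming word-sized elements:

    #include <cstddef>
    #include <cstdint>

    // Overlap-safe copy: when dst sits above src, copy backwards
    // (kSrcLessThanDst); otherwise copy forwards (kDstLessThanSrc).
    void copy_words(uint64_t* dst, const uint64_t* src, size_t count) {
      if (dst > src) {
        for (size_t i = count; i > 0; --i) dst[i - 1] = src[i - 1];
      } else if (dst < src) {
        for (size_t i = 0; i < count; ++i) dst[i] = src[i];
      }
    }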
@@ -12975,7 +12975,7 @@ TEST(copy_double_words_downwards_even) { __ SlotAddress(x5, 12); __ SlotAddress(x6, 11); __ Mov(x7, 12); - __ CopyDoubleWords(x5, x6, x7, TurboAssembler::kSrcLessThanDst); + __ CopyDoubleWords(x5, x6, x7, MacroAssembler::kSrcLessThanDst); __ Pop(xzr, x4, x5, x6); __ Pop(x7, x8, x9, x10); @@ -13029,7 +13029,7 @@ TEST(copy_double_words_downwards_odd) { __ SlotAddress(x5, 13); __ SlotAddress(x6, 12); __ Mov(x7, 13); - __ CopyDoubleWords(x5, x6, x7, TurboAssembler::kSrcLessThanDst); + __ CopyDoubleWords(x5, x6, x7, MacroAssembler::kSrcLessThanDst); __ Pop(xzr, x4); __ Pop(x5, x6, x7, x8); @@ -13085,13 +13085,13 @@ TEST(copy_noop) { __ SlotAddress(x5, 3); __ SlotAddress(x6, 2); __ Mov(x7, 0); - __ CopyDoubleWords(x5, x6, x7, TurboAssembler::kSrcLessThanDst); + __ CopyDoubleWords(x5, x6, x7, MacroAssembler::kSrcLessThanDst); // dst < src, count == 0 __ SlotAddress(x5, 2); __ SlotAddress(x6, 3); __ Mov(x7, 0); - __ CopyDoubleWords(x5, x6, x7, TurboAssembler::kDstLessThanSrc); + __ CopyDoubleWords(x5, x6, x7, MacroAssembler::kDstLessThanSrc); __ Pop(x1, x2, x3, x4); __ Pop(x5, x6, x7, x8); diff --git a/test/cctest/test-assembler-mips64.cc b/test/cctest/test-assembler-mips64.cc index b8d6b29f54..5e075118f8 100644 --- a/test/cctest/test-assembler-mips64.cc +++ b/test/cctest/test-assembler-mips64.cc @@ -6195,11 +6195,11 @@ TEST(Trampoline_with_massive_unbound_labels) { MacroAssembler assm(isolate, v8::internal::CodeObjectRequired::kYes); const int kNumSlots = - TurboAssembler::kMaxBranchOffset / TurboAssembler::kTrampolineSlotsSize; + MacroAssembler::kMaxBranchOffset / MacroAssembler::kTrampolineSlotsSize; Label labels[kNumSlots]; { - TurboAssembler::BlockTrampolinePoolScope block_trampoline_pool(&assm); + MacroAssembler::BlockTrampolinePoolScope block_trampoline_pool(&assm); for (int i = 0; i < kNumSlots; i++) { __ Branch(&labels[i]); } @@ -6218,12 +6218,12 @@ TEST(Call_with_trampoline) { int next_buffer_check_ = v8_flags.force_long_branches ? 
kMaxInt - : TurboAssembler::kMaxBranchOffset - - TurboAssembler::kTrampolineSlotsSize * 16; + : MacroAssembler::kMaxBranchOffset - + MacroAssembler::kTrampolineSlotsSize * 16; Label done; __ Branch(&done); - next_buffer_check_ -= TurboAssembler::kTrampolineSlotsSize; + next_buffer_check_ -= MacroAssembler::kTrampolineSlotsSize; int num_nops = (next_buffer_check_ - __ pc_offset()) / kInstrSize - 1; for (int i = 0; i < num_nops; i++) { diff --git a/test/unittests/BUILD.gn b/test/unittests/BUILD.gn index cb428e3985..bf90543df9 100644 --- a/test/unittests/BUILD.gn +++ b/test/unittests/BUILD.gn @@ -635,7 +635,7 @@ v8_source_set("unittests_sources") { if (v8_current_cpu == "arm") { sources += [ "assembler/disasm-arm-unittest.cc", - "assembler/turbo-assembler-arm-unittest.cc", + "assembler/macro-assembler-arm-unittest.cc", ] if (v8_enable_turbofan) { sources += [ "compiler/arm/instruction-selector-arm-unittest.cc" ] @@ -644,7 +644,6 @@ v8_source_set("unittests_sources") { sources += [ "assembler/disasm-arm64-unittest.cc", "assembler/macro-assembler-arm64-unittest.cc", - "assembler/turbo-assembler-arm64-unittest.cc", "codegen/pointer-auth-arm64-unittest.cc", ] if (v8_enable_turbofan) { @@ -656,7 +655,7 @@ v8_source_set("unittests_sources") { } else if (v8_current_cpu == "x86") { sources += [ "assembler/disasm-ia32-unittest.cc", - "assembler/turbo-assembler-ia32-unittest.cc", + "assembler/macro-assembler-ia32-unittest.cc", ] if (v8_enable_turbofan) { sources += [ "compiler/ia32/instruction-selector-ia32-unittest.cc" ] @@ -664,7 +663,7 @@ v8_source_set("unittests_sources") { } else if (v8_current_cpu == "mips64" || v8_current_cpu == "mips64el") { sources += [ "assembler/disasm-mips64-unittest.cc", - "assembler/turbo-assembler-mips64-unittest.cc", + "assembler/macro-assembler-mips64-unittest.cc", ] if (v8_enable_turbofan) { sources += [ "compiler/mips64/instruction-selector-mips64-unittest.cc" ] @@ -672,7 +671,7 @@ v8_source_set("unittests_sources") { } else if (v8_current_cpu == "riscv64") { sources += [ "assembler/disasm-riscv-unittest.cc", - "assembler/turbo-assembler-riscv-unittest.cc", + "assembler/macro-assembler-riscv-unittest.cc", ] if (v8_enable_turbofan) { sources += [ "compiler/riscv64/instruction-selector-riscv64-unittest.cc" ] @@ -680,7 +679,7 @@ v8_source_set("unittests_sources") { } else if (v8_current_cpu == "riscv32") { sources += [ "assembler/disasm-riscv-unittest.cc", - "assembler/turbo-assembler-riscv-unittest.cc", + "assembler/macro-assembler-riscv-unittest.cc", ] if (v8_enable_turbofan) { sources += [ "compiler/riscv32/instruction-selector-riscv32-unittest.cc" ] @@ -690,7 +689,6 @@ v8_source_set("unittests_sources") { "assembler/assembler-x64-unittest.cc", "assembler/disasm-x64-unittest.cc", "assembler/macro-assembler-x64-unittest.cc", - "assembler/turbo-assembler-x64-unittest.cc", ] if (v8_enable_turbofan) { sources += [ "compiler/x64/instruction-selector-x64-unittest.cc" ] @@ -701,7 +699,7 @@ v8_source_set("unittests_sources") { } else if (v8_current_cpu == "ppc" || v8_current_cpu == "ppc64") { sources += [ "assembler/disasm-ppc-unittest.cc", - "assembler/turbo-assembler-ppc-unittest.cc", + "assembler/macro-assembler-ppc-unittest.cc", ] if (v8_enable_turbofan) { sources += [ "compiler/ppc/instruction-selector-ppc-unittest.cc" ] @@ -709,7 +707,7 @@ v8_source_set("unittests_sources") { } else if (v8_current_cpu == "s390" || v8_current_cpu == "s390x") { sources += [ "assembler/disasm-s390-unittest.cc", - "assembler/turbo-assembler-s390-unittest.cc", + 
"assembler/macro-assembler-s390-unittest.cc", ] if (v8_enable_turbofan) { sources += [ "compiler/s390/instruction-selector-s390-unittest.cc" ] @@ -717,7 +715,7 @@ v8_source_set("unittests_sources") { } else if (v8_current_cpu == "loong64") { sources += [ "assembler/disasm-loong64-unittest.cc", - "assembler/turbo-assembler-loong64-unittest.cc", + "assembler/macro-assembler-loong64-unittest.cc", ] if (v8_enable_turbofan) { sources += [ "compiler/loong64/instruction-selector-loong64-unittest.cc" ] diff --git a/test/unittests/assembler/turbo-assembler-arm-unittest.cc b/test/unittests/assembler/macro-assembler-arm-unittest.cc similarity index 86% rename from test/unittests/assembler/turbo-assembler-arm-unittest.cc rename to test/unittests/assembler/macro-assembler-arm-unittest.cc index 6fa1bd5927..f7ec44e77f 100644 --- a/test/unittests/assembler/turbo-assembler-arm-unittest.cc +++ b/test/unittests/assembler/macro-assembler-arm-unittest.cc @@ -13,7 +13,7 @@ namespace v8 { namespace internal { -#define __ tasm. +#define __ masm. // If we are running on android and the output is not redirected (i.e. ends up // in the android log) then we cannot find the error message in the output. This @@ -28,11 +28,11 @@ namespace internal { // a buffer and executing them. These tests do not initialize the // V8 library, create a context, or use any V8 objects. -class TurboAssemblerTest : public TestWithIsolate {}; +class MacroAssemblerTest : public TestWithIsolate {}; -TEST_F(TurboAssemblerTest, TestHardAbort) { +TEST_F(MacroAssemblerTest, TestHardAbort) { auto buffer = AllocateAssemblerBuffer(); - TurboAssembler tasm(isolate(), AssemblerOptions{}, CodeObjectRequired::kNo, + MacroAssembler masm(isolate(), AssemblerOptions{}, CodeObjectRequired::kNo, buffer->CreateView()); __ set_root_array_available(false); __ set_abort_hard(true); @@ -40,7 +40,7 @@ TEST_F(TurboAssemblerTest, TestHardAbort) { __ Abort(AbortReason::kNoReason); CodeDesc desc; - tasm.GetCode(isolate(), &desc); + masm.GetCode(isolate(), &desc); buffer->MakeExecutable(); // We need an isolate here to execute in the simulator. auto f = GeneratedCode::FromBuffer(isolate(), buffer->start()); @@ -48,9 +48,9 @@ TEST_F(TurboAssemblerTest, TestHardAbort) { ASSERT_DEATH_IF_SUPPORTED({ f.Call(); }, ERROR_MESSAGE("abort: no reason")); } -TEST_F(TurboAssemblerTest, TestCheck) { +TEST_F(MacroAssemblerTest, TestCheck) { auto buffer = AllocateAssemblerBuffer(); - TurboAssembler tasm(isolate(), AssemblerOptions{}, CodeObjectRequired::kNo, + MacroAssembler masm(isolate(), AssemblerOptions{}, CodeObjectRequired::kNo, buffer->CreateView()); __ set_root_array_available(false); __ set_abort_hard(true); @@ -62,7 +62,7 @@ TEST_F(TurboAssemblerTest, TestCheck) { __ Ret(); CodeDesc desc; - tasm.GetCode(isolate(), &desc); + masm.GetCode(isolate(), &desc); buffer->MakeExecutable(); // We need an isolate here to execute in the simulator. 
auto f = GeneratedCode<void, int>::FromBuffer(isolate(), buffer->start()); @@ -102,17 +102,17 @@ const MoveObjectAndSlotTestCase kMoveObjectAndSlotTestCases[] = { const int kOffsets[] = {0, 42, kMaxRegularHeapObjectSize, 0x101001}; template <typename T> -class TurboAssemblerTestWithParam : public TurboAssemblerTest, +class MacroAssemblerTestWithParam : public MacroAssemblerTest, public ::testing::WithParamInterface<T> {}; -using TurboAssemblerTestMoveObjectAndSlot = - TurboAssemblerTestWithParam<MoveObjectAndSlotTestCase>; +using MacroAssemblerTestMoveObjectAndSlot = + MacroAssemblerTestWithParam<MoveObjectAndSlotTestCase>; -TEST_P(TurboAssemblerTestMoveObjectAndSlot, MoveObjectAndSlot) { +TEST_P(MacroAssemblerTestMoveObjectAndSlot, MoveObjectAndSlot) { const MoveObjectAndSlotTestCase test_case = GetParam(); TRACED_FOREACH(int32_t, offset, kOffsets) { auto buffer = AllocateAssemblerBuffer(); - TurboAssembler tasm(nullptr, AssemblerOptions{}, CodeObjectRequired::kNo, + MacroAssembler masm(nullptr, AssemblerOptions{}, CodeObjectRequired::kNo, buffer->CreateView()); __ Push(r0); __ Move(test_case.object, r1); @@ -143,7 +143,7 @@ TEST_P(TurboAssemblerTestMoveObjectAndSlot, MoveObjectAndSlot) { __ RecordComment("--"); // The `result` pointer was saved on the stack. - UseScratchRegisterScope temps(&tasm); + UseScratchRegisterScope temps(&masm); Register scratch = temps.Acquire(); __ Pop(scratch); __ str(dst_object, MemOperand(scratch)); @@ -152,7 +152,7 @@ TEST_P(TurboAssemblerTestMoveObjectAndSlot, MoveObjectAndSlot) { __ Ret(); CodeDesc desc; - tasm.GetCode(nullptr, &desc); + masm.GetCode(nullptr, &desc); if (v8_flags.print_code) { Handle<Code> code = Factory::CodeBuilder(isolate(), desc, CodeKind::FOR_TESTING).Build(); @@ -179,8 +179,8 @@ TEST_P(TurboAssemblerTestMoveObjectAndSlot, MoveObjectAndSlot) { } } -INSTANTIATE_TEST_SUITE_P(TurboAssemblerTest, - TurboAssemblerTestMoveObjectAndSlot, +INSTANTIATE_TEST_SUITE_P(MacroAssemblerTest, + MacroAssemblerTestMoveObjectAndSlot, ::testing::ValuesIn(kMoveObjectAndSlotTestCases)); #undef __ diff --git a/test/unittests/assembler/macro-assembler-arm64-unittest.cc b/test/unittests/assembler/macro-assembler-arm64-unittest.cc index 021b0423f3..3bbbc49096 100644 --- a/test/unittests/assembler/macro-assembler-arm64-unittest.cc +++ b/test/unittests/assembler/macro-assembler-arm64-unittest.cc @@ -1,129 +1,254 @@ -// Copyright 2019 the V8 project authors. All rights reserved. -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following -// disclaimer in the documentation and/or other materials provided -// with the distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived -// from this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED.
IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +// Copyright 2018 the V8 project authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. -#include - -#include "src/codegen/arm64/assembler-arm64-inl.h" -#include "src/codegen/macro-assembler-inl.h" -#include "src/deoptimizer/deoptimizer.h" -#include "src/heap/factory.h" -#include "src/objects/objects-inl.h" +#include "src/codegen/arm64/macro-assembler-arm64-inl.h" +#include "src/codegen/macro-assembler.h" +#include "src/execution/simulator.h" #include "src/utils/ostreams.h" #include "test/common/assembler-tester.h" #include "test/unittests/test-utils.h" +#include "testing/gtest-support.h" namespace v8 { namespace internal { -namespace test_macro_assembler_arm64 { - -using MacroAssemblerArm64Test = TestWithIsolate; - -using F0 = int(); #define __ masm. -TEST_F(MacroAssemblerArm64Test, EmbeddedObj) { -#ifdef V8_COMPRESS_POINTERS - Isolate* isolate = i_isolate(); - HandleScope handles(isolate); - - auto buffer = AllocateAssemblerBuffer(); - MacroAssembler masm(isolate, v8::internal::CodeObjectRequired::kYes, - buffer->CreateView()); - - AssemblerBufferWriteScope rw_scope(*buffer); - - Handle<FixedArray> old_array = isolate->factory()->NewFixedArray(2000); - Handle<FixedArray> my_array = isolate->factory()->NewFixedArray(1000); - __ Mov(w4, Immediate(my_array, RelocInfo::COMPRESSED_EMBEDDED_OBJECT)); - __ Mov(x5, old_array); - __ ret(x5); - - CodeDesc desc; - masm.GetCode(isolate, &desc); - Handle<Code> code = - Factory::CodeBuilder(isolate, desc, CodeKind::FOR_TESTING).Build(); -#ifdef DEBUG - StdoutStream os; - code->Print(os); +// If we are running on android and the output is not redirected (i.e. ends up +// in the android log) then we cannot find the error message in the output. This +// macro just returns the empty string in that case. +#if defined(ANDROID) && !defined(V8_ANDROID_LOG_STDOUT) +#define ERROR_MESSAGE(msg) "" +#else +#define ERROR_MESSAGE(msg) msg #endif - // Collect garbage to ensure reloc info can be walked by the heap. - CollectAllGarbage(); - CollectAllGarbage(); - CollectAllGarbage(); +// Test the arm64 assembler by compiling some simple functions into +// a buffer and executing them. These tests do not initialize the +// V8 library, create a context, or use any V8 objects. - PtrComprCageBase cage_base(isolate); +class MacroAssemblerTest : public TestWithIsolate {}; - // Test the user-facing reloc interface.
- const int mode_mask = RelocInfo::EmbeddedObjectModeMask(); - for (RelocIterator it(*code, mode_mask); !it.done(); it.next()) { - RelocInfo::Mode mode = it.rinfo()->rmode(); - if (RelocInfo::IsCompressedEmbeddedObject(mode)) { - CHECK_EQ(*my_array, it.rinfo()->target_object(cage_base)); - } else { - CHECK(RelocInfo::IsFullEmbeddedObject(mode)); - CHECK_EQ(*old_array, it.rinfo()->target_object(cage_base)); - } - } -#endif // V8_COMPRESS_POINTERS -} - -TEST_F(MacroAssemblerArm64Test, DeoptExitSizeIsFixed) { - Isolate* isolate = i_isolate(); - HandleScope handles(isolate); +TEST_F(MacroAssemblerTest, TestHardAbort) { auto buffer = AllocateAssemblerBuffer(); - MacroAssembler masm(isolate, v8::internal::CodeObjectRequired::kYes, + MacroAssembler masm(isolate(), AssemblerOptions{}, CodeObjectRequired::kNo, buffer->CreateView()); + __ set_root_array_available(false); + __ set_abort_hard(true); - AssemblerBufferWriteScope rw_scope(*buffer); + { + AssemblerBufferWriteScope rw_scope(*buffer); - static_assert(static_cast<int>(kFirstDeoptimizeKind) == 0); - for (int i = 0; i < kDeoptimizeKindCount; i++) { - DeoptimizeKind kind = static_cast<DeoptimizeKind>(i); - Label before_exit; - Builtin target = Deoptimizer::GetDeoptimizationEntry(kind); - // Mirroring logic in code-generator.cc. - if (kind == DeoptimizeKind::kLazy) { - // CFI emits an extra instruction here. - masm.BindExceptionHandler(&before_exit); - } else { - masm.bind(&before_exit); + __ CodeEntry(); + + __ Abort(AbortReason::kNoReason); + + CodeDesc desc; + masm.GetCode(isolate(), &desc); + } + // We need an isolate here to execute in the simulator. + auto f = GeneratedCode<void>::FromBuffer(isolate(), buffer->start()); + + ASSERT_DEATH_IF_SUPPORTED({ f.Call(); }, ERROR_MESSAGE("abort: no reason")); +} + +TEST_F(MacroAssemblerTest, TestCheck) { + auto buffer = AllocateAssemblerBuffer(); + MacroAssembler masm(isolate(), AssemblerOptions{}, CodeObjectRequired::kNo, + buffer->CreateView()); + __ set_root_array_available(false); + __ set_abort_hard(true); + + { + AssemblerBufferWriteScope rw_scope(*buffer); + + __ CodeEntry(); + + // Fail if the first parameter is 17. + __ Mov(w1, Immediate(17)); + __ Cmp(w0, w1); // 1st parameter is in {w0}. + __ Check(Condition::ne, AbortReason::kNoReason); + __ Ret(); + + CodeDesc desc; + masm.GetCode(isolate(), &desc); + } + // We need an isolate here to execute in the simulator.
+ auto f = GeneratedCode<void, int>::FromBuffer(isolate(), buffer->start()); + + f.Call(0); + f.Call(18); + ASSERT_DEATH_IF_SUPPORTED({ f.Call(17); }, ERROR_MESSAGE("abort: no reason")); +} + +TEST_F(MacroAssemblerTest, CompareAndBranch) { + const int kTestCases[] = {-42, 0, 42}; + static_assert(Condition::eq == 0); + static_assert(Condition::le == 13); + TRACED_FORRANGE(int, cc, 0, 13) { // All conds except al and nv + Condition cond = static_cast<Condition>(cc); + TRACED_FOREACH(int, imm, kTestCases) { + auto buffer = AllocateAssemblerBuffer(); + MacroAssembler masm(isolate(), AssemblerOptions{}, + CodeObjectRequired::kNo, buffer->CreateView()); + __ set_root_array_available(false); + __ set_abort_hard(true); + + { + AssemblerBufferWriteScope rw_scope(*buffer); + + __ CodeEntry(); + + Label start, lab; + __ Bind(&start); + __ CompareAndBranch(x0, Immediate(imm), cond, &lab); + if (imm == 0 && ((cond == eq) || (cond == ne) || (cond == hi) || + (cond == ls))) { // One instruction generated + ASSERT_EQ(kInstrSize, __ SizeOfCodeGeneratedSince(&start)); + } else { // Two instructions generated + ASSERT_EQ(static_cast(2 * kInstrSize), + __ SizeOfCodeGeneratedSince(&start)); + } + __ Cmp(x0, Immediate(imm)); + __ Check(NegateCondition(cond), + AbortReason::kNoReason); // cond must not hold + __ Ret(); + __ Bind(&lab); // Branch leads here + __ Cmp(x0, Immediate(imm)); + __ Check(cond, AbortReason::kNoReason); // cond must hold + __ Ret(); + + CodeDesc desc; + masm.GetCode(isolate(), &desc); + } + // We need an isolate here to execute in the simulator. + auto f = GeneratedCode<void, int>::FromBuffer(isolate(), buffer->start()); + + TRACED_FOREACH(int, n, kTestCases) { f.Call(n); } } - masm.CallForDeoptimization(target, 42, &before_exit, kind, &before_exit, - &before_exit); - CHECK_EQ(masm.SizeOfCodeGeneratedSince(&before_exit), - kind == DeoptimizeKind::kLazy ? Deoptimizer::kLazyDeoptExitSize - : Deoptimizer::kEagerDeoptExitSize); } } +struct MoveObjectAndSlotTestCase { + const char* comment; + Register dst_object; + Register dst_slot; + Register object; + Register offset_register = no_reg; +}; + +const MoveObjectAndSlotTestCase kMoveObjectAndSlotTestCases[] = { + {"no overlap", x0, x1, x2}, + {"no overlap", x0, x1, x2, x3}, + + {"object == dst_object", x2, x1, x2}, + {"object == dst_object", x2, x1, x2, x3}, + + {"object == dst_slot", x1, x2, x2}, + {"object == dst_slot", x1, x2, x2, x3}, + + {"offset == dst_object", x0, x1, x2, x0}, + + {"offset == dst_object && object == dst_slot", x0, x1, x1, x0}, + + {"offset == dst_slot", x0, x1, x2, x1}, + + {"offset == dst_slot && object == dst_object", x0, x1, x0, x1}}; + +// Make sure we include offsets that cannot be encoded in an add instruction.
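The offsets chosen below include values that defeat arm64's ADD-immediate encoding: the instruction carries a 12-bit unsigned immediate, optionally shifted left by 12, so 42 and 0x101000 are encodable while 0x101001 is not and must first be materialized in a scratch register. A sketch of that encodability test, assuming the imm12 / imm12<<12 forms:

    #include <cstdint>

    // arm64 ADD (immediate) accepts a 12-bit value, optionally shifted by 12.
    bool encodable_in_add_immediate(uint64_t imm) {
      return imm < (uint64_t{1} << 12) ||
             ((imm & 0xFFF) == 0 && imm < (uint64_t{1} << 24));
    }
    // encodable_in_add_immediate(42) and (0x101000) hold;
    // encodable_in_add_immediate(0x101001) does not.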
+const int kOffsets[] = {0, 42, kMaxRegularHeapObjectSize, 0x101001}; + +template +class MacroAssemblerTestWithParam : public MacroAssemblerTest, + public ::testing::WithParamInterface {}; + +using MacroAssemblerTestMoveObjectAndSlot = + MacroAssemblerTestWithParam; + +TEST_P(MacroAssemblerTestMoveObjectAndSlot, MoveObjectAndSlot) { + const MoveObjectAndSlotTestCase test_case = GetParam(); + TRACED_FOREACH(int32_t, offset, kOffsets) { + auto buffer = AllocateAssemblerBuffer(); + MacroAssembler masm(nullptr, AssemblerOptions{}, CodeObjectRequired::kNo, + buffer->CreateView()); + + { + AssemblerBufferWriteScope rw_buffer_scope(*buffer); + + __ CodeEntry(); + __ Push(x0, padreg); + __ Mov(test_case.object, x1); + + Register src_object = test_case.object; + Register dst_object = test_case.dst_object; + Register dst_slot = test_case.dst_slot; + + Operand offset_operand(0); + if (test_case.offset_register == no_reg) { + offset_operand = Operand(offset); + } else { + __ Mov(test_case.offset_register, Operand(offset)); + offset_operand = Operand(test_case.offset_register); + } + + std::stringstream comment; + comment << "-- " << test_case.comment << ": MoveObjectAndSlot(" + << dst_object << ", " << dst_slot << ", " << src_object << ", "; + if (test_case.offset_register == no_reg) { + comment << "#" << offset; + } else { + comment << test_case.offset_register; + } + comment << ") --"; + __ RecordComment(comment.str().c_str()); + __ MoveObjectAndSlot(dst_object, dst_slot, src_object, offset_operand); + __ RecordComment("--"); + + // The `result` pointer was saved on the stack. + UseScratchRegisterScope temps(&masm); + Register scratch = temps.AcquireX(); + __ Pop(padreg, scratch); + __ Str(dst_object, MemOperand(scratch)); + __ Str(dst_slot, MemOperand(scratch, kSystemPointerSize)); + + __ Ret(); + + CodeDesc desc; + masm.GetCode(nullptr, &desc); + if (v8_flags.print_code) { + Handle code = + Factory::CodeBuilder(isolate(), desc, CodeKind::FOR_TESTING) + .Build(); + StdoutStream os; + code->Print(os); + } + } + + // We need an isolate here to execute in the simulator. + auto f = GeneratedCode::FromBuffer(isolate(), + buffer->start()); + + byte* object = new byte[offset]; + byte* result[] = {nullptr, nullptr}; + + f.Call(result, object); + + // The first element must be the address of the object, and the second the + // slot addressed by `offset`. + EXPECT_EQ(result[0], &object[0]); + EXPECT_EQ(result[1], &object[offset]); + + delete[] object; + } +} + +INSTANTIATE_TEST_SUITE_P(MacroAssemblerTest, + MacroAssemblerTestMoveObjectAndSlot, + ::testing::ValuesIn(kMoveObjectAndSlotTestCases)); + #undef __ +#undef ERROR_MESSAGE -} // namespace test_macro_assembler_arm64 } // namespace internal } // namespace v8 diff --git a/test/unittests/assembler/turbo-assembler-ia32-unittest.cc b/test/unittests/assembler/macro-assembler-ia32-unittest.cc similarity index 82% rename from test/unittests/assembler/turbo-assembler-ia32-unittest.cc rename to test/unittests/assembler/macro-assembler-ia32-unittest.cc index f0cb96d47d..cbf628ba88 100644 --- a/test/unittests/assembler/turbo-assembler-ia32-unittest.cc +++ b/test/unittests/assembler/macro-assembler-ia32-unittest.cc @@ -11,17 +11,17 @@ namespace v8 { namespace internal { -#define __ tasm. +#define __ masm. // Test the x64 assembler by compiling some simple functions into // a buffer and executing them. These tests do not initialize the // V8 library, create a context, or use any V8 objects. 
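[Editor's note, not part of the patch: the renamed arm64 test file keeps the ERROR_MESSAGE helper from the deleted turbo-assembler variant; the new code above still wraps its expected abort text in it and #undefs it at the end. Its definition, visible verbatim in the deleted-file hunk further down, is reproduced here for context. It exists because death tests match against stderr, which is not observable when Android sends output to the system log:

    #if defined(ANDROID) && !defined(V8_ANDROID_LOG_STDOUT)
    #define ERROR_MESSAGE(msg) ""  // output lands in the android log; match nothing
    #else
    #define ERROR_MESSAGE(msg) msg
    #endif
]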
diff --git a/test/unittests/assembler/turbo-assembler-ia32-unittest.cc b/test/unittests/assembler/macro-assembler-ia32-unittest.cc
similarity index 82%
rename from test/unittests/assembler/turbo-assembler-ia32-unittest.cc
rename to test/unittests/assembler/macro-assembler-ia32-unittest.cc
index f0cb96d47d..cbf628ba88 100644
--- a/test/unittests/assembler/turbo-assembler-ia32-unittest.cc
+++ b/test/unittests/assembler/macro-assembler-ia32-unittest.cc
@@ -11,17 +11,17 @@
 namespace v8 {
 namespace internal {
 
-#define __ tasm.
+#define __ masm.
 
 // Test the x64 assembler by compiling some simple functions into
 // a buffer and executing them.  These tests do not initialize the
 // V8 library, create a context, or use any V8 objects.
 
-class TurboAssemblerTest : public TestWithIsolate {};
+class MacroAssemblerTest : public TestWithIsolate {};
 
-TEST_F(TurboAssemblerTest, TestHardAbort) {
+TEST_F(MacroAssemblerTest, TestHardAbort) {
   auto buffer = AllocateAssemblerBuffer();
-  TurboAssembler tasm(isolate(), AssemblerOptions{}, CodeObjectRequired::kNo,
+  MacroAssembler masm(isolate(), AssemblerOptions{}, CodeObjectRequired::kNo,
                       buffer->CreateView());
   __ set_root_array_available(false);
   __ set_abort_hard(true);
@@ -29,16 +29,16 @@ TEST_F(TurboAssemblerTest, TestHardAbort) {
   __ Abort(AbortReason::kNoReason);
 
   CodeDesc desc;
-  tasm.GetCode(isolate(), &desc);
+  masm.GetCode(isolate(), &desc);
   buffer->MakeExecutable();
   auto f = GeneratedCode<void>::FromBuffer(isolate(), buffer->start());
 
   ASSERT_DEATH_IF_SUPPORTED({ f.Call(); }, "abort: no reason");
 }
 
-TEST_F(TurboAssemblerTest, TestCheck) {
+TEST_F(MacroAssemblerTest, TestCheck) {
   auto buffer = AllocateAssemblerBuffer();
-  TurboAssembler tasm(isolate(), AssemblerOptions{}, CodeObjectRequired::kNo,
+  MacroAssembler masm(isolate(), AssemblerOptions{}, CodeObjectRequired::kNo,
                       buffer->CreateView());
   __ set_root_array_available(false);
   __ set_abort_hard(true);
@@ -50,7 +50,7 @@ TEST_F(TurboAssemblerTest, TestCheck) {
   __ ret(0);
 
   CodeDesc desc;
-  tasm.GetCode(isolate(), &desc);
+  masm.GetCode(isolate(), &desc);
   buffer->MakeExecutable();
   auto f = GeneratedCode<void, int>::FromBuffer(isolate(), buffer->start());
 
diff --git a/test/unittests/assembler/turbo-assembler-loong64-unittest.cc b/test/unittests/assembler/macro-assembler-loong64-unittest.cc
similarity index 83%
rename from test/unittests/assembler/turbo-assembler-loong64-unittest.cc
rename to test/unittests/assembler/macro-assembler-loong64-unittest.cc
index 5334fb4be3..a2cc213cae 100644
--- a/test/unittests/assembler/turbo-assembler-loong64-unittest.cc
+++ b/test/unittests/assembler/macro-assembler-loong64-unittest.cc
@@ -12,33 +12,33 @@
 namespace v8 {
 namespace internal {
 
-#define __ tasm.
+#define __ masm.
 
 // Test the loong64 assembler by compiling some simple functions into
 // a buffer and executing them.  These tests do not initialize the
 // V8 library, create a context, or use any V8 objects.
 
-class TurboAssemblerTest : public TestWithIsolate {};
+class MacroAssemblerTest : public TestWithIsolate {};
 
-TEST_F(TurboAssemblerTest, TestHardAbort) {
+TEST_F(MacroAssemblerTest, TestHardAbort) {
   auto buffer = AllocateAssemblerBuffer();
-  TurboAssembler tasm(isolate(), AssemblerOptions{}, CodeObjectRequired::kNo,
+  MacroAssembler masm(isolate(), AssemblerOptions{}, CodeObjectRequired::kNo,
                       buffer->CreateView());
   __ set_root_array_available(false);
   __ set_abort_hard(true);
 
   __ Abort(AbortReason::kNoReason);
 
   CodeDesc desc;
-  tasm.GetCode(isolate(), &desc);
+  masm.GetCode(isolate(), &desc);
   buffer->MakeExecutable();
   // We need an isolate here to execute in the simulator.
   auto f = GeneratedCode<void>::FromBuffer(isolate(), buffer->start());
 
   ASSERT_DEATH_IF_SUPPORTED({ f.Call(); }, "abort: no reason");
 }
 
-TEST_F(TurboAssemblerTest, TestCheck) {
+TEST_F(MacroAssemblerTest, TestCheck) {
   auto buffer = AllocateAssemblerBuffer();
-  TurboAssembler tasm(isolate(), AssemblerOptions{}, CodeObjectRequired::kNo,
+  MacroAssembler masm(isolate(), AssemblerOptions{}, CodeObjectRequired::kNo,
                       buffer->CreateView());
   __ set_root_array_available(false);
   __ set_abort_hard(true);
@@ -48,7 +48,7 @@ TEST_F(TurboAssemblerTest, TestCheck) {
   __ Ret();
 
   CodeDesc desc;
-  tasm.GetCode(isolate(), &desc);
+  masm.GetCode(isolate(), &desc);
   buffer->MakeExecutable();
   // We need an isolate here to execute in the simulator.
   auto f = GeneratedCode<void, int>::FromBuffer(isolate(), buffer->start());
 
diff --git a/test/unittests/assembler/turbo-assembler-mips64-unittest.cc b/test/unittests/assembler/macro-assembler-mips64-unittest.cc
similarity index 83%
rename from test/unittests/assembler/turbo-assembler-mips64-unittest.cc
rename to test/unittests/assembler/macro-assembler-mips64-unittest.cc
index c954ffcc65..92e3b1d6f8 100644
--- a/test/unittests/assembler/turbo-assembler-mips64-unittest.cc
+++ b/test/unittests/assembler/macro-assembler-mips64-unittest.cc
@@ -12,17 +12,17 @@
 namespace v8 {
 namespace internal {
 
-#define __ tasm.
+#define __ masm.
 
 // Test the x64 assembler by compiling some simple functions into
 // a buffer and executing them.  These tests do not initialize the
 // V8 library, create a context, or use any V8 objects.
 
-class TurboAssemblerTest : public TestWithIsolate {};
+class MacroAssemblerTest : public TestWithIsolate {};
 
-TEST_F(TurboAssemblerTest, TestHardAbort) {
+TEST_F(MacroAssemblerTest, TestHardAbort) {
   auto buffer = AllocateAssemblerBuffer();
-  TurboAssembler tasm(isolate(), AssemblerOptions{}, CodeObjectRequired::kNo,
+  MacroAssembler masm(isolate(), AssemblerOptions{}, CodeObjectRequired::kNo,
                       buffer->CreateView());
   __ set_root_array_available(false);
   __ set_abort_hard(true);
@@ -30,7 +30,7 @@ TEST_F(TurboAssemblerTest, TestHardAbort) {
   __ Abort(AbortReason::kNoReason);
 
   CodeDesc desc;
-  tasm.GetCode(isolate(), &desc);
+  masm.GetCode(isolate(), &desc);
   buffer->MakeExecutable();
   // We need an isolate here to execute in the simulator.
   auto f = GeneratedCode<void>::FromBuffer(isolate(), buffer->start());
@@ -38,9 +38,9 @@ TEST_F(TurboAssemblerTest, TestHardAbort) {
   ASSERT_DEATH_IF_SUPPORTED({ f.Call(); }, "abort: no reason");
 }
 
-TEST_F(TurboAssemblerTest, TestCheck) {
+TEST_F(MacroAssemblerTest, TestCheck) {
   auto buffer = AllocateAssemblerBuffer();
-  TurboAssembler tasm(isolate(), AssemblerOptions{}, CodeObjectRequired::kNo,
+  MacroAssembler masm(isolate(), AssemblerOptions{}, CodeObjectRequired::kNo,
                       buffer->CreateView());
   __ set_root_array_available(false);
   __ set_abort_hard(true);
@@ -50,7 +50,7 @@ TEST_F(TurboAssemblerTest, TestCheck) {
   __ Ret();
 
   CodeDesc desc;
-  tasm.GetCode(isolate(), &desc);
+  masm.GetCode(isolate(), &desc);
   buffer->MakeExecutable();
   // We need an isolate here to execute in the simulator.
   auto f = GeneratedCode<void, int>::FromBuffer(isolate(), buffer->start());
 
diff --git a/test/unittests/assembler/turbo-assembler-ppc-unittest.cc b/test/unittests/assembler/macro-assembler-ppc-unittest.cc
similarity index 58%
rename from test/unittests/assembler/turbo-assembler-ppc-unittest.cc
rename to test/unittests/assembler/macro-assembler-ppc-unittest.cc
index 93ae7abafc..aabb988b29 100644
--- a/test/unittests/assembler/turbo-assembler-ppc-unittest.cc
+++ b/test/unittests/assembler/macro-assembler-ppc-unittest.cc
@@ -12,17 +12,17 @@
 namespace v8 {
 namespace internal {
 
-#define __ tasm.
+#define __ masm.
 
 // Test the ppc assembler by compiling some simple functions into
 // a buffer and executing them.  These tests do not initialize the
 // V8 library, create a context, or use any V8 objects.
 
-class TurboAssemblerTest : public TestWithIsolate {};
+class MacroAssemblerTest : public TestWithIsolate {};
 
-TEST_F(TurboAssemblerTest, TestHardAbort) {
+TEST_F(MacroAssemblerTest, TestHardAbort) {
   auto buffer = AllocateAssemblerBuffer();
-  TurboAssembler tasm(isolate(), AssemblerOptions{}, CodeObjectRequired::kNo,
+  MacroAssembler masm(isolate(), AssemblerOptions{}, CodeObjectRequired::kNo,
                       buffer->CreateView());
   __ set_root_array_available(false);
   __ set_abort_hard(true);
@@ -30,7 +30,7 @@ TEST_F(TurboAssemblerTest, TestHardAbort) {
   __ Abort(AbortReason::kNoReason);
 
   CodeDesc desc;
-  tasm.GetCode(isolate(), &desc);
+  masm.GetCode(isolate(), &desc);
   buffer->MakeExecutable();
   // We need an isolate here to execute in the simulator.
   auto f = GeneratedCode<void>::FromBuffer(isolate(), buffer->start());
@@ -38,9 +38,9 @@ TEST_F(TurboAssemblerTest, TestHardAbort) {
   ASSERT_DEATH_IF_SUPPORTED({ f.Call(); }, "abort: no reason");
 }
 
-TEST_F(TurboAssemblerTest, TestCheck) {
+TEST_F(MacroAssemblerTest, TestCheck) {
   auto buffer = AllocateAssemblerBuffer();
-  TurboAssembler tasm(isolate(), AssemblerOptions{}, CodeObjectRequired::kNo,
+  MacroAssembler masm(isolate(), AssemblerOptions{}, CodeObjectRequired::kNo,
                       buffer->CreateView());
   __ set_root_array_available(false);
   __ set_abort_hard(true);
@@ -52,7 +52,7 @@ TEST_F(TurboAssemblerTest, TestCheck) {
   __ Ret();
 
   CodeDesc desc;
-  tasm.GetCode(isolate(), &desc);
+  masm.GetCode(isolate(), &desc);
   buffer->MakeExecutable();
   // We need an isolate here to execute in the simulator.
   auto f = GeneratedCode<void, int>::FromBuffer(isolate(), buffer->start());
@@ -62,23 +62,24 @@ TEST_F(TurboAssemblerTest, TestCheck) {
   ASSERT_DEATH_IF_SUPPORTED({ f.Call(17); }, "abort: no reason");
 }
 
-TEST_F(TurboAssemblerTest, ReverseBitsU64) {
+TEST_F(MacroAssemblerTest, ReverseBitsU64) {
   struct {
-    uint64_t expected; uint64_t input;
+    uint64_t expected;
+    uint64_t input;
   } values[] = {
-      {0x0000000000000000, 0x0000000000000000},
-      {0xffffffffffffffff, 0xffffffffffffffff},
-      {0x8000000000000000, 0x0000000000000001},
-      {0x0000000000000001, 0x8000000000000000},
-      {0x800066aa22cc4488, 0x1122334455660001},
-      {0x1122334455660001, 0x800066aa22cc4488},
-      {0xffffffff00000000, 0x00000000ffffffff},
-      {0x00000000ffffffff, 0xffffffff00000000},
-      {0xff01020304050607, 0xe060a020c04080ff},
-      {0xe060a020c04080ff, 0xff01020304050607},
+      {0x0000000000000000, 0x0000000000000000},
+      {0xffffffffffffffff, 0xffffffffffffffff},
+      {0x8000000000000000, 0x0000000000000001},
+      {0x0000000000000001, 0x8000000000000000},
+      {0x800066aa22cc4488, 0x1122334455660001},
+      {0x1122334455660001, 0x800066aa22cc4488},
+      {0xffffffff00000000, 0x00000000ffffffff},
+      {0x00000000ffffffff, 0xffffffff00000000},
+      {0xff01020304050607, 0xe060a020c04080ff},
+      {0xe060a020c04080ff, 0xff01020304050607},
   };
   auto buffer = AllocateAssemblerBuffer();
-  TurboAssembler tasm(isolate(), AssemblerOptions{}, CodeObjectRequired::kNo,
+  MacroAssembler masm(isolate(), AssemblerOptions{}, CodeObjectRequired::kNo,
                       buffer->CreateView());
   __ set_root_array_available(false);
   __ set_abort_hard(true);
@@ -87,28 +88,26 @@ TEST_F(TurboAssemblerTest, ReverseBitsU64) {
   __ Pop(r4, r5);
   __ Ret();
   CodeDesc desc;
-  tasm.GetCode(isolate(), &desc);
+  masm.GetCode(isolate(), &desc);
   buffer->MakeExecutable();
-  auto f = GeneratedCode<uint64_t, uint64_t>::FromBuffer(isolate(),
-                                                         buffer->start());
-  for (unsigned int i=0; i < (sizeof(values) / sizeof(values[0])); i++) {
+  auto f =
+      GeneratedCode<uint64_t, uint64_t>::FromBuffer(isolate(), buffer->start());
+  for (unsigned int i = 0; i < (sizeof(values) / sizeof(values[0])); i++) {
     CHECK_EQ(values[i].expected, f.Call(values[i].input));
   }
 }
 
-TEST_F(TurboAssemblerTest, ReverseBitsU32) {
+TEST_F(MacroAssemblerTest, ReverseBitsU32) {
   struct {
-    uint64_t expected; uint64_t input;
+    uint64_t expected;
+    uint64_t input;
   } values[] = {
-      {0x00000000, 0x00000000},
-      {0xffffffff, 0xffffffff},
-      {0x00000001, 0x80000000},
-      {0x80000000, 0x00000001},
-      {0x22334455, 0xaa22cc44},
-      {0xaa22cc44, 0x22334455},
+      {0x00000000, 0x00000000}, {0xffffffff, 0xffffffff},
+      {0x00000001, 0x80000000}, {0x80000000, 0x00000001},
+      {0x22334455, 0xaa22cc44}, {0xaa22cc44, 0x22334455},
   };
   auto buffer = AllocateAssemblerBuffer();
-  TurboAssembler tasm(isolate(), AssemblerOptions{}, CodeObjectRequired::kNo,
+  MacroAssembler masm(isolate(), AssemblerOptions{}, CodeObjectRequired::kNo,
                       buffer->CreateView());
   __ set_root_array_available(false);
   __ set_abort_hard(true);
@@ -117,11 +116,11 @@ TEST_F(TurboAssemblerTest, ReverseBitsU32) {
   __ Pop(r4, r5);
   __ Ret();
   CodeDesc desc;
-  tasm.GetCode(isolate(), &desc);
+  masm.GetCode(isolate(), &desc);
   buffer->MakeExecutable();
-  auto f = GeneratedCode<uint64_t, uint64_t>::FromBuffer(isolate(),
-                                                         buffer->start());
-  for (unsigned int i=0; i < (sizeof(values) / sizeof(values[0])); i++) {
+  auto f =
+      GeneratedCode<uint64_t, uint64_t>::FromBuffer(isolate(), buffer->start());
+  for (unsigned int i = 0; i < (sizeof(values) / sizeof(values[0])); i++) {
     CHECK_EQ(values[i].expected, f.Call(values[i].input));
   }
 }
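[Editor's note, not part of the patch: every row pair in the ReverseBits tables above is a bit-reversal pair, which is why each value appears twice with the expected/input columns swapped. A minimal standalone sketch of the reference operation the generated ppc code is checked against; the function name is illustrative, not a V8 symbol:

    #include <cstdint>

    // Bit i of the input becomes bit (63 - i) of the result.
    uint64_t ReverseBits64(uint64_t v) {
      uint64_t r = 0;
      for (int i = 0; i < 64; ++i) {
        r = (r << 1) | (v & 1);  // shift result left, append lowest bit of v
        v >>= 1;
      }
      return r;
    }

    // Matches the test vectors above, e.g.:
    //   ReverseBits64(0x0000000000000001) == 0x8000000000000000
    //   ReverseBits64(0x1122334455660001) == 0x800066aa22cc4488
]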
diff --git a/test/unittests/assembler/turbo-assembler-riscv-unittest.cc b/test/unittests/assembler/macro-assembler-riscv-unittest.cc
similarity index 83%
rename from test/unittests/assembler/turbo-assembler-riscv-unittest.cc
rename to test/unittests/assembler/macro-assembler-riscv-unittest.cc
index afda8d3603..8e74ae692c 100644
--- a/test/unittests/assembler/turbo-assembler-riscv-unittest.cc
+++ b/test/unittests/assembler/macro-assembler-riscv-unittest.cc
@@ -12,33 +12,33 @@
 namespace v8 {
 namespace internal {
 
-#define __ tasm.
+#define __ masm.
 
 // Test the x64 assembler by compiling some simple functions into
 // a buffer and executing them.  These tests do not initialize the
 // V8 library, create a context, or use any V8 objects.
 
-class TurboAssemblerTest : public TestWithIsolate {};
+class MacroAssemblerTest : public TestWithIsolate {};
 
-TEST_F(TurboAssemblerTest, TestHardAbort) {
+TEST_F(MacroAssemblerTest, TestHardAbort) {
   auto buffer = AllocateAssemblerBuffer();
-  TurboAssembler tasm(isolate(), AssemblerOptions{}, CodeObjectRequired::kNo,
+  MacroAssembler masm(isolate(), AssemblerOptions{}, CodeObjectRequired::kNo,
                       buffer->CreateView());
   __ set_root_array_available(false);
   __ set_abort_hard(true);
 
   __ Abort(AbortReason::kNoReason);
 
   CodeDesc desc;
-  tasm.GetCode(nullptr, &desc);
+  masm.GetCode(nullptr, &desc);
   buffer->MakeExecutable();
   // We need an isolate here to execute in the simulator.
   auto f = GeneratedCode<void>::FromBuffer(isolate(), buffer->start());
 
   ASSERT_DEATH_IF_SUPPORTED({ f.Call(); }, "abort: no reason");
 }
 
-TEST_F(TurboAssemblerTest, TestCheck) {
+TEST_F(MacroAssemblerTest, TestCheck) {
   auto buffer = AllocateAssemblerBuffer();
-  TurboAssembler tasm(isolate(), AssemblerOptions{}, CodeObjectRequired::kNo,
+  MacroAssembler masm(isolate(), AssemblerOptions{}, CodeObjectRequired::kNo,
                       buffer->CreateView());
   __ set_root_array_available(false);
   __ set_abort_hard(true);
@@ -48,7 +48,7 @@ TEST_F(TurboAssemblerTest, TestCheck) {
   __ Ret();
 
   CodeDesc desc;
-  tasm.GetCode(nullptr, &desc);
+  masm.GetCode(nullptr, &desc);
   buffer->MakeExecutable();
   // We need an isolate here to execute in the simulator.
   auto f = GeneratedCode<void, int>::FromBuffer(isolate(), buffer->start());
 
diff --git a/test/unittests/assembler/turbo-assembler-s390-unittest.cc b/test/unittests/assembler/macro-assembler-s390-unittest.cc
similarity index 83%
rename from test/unittests/assembler/turbo-assembler-s390-unittest.cc
rename to test/unittests/assembler/macro-assembler-s390-unittest.cc
index d86a09f67c..b371c841c5 100644
--- a/test/unittests/assembler/turbo-assembler-s390-unittest.cc
+++ b/test/unittests/assembler/macro-assembler-s390-unittest.cc
@@ -12,17 +12,17 @@
 namespace v8 {
 namespace internal {
 
-#define __ tasm.
+#define __ masm.
 
 // Test the s390 assembler by compiling some simple functions into
 // a buffer and executing them.  These tests do not initialize the
 // V8 library, create a context, or use any V8 objects.
 
-class TurboAssemblerTest : public TestWithIsolate {};
+class MacroAssemblerTest : public TestWithIsolate {};
 
-TEST_F(TurboAssemblerTest, TestHardAbort) {
+TEST_F(MacroAssemblerTest, TestHardAbort) {
   auto buffer = AllocateAssemblerBuffer();
-  TurboAssembler tasm(isolate(), AssemblerOptions{}, CodeObjectRequired::kNo,
+  MacroAssembler masm(isolate(), AssemblerOptions{}, CodeObjectRequired::kNo,
                       buffer->CreateView());
   __ set_root_array_available(false);
   __ set_abort_hard(true);
@@ -30,7 +30,7 @@ TEST_F(TurboAssemblerTest, TestHardAbort) {
   __ Abort(AbortReason::kNoReason);
 
   CodeDesc desc;
-  tasm.GetCode(isolate(), &desc);
+  masm.GetCode(isolate(), &desc);
   buffer->MakeExecutable();
   // We need an isolate here to execute in the simulator.
   auto f = GeneratedCode<void>::FromBuffer(isolate(), buffer->start());
@@ -38,9 +38,9 @@ TEST_F(TurboAssemblerTest, TestHardAbort) {
   ASSERT_DEATH_IF_SUPPORTED({ f.Call(); }, "abort: no reason");
 }
 
-TEST_F(TurboAssemblerTest, TestCheck) {
+TEST_F(MacroAssemblerTest, TestCheck) {
   auto buffer = AllocateAssemblerBuffer();
-  TurboAssembler tasm(isolate(), AssemblerOptions{}, CodeObjectRequired::kNo,
+  MacroAssembler masm(isolate(), AssemblerOptions{}, CodeObjectRequired::kNo,
                       buffer->CreateView());
   __ set_root_array_available(false);
   __ set_abort_hard(true);
@@ -52,7 +52,7 @@ TEST_F(TurboAssemblerTest, TestCheck) {
   __ Ret();
 
   CodeDesc desc;
-  tasm.GetCode(isolate(), &desc);
+  masm.GetCode(isolate(), &desc);
   buffer->MakeExecutable();
   // We need an isolate here to execute in the simulator.
   auto f = GeneratedCode<void, int>::FromBuffer(isolate(), buffer->start());
 
diff --git a/test/unittests/assembler/macro-assembler-x64-unittest.cc b/test/unittests/assembler/macro-assembler-x64-unittest.cc
index e2fdafb580..cf7e277080 100644
--- a/test/unittests/assembler/macro-assembler-x64-unittest.cc
+++ b/test/unittests/assembler/macro-assembler-x64-unittest.cc
@@ -40,6 +40,57 @@
 namespace v8 {
 namespace internal {
 
+#define __ masm.
+
+// Test the x64 assembler by compiling some simple functions into
+// a buffer and executing them.  These tests do not initialize the
+// V8 library, create a context, or use any V8 objects.
+
+using MacroAssemblerX64Test = TestWithIsolate;
+
+TEST_F(MacroAssemblerX64Test, TestHardAbort) {
+  auto buffer = AllocateAssemblerBuffer();
+  MacroAssembler masm(isolate(), AssemblerOptions{}, CodeObjectRequired::kNo,
+                      buffer->CreateView());
+  __ set_root_array_available(false);
+  __ set_abort_hard(true);
+
+  __ Abort(AbortReason::kNoReason);
+
+  CodeDesc desc;
+  masm.GetCode(isolate(), &desc);
+  buffer->MakeExecutable();
+  auto f = GeneratedCode<void>::FromBuffer(isolate(), buffer->start());
+
+  ASSERT_DEATH_IF_SUPPORTED({ f.Call(); }, "abort: no reason");
+}
+
+TEST_F(MacroAssemblerX64Test, TestCheck) {
+  auto buffer = AllocateAssemblerBuffer();
+  MacroAssembler masm(isolate(), AssemblerOptions{}, CodeObjectRequired::kNo,
+                      buffer->CreateView());
+  __ set_root_array_available(false);
+  __ set_abort_hard(true);
+
+  // Fail if the first parameter is 17.
+  __ movl(rax, Immediate(17));
+  __ cmpl(rax, arg_reg_1);
+  __ Check(Condition::not_equal, AbortReason::kNoReason);
+  __ ret(0);
+
+  CodeDesc desc;
+  masm.GetCode(isolate(), &desc);
+  buffer->MakeExecutable();
+  auto f = GeneratedCode<void, int>::FromBuffer(isolate(), buffer->start());
+
+  f.Call(0);
+  f.Call(18);
+  ASSERT_DEATH_IF_SUPPORTED({ f.Call(17); }, "abort: no reason");
+}
+
+#undef __
+
 namespace test_macro_assembler_x64 {
 
 // Test the x64 assembler by compiling some simple functions into
@@ -51,8 +102,6 @@ namespace test_macro_assembler_x64 {
 // This calling convention is used on Linux, with GCC, and on Mac OS,
 // with GCC. A different convention is used on 64-bit windows.
 
-using MacroAssemblerX64Test = TestWithIsolate;
-
 using F0 = int();
 
 #define __ masm->
diff --git a/test/unittests/assembler/turbo-assembler-arm64-unittest.cc b/test/unittests/assembler/turbo-assembler-arm64-unittest.cc
deleted file mode 100644
index 77123ef565..0000000000
--- a/test/unittests/assembler/turbo-assembler-arm64-unittest.cc
+++ /dev/null
@@ -1,254 +0,0 @@
-// Copyright 2018 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "src/codegen/arm64/macro-assembler-arm64-inl.h"
-#include "src/codegen/macro-assembler.h"
-#include "src/execution/simulator.h"
-#include "src/utils/ostreams.h"
-#include "test/common/assembler-tester.h"
-#include "test/unittests/test-utils.h"
-#include "testing/gtest-support.h"
-
-namespace v8 {
-namespace internal {
-
-#define __ tasm.
-
-// If we are running on android and the output is not redirected (i.e. ends up
-// in the android log) then we cannot find the error message in the output. This
-// macro just returns the empty string in that case.
-#if defined(ANDROID) && !defined(V8_ANDROID_LOG_STDOUT)
-#define ERROR_MESSAGE(msg) ""
-#else
-#define ERROR_MESSAGE(msg) msg
-#endif
-
-// Test the x64 assembler by compiling some simple functions into
-// a buffer and executing them.  These tests do not initialize the
-// V8 library, create a context, or use any V8 objects.
-
-class TurboAssemblerTest : public TestWithIsolate {};
-
-TEST_F(TurboAssemblerTest, TestHardAbort) {
-  auto buffer = AllocateAssemblerBuffer();
-  TurboAssembler tasm(isolate(), AssemblerOptions{}, CodeObjectRequired::kNo,
-                      buffer->CreateView());
-  __ set_root_array_available(false);
-  __ set_abort_hard(true);
-
-  {
-    AssemblerBufferWriteScope rw_scope(*buffer);
-
-    __ CodeEntry();
-
-    __ Abort(AbortReason::kNoReason);
-
-    CodeDesc desc;
-    tasm.GetCode(isolate(), &desc);
-  }
-  // We need an isolate here to execute in the simulator.
-  auto f = GeneratedCode<void>::FromBuffer(isolate(), buffer->start());
-
-  ASSERT_DEATH_IF_SUPPORTED({ f.Call(); }, ERROR_MESSAGE("abort: no reason"));
-}
-
-TEST_F(TurboAssemblerTest, TestCheck) {
-  auto buffer = AllocateAssemblerBuffer();
-  TurboAssembler tasm(isolate(), AssemblerOptions{}, CodeObjectRequired::kNo,
-                      buffer->CreateView());
-  __ set_root_array_available(false);
-  __ set_abort_hard(true);
-
-  {
-    AssemblerBufferWriteScope rw_scope(*buffer);
-
-    __ CodeEntry();
-
-    // Fail if the first parameter is 17.
-    __ Mov(w1, Immediate(17));
-    __ Cmp(w0, w1);  // 1st parameter is in {w0}.
-    __ Check(Condition::ne, AbortReason::kNoReason);
-    __ Ret();
-
-    CodeDesc desc;
-    tasm.GetCode(isolate(), &desc);
-  }
-  // We need an isolate here to execute in the simulator.
-  auto f = GeneratedCode<void, int>::FromBuffer(isolate(), buffer->start());
-
-  f.Call(0);
-  f.Call(18);
-  ASSERT_DEATH_IF_SUPPORTED({ f.Call(17); }, ERROR_MESSAGE("abort: no reason"));
-}
-
-TEST_F(TurboAssemblerTest, CompareAndBranch) {
-  const int kTestCases[] = {-42, 0, 42};
-  static_assert(Condition::eq == 0);
-  static_assert(Condition::le == 13);
-  TRACED_FORRANGE(int, cc, 0, 13) {  // All conds except al and nv
-    Condition cond = static_cast<Condition>(cc);
-    TRACED_FOREACH(int, imm, kTestCases) {
-      auto buffer = AllocateAssemblerBuffer();
-      TurboAssembler tasm(isolate(), AssemblerOptions{},
-                          CodeObjectRequired::kNo, buffer->CreateView());
-      __ set_root_array_available(false);
-      __ set_abort_hard(true);
-
-      {
-        AssemblerBufferWriteScope rw_scope(*buffer);
-
-        __ CodeEntry();
-
-        Label start, lab;
-        __ Bind(&start);
-        __ CompareAndBranch(x0, Immediate(imm), cond, &lab);
-        if (imm == 0 && ((cond == eq) || (cond == ne) || (cond == hi) ||
-                         (cond == ls))) {  // One instruction generated
-          ASSERT_EQ(kInstrSize, __ SizeOfCodeGeneratedSince(&start));
-        } else {  // Two instructions generated
-          ASSERT_EQ(static_cast<uint64_t>(2 * kInstrSize),
-                    __ SizeOfCodeGeneratedSince(&start));
-        }
-        __ Cmp(x0, Immediate(imm));
-        __ Check(NegateCondition(cond),
-                 AbortReason::kNoReason);  // cond must not hold
-        __ Ret();
-        __ Bind(&lab);  // Branch leads here
-        __ Cmp(x0, Immediate(imm));
-        __ Check(cond, AbortReason::kNoReason);  // cond must hold
-        __ Ret();
-
-        CodeDesc desc;
-        tasm.GetCode(isolate(), &desc);
-      }
-      // We need an isolate here to execute in the simulator.
-      auto f = GeneratedCode<void, int>::FromBuffer(isolate(), buffer->start());
-
-      TRACED_FOREACH(int, n, kTestCases) { f.Call(n); }
-    }
-  }
-}
-
-struct MoveObjectAndSlotTestCase {
-  const char* comment;
-  Register dst_object;
-  Register dst_slot;
-  Register object;
-  Register offset_register = no_reg;
-};
-
-const MoveObjectAndSlotTestCase kMoveObjectAndSlotTestCases[] = {
-    {"no overlap", x0, x1, x2},
-    {"no overlap", x0, x1, x2, x3},
-
-    {"object == dst_object", x2, x1, x2},
-    {"object == dst_object", x2, x1, x2, x3},
-
-    {"object == dst_slot", x1, x2, x2},
-    {"object == dst_slot", x1, x2, x2, x3},
-
-    {"offset == dst_object", x0, x1, x2, x0},
-
-    {"offset == dst_object && object == dst_slot", x0, x1, x1, x0},
-
-    {"offset == dst_slot", x0, x1, x2, x1},
-
-    {"offset == dst_slot && object == dst_object", x0, x1, x0, x1}};
-
-// Make sure we include offsets that cannot be encoded in an add instruction.
-const int kOffsets[] = {0, 42, kMaxRegularHeapObjectSize, 0x101001};
-
-template <typename T>
-class TurboAssemblerTestWithParam : public TurboAssemblerTest,
-                                    public ::testing::WithParamInterface<T> {};
-
-using TurboAssemblerTestMoveObjectAndSlot =
-    TurboAssemblerTestWithParam<MoveObjectAndSlotTestCase>;
-
-TEST_P(TurboAssemblerTestMoveObjectAndSlot, MoveObjectAndSlot) {
-  const MoveObjectAndSlotTestCase test_case = GetParam();
-  TRACED_FOREACH(int32_t, offset, kOffsets) {
-    auto buffer = AllocateAssemblerBuffer();
-    TurboAssembler tasm(nullptr, AssemblerOptions{}, CodeObjectRequired::kNo,
-                        buffer->CreateView());
-
-    {
-      AssemblerBufferWriteScope rw_buffer_scope(*buffer);
-
-      __ CodeEntry();
-      __ Push(x0, padreg);
-      __ Mov(test_case.object, x1);
-
-      Register src_object = test_case.object;
-      Register dst_object = test_case.dst_object;
-      Register dst_slot = test_case.dst_slot;
-
-      Operand offset_operand(0);
-      if (test_case.offset_register == no_reg) {
-        offset_operand = Operand(offset);
-      } else {
-        __ Mov(test_case.offset_register, Operand(offset));
-        offset_operand = Operand(test_case.offset_register);
-      }
-
-      std::stringstream comment;
-      comment << "-- " << test_case.comment << ": MoveObjectAndSlot("
-              << dst_object << ", " << dst_slot << ", " << src_object << ", ";
-      if (test_case.offset_register == no_reg) {
-        comment << "#" << offset;
-      } else {
-        comment << test_case.offset_register;
-      }
-      comment << ") --";
-      __ RecordComment(comment.str().c_str());
-      __ MoveObjectAndSlot(dst_object, dst_slot, src_object, offset_operand);
-      __ RecordComment("--");
-
-      // The `result` pointer was saved on the stack.
-      UseScratchRegisterScope temps(&tasm);
-      Register scratch = temps.AcquireX();
-      __ Pop(padreg, scratch);
-      __ Str(dst_object, MemOperand(scratch));
-      __ Str(dst_slot, MemOperand(scratch, kSystemPointerSize));
-
-      __ Ret();
-
-      CodeDesc desc;
-      tasm.GetCode(nullptr, &desc);
-      if (v8_flags.print_code) {
-        Handle<Code> code =
-            Factory::CodeBuilder(isolate(), desc, CodeKind::FOR_TESTING)
-                .Build();
-        StdoutStream os;
-        code->Print(os);
-      }
-    }
-
-    // We need an isolate here to execute in the simulator.
-    auto f = GeneratedCode<void, byte**, byte*>::FromBuffer(isolate(),
-                                                            buffer->start());
-
-    byte* object = new byte[offset];
-    byte* result[] = {nullptr, nullptr};
-
-    f.Call(result, object);
-
-    // The first element must be the address of the object, and the second the
-    // slot addressed by `offset`.
-    EXPECT_EQ(result[0], &object[0]);
-    EXPECT_EQ(result[1], &object[offset]);
-
-    delete[] object;
-  }
-}
-
-INSTANTIATE_TEST_SUITE_P(TurboAssemblerTest,
-                         TurboAssemblerTestMoveObjectAndSlot,
-                         ::testing::ValuesIn(kMoveObjectAndSlotTestCases));
-
-#undef __
-#undef ERROR_MESSAGE
-
-}  // namespace internal
-}  // namespace v8
diff --git a/test/unittests/assembler/turbo-assembler-x64-unittest.cc b/test/unittests/assembler/turbo-assembler-x64-unittest.cc
deleted file mode 100644
index 43dd6b79d6..0000000000
--- a/test/unittests/assembler/turbo-assembler-x64-unittest.cc
+++ /dev/null
@@ -1,65 +0,0 @@
-// Copyright 2018 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "src/codegen/macro-assembler.h"
-#include "src/execution/simulator.h"
-#include "test/common/assembler-tester.h"
-#include "test/unittests/test-utils.h"
-#include "testing/gtest-support.h"
-
-namespace v8 {
-namespace internal {
-
-#define __ tasm.
-
-// Test the x64 assembler by compiling some simple functions into
-// a buffer and executing them.  These tests do not initialize the
-// V8 library, create a context, or use any V8 objects.
-
-class TurboAssemblerTest : public TestWithIsolate {};
-
-TEST_F(TurboAssemblerTest, TestHardAbort) {
-  auto buffer = AllocateAssemblerBuffer();
-  TurboAssembler tasm(isolate(), AssemblerOptions{}, CodeObjectRequired::kNo,
-                      buffer->CreateView());
-  __ set_root_array_available(false);
-  __ set_abort_hard(true);
-
-  __ Abort(AbortReason::kNoReason);
-
-  CodeDesc desc;
-  tasm.GetCode(isolate(), &desc);
-  buffer->MakeExecutable();
-  auto f = GeneratedCode<void>::FromBuffer(isolate(), buffer->start());
-
-  ASSERT_DEATH_IF_SUPPORTED({ f.Call(); }, "abort: no reason");
-}
-
-TEST_F(TurboAssemblerTest, TestCheck) {
-  auto buffer = AllocateAssemblerBuffer();
-  TurboAssembler tasm(isolate(), AssemblerOptions{}, CodeObjectRequired::kNo,
-                      buffer->CreateView());
-  __ set_root_array_available(false);
-  __ set_abort_hard(true);
-
-  // Fail if the first parameter is 17.
-  __ movl(rax, Immediate(17));
-  __ cmpl(rax, arg_reg_1);
-  __ Check(Condition::not_equal, AbortReason::kNoReason);
-  __ ret(0);
-
-  CodeDesc desc;
-  tasm.GetCode(isolate(), &desc);
-  buffer->MakeExecutable();
-  auto f = GeneratedCode<void, int>::FromBuffer(isolate(), buffer->start());
-
-  f.Call(0);
-  f.Call(18);
-  ASSERT_DEATH_IF_SUPPORTED({ f.Call(17); }, "abort: no reason");
-}
-
-#undef __
-
-}  // namespace internal
-}  // namespace v8
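[Editor's note, not part of the patch: every unit test in this CL follows the same execution pattern. Code is emitted into a test buffer, the buffer is made executable, and GeneratedCode binds the raw buffer to a typed signature so the generated stub can be called like a plain function; on cross-compiled builds the call runs in the simulator, which is why several tests comment that they need an isolate. A minimal sketch of the pattern, using the one-int-parameter signature of the TestCheck tests above:

    auto buffer = AllocateAssemblerBuffer();
    MacroAssembler masm(isolate(), AssemblerOptions{}, CodeObjectRequired::kNo,
                        buffer->CreateView());
    // ... emit the body of the stub with masm ...
    CodeDesc desc;
    masm.GetCode(isolate(), &desc);
    buffer->MakeExecutable();
    auto f = GeneratedCode<void, int>::FromBuffer(isolate(), buffer->start());
    f.Call(0);   // any value other than 17 passes the Check
    f.Call(18);
    ASSERT_DEATH_IF_SUPPORTED({ f.Call(17); }, "abort: no reason");
]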