[codegen] Merge Turbo and Macro assemblers

There is no real difference between MacroAssembler and TurboAssembler
anymore. The original idea was to separate out the thread-safe
operations, but the distinction got out of hand. With LocalHeaps,
thread safety can instead be ensured by passing a local_isolate.

In this CL:

- TurboAssemblerBase was renamed to MacroAssemblerBase, and the file
  containing it was renamed from turbo-assembler to macro-assembler-base.

- TurboAssembler and MacroAssembler were merged into a single
  MacroAssembler in each architecture.

- The turbo-assembler-unittests-arch tests were included in
  macro-assembler-unittests-arch.

- The variable name tasm was renamed to masm.

Bug: v8:13707
Change-Id: I716bbfc51b33ac890c72e8541e01af0af41b6770
Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/4212396
Commit-Queue: Victor Gomes <victorgomes@chromium.org>
Reviewed-by: Andreas Haas <ahaas@chromium.org>
Reviewed-by: Jakob Linke <jgruber@chromium.org>
Cr-Commit-Position: refs/heads/main@{#85610}
Authored by Victor Gomes on 2023-02-02 09:35:13 +01:00; committed by V8 LUCI CQ.
parent 7eff3cee05
commit ff1dba398d
108 changed files with 5101 additions and 5322 deletions
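
To summarize the shape of the change before the per-file hunks, here is a
minimal, self-contained C++ sketch of the old and new class hierarchies.
The class names, the using-declaration, and the enumerator names are taken
from the hunks below; the stub bodies and the exact enum grouping are
illustrative assumptions, not real V8 code.

// Before this CL (two-level hierarchy, merged away):
//   class TurboAssemblerBase { ... };
//   class TurboAssembler : public TurboAssemblerBase { ... };
//   class MacroAssembler : public TurboAssembler { ... };
//
// After this CL (one renamed base, one merged class):
class MacroAssemblerBase {};  // stub; the real base carries assembler state

class MacroAssembler : public MacroAssemblerBase {
 public:
  using MacroAssemblerBase::MacroAssemblerBase;

  // Members formerly spelled TurboAssembler::... now live here, e.g. the
  // argument-count modes used at the DropArguments call sites below.
  // (Enum grouping assumed for illustration.)
  enum ArgumentsCountMode { kCountIncludesReceiver, kCountExcludesReceiver };
  enum ArgumentsCountType { kCountIsInteger, kCountIsSmi, kCountIsBytes };
};

// Call sites change spelling only, for example:
//   __ DropArguments(params_size, TurboAssembler::kCountIsBytes,
//                    TurboAssembler::kCountIncludesReceiver);
// becomes
//   __ DropArguments(params_size, MacroAssembler::kCountIsBytes,
//                    MacroAssembler::kCountIncludesReceiver);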


@@ -1243,8 +1243,8 @@ filegroup(
         "src/codegen/tick-counter.h",
         "src/codegen/tnode.cc",
         "src/codegen/tnode.h",
-        "src/codegen/turbo-assembler.cc",
-        "src/codegen/turbo-assembler.h",
+        "src/codegen/macro-assembler-base.cc",
+        "src/codegen/macro-assembler-base.h",
         "src/codegen/unoptimized-compilation-info.cc",
         "src/codegen/unoptimized-compilation-info.h",
         "src/common/assert-scope.cc",


@@ -2833,6 +2833,7 @@ v8_header_set("v8_internal_headers") {
     "src/codegen/interface-descriptors.h",
     "src/codegen/label.h",
     "src/codegen/machine-type.h",
+    "src/codegen/macro-assembler-base.h",
     "src/codegen/macro-assembler-inl.h",
     "src/codegen/macro-assembler.h",
     "src/codegen/maglev-safepoint-table.h",
@@ -2853,7 +2854,6 @@ v8_header_set("v8_internal_headers") {
     "src/codegen/source-position.h",
     "src/codegen/tick-counter.h",
     "src/codegen/tnode.h",
-    "src/codegen/turbo-assembler.h",
     "src/codegen/unoptimized-compilation-info.h",
     "src/common/assert-scope.h",
     "src/common/checks.h",
@@ -4581,6 +4581,7 @@ v8_source_set("v8_base_without_compiler") {
     "src/codegen/handler-table.cc",
     "src/codegen/interface-descriptors.cc",
     "src/codegen/machine-type.cc",
+    "src/codegen/macro-assembler-base.cc",
     "src/codegen/maglev-safepoint-table.cc",
     "src/codegen/optimized-compilation-info.cc",
     "src/codegen/pending-optimization-table.cc",
@@ -4591,7 +4592,6 @@ v8_source_set("v8_base_without_compiler") {
     "src/codegen/source-position.cc",
     "src/codegen/tick-counter.cc",
     "src/codegen/tnode.cc",
-    "src/codegen/turbo-assembler.cc",
     "src/codegen/unoptimized-compilation-info.cc",
     "src/common/assert-scope.cc",
     "src/common/code-memory-access.cc",


@@ -570,8 +570,8 @@ void BaselineAssembler::EmitReturn(MacroAssembler* masm) {
   __ masm()->LeaveFrame(StackFrame::BASELINE);
   // Drop receiver + arguments.
-  __ masm()->DropArguments(params_size, TurboAssembler::kCountIsInteger,
-                           TurboAssembler::kCountIncludesReceiver);
+  __ masm()->DropArguments(params_size, MacroAssembler::kCountIsInteger,
+                           MacroAssembler::kCountIncludesReceiver);
   __ masm()->Ret();
 }


@@ -571,7 +571,7 @@ void BaselineAssembler::Switch(Register reg, int case_value_base,
   {
     const int instruction_count =
         num_labels * instructions_per_label + instructions_per_jump_target;
-    TurboAssembler::BlockPoolsScope block_pools(masm_,
+    MacroAssembler::BlockPoolsScope block_pools(masm_,
                                                 instruction_count * kInstrSize);
     __ Bind(&table);
     for (int i = 0; i < num_labels; ++i) {
@@ -630,7 +630,7 @@ void BaselineAssembler::EmitReturn(MacroAssembler* masm) {
   __ masm()->LeaveFrame(StackFrame::BASELINE);
   // Drop receiver + arguments.
-  __ masm()->DropArguments(params_size, TurboAssembler::kCountIncludesReceiver);
+  __ masm()->DropArguments(params_size, MacroAssembler::kCountIncludesReceiver);
   __ masm()->Ret();
 }


@@ -539,8 +539,8 @@ void BaselineAssembler::EmitReturn(MacroAssembler* masm) {
   // Drop receiver + arguments.
   __ masm()->DropArguments(params_size, scratch,
-                           TurboAssembler::kCountIsInteger,
-                           TurboAssembler::kCountIncludesReceiver);
+                           MacroAssembler::kCountIsInteger,
+                           MacroAssembler::kCountIncludesReceiver);
   __ masm()->Ret();
 }


@@ -533,8 +533,8 @@ void BaselineAssembler::EmitReturn(MacroAssembler* masm) {
   __ masm()->LeaveFrame(StackFrame::BASELINE);
   // Drop receiver + arguments.
-  __ masm()->DropArguments(params_size, TurboAssembler::kCountIsInteger,
-                           TurboAssembler::kCountIncludesReceiver);
+  __ masm()->DropArguments(params_size, MacroAssembler::kCountIsInteger,
+                           MacroAssembler::kCountIncludesReceiver);
   __ masm()->Ret();
 }


@@ -544,8 +544,8 @@ void BaselineAssembler::EmitReturn(MacroAssembler* masm) {
   __ masm()->LeaveFrame(StackFrame::BASELINE);
   // Drop receiver + arguments.
-  __ masm()->DropArguments(params_size, TurboAssembler::kCountIsInteger,
-                           TurboAssembler::kCountIncludesReceiver);
+  __ masm()->DropArguments(params_size, MacroAssembler::kCountIsInteger,
+                           MacroAssembler::kCountIncludesReceiver);
   __ masm()->Ret();
 }


@@ -684,8 +684,8 @@ void BaselineAssembler::EmitReturn(MacroAssembler* masm) {
   __ masm()->LeaveFrame(StackFrame::BASELINE);
   // Drop receiver + arguments.
-  __ masm()->DropArguments(params_size, TurboAssembler::kCountIsInteger,
-                           TurboAssembler::kCountIncludesReceiver);
+  __ masm()->DropArguments(params_size, MacroAssembler::kCountIsInteger,
+                           MacroAssembler::kCountIncludesReceiver);
   __ masm()->Ret();
 }


@@ -508,7 +508,7 @@ void BaselineAssembler::Switch(Register reg, int case_value_base,
   __ CalcScaledAddress(t6, t6, reg, entry_size_log2);
   __ Jump(t6);
   {
-    TurboAssembler::BlockTrampolinePoolScope(masm());
+    MacroAssembler::BlockTrampolinePoolScope(masm());
     __ BlockTrampolinePoolFor(num_labels * kInstrSize * 2);
     __ bind(&table);
     for (int i = 0; i < num_labels; ++i) {


@@ -692,8 +692,8 @@ void BaselineAssembler::EmitReturn(MacroAssembler* masm) {
   __ masm()->LeaveFrame(StackFrame::BASELINE);
   // Drop receiver + arguments.
-  __ masm()->DropArguments(params_size, TurboAssembler::kCountIsInteger,
-                           TurboAssembler::kCountIncludesReceiver);
+  __ masm()->DropArguments(params_size, MacroAssembler::kCountIsInteger,
+                           MacroAssembler::kCountIncludesReceiver);
   __ masm()->Ret();
 }


@@ -587,8 +587,8 @@ void BaselineAssembler::EmitReturn(MacroAssembler* masm) {
   // Drop receiver + arguments.
   __ masm()->DropArguments(params_size, scratch,
-                           TurboAssembler::kCountIsInteger,
-                           TurboAssembler::kCountIncludesReceiver);
+                           MacroAssembler::kCountIsInteger,
+                           MacroAssembler::kCountIncludesReceiver);
   __ masm()->Ret();
 }


@@ -130,8 +130,8 @@ void Generate_JSBuiltinsConstructStubHelper(MacroAssembler* masm) {
   }
   // Remove caller arguments from the stack and return.
-  __ DropArguments(scratch, TurboAssembler::kCountIsSmi,
-                   TurboAssembler::kCountIncludesReceiver);
+  __ DropArguments(scratch, MacroAssembler::kCountIsSmi,
+                   MacroAssembler::kCountIncludesReceiver);
   __ Jump(lr);
   __ bind(&stack_overflow);
@@ -278,8 +278,8 @@ void Builtins::Generate_JSConstructStubGeneric(MacroAssembler* masm) {
   __ LeaveFrame(StackFrame::CONSTRUCT);
   // Remove caller arguments from the stack and return.
-  __ DropArguments(r1, TurboAssembler::kCountIsSmi,
-                   TurboAssembler::kCountIncludesReceiver);
+  __ DropArguments(r1, MacroAssembler::kCountIsSmi,
+                   MacroAssembler::kCountIncludesReceiver);
   __ Jump(lr);
   __ bind(&check_receiver);
@@ -826,8 +826,8 @@ static void LeaveInterpreterFrame(MacroAssembler* masm, Register scratch1,
   __ LeaveFrame(StackFrame::INTERPRETED);
   // Drop receiver + arguments.
-  __ DropArguments(params_size, TurboAssembler::kCountIsBytes,
-                   TurboAssembler::kCountIncludesReceiver);
+  __ DropArguments(params_size, MacroAssembler::kCountIsBytes,
+                   MacroAssembler::kCountIncludesReceiver);
 }
 // Advance the current bytecode offset. This simulates what all bytecode
@@ -1352,7 +1352,7 @@ static void GenerateInterpreterPushArgs(MacroAssembler* masm, Register num_args,
   __ sub(start_address, start_address, scratch);
   // Push the arguments.
   __ PushArray(start_address, num_args, scratch,
-               TurboAssembler::PushArrayOrder::kReverse);
+               MacroAssembler::PushArrayOrder::kReverse);
 }
 // static
@@ -1820,8 +1820,8 @@ void Builtins::Generate_FunctionPrototypeApply(MacroAssembler* masm) {
     __ ldr(r5, MemOperand(sp, kSystemPointerSize), ge);      // thisArg
     __ cmp(r0, Operand(JSParameterCount(2)), ge);
     __ ldr(r2, MemOperand(sp, 2 * kSystemPointerSize), ge);  // argArray
-    __ DropArgumentsAndPushNewReceiver(r0, r5, TurboAssembler::kCountIsInteger,
-                                       TurboAssembler::kCountIncludesReceiver);
+    __ DropArgumentsAndPushNewReceiver(r0, r5, MacroAssembler::kCountIsInteger,
+                                       MacroAssembler::kCountIncludesReceiver);
   }
   // ----------- S t a t e -------------
@@ -1897,8 +1897,8 @@ void Builtins::Generate_ReflectApply(MacroAssembler* masm) {
     __ ldr(r5, MemOperand(sp, 2 * kSystemPointerSize), ge);  // thisArgument
     __ cmp(r0, Operand(JSParameterCount(3)), ge);
     __ ldr(r2, MemOperand(sp, 3 * kSystemPointerSize), ge);  // argumentsList
-    __ DropArgumentsAndPushNewReceiver(r0, r5, TurboAssembler::kCountIsInteger,
-                                       TurboAssembler::kCountIncludesReceiver);
+    __ DropArgumentsAndPushNewReceiver(r0, r5, MacroAssembler::kCountIsInteger,
+                                       MacroAssembler::kCountIncludesReceiver);
   }
   // ----------- S t a t e -------------
@@ -1940,8 +1940,8 @@ void Builtins::Generate_ReflectConstruct(MacroAssembler* masm) {
     __ ldr(r2, MemOperand(sp, 2 * kSystemPointerSize), ge);  // argumentsList
     __ cmp(r0, Operand(JSParameterCount(3)), ge);
     __ ldr(r3, MemOperand(sp, 3 * kSystemPointerSize), ge);  // new.target
-    __ DropArgumentsAndPushNewReceiver(r0, r4, TurboAssembler::kCountIsInteger,
-                                       TurboAssembler::kCountIncludesReceiver);
+    __ DropArgumentsAndPushNewReceiver(r0, r4, MacroAssembler::kCountIsInteger,
+                                       MacroAssembler::kCountIncludesReceiver);
   }
   // ----------- S t a t e -------------


@@ -163,7 +163,7 @@ void Generate_JSBuiltinsConstructStubHelper(MacroAssembler* masm) {
   }
   // Remove caller arguments from the stack and return.
-  __ DropArguments(x1, TurboAssembler::kCountIncludesReceiver);
+  __ DropArguments(x1, MacroAssembler::kCountIncludesReceiver);
   __ Ret();
   __ Bind(&stack_overflow);
@@ -348,7 +348,7 @@ void Builtins::Generate_JSConstructStubGeneric(MacroAssembler* masm) {
   // Leave construct frame.
   __ LeaveFrame(StackFrame::CONSTRUCT);
   // Remove caller arguments from the stack and return.
-  __ DropArguments(x1, TurboAssembler::kCountIncludesReceiver);
+  __ DropArguments(x1, MacroAssembler::kCountIncludesReceiver);
   __ Ret();
   // Otherwise we do a smi check and fall through to check if the return value
@@ -1205,7 +1205,7 @@ void Builtins::Generate_BaselineOutOfLinePrologue(MacroAssembler* masm) {
   {
     ASM_CODE_COMMENT_STRING(masm, "Optimized marker check");
     // Drop the frame created by the baseline call.
-    __ Pop<TurboAssembler::kAuthLR>(fp, lr);
+    __ Pop<MacroAssembler::kAuthLR>(fp, lr);
     __ OptimizeCodeOrTailCallOptimizedCodeSlot(flags, feedback_vector);
     __ Trap();
   }
@@ -1330,7 +1330,7 @@ void Builtins::Generate_InterpreterEntryTrampoline(
   // the frame (that is done below).
   __ Bind(&push_stack_frame);
   FrameScope frame_scope(masm, StackFrame::MANUAL);
-  __ Push<TurboAssembler::kSignLR>(lr, fp);
+  __ Push<MacroAssembler::kSignLR>(lr, fp);
   __ mov(fp, sp);
   __ Push(cp, closure);
@@ -1342,7 +1342,7 @@
   // Push actual argument count, bytecode array, Smi tagged bytecode array
   // offset and an undefined (to properly align the stack pointer).
-  static_assert(TurboAssembler::kExtraSlotClaimedByPrologue == 1);
+  static_assert(MacroAssembler::kExtraSlotClaimedByPrologue == 1);
   __ SmiTag(x6, kInterpreterBytecodeOffsetRegister);
   __ Push(kJavaScriptCallArgCountRegister, kInterpreterBytecodeArrayRegister);
   __ LoadRoot(kInterpreterAccumulatorRegister, RootIndex::kUndefinedValue);
@@ -1582,7 +1582,7 @@ static void GenerateInterpreterPushArgs(MacroAssembler* masm, Register num_args,
   }
   __ CopyDoubleWords(stack_addr, last_arg_addr, slots_to_copy,
-                     TurboAssembler::kDstLessThanSrcAndReverse);
+                     MacroAssembler::kDstLessThanSrcAndReverse);
   if (receiver_mode == ConvertReceiverMode::kNullOrUndefined) {
     // Store "undefined" as the receiver arg if we need to.
@@ -1882,7 +1882,7 @@ void Generate_ContinueToBuiltinHelper(MacroAssembler* masm,
   // Restore fp, lr.
   __ Mov(sp, fp);
-  __ Pop<TurboAssembler::kAuthLR>(fp, lr);
+  __ Pop<MacroAssembler::kAuthLR>(fp, lr);
   __ LoadEntryFromBuiltinIndex(builtin);
   __ Jump(builtin);
@@ -2069,7 +2069,7 @@ void Builtins::Generate_FunctionPrototypeApply(MacroAssembler* masm) {
     __ Peek(arg_array, 2 * kSystemPointerSize);
     __ bind(&done);
   }
-  __ DropArguments(argc, TurboAssembler::kCountIncludesReceiver);
+  __ DropArguments(argc, MacroAssembler::kCountIncludesReceiver);
   __ PushArgument(this_arg);
   // ----------- S t a t e -------------
@@ -2158,7 +2158,7 @@ void Builtins::Generate_FunctionPrototypeCall(MacroAssembler* masm) {
     __ SlotAddress(copy_from, count);
     __ Add(copy_to, copy_from, kSystemPointerSize);
     __ CopyDoubleWords(copy_to, copy_from, count,
-                       TurboAssembler::kSrcLessThanDst);
+                       MacroAssembler::kSrcLessThanDst);
     __ Drop(2);
   }
@@ -2206,7 +2206,7 @@ void Builtins::Generate_ReflectApply(MacroAssembler* masm) {
     __ Peek(arguments_list, 3 * kSystemPointerSize);
     __ bind(&done);
   }
-  __ DropArguments(argc, TurboAssembler::kCountIncludesReceiver);
+  __ DropArguments(argc, MacroAssembler::kCountIncludesReceiver);
   __ PushArgument(this_argument);
   // ----------- S t a t e -------------
@@ -2264,7 +2264,7 @@ void Builtins::Generate_ReflectConstruct(MacroAssembler* masm) {
     __ bind(&done);
   }
-  __ DropArguments(argc, TurboAssembler::kCountIncludesReceiver);
+  __ DropArguments(argc, MacroAssembler::kCountIncludesReceiver);
   // Push receiver (undefined).
   __ PushArgument(undefined_value);
@@ -2662,7 +2662,7 @@ void Generate_PushBoundArguments(MacroAssembler* masm) {
       __ SlotAddress(copy_to, total_argc);
       __ Sub(copy_from, copy_to, kSystemPointerSize);
       __ CopyDoubleWords(copy_to, copy_from, argc,
-                         TurboAssembler::kSrcLessThanDst);
+                         MacroAssembler::kSrcLessThanDst);
     }
   }
@@ -2996,7 +2996,7 @@ void Builtins::Generate_WasmLiftoffFrameSetup(MacroAssembler* masm) {
   // Save registers.
   __ PushXRegList(kSavedGpRegs);
   __ PushQRegList(kSavedFpRegs);
-  __ Push<TurboAssembler::kSignLR>(lr, xzr);  // xzr is for alignment.
+  __ Push<MacroAssembler::kSignLR>(lr, xzr);  // xzr is for alignment.
   // Arguments to the runtime function: instance, func_index, and an
   // additional stack slot for the NativeModule. The first pushed register
@@ -3008,7 +3008,7 @@
   __ Mov(vector, kReturnRegister0);
   // Restore registers and frame type.
-  __ Pop<TurboAssembler::kAuthLR>(xzr, lr);
+  __ Pop<MacroAssembler::kAuthLR>(xzr, lr);
   __ PopQRegList(kSavedFpRegs);
   __ PopXRegList(kSavedGpRegs);
   // Restore the instance from the frame.
@@ -3263,8 +3263,8 @@ void ReloadParentContinuation(MacroAssembler* masm, Register wasm_instance,
   // Update active continuation root.
   int32_t active_continuation_offset =
-      TurboAssembler::RootRegisterOffsetForRootIndex(
+      MacroAssembler::RootRegisterOffsetForRootIndex(
           RootIndex::kActiveContinuation);
   __ Str(parent, MemOperand(kRootRegister, active_continuation_offset));
   jmpbuf = parent;
   __ LoadExternalPointerField(
@@ -3313,8 +3313,8 @@ void RestoreParentSuspender(MacroAssembler* masm, Register tmp1,
   __ StoreTaggedField(tmp2, state_loc);
   __ bind(&undefined);
   int32_t active_suspender_offset =
-      TurboAssembler::RootRegisterOffsetForRootIndex(
+      MacroAssembler::RootRegisterOffsetForRootIndex(
          RootIndex::kActiveSuspender);
   __ Str(suspender, MemOperand(kRootRegister, active_suspender_offset));
 }
@@ -4317,7 +4317,7 @@ void GenericJSToWasmWrapperHelper(MacroAssembler* masm, bool stack_switch) {
   // expected to be on the top of the stack).
   // We cannot use just the ret instruction for this, because we cannot pass
   // the number of slots to remove in a Register as an argument.
-  __ DropArguments(param_count, TurboAssembler::kCountExcludesReceiver);
+  __ DropArguments(param_count, MacroAssembler::kCountExcludesReceiver);
   __ Ret(lr);
   // -------------------------------------------
@@ -4522,14 +4522,15 @@ void Builtins::Generate_WasmSuspend(MacroAssembler* masm) {
       FieldMemOperand(suspender_continuation,
                       WasmContinuationObject::kParentOffset));
   int32_t active_continuation_offset =
-      TurboAssembler::RootRegisterOffsetForRootIndex(
+      MacroAssembler::RootRegisterOffsetForRootIndex(
          RootIndex::kActiveContinuation);
   __ Str(caller, MemOperand(kRootRegister, active_continuation_offset));
   DEFINE_REG(parent);
   __ LoadAnyTaggedField(
       parent, FieldMemOperand(suspender, WasmSuspenderObject::kParentOffset));
   int32_t active_suspender_offset =
-      TurboAssembler::RootRegisterOffsetForRootIndex(RootIndex::kActiveSuspender);
+      MacroAssembler::RootRegisterOffsetForRootIndex(
+          RootIndex::kActiveSuspender);
   __ Str(parent, MemOperand(kRootRegister, active_suspender_offset));
   regs.ResetExcept(promise, caller);
@@ -4660,8 +4661,8 @@ void Generate_WasmResumeHelper(MacroAssembler* masm, wasm::OnResume on_resume) {
       scratch,
       FieldMemOperand(suspender, WasmSuspenderObject::kStateOffset));
   int32_t active_suspender_offset =
-      TurboAssembler::RootRegisterOffsetForRootIndex(
+      MacroAssembler::RootRegisterOffsetForRootIndex(
          RootIndex::kActiveSuspender);
   __ Str(suspender, MemOperand(kRootRegister, active_suspender_offset));
   // Next line we are going to load a field from suspender, but we have to use
@@ -4685,8 +4686,8 @@ void Generate_WasmResumeHelper(MacroAssembler* masm, wasm::OnResume on_resume) {
       active_continuation, kLRHasBeenSaved, SaveFPRegsMode::kIgnore);
   FREE_REG(active_continuation);
   int32_t active_continuation_offset =
-      TurboAssembler::RootRegisterOffsetForRootIndex(
+      MacroAssembler::RootRegisterOffsetForRootIndex(
          RootIndex::kActiveContinuation);
   __ Str(target_continuation,
          MemOperand(kRootRegister, active_continuation_offset));
@@ -4731,7 +4732,7 @@ void Generate_WasmResumeHelper(MacroAssembler* masm, wasm::OnResume on_resume) {
   __ bind(&suspend);
   __ LeaveFrame(StackFrame::STACK_SWITCH);
   // Pop receiver + parameter.
-  __ DropArguments(2, TurboAssembler::kCountIncludesReceiver);
+  __ DropArguments(2, MacroAssembler::kCountIncludesReceiver);
   __ Ret(lr);
 }
 }  // namespace
@@ -5384,9 +5385,9 @@ void Builtins::Generate_DirectCEntry(MacroAssembler* masm) {
   // DirectCEntry places the return address on the stack (updated by the GC),
   // making the call GC safe. The irregexp backend relies on this.
-  __ Poke<TurboAssembler::kSignLR>(lr, 0);  // Store the return address.
+  __ Poke<MacroAssembler::kSignLR>(lr, 0);  // Store the return address.
   __ Blr(x10);                              // Call the C++ function.
-  __ Peek<TurboAssembler::kAuthLR>(lr, 0);  // Return to calling code.
+  __ Peek<MacroAssembler::kAuthLR>(lr, 0);  // Return to calling code.
   __ AssertFPCRState();
   __ Ret();
 }


@@ -125,8 +125,8 @@ void Generate_JSBuiltinsConstructStubHelper(MacroAssembler* masm) {
   }
   // Remove caller arguments from the stack and return.
-  __ DropArguments(edx, ecx, TurboAssembler::kCountIsSmi,
-                   TurboAssembler::kCountIncludesReceiver);
+  __ DropArguments(edx, ecx, MacroAssembler::kCountIsSmi,
+                   MacroAssembler::kCountIncludesReceiver);
   __ ret(0);
   __ bind(&stack_overflow);
@@ -280,8 +280,8 @@ void Builtins::Generate_JSConstructStubGeneric(MacroAssembler* masm) {
   __ LeaveFrame(StackFrame::CONSTRUCT);
   // Remove caller arguments from the stack and return.
-  __ DropArguments(edx, ecx, TurboAssembler::kCountIsSmi,
-                   TurboAssembler::kCountIncludesReceiver);
+  __ DropArguments(edx, ecx, MacroAssembler::kCountIsSmi,
+                   MacroAssembler::kCountIncludesReceiver);
   __ ret(0);
   // Otherwise we do a smi check and fall through to check if the return value
@@ -768,8 +768,8 @@ static void LeaveInterpreterFrame(MacroAssembler* masm, Register scratch1,
   __ leave();
   // Drop receiver + arguments.
-  __ DropArguments(params_size, scratch2, TurboAssembler::kCountIsBytes,
-                   TurboAssembler::kCountIncludesReceiver);
+  __ DropArguments(params_size, scratch2, MacroAssembler::kCountIsBytes,
+                   MacroAssembler::kCountIncludesReceiver);
 }
 // Advance the current bytecode offset. This simulates what all bytecode
@@ -1810,8 +1810,8 @@ void Builtins::Generate_FunctionPrototypeApply(MacroAssembler* masm) {
   }
   __ bind(&no_this_arg);
   __ DropArgumentsAndPushNewReceiver(eax, edi, ecx,
-                                     TurboAssembler::kCountIsInteger,
-                                     TurboAssembler::kCountIncludesReceiver);
+                                     MacroAssembler::kCountIsInteger,
+                                     MacroAssembler::kCountIncludesReceiver);
   // Restore receiver to edi.
   __ movd(edi, xmm0);
@@ -1919,8 +1919,8 @@ void Builtins::Generate_ReflectApply(MacroAssembler* masm) {
   __ movd(xmm0, edx);
   __ DropArgumentsAndPushNewReceiver(eax, ecx, edx,
-                                     TurboAssembler::kCountIsInteger,
-                                     TurboAssembler::kCountIncludesReceiver);
+                                     MacroAssembler::kCountIsInteger,
+                                     MacroAssembler::kCountIncludesReceiver);
   // Restore argumentsList.
   __ movd(edx, xmm0);
@@ -1978,8 +1978,8 @@ void Builtins::Generate_ReflectConstruct(MacroAssembler* masm) {
   __ DropArgumentsAndPushNewReceiver(
       eax, masm->RootAsOperand(RootIndex::kUndefinedValue), ecx,
-      TurboAssembler::kCountIsInteger,
-      TurboAssembler::kCountIncludesReceiver);
+      MacroAssembler::kCountIsInteger,
+      MacroAssembler::kCountIncludesReceiver);
   // Restore argumentsList.
   __ movd(ecx, xmm0);


@@ -112,8 +112,8 @@ void Generate_JSBuiltinsConstructStubHelper(MacroAssembler* masm) {
   }
   // Remove caller arguments from the stack and return.
-  __ DropArguments(t3, TurboAssembler::kCountIsSmi,
-                   TurboAssembler::kCountIncludesReceiver, t3);
+  __ DropArguments(t3, MacroAssembler::kCountIsSmi,
+                   MacroAssembler::kCountIncludesReceiver, t3);
   __ Ret();
 }
@@ -267,8 +267,8 @@ void Builtins::Generate_JSConstructStubGeneric(MacroAssembler* masm) {
   __ LeaveFrame(StackFrame::CONSTRUCT);
   // Remove caller arguments from the stack and return.
-  __ DropArguments(a1, TurboAssembler::kCountIsSmi,
-                   TurboAssembler::kCountIncludesReceiver, a4);
+  __ DropArguments(a1, MacroAssembler::kCountIsSmi,
+                   MacroAssembler::kCountIncludesReceiver, a4);
   __ Ret();
   __ bind(&check_receiver);
@@ -803,8 +803,8 @@ static void LeaveInterpreterFrame(MacroAssembler* masm, Register scratch1,
   __ LeaveFrame(StackFrame::INTERPRETED);
   // Drop receiver + arguments.
-  __ DropArguments(params_size, TurboAssembler::kCountIsBytes,
-                   TurboAssembler::kCountIncludesReceiver);
+  __ DropArguments(params_size, MacroAssembler::kCountIsBytes,
+                   MacroAssembler::kCountIncludesReceiver);
 }
 // Advance the current bytecode offset. This simulates what all bytecode
@@ -1328,7 +1328,7 @@ static void GenerateInterpreterPushArgs(MacroAssembler* masm, Register num_args,
   // Push the arguments.
   __ PushArray(start_address, num_args, scratch, scratch2,
-               TurboAssembler::PushArrayOrder::kReverse);
+               MacroAssembler::PushArrayOrder::kReverse);
 }
 // static
@@ -1794,8 +1794,8 @@ void Builtins::Generate_FunctionPrototypeApply(MacroAssembler* masm) {
     __ Movz(arg_array, undefined_value, scratch);  // if argc == 1
     __ Ld_d(receiver, MemOperand(sp, 0));
     __ DropArgumentsAndPushNewReceiver(argc, this_arg,
-                                       TurboAssembler::kCountIsInteger,
-                                       TurboAssembler::kCountIncludesReceiver);
+                                       MacroAssembler::kCountIsInteger,
+                                       MacroAssembler::kCountIncludesReceiver);
   }
   // ----------- S t a t e -------------
@@ -1889,8 +1889,8 @@ void Builtins::Generate_ReflectApply(MacroAssembler* masm) {
     __ Movz(arguments_list, undefined_value, scratch);  // if argc == 2
     __ DropArgumentsAndPushNewReceiver(argc, this_argument,
-                                       TurboAssembler::kCountIsInteger,
-                                       TurboAssembler::kCountIncludesReceiver);
+                                       MacroAssembler::kCountIsInteger,
+                                       MacroAssembler::kCountIncludesReceiver);
   }
   // ----------- S t a t e -------------
@@ -1949,8 +1949,8 @@ void Builtins::Generate_ReflectConstruct(MacroAssembler* masm) {
     __ Movz(new_target, target, scratch);  // if argc == 2
     __ DropArgumentsAndPushNewReceiver(argc, undefined_value,
-                                       TurboAssembler::kCountIsInteger,
-                                       TurboAssembler::kCountIncludesReceiver);
+                                       MacroAssembler::kCountIsInteger,
+                                       MacroAssembler::kCountIncludesReceiver);
   }
   // ----------- S t a t e -------------


@@ -112,8 +112,8 @@ void Generate_JSBuiltinsConstructStubHelper(MacroAssembler* masm) {
   }
   // Remove caller arguments from the stack and return.
-  __ DropArguments(t3, TurboAssembler::kCountIsSmi,
-                   TurboAssembler::kCountIncludesReceiver, t3);
+  __ DropArguments(t3, MacroAssembler::kCountIsSmi,
+                   MacroAssembler::kCountIncludesReceiver, t3);
   __ Ret();
 }
@@ -267,8 +267,8 @@ void Builtins::Generate_JSConstructStubGeneric(MacroAssembler* masm) {
   __ LeaveFrame(StackFrame::CONSTRUCT);
   // Remove caller arguments from the stack and return.
-  __ DropArguments(a1, TurboAssembler::kCountIsSmi,
-                   TurboAssembler::kCountIncludesReceiver, a4);
+  __ DropArguments(a1, MacroAssembler::kCountIsSmi,
+                   MacroAssembler::kCountIncludesReceiver, a4);
   __ Ret();
   __ bind(&check_receiver);
@@ -804,8 +804,8 @@ static void LeaveInterpreterFrame(MacroAssembler* masm, Register scratch1,
   __ LeaveFrame(StackFrame::INTERPRETED);
   // Drop receiver + arguments.
-  __ DropArguments(params_size, TurboAssembler::kCountIsBytes,
-                   TurboAssembler::kCountIncludesReceiver);
+  __ DropArguments(params_size, MacroAssembler::kCountIsBytes,
+                   MacroAssembler::kCountIncludesReceiver);
 }
 // Advance the current bytecode offset. This simulates what all bytecode
@@ -1320,7 +1320,7 @@ static void GenerateInterpreterPushArgs(MacroAssembler* masm, Register num_args,
   // Push the arguments.
   __ PushArray(start_address, num_args, scratch, scratch2,
-               TurboAssembler::PushArrayOrder::kReverse);
+               MacroAssembler::PushArrayOrder::kReverse);
 }
 // static
@@ -1784,8 +1784,8 @@ void Builtins::Generate_FunctionPrototypeApply(MacroAssembler* masm) {
     __ Movz(arg_array, undefined_value, scratch);  // if argc == 1
     __ Ld(receiver, MemOperand(sp));
     __ DropArgumentsAndPushNewReceiver(argc, this_arg,
-                                       TurboAssembler::kCountIsInteger,
-                                       TurboAssembler::kCountIncludesReceiver);
+                                       MacroAssembler::kCountIsInteger,
+                                       MacroAssembler::kCountIncludesReceiver);
   }
   // ----------- S t a t e -------------
@@ -1881,8 +1881,8 @@ void Builtins::Generate_ReflectApply(MacroAssembler* masm) {
     __ Movz(arguments_list, undefined_value, scratch);  // if argc == 2
     __ DropArgumentsAndPushNewReceiver(argc, this_argument,
-                                       TurboAssembler::kCountIsInteger,
-                                       TurboAssembler::kCountIncludesReceiver);
+                                       MacroAssembler::kCountIsInteger,
+                                       MacroAssembler::kCountIncludesReceiver);
   }
   // ----------- S t a t e -------------
@@ -1941,8 +1941,8 @@ void Builtins::Generate_ReflectConstruct(MacroAssembler* masm) {
     __ Movz(new_target, target, scratch);  // if argc == 2
     __ DropArgumentsAndPushNewReceiver(argc, undefined_value,
-                                       TurboAssembler::kCountIsInteger,
-                                       TurboAssembler::kCountIncludesReceiver);
+                                       MacroAssembler::kCountIsInteger,
+                                       MacroAssembler::kCountIncludesReceiver);
   }
   // ----------- S t a t e -------------


@@ -361,8 +361,8 @@ void Generate_JSBuiltinsConstructStubHelper(MacroAssembler* masm) {
     // Leave construct frame.
   }
   // Remove caller arguments from the stack and return.
-  __ DropArguments(scratch, TurboAssembler::kCountIsSmi,
-                   TurboAssembler::kCountIncludesReceiver);
+  __ DropArguments(scratch, MacroAssembler::kCountIsSmi,
+                   MacroAssembler::kCountIncludesReceiver);
   __ blr();
   __ bind(&stack_overflow);
@@ -611,8 +611,8 @@ void Builtins::Generate_JSConstructStubGeneric(MacroAssembler* masm) {
   __ LeaveFrame(StackFrame::CONSTRUCT);
   // Remove caller arguments from the stack and return.
-  __ DropArguments(r4, TurboAssembler::kCountIsSmi,
-                   TurboAssembler::kCountIncludesReceiver);
+  __ DropArguments(r4, MacroAssembler::kCountIsSmi,
+                   MacroAssembler::kCountIncludesReceiver);
   __ blr();
   __ bind(&check_receiver);
@@ -1119,8 +1119,8 @@ static void LeaveInterpreterFrame(MacroAssembler* masm, Register scratch1,
   // Leave the frame (also dropping the register file).
   __ LeaveFrame(StackFrame::INTERPRETED);
-  __ DropArguments(params_size, TurboAssembler::kCountIsBytes,
-                   TurboAssembler::kCountIncludesReceiver);
+  __ DropArguments(params_size, MacroAssembler::kCountIsBytes,
+                   MacroAssembler::kCountIncludesReceiver);
 }
 // Advance the current bytecode offset. This simulates what all bytecode
@@ -1636,7 +1636,7 @@ static void GenerateInterpreterPushArgs(MacroAssembler* masm, Register num_args,
   __ sub(start_address, start_address, scratch);
   // Push the arguments.
   __ PushArray(start_address, num_args, scratch, r0,
-               TurboAssembler::PushArrayOrder::kReverse);
+               MacroAssembler::PushArrayOrder::kReverse);
 }
 // static
@@ -2027,8 +2027,8 @@ void Builtins::Generate_FunctionPrototypeApply(MacroAssembler* masm) {
   __ LoadU64(r5, MemOperand(sp, 2 * kSystemPointerSize));  // argArray
   __ bind(&done);
-  __ DropArgumentsAndPushNewReceiver(r3, r8, TurboAssembler::kCountIsInteger,
-                                     TurboAssembler::kCountIncludesReceiver);
+  __ DropArgumentsAndPushNewReceiver(r3, r8, MacroAssembler::kCountIsInteger,
+                                     MacroAssembler::kCountIncludesReceiver);
 }
 // ----------- S t a t e -------------
@@ -2111,8 +2111,8 @@ void Builtins::Generate_ReflectApply(MacroAssembler* masm) {
   __ LoadU64(r5, MemOperand(sp, 3 * kSystemPointerSize));  // argArray
   __ bind(&done);
-  __ DropArgumentsAndPushNewReceiver(r3, r8, TurboAssembler::kCountIsInteger,
-                                     TurboAssembler::kCountIncludesReceiver);
+  __ DropArgumentsAndPushNewReceiver(r3, r8, MacroAssembler::kCountIsInteger,
+                                     MacroAssembler::kCountIncludesReceiver);
 }
 // ----------- S t a t e -------------
@@ -2160,8 +2160,8 @@ void Builtins::Generate_ReflectConstruct(MacroAssembler* masm) {
   __ blt(&done);
   __ LoadU64(r6, MemOperand(sp, 3 * kSystemPointerSize));  // argArray
   __ bind(&done);
-  __ DropArgumentsAndPushNewReceiver(r3, r7, TurboAssembler::kCountIsInteger,
-                                     TurboAssembler::kCountIncludesReceiver);
+  __ DropArgumentsAndPushNewReceiver(r3, r7, MacroAssembler::kCountIsInteger,
+                                     MacroAssembler::kCountIncludesReceiver);
 }
 // ----------- S t a t e -------------


@@ -1381,7 +1381,7 @@ static void GenerateInterpreterPushArgs(MacroAssembler* masm, Register num_args,
   // Push the arguments.
   __ PushArray(start_address, num_args,
-               TurboAssembler::PushArrayOrder::kReverse);
+               MacroAssembler::PushArrayOrder::kReverse);
 }
 // static


@@ -428,8 +428,8 @@ void Generate_JSBuiltinsConstructStubHelper(MacroAssembler* masm) {
     // Leave construct frame.
   }
   // Remove caller arguments from the stack and return.
-  __ DropArguments(scratch, TurboAssembler::kCountIsSmi,
-                   TurboAssembler::kCountIncludesReceiver);
+  __ DropArguments(scratch, MacroAssembler::kCountIsSmi,
+                   MacroAssembler::kCountIncludesReceiver);
   __ Ret();
   __ bind(&stack_overflow);
@@ -584,8 +584,8 @@ void Builtins::Generate_JSConstructStubGeneric(MacroAssembler* masm) {
   __ LeaveFrame(StackFrame::CONSTRUCT);
   // Remove caller arguments from the stack and return.
-  __ DropArguments(r3, TurboAssembler::kCountIsSmi,
-                   TurboAssembler::kCountIncludesReceiver);
+  __ DropArguments(r3, MacroAssembler::kCountIsSmi,
+                   MacroAssembler::kCountIncludesReceiver);
   __ Ret();
   __ bind(&check_receiver);
@@ -1148,8 +1148,8 @@ static void LeaveInterpreterFrame(MacroAssembler* masm, Register scratch1,
   // Leave the frame (also dropping the register file).
   __ LeaveFrame(StackFrame::INTERPRETED);
-  __ DropArguments(params_size, TurboAssembler::kCountIsBytes,
-                   TurboAssembler::kCountIncludesReceiver);
+  __ DropArguments(params_size, MacroAssembler::kCountIsBytes,
+                   MacroAssembler::kCountIncludesReceiver);
 }
 // Advance the current bytecode offset. This simulates what all bytecode
@@ -1657,7 +1657,7 @@ static void GenerateInterpreterPushArgs(MacroAssembler* masm, Register num_args,
   __ SubS64(start_address, start_address, scratch);
   // Push the arguments.
   __ PushArray(start_address, num_args, r1, scratch,
-               TurboAssembler::PushArrayOrder::kReverse);
+               MacroAssembler::PushArrayOrder::kReverse);
 }
 // static
@@ -2022,8 +2022,8 @@ void Builtins::Generate_FunctionPrototypeApply(MacroAssembler* masm) {
   __ LoadU64(r4, MemOperand(sp, 2 * kSystemPointerSize));  // argArray
   __ bind(&done);
-  __ DropArgumentsAndPushNewReceiver(r2, r7, TurboAssembler::kCountIsInteger,
-                                     TurboAssembler::kCountIncludesReceiver);
+  __ DropArgumentsAndPushNewReceiver(r2, r7, MacroAssembler::kCountIsInteger,
+                                     MacroAssembler::kCountIncludesReceiver);
 }
 // ----------- S t a t e -------------
@@ -2107,8 +2107,8 @@ void Builtins::Generate_ReflectApply(MacroAssembler* masm) {
   __ LoadU64(r4, MemOperand(sp, 3 * kSystemPointerSize));  // argArray
   __ bind(&done);
-  __ DropArgumentsAndPushNewReceiver(r2, r7, TurboAssembler::kCountIsInteger,
-                                     TurboAssembler::kCountIncludesReceiver);
+  __ DropArgumentsAndPushNewReceiver(r2, r7, MacroAssembler::kCountIsInteger,
+                                     MacroAssembler::kCountIncludesReceiver);
 }
 // ----------- S t a t e -------------
@@ -2157,8 +2157,8 @@ void Builtins::Generate_ReflectConstruct(MacroAssembler* masm) {
   __ blt(&done);
   __ LoadU64(r5, MemOperand(sp, 3 * kSystemPointerSize));  // argArray
   __ bind(&done);
-  __ DropArgumentsAndPushNewReceiver(r2, r6, TurboAssembler::kCountIsInteger,
-                                     TurboAssembler::kCountIncludesReceiver);
+  __ DropArgumentsAndPushNewReceiver(r2, r6, MacroAssembler::kCountIsInteger,
+                                     MacroAssembler::kCountIncludesReceiver);
 }
 // ----------- S t a t e -------------


@@ -125,7 +125,7 @@ void Generate_JSBuiltinsConstructStubHelper(MacroAssembler* masm) {
   // Remove caller arguments from the stack and return.
   __ DropArguments(rbx, rcx, MacroAssembler::kCountIsSmi,
-                   TurboAssembler::kCountIncludesReceiver);
+                   MacroAssembler::kCountIncludesReceiver);
   __ ret(0);
@@ -282,7 +282,7 @@ void Builtins::Generate_JSConstructStubGeneric(MacroAssembler* masm) {
   __ LeaveFrame(StackFrame::CONSTRUCT);
   // Remove caller arguments from the stack and return.
   __ DropArguments(rbx, rcx, MacroAssembler::kCountIsSmi,
-                   TurboAssembler::kCountIncludesReceiver);
+                   MacroAssembler::kCountIncludesReceiver);
   __ ret(0);
   // If the result is a smi, it is *not* an object in the ECMA sense.
@@ -890,8 +890,8 @@ static void LeaveInterpreterFrame(MacroAssembler* masm, Register scratch1,
   __ leave();
   // Drop receiver + arguments.
-  __ DropArguments(params_size, scratch2, TurboAssembler::kCountIsBytes,
-                   TurboAssembler::kCountIncludesReceiver);
+  __ DropArguments(params_size, scratch2, MacroAssembler::kCountIsBytes,
+                   MacroAssembler::kCountIncludesReceiver);
 }
 // Tail-call |function_id| if |actual_state| == |expected_state|
@@ -1265,7 +1265,7 @@ static void GenerateInterpreterPushArgs(MacroAssembler* masm, Register num_args,
                           kSystemPointerSize));
   // Push the arguments.
   __ PushArray(start_address, num_args, scratch,
-               TurboAssembler::PushArrayOrder::kReverse);
+               MacroAssembler::PushArrayOrder::kReverse);
 }
 // static
@@ -1814,8 +1814,8 @@ void Builtins::Generate_FunctionPrototypeApply(MacroAssembler* masm) {
   }
   __ bind(&no_this_arg);
   __ DropArgumentsAndPushNewReceiver(rax, rdx, rcx,
-                                     TurboAssembler::kCountIsInteger,
-                                     TurboAssembler::kCountIncludesReceiver);
+                                     MacroAssembler::kCountIsInteger,
+                                     MacroAssembler::kCountIncludesReceiver);
 }
 // ----------- S t a t e -------------
@@ -1919,8 +1919,8 @@ void Builtins::Generate_ReflectApply(MacroAssembler* masm) {
   __ movq(rbx, args[3]);  // argumentsList
   __ bind(&done);
   __ DropArgumentsAndPushNewReceiver(rax, rdx, rcx,
-                                     TurboAssembler::kCountIsInteger,
-                                     TurboAssembler::kCountIncludesReceiver);
+                                     MacroAssembler::kCountIsInteger,
+                                     MacroAssembler::kCountIncludesReceiver);
 }
 // ----------- S t a t e -------------
@@ -1971,8 +1971,8 @@ void Builtins::Generate_ReflectConstruct(MacroAssembler* masm) {
   __ bind(&done);
   __ DropArgumentsAndPushNewReceiver(
       rax, masm->RootAsOperand(RootIndex::kUndefinedValue), rcx,
-      TurboAssembler::kCountIsInteger,
-      TurboAssembler::kCountIncludesReceiver);
+      MacroAssembler::kCountIsInteger,
+      MacroAssembler::kCountIncludesReceiver);
 }
 // ----------- S t a t e -------------
@@ -3812,8 +3812,8 @@ void GenericJSToWasmWrapperHelper(MacroAssembler* masm, bool stack_switch) {
   // expected to be on the top of the stack).
   // We cannot use just the ret instruction for this, because we cannot pass the
   // number of slots to remove in a Register as an argument.
-  __ DropArguments(param_count, rbx, TurboAssembler::kCountIsInteger,
-                   TurboAssembler::kCountExcludesReceiver);
+  __ DropArguments(param_count, rbx, MacroAssembler::kCountIsInteger,
+                   MacroAssembler::kCountExcludesReceiver);
   __ ret(0);
 // --------------------------------------------------------------------------


@@ -1435,7 +1435,7 @@ class V8_EXPORT_PRIVATE V8_NODISCARD UseScratchRegisterScope {
  private:
   friend class Assembler;
-  friend class TurboAssembler;
+  friend class MacroAssembler;
   template <typename T>
   bool CanAcquireVfp() const;

(File diff suppressed because it is too large.)


@@ -43,9 +43,9 @@ enum TargetAddressStorageMode {
   NEVER_INLINE_TARGET_ADDRESS
 };

-class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
+class V8_EXPORT_PRIVATE MacroAssembler : public MacroAssemblerBase {
  public:
-  using TurboAssemblerBase::TurboAssemblerBase;
+  using MacroAssemblerBase::MacroAssemblerBase;

   // Activation support.
   void EnterFrame(StackFrame::Type type,
@@ -596,49 +596,6 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
   void F64x2ConvertLowI32x4U(QwNeonRegister dst, QwNeonRegister src);
   void F64x2PromoteLowF32x4(QwNeonRegister dst, QwNeonRegister src);

- private:
-  // Compare single values and then load the fpscr flags to a register.
-  void VFPCompareAndLoadFlags(const SwVfpRegister src1,
-                              const SwVfpRegister src2,
-                              const Register fpscr_flags,
-                              const Condition cond = al);
-  void VFPCompareAndLoadFlags(const SwVfpRegister src1, const float src2,
-                              const Register fpscr_flags,
-                              const Condition cond = al);
-
-  // Compare double values and then load the fpscr flags to a register.
-  void VFPCompareAndLoadFlags(const DwVfpRegister src1,
-                              const DwVfpRegister src2,
-                              const Register fpscr_flags,
-                              const Condition cond = al);
-  void VFPCompareAndLoadFlags(const DwVfpRegister src1, const double src2,
-                              const Register fpscr_flags,
-                              const Condition cond = al);
-
-  void Jump(intptr_t target, RelocInfo::Mode rmode, Condition cond = al);
-
-  // Implementation helpers for FloatMin and FloatMax.
-  template <typename T>
-  void FloatMaxHelper(T result, T left, T right, Label* out_of_line);
-  template <typename T>
-  void FloatMinHelper(T result, T left, T right, Label* out_of_line);
-  template <typename T>
-  void FloatMaxOutOfLineHelper(T result, T left, T right);
-  template <typename T>
-  void FloatMinOutOfLineHelper(T result, T left, T right);
-
-  int CalculateStackPassedWords(int num_reg_arguments,
-                                int num_double_arguments);
-  void CallCFunctionHelper(Register function, int num_reg_arguments,
-                           int num_double_arguments);
-};
-
-// MacroAssembler implements a collection of frequently used macros.
-class V8_EXPORT_PRIVATE MacroAssembler : public TurboAssembler {
- public:
-  using TurboAssembler::TurboAssembler;
-
   void Mls(Register dst, Register src1, Register src2, Register srcA,
            Condition cond = al);
   void And(Register dst, Register src1, const Operand& src2,
@@ -899,6 +856,42 @@ class V8_EXPORT_PRIVATE MacroAssembler : public TurboAssembler {
                       Register actual_parameter_count, Label* done,
                       InvokeType type);

+  // Compare single values and then load the fpscr flags to a register.
+  void VFPCompareAndLoadFlags(const SwVfpRegister src1,
+                              const SwVfpRegister src2,
+                              const Register fpscr_flags,
+                              const Condition cond = al);
+  void VFPCompareAndLoadFlags(const SwVfpRegister src1, const float src2,
+                              const Register fpscr_flags,
+                              const Condition cond = al);
+
+  // Compare double values and then load the fpscr flags to a register.
+  void VFPCompareAndLoadFlags(const DwVfpRegister src1,
+                              const DwVfpRegister src2,
+                              const Register fpscr_flags,
+                              const Condition cond = al);
+  void VFPCompareAndLoadFlags(const DwVfpRegister src1, const double src2,
+                              const Register fpscr_flags,
+                              const Condition cond = al);
+
+  void Jump(intptr_t target, RelocInfo::Mode rmode, Condition cond = al);
+
+  // Implementation helpers for FloatMin and FloatMax.
+  template <typename T>
+  void FloatMaxHelper(T result, T left, T right, Label* out_of_line);
+  template <typename T>
+  void FloatMinHelper(T result, T left, T right, Label* out_of_line);
+  template <typename T>
+  void FloatMaxOutOfLineHelper(T result, T left, T right);
+  template <typename T>
+  void FloatMinOutOfLineHelper(T result, T left, T right);
+
+  int CalculateStackPassedWords(int num_reg_arguments,
+                                int num_double_arguments);
+  void CallCFunctionHelper(Register function, int num_reg_arguments,
+                           int num_double_arguments);
+
   DISALLOW_IMPLICIT_CONSTRUCTORS(MacroAssembler);
 };

(File diff suppressed because it is too large.)

(File diff suppressed because it is too large.)


@@ -146,9 +146,9 @@ enum PreShiftImmMode {
 // platforms are updated.
 enum class StackLimitKind { kInterruptStackLimit, kRealStackLimit };
-class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
+class V8_EXPORT_PRIVATE MacroAssembler : public MacroAssemblerBase {
  public:
-  using TurboAssemblerBase::TurboAssemblerBase;
+  using MacroAssemblerBase::MacroAssemblerBase;
 #if DEBUG
   void set_allow_macro_instructions(bool value) {
@@ -1484,81 +1484,6 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
                                 ExternalPointerTag tag,
                                 Register isolate_root = Register::no_reg());
- protected:
-  // The actual Push and Pop implementations. These don't generate any code
-  // other than that required for the push or pop. This allows
-  // (Push|Pop)CPURegList to bundle together run-time assertions for a large
-  // block of registers.
-  //
-  // Note that size is per register, and is specified in bytes.
-  void PushHelper(int count, int size, const CPURegister& src0,
-                  const CPURegister& src1, const CPURegister& src2,
-                  const CPURegister& src3);
-  void PopHelper(int count, int size, const CPURegister& dst0,
-                 const CPURegister& dst1, const CPURegister& dst2,
-                 const CPURegister& dst3);
-  void ConditionalCompareMacro(const Register& rn, const Operand& operand,
-                               StatusFlags nzcv, Condition cond,
-                               ConditionalCompareOp op);
-  void AddSubWithCarryMacro(const Register& rd, const Register& rn,
-                            const Operand& operand, FlagsUpdate S,
-                            AddSubWithCarryOp op);
-  // Call Printf. On a native build, a simple call will be generated, but if the
-  // simulator is being used then a suitable pseudo-instruction is used. The
-  // arguments and stack must be prepared by the caller as for a normal AAPCS64
-  // call to 'printf'.
-  //
-  // The 'args' argument should point to an array of variable arguments in their
-  // proper PCS registers (and in calling order). The argument registers can
-  // have mixed types. The format string (x0) should not be included.
-  void CallPrintf(int arg_count = 0, const CPURegister* args = nullptr);
- private:
-#if DEBUG
-  // Tell whether any of the macro instructions can be used. When false the
-  // MacroAssembler will assert if a method which can emit a variable number
-  // of instructions is called.
-  bool allow_macro_instructions_ = true;
-#endif
-  // Scratch registers available for use by the MacroAssembler.
-  CPURegList tmp_list_ = DefaultTmpList();
-  CPURegList fptmp_list_ = DefaultFPTmpList();
-  // Helps resolve branching to labels potentially out of range.
-  // If the label is not bound, it registers the information necessary to later
-  // be able to emit a veneer for this branch if necessary.
-  // If the label is bound, it returns true if the label (or the previous link
-  // in the label chain) is out of range. In that case the caller is responsible
-  // for generating appropriate code.
-  // Otherwise it returns false.
-  // This function also checks whether veneers need to be emitted.
-  bool NeedExtraInstructionsOrRegisterBranch(Label* label,
-                                             ImmBranchType branch_type);
-  void Movi16bitHelper(const VRegister& vd, uint64_t imm);
-  void Movi32bitHelper(const VRegister& vd, uint64_t imm);
-  void Movi64bitHelper(const VRegister& vd, uint64_t imm);
-  void LoadStoreMacro(const CPURegister& rt, const MemOperand& addr,
-                      LoadStoreOp op);
-  void LoadStorePairMacro(const CPURegister& rt, const CPURegister& rt2,
-                          const MemOperand& addr, LoadStorePairOp op);
-  int64_t CalculateTargetOffset(Address target, RelocInfo::Mode rmode,
-                                byte* pc);
-  void JumpHelper(int64_t offset, RelocInfo::Mode rmode, Condition cond = al);
-};
-class V8_EXPORT_PRIVATE MacroAssembler : public TurboAssembler {
- public:
-  using TurboAssembler::TurboAssembler;
   // Instruction set functions ------------------------------------------------
   // Logical macros.
   inline void Bics(const Register& rd, const Register& rn,
@@ -1594,18 +1519,10 @@ class V8_EXPORT_PRIVATE MacroAssembler : public TurboAssembler {
                    Condition cond);
   inline void Extr(const Register& rd, const Register& rn, const Register& rm,
                    unsigned lsb);
-  void Fcvtl(const VRegister& vd, const VRegister& vn) {
-    DCHECK(allow_macro_instructions());
-    fcvtl(vd, vn);
-  }
   void Fcvtl2(const VRegister& vd, const VRegister& vn) {
     DCHECK(allow_macro_instructions());
     fcvtl2(vd, vn);
   }
-  void Fcvtn(const VRegister& vd, const VRegister& vn) {
-    DCHECK(allow_macro_instructions());
-    fcvtn(vd, vn);
-  }
   void Fcvtn2(const VRegister& vd, const VRegister& vn) {
     DCHECK(allow_macro_instructions());
     fcvtn2(vd, vn);
@@ -1641,7 +1558,6 @@ class V8_EXPORT_PRIVATE MacroAssembler : public TurboAssembler {
     DCHECK(allow_macro_instructions());
     mvni(vd, imm8, shift, shift_amount);
   }
-  inline void Rev(const Register& rd, const Register& rn);
   inline void Smaddl(const Register& rd, const Register& rn, const Register& rm,
                      const Register& ra);
   inline void Smsubl(const Register& rd, const Register& rn, const Register& rm,
@@ -2139,6 +2055,76 @@ class V8_EXPORT_PRIVATE MacroAssembler : public TurboAssembler {
                            Register feedback_vector, FeedbackSlot slot,
                            Label* on_result, Label::Distance distance);
+ protected:
+  // The actual Push and Pop implementations. These don't generate any code
+  // other than that required for the push or pop. This allows
+  // (Push|Pop)CPURegList to bundle together run-time assertions for a large
+  // block of registers.
+  //
+  // Note that size is per register, and is specified in bytes.
+  void PushHelper(int count, int size, const CPURegister& src0,
+                  const CPURegister& src1, const CPURegister& src2,
+                  const CPURegister& src3);
+  void PopHelper(int count, int size, const CPURegister& dst0,
+                 const CPURegister& dst1, const CPURegister& dst2,
+                 const CPURegister& dst3);
+  void ConditionalCompareMacro(const Register& rn, const Operand& operand,
+                               StatusFlags nzcv, Condition cond,
+                               ConditionalCompareOp op);
+  void AddSubWithCarryMacro(const Register& rd, const Register& rn,
+                            const Operand& operand, FlagsUpdate S,
+                            AddSubWithCarryOp op);
+  // Call Printf. On a native build, a simple call will be generated, but if the
+  // simulator is being used then a suitable pseudo-instruction is used. The
+  // arguments and stack must be prepared by the caller as for a normal AAPCS64
+  // call to 'printf'.
+  //
+  // The 'args' argument should point to an array of variable arguments in their
+  // proper PCS registers (and in calling order). The argument registers can
+  // have mixed types. The format string (x0) should not be included.
+  void CallPrintf(int arg_count = 0, const CPURegister* args = nullptr);
+ private:
+#if DEBUG
+  // Tell whether any of the macro instructions can be used. When false the
+  // MacroAssembler will assert if a method which can emit a variable number
+  // of instructions is called.
+  bool allow_macro_instructions_ = true;
+#endif
+  // Scratch registers available for use by the MacroAssembler.
+  CPURegList tmp_list_ = DefaultTmpList();
+  CPURegList fptmp_list_ = DefaultFPTmpList();
+  // Helps resolve branching to labels potentially out of range.
+  // If the label is not bound, it registers the information necessary to later
+  // be able to emit a veneer for this branch if necessary.
+  // If the label is bound, it returns true if the label (or the previous link
+  // in the label chain) is out of range. In that case the caller is responsible
+  // for generating appropriate code.
+  // Otherwise it returns false.
+  // This function also checks whether veneers need to be emitted.
+  bool NeedExtraInstructionsOrRegisterBranch(Label* label,
+                                             ImmBranchType branch_type);
+  void Movi16bitHelper(const VRegister& vd, uint64_t imm);
+  void Movi32bitHelper(const VRegister& vd, uint64_t imm);
+  void Movi64bitHelper(const VRegister& vd, uint64_t imm);
+  void LoadStoreMacro(const CPURegister& rt, const MemOperand& addr,
+                      LoadStoreOp op);
+  void LoadStorePairMacro(const CPURegister& rt, const CPURegister& rt2,
+                          const MemOperand& addr, LoadStorePairOp op);
+  int64_t CalculateTargetOffset(Address target, RelocInfo::Mode rmode,
+                                byte* pc);
+  void JumpHelper(int64_t offset, RelocInfo::Mode rmode, Condition cond = al);
   DISALLOW_IMPLICIT_CONSTRUCTORS(MacroAssembler);
 };
@@ -2148,38 +2134,38 @@ class V8_EXPORT_PRIVATE MacroAssembler : public TurboAssembler {
 // emitted is what you specified when creating the scope.
 class V8_NODISCARD InstructionAccurateScope {
  public:
-  explicit InstructionAccurateScope(TurboAssembler* tasm, size_t count = 0)
-      : tasm_(tasm),
-        block_pool_(tasm, count * kInstrSize)
+  explicit InstructionAccurateScope(MacroAssembler* masm, size_t count = 0)
+      : masm_(masm),
+        block_pool_(masm, count * kInstrSize)
 #ifdef DEBUG
         ,
         size_(count * kInstrSize)
 #endif
   {
-    tasm_->CheckVeneerPool(false, true, count * kInstrSize);
-    tasm_->StartBlockVeneerPool();
+    masm_->CheckVeneerPool(false, true, count * kInstrSize);
+    masm_->StartBlockVeneerPool();
 #ifdef DEBUG
     if (count != 0) {
-      tasm_->bind(&start_);
+      masm_->bind(&start_);
     }
-    previous_allow_macro_instructions_ = tasm_->allow_macro_instructions();
-    tasm_->set_allow_macro_instructions(false);
+    previous_allow_macro_instructions_ = masm_->allow_macro_instructions();
+    masm_->set_allow_macro_instructions(false);
 #endif
   }
   ~InstructionAccurateScope() {
-    tasm_->EndBlockVeneerPool();
+    masm_->EndBlockVeneerPool();
 #ifdef DEBUG
     if (start_.is_bound()) {
-      DCHECK(tasm_->SizeOfCodeGeneratedSince(&start_) == size_);
+      DCHECK(masm_->SizeOfCodeGeneratedSince(&start_) == size_);
     }
-    tasm_->set_allow_macro_instructions(previous_allow_macro_instructions_);
+    masm_->set_allow_macro_instructions(previous_allow_macro_instructions_);
 #endif
   }
  private:
-  TurboAssembler* tasm_;
-  TurboAssembler::BlockConstPoolScope block_pool_;
+  MacroAssembler* masm_;
+  MacroAssembler::BlockConstPoolScope block_pool_;
 #ifdef DEBUG
   size_t size_;
   Label start_;
@@ -2188,7 +2174,7 @@ class V8_NODISCARD InstructionAccurateScope {
 };
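Note: a hypothetical usage sketch of InstructionAccurateScope (assumed raw-emitter names, not from this CL). The scope promises an exact instruction count, so only single-instruction assembler methods may be used while it is active; macro instructions are disallowed:

    void EmitExactlyTwoInstructions(MacroAssembler* masm) {
      InstructionAccurateScope scope(masm, 2);  // promises 2 * kInstrSize bytes
      masm->ldr(x0, MemOperand(x1));            // raw instruction, one slot
      masm->add(x0, x0, Operand(1));            // raw instruction, one slot
    }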
 // This scope utility allows scratch registers to be managed safely. The
-// TurboAssembler's TmpList() (and FPTmpList()) is used as a pool of scratch
+// MacroAssembler's TmpList() (and FPTmpList()) is used as a pool of scratch
 // registers. These registers can be allocated on demand, and will be returned
 // at the end of the scope.
 //
@@ -2198,9 +2184,9 @@ class V8_NODISCARD InstructionAccurateScope {
 // order as the constructors. We do not have assertions for this.
 class V8_NODISCARD UseScratchRegisterScope {
  public:
-  explicit UseScratchRegisterScope(TurboAssembler* tasm)
-      : available_(tasm->TmpList()),
-        availablefp_(tasm->FPTmpList()),
+  explicit UseScratchRegisterScope(MacroAssembler* masm)
+      : available_(masm->TmpList()),
+        availablefp_(masm->FPTmpList()),
         old_available_(available_->bits()),
         old_availablefp_(availablefp_->bits()) {
     DCHECK_EQ(available_->type(), CPURegister::kRegister);

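Note: a usage sketch for UseScratchRegisterScope (assumed helper names, following the arm64 AcquireX convention; not code from this CL). Scratch registers are borrowed from the assembler's TmpList() and handed back when the scope ends:

    void LoadAndIncrement(MacroAssembler* masm, Register dst, MemOperand src) {
      UseScratchRegisterScope temps(masm);
      Register scratch = temps.AcquireX();  // borrowed from masm->TmpList()
      masm->Ldr(scratch, src);
      masm->Add(dst, scratch, Operand(1));
    }  // scratch returns to the pool here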

@@ -21,11 +21,11 @@
 #include "src/codegen/ia32/register-ia32.h"
 #include "src/codegen/interface-descriptors-inl.h"
 #include "src/codegen/label.h"
+#include "src/codegen/macro-assembler-base.h"
 #include "src/codegen/macro-assembler.h"
 #include "src/codegen/register.h"
 #include "src/codegen/reglist.h"
 #include "src/codegen/reloc-info.h"
-#include "src/codegen/turbo-assembler.h"
 #include "src/common/globals.h"
 #include "src/deoptimizer/deoptimizer.h"
 #include "src/execution/frame-constants.h"
@@ -77,18 +77,18 @@ Operand StackArgumentsAccessor::GetArgumentOperand(int index) const {
 // -------------------------------------------------------------------------
 // MacroAssembler implementation.
-void TurboAssembler::InitializeRootRegister() {
+void MacroAssembler::InitializeRootRegister() {
   ASM_CODE_COMMENT(this);
   ExternalReference isolate_root = ExternalReference::isolate_root(isolate());
   Move(kRootRegister, Immediate(isolate_root));
 }
-Operand TurboAssembler::RootAsOperand(RootIndex index) {
+Operand MacroAssembler::RootAsOperand(RootIndex index) {
   DCHECK(root_array_available());
   return Operand(kRootRegister, RootRegisterOffsetForRootIndex(index));
 }
-void TurboAssembler::LoadRoot(Register destination, RootIndex index) {
+void MacroAssembler::LoadRoot(Register destination, RootIndex index) {
   ASM_CODE_COMMENT(this);
   if (root_array_available()) {
     mov(destination, RootAsOperand(index));
@@ -113,7 +113,7 @@ void TurboAssembler::LoadRoot(Register destination, RootIndex index) {
   mov(destination, Operand(destination, RootRegisterOffsetForRootIndex(index)));
 }
-void TurboAssembler::CompareRoot(Register with, Register scratch,
+void MacroAssembler::CompareRoot(Register with, Register scratch,
                                  RootIndex index) {
   ASM_CODE_COMMENT(this);
   if (root_array_available()) {
@@ -126,7 +126,7 @@ void TurboAssembler::CompareRoot(Register with, Register scratch,
   }
 }
-void TurboAssembler::CompareRoot(Register with, RootIndex index) {
+void MacroAssembler::CompareRoot(Register with, RootIndex index) {
   ASM_CODE_COMMENT(this);
   if (root_array_available()) {
     cmp(with, RootAsOperand(index));
@@ -180,7 +180,7 @@ void MacroAssembler::JumpIfIsInRange(Register value, unsigned lower_limit,
   j(below_equal, on_in_range, near_jump);
 }
-void TurboAssembler::PushArray(Register array, Register size, Register scratch,
+void MacroAssembler::PushArray(Register array, Register size, Register scratch,
                                PushArrayOrder order) {
   ASM_CODE_COMMENT(this);
   DCHECK(!AreAliased(array, size, scratch));
@@ -206,7 +206,7 @@ void TurboAssembler::PushArray(Register array, Register size, Register scratch,
   }
 }
-Operand TurboAssembler::ExternalReferenceAsOperand(ExternalReference reference,
+Operand MacroAssembler::ExternalReferenceAsOperand(ExternalReference reference,
                                                    Register scratch) {
   if (root_array_available() && options().enable_root_relative_access) {
     intptr_t delta =
@@ -233,8 +233,8 @@ Operand TurboAssembler::ExternalReferenceAsOperand(ExternalReference reference,
 }
 // TODO(v8:6666): If possible, refactor into a platform-independent function in
-// TurboAssembler.
-Operand TurboAssembler::ExternalReferenceAddressAsOperand(
+// MacroAssembler.
+Operand MacroAssembler::ExternalReferenceAddressAsOperand(
     ExternalReference reference) {
   DCHECK(root_array_available());
   DCHECK(options().isolate_independent_code);
@@ -244,8 +244,8 @@ Operand TurboAssembler::ExternalReferenceAddressAsOperand(
 }
 // TODO(v8:6666): If possible, refactor into a platform-independent function in
-// TurboAssembler.
-Operand TurboAssembler::HeapObjectAsOperand(Handle<HeapObject> object) {
+// MacroAssembler.
+Operand MacroAssembler::HeapObjectAsOperand(Handle<HeapObject> object) {
   DCHECK(root_array_available());
   Builtin builtin;
@@ -264,7 +264,7 @@ Operand TurboAssembler::HeapObjectAsOperand(Handle<HeapObject> object) {
   }
 }
-void TurboAssembler::LoadFromConstantsTable(Register destination,
+void MacroAssembler::LoadFromConstantsTable(Register destination,
                                             int constant_index) {
   ASM_CODE_COMMENT(this);
   DCHECK(RootsTable::IsImmortalImmovable(RootIndex::kBuiltinsConstantsTable));
@@ -273,7 +273,7 @@ void TurboAssembler::LoadFromConstantsTable(Register destination,
       FieldOperand(destination, FixedArray::OffsetOfElementAt(constant_index)));
 }
-void TurboAssembler::LoadRootRegisterOffset(Register destination,
+void MacroAssembler::LoadRootRegisterOffset(Register destination,
                                             intptr_t offset) {
   ASM_CODE_COMMENT(this);
   DCHECK(is_int32(offset));
@@ -285,13 +285,13 @@ void TurboAssembler::LoadRootRegisterOffset(Register destination,
   }
 }
-void TurboAssembler::LoadRootRelative(Register destination, int32_t offset) {
+void MacroAssembler::LoadRootRelative(Register destination, int32_t offset) {
   ASM_CODE_COMMENT(this);
   DCHECK(root_array_available());
   mov(destination, Operand(kRootRegister, offset));
 }
-void TurboAssembler::LoadAddress(Register destination,
+void MacroAssembler::LoadAddress(Register destination,
                                  ExternalReference source) {
   // TODO(jgruber): Add support for enable_root_relative_access.
   if (root_array_available() && options().isolate_independent_code) {
@@ -301,7 +301,7 @@ void TurboAssembler::LoadAddress(Register destination,
   mov(destination, Immediate(source));
 }
-int TurboAssembler::RequiredStackSizeForCallerSaved(SaveFPRegsMode fp_mode,
+int MacroAssembler::RequiredStackSizeForCallerSaved(SaveFPRegsMode fp_mode,
                                                     Register exclusion) const {
   int bytes = 0;
   RegList saved_regs = kCallerSaved - exclusion;
@@ -315,7 +315,7 @@ int TurboAssembler::RequiredStackSizeForCallerSaved(SaveFPRegsMode fp_mode,
   return bytes;
 }
-int TurboAssembler::PushCallerSaved(SaveFPRegsMode fp_mode,
+int MacroAssembler::PushCallerSaved(SaveFPRegsMode fp_mode,
                                     Register exclusion) {
   ASM_CODE_COMMENT(this);
   // We don't allow a GC in a write barrier slow path so there is no need to
@@ -346,7 +346,7 @@ int TurboAssembler::PushCallerSaved(SaveFPRegsMode fp_mode,
   return bytes;
 }
-int TurboAssembler::PopCallerSaved(SaveFPRegsMode fp_mode, Register exclusion) {
+int MacroAssembler::PopCallerSaved(SaveFPRegsMode fp_mode, Register exclusion) {
   ASM_CODE_COMMENT(this);
   int bytes = 0;
   if (fp_mode == SaveFPRegsMode::kSave) {
@@ -412,19 +412,19 @@ void MacroAssembler::RecordWriteField(Register object, int offset,
   }
 }
-void TurboAssembler::MaybeSaveRegisters(RegList registers) {
+void MacroAssembler::MaybeSaveRegisters(RegList registers) {
   for (Register reg : registers) {
     push(reg);
   }
 }
-void TurboAssembler::MaybeRestoreRegisters(RegList registers) {
+void MacroAssembler::MaybeRestoreRegisters(RegList registers) {
   for (Register reg : base::Reversed(registers)) {
     pop(reg);
   }
 }
-void TurboAssembler::CallEphemeronKeyBarrier(Register object,
+void MacroAssembler::CallEphemeronKeyBarrier(Register object,
                                              Register slot_address,
                                              SaveFPRegsMode fp_mode) {
   ASM_CODE_COMMENT(this);
@@ -449,7 +449,7 @@ void TurboAssembler::CallEphemeronKeyBarrier(Register object,
   MaybeRestoreRegisters(registers);
 }
-void TurboAssembler::CallRecordWriteStubSaveRegisters(Register object,
+void MacroAssembler::CallRecordWriteStubSaveRegisters(Register object,
                                                       Register slot_address,
                                                       SaveFPRegsMode fp_mode,
                                                       StubCallMode mode) {
@@ -473,7 +473,7 @@ void TurboAssembler::CallRecordWriteStubSaveRegisters(Register object,
   MaybeRestoreRegisters(registers);
 }
-void TurboAssembler::CallRecordWriteStub(Register object, Register slot_address,
+void MacroAssembler::CallRecordWriteStub(Register object, Register slot_address,
                                          SaveFPRegsMode fp_mode,
                                          StubCallMode mode) {
   ASM_CODE_COMMENT(this);
@@ -547,17 +547,17 @@ void MacroAssembler::RecordWrite(Register object, Register slot_address,
   }
 }
-void TurboAssembler::Cvtsi2ss(XMMRegister dst, Operand src) {
+void MacroAssembler::Cvtsi2ss(XMMRegister dst, Operand src) {
   xorps(dst, dst);
   cvtsi2ss(dst, src);
 }
-void TurboAssembler::Cvtsi2sd(XMMRegister dst, Operand src) {
+void MacroAssembler::Cvtsi2sd(XMMRegister dst, Operand src) {
   xorpd(dst, dst);
   cvtsi2sd(dst, src);
 }
-void TurboAssembler::Cvtui2ss(XMMRegister dst, Operand src, Register tmp) {
+void MacroAssembler::Cvtui2ss(XMMRegister dst, Operand src, Register tmp) {
   Label done;
   Register src_reg = src.is_reg_only() ? src.reg() : tmp;
   if (src_reg == tmp) mov(tmp, src);
@@ -578,7 +578,7 @@ void TurboAssembler::Cvtui2ss(XMMRegister dst, Operand src, Register tmp) {
   bind(&done);
 }
-void TurboAssembler::Cvttss2ui(Register dst, Operand src, XMMRegister tmp) {
+void MacroAssembler::Cvttss2ui(Register dst, Operand src, XMMRegister tmp) {
   Label done;
   cvttss2si(dst, src);
   test(dst, dst);
@@ -590,7 +590,7 @@ void TurboAssembler::Cvttss2ui(Register dst, Operand src, XMMRegister tmp) {
   bind(&done);
 }
-void TurboAssembler::Cvtui2sd(XMMRegister dst, Operand src, Register scratch) {
+void MacroAssembler::Cvtui2sd(XMMRegister dst, Operand src, Register scratch) {
   Label done;
   cmp(src, Immediate(0));
   ExternalReference uint32_bias = ExternalReference::address_of_uint32_bias();
@@ -600,14 +600,14 @@ void TurboAssembler::Cvtui2sd(XMMRegister dst, Operand src, Register scratch) {
   bind(&done);
 }
-void TurboAssembler::Cvttsd2ui(Register dst, Operand src, XMMRegister tmp) {
+void MacroAssembler::Cvttsd2ui(Register dst, Operand src, XMMRegister tmp) {
   Move(tmp, -2147483648.0);
   addsd(tmp, src);
   cvttsd2si(dst, tmp);
   add(dst, Immediate(0x80000000));
 }
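Note: the unsigned conversions above work around SSE2 providing only signed conversions. Here is the arithmetic they emit, restated as plain C++ (my sketch, not code from this CL):

    #include <cstdint>

    // Cvtui2sd: convert as signed, then add 2^32 (the uint32_bias constant
    // referenced above) when the top bit made the signed result negative.
    double Cvtui2sdSketch(uint32_t src) {
      double dst = static_cast<double>(static_cast<int32_t>(src));  // cvtsi2sd
      if (static_cast<int32_t>(src) < 0) dst += 4294967296.0;       // addsd bias
      return dst;
    }

    // Cvttsd2ui: bias the input by -2^31 so the signed cvttsd2si stays in
    // range, then shift the result back with a wrapping add of 2^31.
    uint32_t Cvttsd2uiSketch(double src) {
      double tmp = -2147483648.0 + src;                 // Move(tmp, ...) + addsd
      int32_t dst = static_cast<int32_t>(tmp);          // cvttsd2si
      return static_cast<uint32_t>(dst) + 0x80000000u;  // add(dst, 0x80000000)
    }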
-void TurboAssembler::ShlPair(Register high, Register low, uint8_t shift) {
+void MacroAssembler::ShlPair(Register high, Register low, uint8_t shift) {
   DCHECK_GE(63, shift);
   if (shift >= 32) {
     mov(high, low);
@@ -619,7 +619,7 @@ void TurboAssembler::ShlPair(Register high, Register low, uint8_t shift) {
   }
 }
-void TurboAssembler::ShlPair_cl(Register high, Register low) {
+void MacroAssembler::ShlPair_cl(Register high, Register low) {
   ASM_CODE_COMMENT(this);
   shld_cl(high, low);
   shl_cl(low);
@@ -631,7 +631,7 @@ void TurboAssembler::ShlPair_cl(Register high, Register low) {
   bind(&done);
 }
-void TurboAssembler::ShrPair(Register high, Register low, uint8_t shift) {
+void MacroAssembler::ShrPair(Register high, Register low, uint8_t shift) {
   DCHECK_GE(63, shift);
   if (shift >= 32) {
     mov(low, high);
@@ -643,7 +643,7 @@ void TurboAssembler::ShrPair(Register high, Register low, uint8_t shift) {
   }
 }
-void TurboAssembler::ShrPair_cl(Register high, Register low) {
+void MacroAssembler::ShrPair_cl(Register high, Register low) {
   ASM_CODE_COMMENT(this);
   shrd_cl(low, high);
   shr_cl(high);
@@ -655,7 +655,7 @@ void TurboAssembler::ShrPair_cl(Register high, Register low) {
   bind(&done);
 }
-void TurboAssembler::SarPair(Register high, Register low, uint8_t shift) {
+void MacroAssembler::SarPair(Register high, Register low, uint8_t shift) {
   ASM_CODE_COMMENT(this);
   DCHECK_GE(63, shift);
   if (shift >= 32) {
@@ -668,7 +668,7 @@ void TurboAssembler::SarPair(Register high, Register low, uint8_t shift) {
   }
 }
-void TurboAssembler::SarPair_cl(Register high, Register low) {
+void MacroAssembler::SarPair_cl(Register high, Register low) {
   ASM_CODE_COMMENT(this);
   shrd_cl(low, high);
   sar_cl(high);
@@ -680,7 +680,7 @@ void TurboAssembler::SarPair_cl(Register high, Register low) {
   bind(&done);
 }
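Note: the pair-shift helpers implement 64-bit shifts on a {high, low} pair of 32-bit registers. In C++ terms (a sketch under the DCHECK's shift <= 63 assumption, not code from this CL; ShrPair/SarPair mirror it in the other direction):

    #include <cstdint>

    void ShlPairSketch(uint32_t& high, uint32_t& low, unsigned shift) {
      if (shift >= 32) {
        high = low << (shift - 32);  // mov(high, low) then shl by shift - 32
        low = 0;
      } else if (shift > 0) {
        high = (high << shift) | (low >> (32 - shift));  // shld
        low <<= shift;                                   // shl
      }
    }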
-void TurboAssembler::LoadMap(Register destination, Register object) {
+void MacroAssembler::LoadMap(Register destination, Register object) {
   mov(destination, FieldOperand(object, HeapObject::kMapOffset));
 }
@@ -979,23 +979,23 @@ void MacroAssembler::AssertNotSmi(Register object) {
   }
 }
-void TurboAssembler::Assert(Condition cc, AbortReason reason) {
+void MacroAssembler::Assert(Condition cc, AbortReason reason) {
   if (v8_flags.debug_code) Check(cc, reason);
 }
-void TurboAssembler::AssertUnreachable(AbortReason reason) {
+void MacroAssembler::AssertUnreachable(AbortReason reason) {
   if (v8_flags.debug_code) Abort(reason);
 }
 #endif  // V8_ENABLE_DEBUG_CODE
-void TurboAssembler::StubPrologue(StackFrame::Type type) {
+void MacroAssembler::StubPrologue(StackFrame::Type type) {
   ASM_CODE_COMMENT(this);
   push(ebp);  // Caller's frame pointer.
   mov(ebp, esp);
   push(Immediate(StackFrame::TypeToMarker(type)));
 }
-void TurboAssembler::Prologue() {
+void MacroAssembler::Prologue() {
   ASM_CODE_COMMENT(this);
   push(ebp);  // Caller's frame pointer.
   mov(ebp, esp);
@@ -1004,7 +1004,7 @@ void TurboAssembler::Prologue() {
   push(kJavaScriptCallArgCountRegister);  // Actual argument count.
 }
-void TurboAssembler::DropArguments(Register count, ArgumentsCountType type,
+void MacroAssembler::DropArguments(Register count, ArgumentsCountType type,
                                    ArgumentsCountMode mode) {
   int receiver_bytes =
       (mode == kCountExcludesReceiver) ? kSystemPointerSize : 0;
@@ -1034,7 +1034,7 @@ void TurboAssembler::DropArguments(Register count, ArgumentsCountType type,
   }
 }
-void TurboAssembler::DropArguments(Register count, Register scratch,
+void MacroAssembler::DropArguments(Register count, Register scratch,
                                    ArgumentsCountType type,
                                    ArgumentsCountMode mode) {
   DCHECK(!AreAliased(count, scratch));
@@ -1043,7 +1043,7 @@ void TurboAssembler::DropArguments(Register count, Register scratch,
   PushReturnAddressFrom(scratch);
 }
-void TurboAssembler::DropArgumentsAndPushNewReceiver(Register argc,
+void MacroAssembler::DropArgumentsAndPushNewReceiver(Register argc,
                                                      Register receiver,
                                                      Register scratch,
                                                      ArgumentsCountType type,
@@ -1055,7 +1055,7 @@ void TurboAssembler::DropArgumentsAndPushNewReceiver(Register argc,
   PushReturnAddressFrom(scratch);
 }
-void TurboAssembler::DropArgumentsAndPushNewReceiver(Register argc,
+void MacroAssembler::DropArgumentsAndPushNewReceiver(Register argc,
                                                      Operand receiver,
                                                      Register scratch,
                                                      ArgumentsCountType type,
@@ -1068,7 +1068,7 @@ void TurboAssembler::DropArgumentsAndPushNewReceiver(Register argc,
   PushReturnAddressFrom(scratch);
 }
-void TurboAssembler::EnterFrame(StackFrame::Type type) {
+void MacroAssembler::EnterFrame(StackFrame::Type type) {
   ASM_CODE_COMMENT(this);
   push(ebp);
   mov(ebp, esp);
@@ -1080,7 +1080,7 @@ void TurboAssembler::EnterFrame(StackFrame::Type type) {
 #endif  // V8_ENABLE_WEBASSEMBLY
 }
-void TurboAssembler::LeaveFrame(StackFrame::Type type) {
+void MacroAssembler::LeaveFrame(StackFrame::Type type) {
   ASM_CODE_COMMENT(this);
   if (v8_flags.debug_code && !StackFrame::IsJavaScript(type)) {
     cmp(Operand(ebp, CommonFrameConstants::kContextOrFrameTypeOffset),
@@ -1091,7 +1091,7 @@ void TurboAssembler::LeaveFrame(StackFrame::Type type) {
 }
 #ifdef V8_OS_WIN
-void TurboAssembler::AllocateStackSpace(Register bytes_scratch) {
+void MacroAssembler::AllocateStackSpace(Register bytes_scratch) {
   ASM_CODE_COMMENT(this);
   // On Windows, we cannot increment the stack size by more than one page
   // (minimum page size is 4KB) without accessing at least one byte on the
@@ -1113,7 +1113,7 @@ void TurboAssembler::AllocateStackSpace(Register bytes_scratch) {
   sub(esp, bytes_scratch);
 }
-void TurboAssembler::AllocateStackSpace(int bytes) {
+void MacroAssembler::AllocateStackSpace(int bytes) {
   ASM_CODE_COMMENT(this);
   DCHECK_GE(bytes, 0);
   while (bytes >= kStackPageSize) {
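Note: the loop body is cut off by the hunk above. Its assumed shape, sketched from the pattern the surrounding comment describes (not the literal diff): Windows only commits a stack guard page once it is touched, so a large allocation has to walk down one page at a time:

    while (bytes >= kStackPageSize) {
      sub(esp, Immediate(kStackPageSize));
      mov(Operand(esp, 0), Immediate(0));  // touch the page so it is committed
      bytes -= kStackPageSize;
    }
    if (bytes > 0) sub(esp, Immediate(bytes));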
@@ -1332,10 +1332,10 @@ void MacroAssembler::CompareStackLimit(Register with, StackLimitKind kind) {
       kind == StackLimitKind::kRealStackLimit
           ? ExternalReference::address_of_real_jslimit(isolate)
           : ExternalReference::address_of_jslimit(isolate);
-  DCHECK(TurboAssembler::IsAddressableThroughRootRegister(isolate, limit));
+  DCHECK(MacroAssembler::IsAddressableThroughRootRegister(isolate, limit));
   intptr_t offset =
-      TurboAssembler::RootRegisterOffsetForExternalReference(isolate, limit);
+      MacroAssembler::RootRegisterOffsetForExternalReference(isolate, limit);
   cmp(with, Operand(kRootRegister, offset));
 }
@@ -1565,9 +1565,9 @@ void MacroAssembler::LoadNativeContextSlot(Register destination, int index) {
   mov(destination, Operand(destination, Context::SlotOffset(index)));
 }
-void TurboAssembler::Ret() { ret(0); }
+void MacroAssembler::Ret() { ret(0); }
-void TurboAssembler::Ret(int bytes_dropped, Register scratch) {
+void MacroAssembler::Ret(int bytes_dropped, Register scratch) {
   if (is_uint16(bytes_dropped)) {
     ret(bytes_dropped);
   } else {
@@ -1578,7 +1578,7 @@ void TurboAssembler::Ret(int bytes_dropped, Register scratch) {
   }
 }
-void TurboAssembler::Push(Immediate value) {
+void MacroAssembler::Push(Immediate value) {
   if (root_array_available() && options().isolate_independent_code) {
     if (value.is_embedded_object()) {
       Push(HeapObjectAsOperand(value.embedded_object()));
@@ -1597,13 +1597,13 @@ void MacroAssembler::Drop(int stack_elements) {
   }
 }
-void TurboAssembler::Move(Register dst, Register src) {
+void MacroAssembler::Move(Register dst, Register src) {
   if (dst != src) {
     mov(dst, src);
   }
 }
-void TurboAssembler::Move(Register dst, const Immediate& src) {
+void MacroAssembler::Move(Register dst, const Immediate& src) {
   if (!src.is_heap_number_request() && src.is_zero()) {
     xor_(dst, dst);  // Shorter than mov of 32-bit immediate 0.
   } else if (src.is_external_reference()) {
@@ -1613,7 +1613,7 @@ void TurboAssembler::Move(Register dst, const Immediate& src) {
   }
 }
-void TurboAssembler::Move(Operand dst, const Immediate& src) {
+void MacroAssembler::Move(Operand dst, const Immediate& src) {
   // Since there's no scratch register available, take a detour through the
   // stack.
   if (root_array_available() && options().isolate_independent_code) {
@@ -1632,9 +1632,9 @@ void TurboAssembler::Move(Operand dst, const Immediate& src) {
   }
 }
-void TurboAssembler::Move(Register dst, Operand src) { mov(dst, src); }
+void MacroAssembler::Move(Register dst, Operand src) { mov(dst, src); }
-void TurboAssembler::Move(Register dst, Handle<HeapObject> src) {
+void MacroAssembler::Move(Register dst, Handle<HeapObject> src) {
   if (root_array_available() && options().isolate_independent_code) {
     IndirectLoadConstant(dst, src);
     return;
@@ -1642,7 +1642,7 @@ void TurboAssembler::Move(Register dst, Handle<HeapObject> src) {
   mov(dst, src);
 }
-void TurboAssembler::Move(XMMRegister dst, uint32_t src) {
+void MacroAssembler::Move(XMMRegister dst, uint32_t src) {
   if (src == 0) {
     pxor(dst, dst);
   } else {
@@ -1666,7 +1666,7 @@ void TurboAssembler::Move(XMMRegister dst, uint32_t src) {
   }
 }
-void TurboAssembler::Move(XMMRegister dst, uint64_t src) {
+void MacroAssembler::Move(XMMRegister dst, uint64_t src) {
   if (src == 0) {
     pxor(dst, dst);
   } else {
@@ -1705,7 +1705,7 @@ void TurboAssembler::Move(XMMRegister dst, uint64_t src) {
   }
 }
-void TurboAssembler::PextrdPreSse41(Register dst, XMMRegister src,
+void MacroAssembler::PextrdPreSse41(Register dst, XMMRegister src,
                                     uint8_t imm8) {
   if (imm8 == 0) {
     Movd(dst, src);
@@ -1721,7 +1721,7 @@ void TurboAssembler::PextrdPreSse41(Register dst, XMMRegister src,
   add(esp, Immediate(kDoubleSize));
 }
-void TurboAssembler::PinsrdPreSse41(XMMRegister dst, Operand src, uint8_t imm8,
+void MacroAssembler::PinsrdPreSse41(XMMRegister dst, Operand src, uint8_t imm8,
                                     uint32_t* load_pc_offset) {
   // Without AVX or SSE, we can only have 64-bit values in xmm registers.
   // We don't have an xmm scratch register, so move the data via the stack. This
@@ -1742,7 +1742,7 @@ void TurboAssembler::PinsrdPreSse41(XMMRegister dst, Operand src, uint8_t imm8,
   add(esp, Immediate(kDoubleSize));
 }
-void TurboAssembler::Lzcnt(Register dst, Operand src) {
+void MacroAssembler::Lzcnt(Register dst, Operand src) {
   if (CpuFeatures::IsSupported(LZCNT)) {
     CpuFeatureScope scope(this, LZCNT);
     lzcnt(dst, src);
@@ -1756,7 +1756,7 @@ void TurboAssembler::Lzcnt(Register dst, Operand src) {
   xor_(dst, Immediate(31));  // for x in [0..31], 31^x == 31-x.
 }
-void TurboAssembler::Tzcnt(Register dst, Operand src) {
+void MacroAssembler::Tzcnt(Register dst, Operand src) {
   if (CpuFeatures::IsSupported(BMI1)) {
     CpuFeatureScope scope(this, BMI1);
     tzcnt(dst, src);
@@ -1769,7 +1769,7 @@ void TurboAssembler::Tzcnt(Register dst, Operand src) {
   bind(&not_zero_src);
 }
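Note: what the no-LZCNT/no-BMI1 fallbacks above compute, restated in C++ (a sketch, not this CL's code; the loops stand in for bsr/bsf):

    #include <cstdint>

    uint32_t LzcntFallback(uint32_t x) {
      unsigned idx = 63;                               // mov(dst, 63) on zero input
      if (x != 0) { idx = 0; while (x >>= 1) ++idx; }  // bsr: highest set bit
      return idx ^ 31;  // 31 ^ idx == 31 - idx for idx in [0, 31]; 63 ^ 31 == 32
    }

    uint32_t TzcntFallback(uint32_t x) {
      if (x == 0) return 32;                 // mov(dst, 32) on zero input
      unsigned idx = 0;
      while (!(x & 1)) { x >>= 1; ++idx; }   // bsf: lowest set bit
      return idx;
    }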
-void TurboAssembler::Popcnt(Register dst, Operand src) {
+void MacroAssembler::Popcnt(Register dst, Operand src) {
   if (CpuFeatures::IsSupported(POPCNT)) {
     CpuFeatureScope scope(this, POPCNT);
     popcnt(dst, src);
@@ -1816,7 +1816,7 @@ void MacroAssembler::EmitDecrementCounter(StatsCounter* counter, int value,
   }
 }
-void TurboAssembler::Check(Condition cc, AbortReason reason) {
+void MacroAssembler::Check(Condition cc, AbortReason reason) {
   Label L;
   j(cc, &L);
   Abort(reason);
@@ -1824,7 +1824,7 @@ void TurboAssembler::Check(Condition cc, AbortReason reason) {
   bind(&L);
 }
-void TurboAssembler::CheckStackAlignment() {
+void MacroAssembler::CheckStackAlignment() {
   ASM_CODE_COMMENT(this);
   int frame_alignment = base::OS::ActivationFrameAlignment();
   int frame_alignment_mask = frame_alignment - 1;
@@ -1839,7 +1839,7 @@ void TurboAssembler::CheckStackAlignment() {
   }
 }
-void TurboAssembler::Abort(AbortReason reason) {
+void MacroAssembler::Abort(AbortReason reason) {
   if (v8_flags.code_comments) {
     const char* msg = GetAbortReason(reason);
     RecordComment("Abort message: ");
@@ -1882,7 +1882,7 @@ void TurboAssembler::Abort(AbortReason reason) {
   int3();
 }
-void TurboAssembler::PrepareCallCFunction(int num_arguments, Register scratch) {
+void MacroAssembler::PrepareCallCFunction(int num_arguments, Register scratch) {
   ASM_CODE_COMMENT(this);
   int frame_alignment = base::OS::ActivationFrameAlignment();
   if (frame_alignment != 0) {
@@ -1898,14 +1898,14 @@ void TurboAssembler::PrepareCallCFunction(int num_arguments, Register scratch) {
   }
 }
-void TurboAssembler::CallCFunction(ExternalReference function,
+void MacroAssembler::CallCFunction(ExternalReference function,
                                    int num_arguments) {
   // Trashing eax is ok as it will be the return value.
   Move(eax, Immediate(function));
   CallCFunction(eax, num_arguments);
 }
-void TurboAssembler::CallCFunction(Register function, int num_arguments) {
+void MacroAssembler::CallCFunction(Register function, int num_arguments) {
   ASM_CODE_COMMENT(this);
   DCHECK_LE(num_arguments, kMaxCParameters);
   DCHECK(has_frame());
@@ -1956,7 +1956,7 @@ void TurboAssembler::CallCFunction(Register function, int num_arguments) {
   }
 }
-void TurboAssembler::PushPC() {
+void MacroAssembler::PushPC() {
   // Push the current PC onto the stack as "return address" via calling
   // the next instruction.
   Label get_pc;
@@ -1964,7 +1964,7 @@ void TurboAssembler::PushPC() {
   bind(&get_pc);
 }
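Note: the call that PushPC emits falls between the two hunks above. The idiom, assumed from the surrounding comment rather than taken from the diff, is the classic ia32 way to read eip, since a call pushes the address of the next instruction:

    Label get_pc;
    call(&get_pc);  // pushes the address of get_pc, i.e. the current PC
    bind(&get_pc);  // the caller can now pop that address into a register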
-void TurboAssembler::Call(Handle<Code> code_object, RelocInfo::Mode rmode) {
+void MacroAssembler::Call(Handle<Code> code_object, RelocInfo::Mode rmode) {
   ASM_CODE_COMMENT(this);
   DCHECK_IMPLIES(options().isolate_independent_code,
                  Builtins::IsIsolateIndependentBuiltin(*code_object));
@@ -1977,7 +1977,7 @@ void TurboAssembler::Call(Handle<Code> code_object, RelocInfo::Mode rmode) {
   call(code_object, rmode);
 }
-void TurboAssembler::LoadEntryFromBuiltinIndex(Register builtin_index) {
+void MacroAssembler::LoadEntryFromBuiltinIndex(Register builtin_index) {
   ASM_CODE_COMMENT(this);
   static_assert(kSystemPointerSize == 4);
   static_assert(kSmiShiftSize == 0);
@@ -1993,13 +1993,13 @@ void TurboAssembler::LoadEntryFromBuiltinIndex(Register builtin_index) {
                      IsolateData::builtin_entry_table_offset()));
 }
-void TurboAssembler::CallBuiltinByIndex(Register builtin_index) {
+void MacroAssembler::CallBuiltinByIndex(Register builtin_index) {
   ASM_CODE_COMMENT(this);
   LoadEntryFromBuiltinIndex(builtin_index);
   call(builtin_index);
 }
-void TurboAssembler::CallBuiltin(Builtin builtin) {
+void MacroAssembler::CallBuiltin(Builtin builtin) {
   ASM_CODE_COMMENT_STRING(this, CommentForOffHeapTrampoline("call", builtin));
   switch (options().builtin_call_jump_mode) {
     case BuiltinCallJumpMode::kAbsolute: {
@@ -2019,7 +2019,7 @@ void TurboAssembler::CallBuiltin(Builtin builtin) {
   }
 }
-void TurboAssembler::TailCallBuiltin(Builtin builtin) {
+void MacroAssembler::TailCallBuiltin(Builtin builtin) {
   ASM_CODE_COMMENT_STRING(this,
                           CommentForOffHeapTrampoline("tail call", builtin));
   switch (options().builtin_call_jump_mode) {
@@ -2040,17 +2040,17 @@ void TurboAssembler::TailCallBuiltin(Builtin builtin) {
   }
 }
-Operand TurboAssembler::EntryFromBuiltinAsOperand(Builtin builtin) {
+Operand MacroAssembler::EntryFromBuiltinAsOperand(Builtin builtin) {
   ASM_CODE_COMMENT(this);
   return Operand(kRootRegister, IsolateData::BuiltinEntrySlotOffset(builtin));
 }
-void TurboAssembler::LoadCodeEntry(Register destination, Register code_object) {
+void MacroAssembler::LoadCodeEntry(Register destination, Register code_object) {
   ASM_CODE_COMMENT(this);
   mov(destination, FieldOperand(code_object, Code::kCodeEntryPointOffset));
 }
-void TurboAssembler::LoadCodeInstructionStreamNonBuiltin(Register destination,
+void MacroAssembler::LoadCodeInstructionStreamNonBuiltin(Register destination,
                                                          Register code_object) {
   ASM_CODE_COMMENT(this);
   // Compute the InstructionStream object pointer from the code entry point.
@@ -2058,12 +2058,12 @@ void TurboAssembler::LoadCodeInstructionStreamNonBuiltin(Register destination,
   sub(destination, Immediate(InstructionStream::kHeaderSize - kHeapObjectTag));
 }
-void TurboAssembler::CallCodeObject(Register code_object) {
+void MacroAssembler::CallCodeObject(Register code_object) {
   LoadCodeEntry(code_object, code_object);
   call(code_object);
 }
-void TurboAssembler::JumpCodeObject(Register code_object, JumpMode jump_mode) {
+void MacroAssembler::JumpCodeObject(Register code_object, JumpMode jump_mode) {
   LoadCodeEntry(code_object, code_object);
   switch (jump_mode) {
     case JumpMode::kJump:
@@ -2076,13 +2076,13 @@ void TurboAssembler::JumpCodeObject(Register code_object, JumpMode jump_mode) {
   }
 }
-void TurboAssembler::Jump(const ExternalReference& reference) {
+void MacroAssembler::Jump(const ExternalReference& reference) {
   DCHECK(root_array_available());
   jmp(Operand(kRootRegister, RootRegisterOffsetForExternalReferenceTableEntry(
                                  isolate(), reference)));
 }
-void TurboAssembler::Jump(Handle<Code> code_object, RelocInfo::Mode rmode) {
+void MacroAssembler::Jump(Handle<Code> code_object, RelocInfo::Mode rmode) {
   DCHECK_IMPLIES(options().isolate_independent_code,
                  Builtins::IsIsolateIndependentBuiltin(*code_object));
   Builtin builtin = Builtin::kNoBuiltinId;
@@ -2094,7 +2094,7 @@ void TurboAssembler::Jump(Handle<Code> code_object, RelocInfo::Mode rmode) {
   jmp(code_object, rmode);
 }
-void TurboAssembler::CheckPageFlag(Register object, Register scratch, int mask,
+void MacroAssembler::CheckPageFlag(Register object, Register scratch, int mask,
                                    Condition cc, Label* condition_met,
                                    Label::Distance condition_met_distance) {
   ASM_CODE_COMMENT(this);
@@ -2113,7 +2113,7 @@ void TurboAssembler::CheckPageFlag(Register object, Register scratch, int mask,
   j(cc, condition_met, condition_met_distance);
 }
-void TurboAssembler::ComputeCodeStartAddress(Register dst) {
+void MacroAssembler::ComputeCodeStartAddress(Register dst) {
   ASM_CODE_COMMENT(this);
   // In order to get the address of the current instruction, we first need
   // to use a call and then use a pop, thus pushing the return address to
@@ -2128,7 +2128,7 @@ void TurboAssembler::ComputeCodeStartAddress(Register dst) {
   }
 }
-void TurboAssembler::CallForDeoptimization(Builtin target, int, Label* exit,
+void MacroAssembler::CallForDeoptimization(Builtin target, int, Label* exit,
                                            DeoptimizeKind kind, Label* ret,
                                            Label*) {
   ASM_CODE_COMMENT(this);
@@ -2138,8 +2138,8 @@ void TurboAssembler::CallForDeoptimization(Builtin target, int, Label* exit,
                                    : Deoptimizer::kEagerDeoptExitSize);
 }
-void TurboAssembler::Trap() { int3(); }
-void TurboAssembler::DebugBreak() { int3(); }
+void MacroAssembler::Trap() { int3(); }
+void MacroAssembler::DebugBreak() { int3(); }
 }  // namespace internal
 }  // namespace v8


@@ -21,10 +21,10 @@
 #include "src/codegen/ia32/assembler-ia32.h"
 #include "src/codegen/ia32/register-ia32.h"
 #include "src/codegen/label.h"
+#include "src/codegen/macro-assembler-base.h"
 #include "src/codegen/reglist.h"
 #include "src/codegen/reloc-info.h"
 #include "src/codegen/shared-ia32-x64/macro-assembler-shared-ia32-x64.h"
-#include "src/codegen/turbo-assembler.h"
 #include "src/common/globals.h"
 #include "src/execution/frames.h"
 #include "src/handles/handles.h"
@@ -68,10 +68,10 @@ class StackArgumentsAccessor {
   DISALLOW_IMPLICIT_CONSTRUCTORS(StackArgumentsAccessor);
 };
-class V8_EXPORT_PRIVATE TurboAssembler
-    : public SharedTurboAssemblerBase<TurboAssembler> {
+class V8_EXPORT_PRIVATE MacroAssembler
+    : public SharedMacroAssembler<MacroAssembler> {
  public:
-  using SharedTurboAssemblerBase<TurboAssembler>::SharedTurboAssemblerBase;
+  using SharedMacroAssembler<MacroAssembler>::SharedMacroAssembler;
   void CheckPageFlag(Register object, Register scratch, int mask, Condition cc,
                      Label* condition_met,
@@ -411,17 +411,6 @@ class V8_EXPORT_PRIVATE TurboAssembler
   // Define an exception handler and bind a label.
   void BindExceptionHandler(Label* label) { bind(label); }
- protected:
-  // Drops arguments assuming that the return address was already popped.
-  void DropArguments(Register count, ArgumentsCountType type = kCountIsInteger,
-                     ArgumentsCountMode mode = kCountExcludesReceiver);
-};
-
-// MacroAssembler implements a collection of frequently used macros.
-class V8_EXPORT_PRIVATE MacroAssembler : public TurboAssembler {
- public:
-  using TurboAssembler::TurboAssembler;
   void PushRoot(RootIndex index);
   // Compare the object in a register to a value and jump if they are equal.
@@ -671,6 +660,11 @@ class V8_EXPORT_PRIVATE MacroAssembler : public TurboAssembler {
   void StackOverflowCheck(Register num_args, Register scratch,
                           Label* stack_overflow, bool include_receiver = false);
+ protected:
+  // Drops arguments assuming that the return address was already popped.
+  void DropArguments(Register count, ArgumentsCountType type = kCountIsInteger,
+                     ArgumentsCountMode mode = kCountExcludesReceiver);
+
  private:
   // Helper functions for generating invokes.
   void InvokePrologue(Register expected_parameter_count,

File diff suppressed because it is too large


@@ -59,9 +59,9 @@ inline MemOperand FieldMemOperand(Register object, int offset) {
   return MemOperand(object, offset - kHeapObjectTag);
 }
-class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
+class V8_EXPORT_PRIVATE MacroAssembler : public MacroAssemblerBase {
  public:
-  using TurboAssemblerBase::TurboAssemblerBase;
+  using MacroAssemblerBase::MacroAssemblerBase;
   // Activation support.
   void EnterFrame(StackFrame::Type type);
@@ -773,46 +773,6 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
   // Define an exception handler and bind a label.
   void BindExceptionHandler(Label* label) { bind(label); }
- protected:
-  inline Register GetRkAsRegisterHelper(const Operand& rk, Register scratch);
-  inline int32_t GetOffset(Label* L, OffsetSize bits);
-
- private:
-  bool has_double_zero_reg_set_ = false;
-
-  // Performs a truncating conversion of a floating point number as used by
-  // the JS bitwise operations. See ECMA-262 9.5: ToInt32. Goes to 'done' if it
-  // succeeds, otherwise falls through if result is saturated. On return
-  // 'result' either holds answer, or is clobbered on fall through.
-  void TryInlineTruncateDoubleToI(Register result, DoubleRegister input,
-                                  Label* done);
-
-  bool BranchShortOrFallback(Label* L, Condition cond, Register rj,
-                             const Operand& rk, bool need_link);
-
-  // f32 or f64
-  void CompareF(FPURegister cmp1, FPURegister cmp2, FPUCondition cc,
-                CFRegister cd, bool f32 = true);
-  void CompareIsNanF(FPURegister cmp1, FPURegister cmp2, CFRegister cd,
-                     bool f32 = true);
-
-  void CallCFunctionHelper(Register function, int num_reg_arguments,
-                           int num_double_arguments);
-
-  void RoundDouble(FPURegister dst, FPURegister src, FPURoundingMode mode);
-  void RoundFloat(FPURegister dst, FPURegister src, FPURoundingMode mode);
-
-  // Push a fixed frame, consisting of ra, fp.
-  void PushCommonFrame(Register marker_reg = no_reg);
-};
-
-// MacroAssembler implements a collection of frequently used macros.
-class V8_EXPORT_PRIVATE MacroAssembler : public TurboAssembler {
- public:
-  using TurboAssembler::TurboAssembler;
   // It assumes that the arguments are located below the stack pointer.
   // argc is the number of arguments not including the receiver.
   // TODO(LOONG_dev): LOONG64: Remove this function once we stick with the
@@ -1079,17 +1039,50 @@ class V8_EXPORT_PRIVATE MacroAssembler : public TurboAssembler {
     DecodeField<Field>(reg, reg);
   }
+ protected:
+  inline Register GetRkAsRegisterHelper(const Operand& rk, Register scratch);
+  inline int32_t GetOffset(Label* L, OffsetSize bits);
+
  private:
+  bool has_double_zero_reg_set_ = false;
+
   // Helper functions for generating invokes.
   void InvokePrologue(Register expected_parameter_count,
                       Register actual_parameter_count, Label* done,
                       InvokeType type);
+  // Performs a truncating conversion of a floating point number as used by
+  // the JS bitwise operations. See ECMA-262 9.5: ToInt32. Goes to 'done' if it
+  // succeeds, otherwise falls through if result is saturated. On return
+  // 'result' either holds answer, or is clobbered on fall through.
+  void TryInlineTruncateDoubleToI(Register result, DoubleRegister input,
+                                  Label* done);
+
+  bool BranchShortOrFallback(Label* L, Condition cond, Register rj,
+                             const Operand& rk, bool need_link);
+
+  // f32 or f64
+  void CompareF(FPURegister cmp1, FPURegister cmp2, FPUCondition cc,
+                CFRegister cd, bool f32 = true);
+  void CompareIsNanF(FPURegister cmp1, FPURegister cmp2, CFRegister cd,
+                     bool f32 = true);
+
+  void CallCFunctionHelper(Register function, int num_reg_arguments,
+                           int num_double_arguments);
+
+  void RoundDouble(FPURegister dst, FPURegister src, FPURoundingMode mode);
+  void RoundFloat(FPURegister dst, FPURegister src, FPURoundingMode mode);
+
+  // Push a fixed frame, consisting of ra, fp.
+  void PushCommonFrame(Register marker_reg = no_reg);
+
   DISALLOW_IMPLICIT_CONSTRUCTORS(MacroAssembler);
 };
 template <typename Func>
-void TurboAssembler::GenerateSwitchTable(Register index, size_t case_count,
+void MacroAssembler::GenerateSwitchTable(Register index, size_t case_count,
                                          Func GetLabelFunction) {
   UseScratchRegisterScope scope(this);
   Register scratch = scope.Acquire();

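Note: a hypothetical caller of GenerateSwitchTable (assumed API shape: GetLabelFunction maps a case index to its Label*, and `__` is the usual masm-call macro; not code from this CL):

    Label cases[3];
    __ GenerateSwitchTable(index_reg, 3,
                           [&cases](size_t i) { return &cases[i]; });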

@@ -2,7 +2,7 @@
 // Use of this source code is governed by a BSD-style license that can be
 // found in the LICENSE file.
-#include "src/codegen/turbo-assembler.h"
+#include "src/codegen/macro-assembler-base.h"
 #include "src/builtins/builtins.h"
 #include "src/builtins/constants-table-builder.h"
@@ -15,7 +15,7 @@
 namespace v8 {
 namespace internal {
-TurboAssemblerBase::TurboAssemblerBase(Isolate* isolate,
+MacroAssemblerBase::MacroAssemblerBase(Isolate* isolate,
                                        const AssemblerOptions& options,
                                        CodeObjectRequired create_code_object,
                                        std::unique_ptr<AssemblerBuffer> buffer)
@@ -26,7 +26,7 @@ TurboAssemblerBase::TurboAssemblerBase(Isolate* isolate,
   }
 }
-Address TurboAssemblerBase::BuiltinEntry(Builtin builtin) {
+Address MacroAssemblerBase::BuiltinEntry(Builtin builtin) {
   DCHECK(Builtins::IsBuiltinId(builtin));
   if (isolate_ != nullptr) {
     Address entry = isolate_->builtin_entry_table()[Builtins::ToInt(builtin)];
@@ -38,7 +38,7 @@ Address TurboAssemblerBase::BuiltinEntry(Builtin builtin) {
   return d.InstructionStartOfBuiltin(builtin);
 }
-void TurboAssemblerBase::IndirectLoadConstant(Register destination,
+void MacroAssemblerBase::IndirectLoadConstant(Register destination,
                                               Handle<HeapObject> object) {
   CHECK(root_array_available_);
@@ -71,7 +71,7 @@ void TurboAssemblerBase::IndirectLoadConstant(Register destination,
   }
 }
-void TurboAssemblerBase::IndirectLoadExternalReference(
+void MacroAssemblerBase::IndirectLoadExternalReference(
     Register destination, ExternalReference reference) {
   CHECK(root_array_available_);
@@ -90,24 +90,24 @@ void TurboAssemblerBase::IndirectLoadExternalReference(
 }
 // static
-int32_t TurboAssemblerBase::RootRegisterOffsetForRootIndex(
+int32_t MacroAssemblerBase::RootRegisterOffsetForRootIndex(
     RootIndex root_index) {
   return IsolateData::root_slot_offset(root_index);
 }
 // static
-int32_t TurboAssemblerBase::RootRegisterOffsetForBuiltin(Builtin builtin) {
+int32_t MacroAssemblerBase::RootRegisterOffsetForBuiltin(Builtin builtin) {
   return IsolateData::BuiltinSlotOffset(builtin);
 }
 // static
-intptr_t TurboAssemblerBase::RootRegisterOffsetForExternalReference(
+intptr_t MacroAssemblerBase::RootRegisterOffsetForExternalReference(
     Isolate* isolate, const ExternalReference& reference) {
   return static_cast<intptr_t>(reference.address() - isolate->isolate_root());
 }
 // static
-int32_t TurboAssemblerBase::RootRegisterOffsetForExternalReferenceTableEntry(
+int32_t MacroAssemblerBase::RootRegisterOffsetForExternalReferenceTableEntry(
     Isolate* isolate, const ExternalReference& reference) {
   // Encode as an index into the external reference table stored on the
   // isolate.
@@ -120,13 +120,13 @@ int32_t TurboAssemblerBase::RootRegisterOffsetForExternalReferenceTableEntry(
 }
 // static
-bool TurboAssemblerBase::IsAddressableThroughRootRegister(
+bool MacroAssemblerBase::IsAddressableThroughRootRegister(
    Isolate* isolate, const ExternalReference& reference) {
   Address address = reference.address();
   return isolate->root_register_addressable_region().contains(address);
 }
-Tagged_t TurboAssemblerBase::ReadOnlyRootPtr(RootIndex index) {
+Tagged_t MacroAssemblerBase::ReadOnlyRootPtr(RootIndex index) {
   DCHECK(RootsTable::IsReadOnly(index));
   CHECK(V8_STATIC_ROOTS_BOOL);
   CHECK(isolate_->root(index).IsHeapObject());
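A hedged sketch of how the offset helpers above are typically consumed (the LoadViaRootRegister function and the mips64-style Ld/MemOperand usage are illustrative assumptions, not code from this CL): roots live at fixed offsets from kRootRegister, so a single root-relative load replaces materializing a full 64-bit address.

  // Illustrative only; assumes a mips64-flavoured MacroAssembler.
  void LoadViaRootRegister(MacroAssembler* masm, Register dst, RootIndex index) {
    int32_t offset = MacroAssemblerBase::RootRegisterOffsetForRootIndex(index);
    masm->Ld(dst, MemOperand(kRootRegister, offset));  // one root-relative load
  }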


@@ -2,8 +2,8 @@
 // Use of this source code is governed by a BSD-style license that can be
 // found in the LICENSE file.
-#ifndef V8_CODEGEN_TURBO_ASSEMBLER_H_
-#define V8_CODEGEN_TURBO_ASSEMBLER_H_
+#ifndef V8_CODEGEN_MACRO_ASSEMBLER_BASE_H_
+#define V8_CODEGEN_MACRO_ASSEMBLER_BASE_H_
 #include <memory>
@@ -15,30 +15,24 @@
 namespace v8 {
 namespace internal {
-// Common base class for platform-specific TurboAssemblers containing
+// Common base class for platform-specific MacroAssemblers containing
 // platform-independent bits.
-// You will encounter two subclasses, TurboAssembler (derives from
-// TurboAssemblerBase), and MacroAssembler (derives from TurboAssembler). The
-// main difference is that MacroAssembler is allowed to access the isolate, and
-// TurboAssembler accesses the isolate in a very limited way. TurboAssembler
-// contains all the functionality that is used by Turbofan, and does not expect
-// to be running on the main thread.
-class V8_EXPORT_PRIVATE TurboAssemblerBase : public Assembler {
+// TODO(victorgomes): We should use LocalIsolate instead of Isolate in the
+// methods of this class.
+class V8_EXPORT_PRIVATE MacroAssemblerBase : public Assembler {
  public:
   // Constructors are declared public to inherit them in derived classes
   // with `using` directive.
-  TurboAssemblerBase(Isolate* isolate, CodeObjectRequired create_code_object,
+  MacroAssemblerBase(Isolate* isolate, CodeObjectRequired create_code_object,
                      std::unique_ptr<AssemblerBuffer> buffer = {})
-      : TurboAssemblerBase(isolate, AssemblerOptions::Default(isolate),
+      : MacroAssemblerBase(isolate, AssemblerOptions::Default(isolate),
                            create_code_object, std::move(buffer)) {}
-  TurboAssemblerBase(Isolate* isolate, const AssemblerOptions& options,
+  MacroAssemblerBase(Isolate* isolate, const AssemblerOptions& options,
                      CodeObjectRequired create_code_object,
                      std::unique_ptr<AssemblerBuffer> buffer = {});
-  Isolate* isolate() const {
-    return isolate_;
-  }
+  Isolate* isolate() const { return isolate_; }
   Handle<HeapObject> CodeObject() const {
     DCHECK(!code_object_.is_null());
@@ -135,25 +129,25 @@ class V8_EXPORT_PRIVATE TurboAssemblerBase : public Assembler {
   int comment_depth_ = 0;
-  DISALLOW_IMPLICIT_CONSTRUCTORS(TurboAssemblerBase);
+  DISALLOW_IMPLICIT_CONSTRUCTORS(MacroAssemblerBase);
 };
 // Avoids emitting calls to the {Builtin::kAbort} builtin when emitting
 // debug code during the lifetime of this scope object.
 class V8_NODISCARD HardAbortScope {
  public:
-  explicit HardAbortScope(TurboAssemblerBase* assembler)
+  explicit HardAbortScope(MacroAssemblerBase* assembler)
       : assembler_(assembler), old_value_(assembler->should_abort_hard()) {
     assembler_->set_abort_hard(true);
   }
   ~HardAbortScope() { assembler_->set_abort_hard(old_value_); }
  private:
-  TurboAssemblerBase* assembler_;
+  MacroAssemblerBase* assembler_;
   bool old_value_;
 };
 }  // namespace internal
 }  // namespace v8
-#endif  // V8_CODEGEN_TURBO_ASSEMBLER_H_
+#endif  // V8_CODEGEN_MACRO_ASSEMBLER_BASE_H_
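For orientation, a minimal usage sketch of HardAbortScope with its renamed parameter type (EmitDebugChecks is a hypothetical caller, not part of this CL):

  void EmitDebugChecks(MacroAssembler* masm) {
    HardAbortScope hard_abort(masm);  // should_abort_hard() is true inside
    // ... emit debug-only checks; aborts emitted here trap instead of
    // calling the Builtin::kAbort builtin ...
  }  // destructor restores the previous should_abort_hard() value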


@@ -5,7 +5,7 @@
 #ifndef V8_CODEGEN_MACRO_ASSEMBLER_H_
 #define V8_CODEGEN_MACRO_ASSEMBLER_H_
-#include "src/codegen/turbo-assembler.h"
+#include "src/codegen/macro-assembler-base.h"
 #include "src/execution/frames.h"
 #include "src/heap/heap.h"
@@ -82,25 +82,25 @@ static constexpr int kMaxCParameters = 256;
 class V8_NODISCARD FrameScope {
  public:
-  explicit FrameScope(TurboAssembler* tasm, StackFrame::Type type)
+  explicit FrameScope(MacroAssembler* masm, StackFrame::Type type)
       :
 #ifdef V8_CODE_COMMENTS
-        comment_(tasm, frame_name(type)),
+        comment_(masm, frame_name(type)),
 #endif
-        tasm_(tasm),
+        masm_(masm),
         type_(type),
-        old_has_frame_(tasm->has_frame()) {
-    tasm->set_has_frame(true);
+        old_has_frame_(masm->has_frame()) {
+    masm->set_has_frame(true);
     if (type != StackFrame::MANUAL && type_ != StackFrame::NO_FRAME_TYPE) {
-      tasm->EnterFrame(type);
+      masm->EnterFrame(type);
     }
   }
   ~FrameScope() {
     if (type_ != StackFrame::MANUAL && type_ != StackFrame::NO_FRAME_TYPE) {
-      tasm_->LeaveFrame(type_);
+      masm_->LeaveFrame(type_);
     }
-    tasm_->set_has_frame(old_has_frame_);
+    masm_->set_has_frame(old_has_frame_);
   }
  private:
@@ -125,7 +125,7 @@ class V8_NODISCARD FrameScope {
   Assembler::CodeComment comment_;
 #endif  // V8_CODE_COMMENTS
-  TurboAssembler* tasm_;
+  MacroAssembler* masm_;
   StackFrame::Type const type_;
   bool const old_has_frame_;
 };
@@ -198,7 +198,7 @@ class V8_NODISCARD AllowExternalCallThatCantCauseGC : public FrameScope {
 // scope object.
 class V8_NODISCARD NoRootArrayScope {
  public:
-  explicit NoRootArrayScope(TurboAssembler* masm)
+  explicit NoRootArrayScope(MacroAssembler* masm)
       : masm_(masm), old_value_(masm->root_array_available()) {
     masm->set_root_array_available(false);
   }
@@ -206,7 +206,7 @@ class V8_NODISCARD NoRootArrayScope {
   ~NoRootArrayScope() { masm_->set_root_array_available(old_value_); }
  private:
-  TurboAssembler* masm_;
+  MacroAssembler* masm_;
   bool old_value_;
 };
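A minimal sketch of the renamed FrameScope in use (GenerateStub is hypothetical; the scope brackets EnterFrame/LeaveFrame and tracks has_frame() exactly as the diff above shows):

  void GenerateStub(MacroAssembler* masm) {
    FrameScope scope(masm, StackFrame::INTERNAL);  // emits EnterFrame(INTERNAL)
    // ... emit code that needs a frame, e.g. runtime calls ...
  }  // emits LeaveFrame(INTERNAL) and restores the previous has_frame()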


@@ -819,7 +819,7 @@ void Assembler::target_at_put(int pos, int target_pos, bool is_internal) {
     Instr instr_b = REGIMM | BGEZAL;  // Branch and link.
     instr_b = SetBranchOffset(pos, target_pos, instr_b);
     // Correct ra register to point to one instruction after jalr from
-    // TurboAssembler::BranchAndLinkLong.
+    // MacroAssembler::BranchAndLinkLong.
     Instr instr_a = DADDIU | ra.code() << kRsShift | ra.code() << kRtShift |
                     kOptimizedBranchAndLinkLongReturnOffset;


@@ -294,7 +294,7 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase {
   // Adjust ra register in branch delay slot of bal instruction so to skip
   // instructions not needed after optimization of PIC in
-  // TurboAssembler::BranchAndLink method.
+  // MacroAssembler::BranchAndLink method.
   static constexpr int kOptimizedBranchAndLinkLongReturnOffset = 4 * kInstrSize;

File diff suppressed because it is too large.


@@ -90,9 +90,9 @@ inline MemOperand CFunctionArgumentOperand(int index) {
   return MemOperand(sp, offset);
 }
-class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
+class V8_EXPORT_PRIVATE MacroAssembler : public MacroAssemblerBase {
  public:
-  using TurboAssemblerBase::TurboAssemblerBase;
+  using MacroAssemblerBase::MacroAssemblerBase;
   // Activation support.
   void EnterFrame(StackFrame::Type type);
@@ -913,79 +913,6 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
   // Define an exception handler and bind a label.
   void BindExceptionHandler(Label* label) { bind(label); }
- protected:
-  inline Register GetRtAsRegisterHelper(const Operand& rt, Register scratch);
-  inline int32_t GetOffset(int32_t offset, Label* L, OffsetSize bits);
- private:
-  bool has_double_zero_reg_set_ = false;
-  // Performs a truncating conversion of a floating point number as used by
-  // the JS bitwise operations. See ECMA-262 9.5: ToInt32. Goes to 'done' if it
-  // succeeds, otherwise falls through if result is saturated. On return
-  // 'result' either holds answer, or is clobbered on fall through.
-  void TryInlineTruncateDoubleToI(Register result, DoubleRegister input,
-                                  Label* done);
-  void CompareF(SecondaryField sizeField, FPUCondition cc, FPURegister cmp1,
-                FPURegister cmp2);
-  void CompareIsNanF(SecondaryField sizeField, FPURegister cmp1,
-                     FPURegister cmp2);
-  void BranchShortMSA(MSABranchDF df, Label* target, MSABranchCondition cond,
-                      MSARegister wt, BranchDelaySlot bd = PROTECT);
-  void CallCFunctionHelper(Register function, int num_reg_arguments,
-                           int num_double_arguments);
-  // TODO(mips) Reorder parameters so out parameters come last.
-  bool CalculateOffset(Label* L, int32_t* offset, OffsetSize bits);
-  bool CalculateOffset(Label* L, int32_t* offset, OffsetSize bits,
-                       Register* scratch, const Operand& rt);
-  void BranchShortHelperR6(int32_t offset, Label* L);
-  void BranchShortHelper(int16_t offset, Label* L, BranchDelaySlot bdslot);
-  bool BranchShortHelperR6(int32_t offset, Label* L, Condition cond,
-                           Register rs, const Operand& rt);
-  bool BranchShortHelper(int16_t offset, Label* L, Condition cond, Register rs,
-                         const Operand& rt, BranchDelaySlot bdslot);
-  bool BranchShortCheck(int32_t offset, Label* L, Condition cond, Register rs,
-                        const Operand& rt, BranchDelaySlot bdslot);
-  void BranchAndLinkShortHelperR6(int32_t offset, Label* L);
-  void BranchAndLinkShortHelper(int16_t offset, Label* L,
-                                BranchDelaySlot bdslot);
-  void BranchAndLinkShort(int32_t offset, BranchDelaySlot bdslot = PROTECT);
-  void BranchAndLinkShort(Label* L, BranchDelaySlot bdslot = PROTECT);
-  bool BranchAndLinkShortHelperR6(int32_t offset, Label* L, Condition cond,
-                                  Register rs, const Operand& rt);
-  bool BranchAndLinkShortHelper(int16_t offset, Label* L, Condition cond,
-                                Register rs, const Operand& rt,
-                                BranchDelaySlot bdslot);
-  bool BranchAndLinkShortCheck(int32_t offset, Label* L, Condition cond,
-                               Register rs, const Operand& rt,
-                               BranchDelaySlot bdslot);
-  void BranchLong(Label* L, BranchDelaySlot bdslot);
-  void BranchAndLinkLong(Label* L, BranchDelaySlot bdslot);
-  template <typename RoundFunc>
-  void RoundDouble(FPURegister dst, FPURegister src, FPURoundingMode mode,
-                   RoundFunc round);
-  template <typename RoundFunc>
-  void RoundFloat(FPURegister dst, FPURegister src, FPURoundingMode mode,
-                  RoundFunc round);
-  // Push a fixed frame, consisting of ra, fp.
-  void PushCommonFrame(Register marker_reg = no_reg);
-};
-// MacroAssembler implements a collection of frequently used macros.
-class V8_EXPORT_PRIVATE MacroAssembler : public TurboAssembler {
- public:
-  using TurboAssembler::TurboAssembler;
   // It assumes that the arguments are located below the stack pointer.
   // argc is the number of arguments not including the receiver.
   // TODO(victorgomes): Remove this function once we stick with the reversed
@@ -1269,17 +1196,83 @@ class V8_EXPORT_PRIVATE MacroAssembler : public TurboAssembler {
     DecodeField<Field>(reg, reg);
   }
+ protected:
+  inline Register GetRtAsRegisterHelper(const Operand& rt, Register scratch);
+  inline int32_t GetOffset(int32_t offset, Label* L, OffsetSize bits);
  private:
+  bool has_double_zero_reg_set_ = false;
   // Helper functions for generating invokes.
   void InvokePrologue(Register expected_parameter_count,
                       Register actual_parameter_count, Label* done,
                       InvokeType type);
+  // Performs a truncating conversion of a floating point number as used by
+  // the JS bitwise operations. See ECMA-262 9.5: ToInt32. Goes to 'done' if it
+  // succeeds, otherwise falls through if result is saturated. On return
+  // 'result' either holds answer, or is clobbered on fall through.
+  void TryInlineTruncateDoubleToI(Register result, DoubleRegister input,
+                                  Label* done);
+  void CompareF(SecondaryField sizeField, FPUCondition cc, FPURegister cmp1,
+                FPURegister cmp2);
+  void CompareIsNanF(SecondaryField sizeField, FPURegister cmp1,
+                     FPURegister cmp2);
+  void BranchShortMSA(MSABranchDF df, Label* target, MSABranchCondition cond,
+                      MSARegister wt, BranchDelaySlot bd = PROTECT);
+  void CallCFunctionHelper(Register function, int num_reg_arguments,
+                           int num_double_arguments);
+  // TODO(mips) Reorder parameters so out parameters come last.
+  bool CalculateOffset(Label* L, int32_t* offset, OffsetSize bits);
+  bool CalculateOffset(Label* L, int32_t* offset, OffsetSize bits,
+                       Register* scratch, const Operand& rt);
+  void BranchShortHelperR6(int32_t offset, Label* L);
+  void BranchShortHelper(int16_t offset, Label* L, BranchDelaySlot bdslot);
+  bool BranchShortHelperR6(int32_t offset, Label* L, Condition cond,
+                           Register rs, const Operand& rt);
+  bool BranchShortHelper(int16_t offset, Label* L, Condition cond, Register rs,
+                         const Operand& rt, BranchDelaySlot bdslot);
+  bool BranchShortCheck(int32_t offset, Label* L, Condition cond, Register rs,
+                        const Operand& rt, BranchDelaySlot bdslot);
+  void BranchAndLinkShortHelperR6(int32_t offset, Label* L);
+  void BranchAndLinkShortHelper(int16_t offset, Label* L,
+                                BranchDelaySlot bdslot);
+  void BranchAndLinkShort(int32_t offset, BranchDelaySlot bdslot = PROTECT);
+  void BranchAndLinkShort(Label* L, BranchDelaySlot bdslot = PROTECT);
+  bool BranchAndLinkShortHelperR6(int32_t offset, Label* L, Condition cond,
+                                  Register rs, const Operand& rt);
+  bool BranchAndLinkShortHelper(int16_t offset, Label* L, Condition cond,
+                                Register rs, const Operand& rt,
+                                BranchDelaySlot bdslot);
+  bool BranchAndLinkShortCheck(int32_t offset, Label* L, Condition cond,
+                               Register rs, const Operand& rt,
+                               BranchDelaySlot bdslot);
+  void BranchLong(Label* L, BranchDelaySlot bdslot);
+  void BranchAndLinkLong(Label* L, BranchDelaySlot bdslot);
+  template <typename RoundFunc>
+  void RoundDouble(FPURegister dst, FPURegister src, FPURoundingMode mode,
+                   RoundFunc round);
+  template <typename RoundFunc>
+  void RoundFloat(FPURegister dst, FPURegister src, FPURoundingMode mode,
+                  RoundFunc round);
+  // Push a fixed frame, consisting of ra, fp.
+  void PushCommonFrame(Register marker_reg = no_reg);
   DISALLOW_IMPLICIT_CONSTRUCTORS(MacroAssembler);
 };
 template <typename Func>
-void TurboAssembler::GenerateSwitchTable(Register index, size_t case_count,
+void MacroAssembler::GenerateSwitchTable(Register index, size_t case_count,
                                          Func GetLabelFunction) {
   // Ensure that dd-ed labels following this instruction use 8 bytes aligned
   // addresses.
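A hedged sketch of a GenerateSwitchTable call site (the label array, index_reg, and lambda are illustrative assumptions, not code from this CL; GetLabelFunction maps a case index to the Label* that the corresponding jump-table entry is dd-ed against):

  Label case_labels[3];
  masm->GenerateSwitchTable(index_reg, 3, [&case_labels](size_t i) {
    return &case_labels[i];  // one Label* per switch case
  });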


@@ -1570,7 +1570,7 @@ class V8_EXPORT_PRIVATE V8_NODISCARD UseScratchRegisterScope {
  private:
   friend class Assembler;
-  friend class TurboAssembler;
+  friend class MacroAssembler;
   Assembler* assembler_;
   RegList old_available_;

File diff suppressed because it is too large.


@@ -47,9 +47,9 @@ Register GetRegisterThatIsNotOneOf(Register reg1, Register reg2 = no_reg,
 #define ClearRightImm clrrwi
 #endif
-class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
+class V8_EXPORT_PRIVATE MacroAssembler : public MacroAssemblerBase {
  public:
-  using TurboAssemblerBase::TurboAssemblerBase;
+  using MacroAssemblerBase::MacroAssemblerBase;
   void CallBuiltin(Builtin builtin, Condition cond = al);
   void TailCallBuiltin(Builtin builtin, Condition cond = al,
@@ -1438,21 +1438,6 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
   void S128Select(Simd128Register dst, Simd128Register src1,
                   Simd128Register src2, Simd128Register mask);
- private:
-  static const int kSmiShift = kSmiTagSize + kSmiShiftSize;
-  int CalculateStackPassedWords(int num_reg_arguments,
-                                int num_double_arguments);
-  void CallCFunctionHelper(Register function, int num_reg_arguments,
-                           int num_double_arguments,
-                           bool has_function_descriptor);
-};
-// MacroAssembler implements a collection of frequently used acros.
-class V8_EXPORT_PRIVATE MacroAssembler : public TurboAssembler {
- public:
-  using TurboAssembler::TurboAssembler;
   // It assumes that the arguments are located below the stack pointer.
   // argc is the number of arguments not including the receiver.
   // TODO(victorgomes): Remove this function once we stick with the reversed
@@ -1745,6 +1730,12 @@ class V8_EXPORT_PRIVATE MacroAssembler : public TurboAssembler {
  private:
   static const int kSmiShift = kSmiTagSize + kSmiShiftSize;
+  int CalculateStackPassedWords(int num_reg_arguments,
+                                int num_double_arguments);
+  void CallCFunctionHelper(Register function, int num_reg_arguments,
+                           int num_double_arguments,
+                           bool has_function_descriptor);
   // Helper functions for generating invokes.
   void InvokePrologue(Register expected_parameter_count,
                       Register actual_parameter_count, Label* done,

File diff suppressed because it is too large.


@@ -90,9 +90,9 @@ inline MemOperand CFunctionArgumentOperand(int index) {
   return MemOperand(sp, offset);
 }
-class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
+class V8_EXPORT_PRIVATE MacroAssembler : public MacroAssemblerBase {
  public:
-  using TurboAssemblerBase::TurboAssemblerBase;
+  using MacroAssemblerBase::MacroAssemblerBase;
   // Activation support.
   void EnterFrame(StackFrame::Type type);
@@ -1174,71 +1174,6 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
   void LoadLane(int sz, VRegister dst, uint8_t laneidx, MemOperand src);
   void StoreLane(int sz, VRegister src, uint8_t laneidx, MemOperand dst);
- protected:
-  inline Register GetRtAsRegisterHelper(const Operand& rt, Register scratch);
-  inline int32_t GetOffset(int32_t offset, Label* L, OffsetSize bits);
- private:
-  bool has_double_zero_reg_set_ = false;
-  bool has_single_zero_reg_set_ = false;
-  // Performs a truncating conversion of a floating point number as used by
-  // the JS bitwise operations. See ECMA-262 9.5: ToInt32. Goes to 'done' if it
-  // succeeds, otherwise falls through if result is saturated. On return
-  // 'result' either holds answer, or is clobbered on fall through.
-  void TryInlineTruncateDoubleToI(Register result, DoubleRegister input,
-                                  Label* done);
-  void CallCFunctionHelper(Register function, int num_reg_arguments,
-                           int num_double_arguments);
-  // TODO(RISCV) Reorder parameters so out parameters come last.
-  bool CalculateOffset(Label* L, int32_t* offset, OffsetSize bits);
-  bool CalculateOffset(Label* L, int32_t* offset, OffsetSize bits,
-                       Register* scratch, const Operand& rt);
-  void BranchShortHelper(int32_t offset, Label* L);
-  bool BranchShortHelper(int32_t offset, Label* L, Condition cond, Register rs,
-                         const Operand& rt);
-  bool BranchShortCheck(int32_t offset, Label* L, Condition cond, Register rs,
-                        const Operand& rt);
-  void BranchAndLinkShortHelper(int32_t offset, Label* L);
-  void BranchAndLinkShort(int32_t offset);
-  void BranchAndLinkShort(Label* L);
-  bool BranchAndLinkShortHelper(int32_t offset, Label* L, Condition cond,
-                                Register rs, const Operand& rt);
-  bool BranchAndLinkShortCheck(int32_t offset, Label* L, Condition cond,
-                               Register rs, const Operand& rt);
-  void BranchAndLinkLong(Label* L);
-#if V8_TARGET_ARCH_RISCV64
-  template <typename F_TYPE>
-  void RoundHelper(FPURegister dst, FPURegister src, FPURegister fpu_scratch,
-                   FPURoundingMode mode);
-#elif V8_TARGET_ARCH_RISCV32
-  void RoundDouble(FPURegister dst, FPURegister src, FPURegister fpu_scratch,
-                   FPURoundingMode mode);
-  void RoundFloat(FPURegister dst, FPURegister src, FPURegister fpu_scratch,
-                  FPURoundingMode mode);
-#endif
-  template <typename F>
-  void RoundHelper(VRegister dst, VRegister src, Register scratch,
-                   VRegister v_scratch, FPURoundingMode frm);
-  template <typename TruncFunc>
-  void RoundFloatingPointToInteger(Register rd, FPURegister fs, Register result,
-                                   TruncFunc trunc);
-  // Push a fixed frame, consisting of ra, fp.
-  void PushCommonFrame(Register marker_reg = no_reg);
-};
-// MacroAssembler implements a collection of frequently used macros.
-class V8_EXPORT_PRIVATE MacroAssembler : public TurboAssembler {
- public:
-  using TurboAssembler::TurboAssembler;
   // It assumes that the arguments are located below the stack pointer.
   // argc is the number of arguments not including the receiver.
   // TODO(victorgomes): Remove this function once we stick with the reversed
@@ -1521,7 +1456,65 @@ class V8_EXPORT_PRIVATE MacroAssembler : public TurboAssembler {
     DecodeField<Field>(reg, reg);
   }
+ protected:
+  inline Register GetRtAsRegisterHelper(const Operand& rt, Register scratch);
+  inline int32_t GetOffset(int32_t offset, Label* L, OffsetSize bits);
  private:
+  bool has_double_zero_reg_set_ = false;
+  bool has_single_zero_reg_set_ = false;
+  // Performs a truncating conversion of a floating point number as used by
+  // the JS bitwise operations. See ECMA-262 9.5: ToInt32. Goes to 'done' if it
+  // succeeds, otherwise falls through if result is saturated. On return
+  // 'result' either holds answer, or is clobbered on fall through.
+  void TryInlineTruncateDoubleToI(Register result, DoubleRegister input,
+                                  Label* done);
+  void CallCFunctionHelper(Register function, int num_reg_arguments,
+                           int num_double_arguments);
+  // TODO(RISCV) Reorder parameters so out parameters come last.
+  bool CalculateOffset(Label* L, int32_t* offset, OffsetSize bits);
+  bool CalculateOffset(Label* L, int32_t* offset, OffsetSize bits,
+                       Register* scratch, const Operand& rt);
+  void BranchShortHelper(int32_t offset, Label* L);
+  bool BranchShortHelper(int32_t offset, Label* L, Condition cond, Register rs,
+                         const Operand& rt);
+  bool BranchShortCheck(int32_t offset, Label* L, Condition cond, Register rs,
+                        const Operand& rt);
+  void BranchAndLinkShortHelper(int32_t offset, Label* L);
+  void BranchAndLinkShort(int32_t offset);
+  void BranchAndLinkShort(Label* L);
+  bool BranchAndLinkShortHelper(int32_t offset, Label* L, Condition cond,
+                                Register rs, const Operand& rt);
+  bool BranchAndLinkShortCheck(int32_t offset, Label* L, Condition cond,
+                               Register rs, const Operand& rt);
+  void BranchAndLinkLong(Label* L);
+#if V8_TARGET_ARCH_RISCV64
+  template <typename F_TYPE>
+  void RoundHelper(FPURegister dst, FPURegister src, FPURegister fpu_scratch,
+                   FPURoundingMode mode);
+#elif V8_TARGET_ARCH_RISCV32
+  void RoundDouble(FPURegister dst, FPURegister src, FPURegister fpu_scratch,
+                   FPURoundingMode mode);
+  void RoundFloat(FPURegister dst, FPURegister src, FPURegister fpu_scratch,
+                  FPURoundingMode mode);
+#endif
+  template <typename F>
+  void RoundHelper(VRegister dst, VRegister src, Register scratch,
+                   VRegister v_scratch, FPURoundingMode frm);
+  template <typename TruncFunc>
+  void RoundFloatingPointToInteger(Register rd, FPURegister fs, Register result,
+                                   TruncFunc trunc);
+  // Push a fixed frame, consisting of ra, fp.
+  void PushCommonFrame(Register marker_reg = no_reg);
   // Helper functions for generating invokes.
   void InvokePrologue(Register expected_parameter_count,
                       Register actual_parameter_count, Label* done,
@@ -1538,7 +1531,7 @@ class V8_EXPORT_PRIVATE MacroAssembler : public TurboAssembler {
 };
 template <typename Func>
-void TurboAssembler::GenerateSwitchTable(Register index, size_t case_count,
+void MacroAssembler::GenerateSwitchTable(Register index, size_t case_count,
                                          Func GetLabelFunction) {
   // Ensure that dd-ed labels following this instruction use 8 bytes aligned
   // addresses.
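The TryInlineTruncateDoubleToI contract quoted in this diff can be modelled in scalar C++ (illustrative only; the real helper emits this check as machine code and branches to 'done' or falls through):

  #include <cmath>
  #include <cstdint>
  #include <limits>
  bool TryTruncateToInt32(double input, int32_t* result) {
    double t = std::trunc(input);
    if (std::isnan(t) || t < std::numeric_limits<int32_t>::min() ||
        t > std::numeric_limits<int32_t>::max()) {
      return false;  // conversion would saturate: fall through to slow path
    }
    *result = static_cast<int32_t>(t);  // 'done': result holds the answer
    return true;
  }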


@@ -1494,7 +1494,7 @@ class V8_EXPORT_PRIVATE V8_NODISCARD UseScratchRegisterScope {
  private:
   friend class Assembler;
-  friend class TurboAssembler;
+  friend class MacroAssembler;
   Assembler* assembler_;
   RegList old_available_;

File diff suppressed because it is too large.


@@ -41,9 +41,9 @@ Register GetRegisterThatIsNotOneOf(Register reg1, Register reg2 = no_reg,
                                    Register reg5 = no_reg,
                                    Register reg6 = no_reg);
-class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
+class V8_EXPORT_PRIVATE MacroAssembler : public MacroAssemblerBase {
  public:
-  using TurboAssemblerBase::TurboAssemblerBase;
+  using MacroAssemblerBase::MacroAssemblerBase;
   void CallBuiltin(Builtin builtin, Condition cond = al);
   void TailCallBuiltin(Builtin builtin, Condition cond = al);
@@ -1502,22 +1502,6 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
   void CountTrailingZerosU64(Register dst, Register src,
                              Register scratch_pair = r0);
- private:
-  static const int kSmiShift = kSmiTagSize + kSmiShiftSize;
-  void CallCFunctionHelper(Register function, int num_reg_arguments,
-                           int num_double_arguments);
-  void Jump(intptr_t target, RelocInfo::Mode rmode, Condition cond = al);
-  int CalculateStackPassedWords(int num_reg_arguments,
-                                int num_double_arguments);
-};
-// MacroAssembler implements a collection of frequently used macros.
-class V8_EXPORT_PRIVATE MacroAssembler : public TurboAssembler {
- public:
-  using TurboAssembler::TurboAssembler;
   void LoadStackLimit(Register destination, StackLimitKind kind);
   // It assumes that the arguments are located below the stack pointer.
   // argc is the number of arguments not including the receiver.
@@ -1803,6 +1787,14 @@ class V8_EXPORT_PRIVATE MacroAssembler : public TurboAssembler {
  private:
   static const int kSmiShift = kSmiTagSize + kSmiShiftSize;
+  void CallCFunctionHelper(Register function, int num_reg_arguments,
+                           int num_double_arguments);
+  void Jump(intptr_t target, RelocInfo::Mode rmode, Condition cond = al);
+  int CalculateStackPassedWords(int num_reg_arguments,
+                                int num_double_arguments);
   // Helper functions for generating invokes.
   void InvokePrologue(Register expected_parameter_count,
                       Register actual_parameter_count, Label* done,


@ -27,7 +27,7 @@
namespace v8 { namespace v8 {
namespace internal { namespace internal {
void SharedTurboAssembler::Move(Register dst, uint32_t src) { void SharedMacroAssemblerBase::Move(Register dst, uint32_t src) {
// Helper to paper over the different assembler function names. // Helper to paper over the different assembler function names.
#if V8_TARGET_ARCH_IA32 #if V8_TARGET_ARCH_IA32
mov(dst, Immediate(src)); mov(dst, Immediate(src));
@ -38,7 +38,7 @@ void SharedTurboAssembler::Move(Register dst, uint32_t src) {
#endif #endif
} }
void SharedTurboAssembler::Move(Register dst, Register src) { void SharedMacroAssemblerBase::Move(Register dst, Register src) {
// Helper to paper over the different assembler function names. // Helper to paper over the different assembler function names.
if (dst != src) { if (dst != src) {
#if V8_TARGET_ARCH_IA32 #if V8_TARGET_ARCH_IA32
@ -51,7 +51,7 @@ void SharedTurboAssembler::Move(Register dst, Register src) {
} }
} }
void SharedTurboAssembler::Add(Register dst, Immediate src) { void SharedMacroAssemblerBase::Add(Register dst, Immediate src) {
// Helper to paper over the different assembler function names. // Helper to paper over the different assembler function names.
#if V8_TARGET_ARCH_IA32 #if V8_TARGET_ARCH_IA32
add(dst, src); add(dst, src);
@ -62,7 +62,7 @@ void SharedTurboAssembler::Add(Register dst, Immediate src) {
#endif #endif
} }
void SharedTurboAssembler::And(Register dst, Immediate src) { void SharedMacroAssemblerBase::And(Register dst, Immediate src) {
// Helper to paper over the different assembler function names. // Helper to paper over the different assembler function names.
#if V8_TARGET_ARCH_IA32 #if V8_TARGET_ARCH_IA32
and_(dst, src); and_(dst, src);
@ -77,8 +77,8 @@ void SharedTurboAssembler::And(Register dst, Immediate src) {
#endif #endif
} }
void SharedTurboAssembler::Movhps(XMMRegister dst, XMMRegister src1, void SharedMacroAssemblerBase::Movhps(XMMRegister dst, XMMRegister src1,
Operand src2) { Operand src2) {
if (CpuFeatures::IsSupported(AVX)) { if (CpuFeatures::IsSupported(AVX)) {
CpuFeatureScope scope(this, AVX); CpuFeatureScope scope(this, AVX);
vmovhps(dst, src1, src2); vmovhps(dst, src1, src2);
@ -90,8 +90,8 @@ void SharedTurboAssembler::Movhps(XMMRegister dst, XMMRegister src1,
} }
} }
void SharedTurboAssembler::Movlps(XMMRegister dst, XMMRegister src1, void SharedMacroAssemblerBase::Movlps(XMMRegister dst, XMMRegister src1,
Operand src2) { Operand src2) {
if (CpuFeatures::IsSupported(AVX)) { if (CpuFeatures::IsSupported(AVX)) {
CpuFeatureScope scope(this, AVX); CpuFeatureScope scope(this, AVX);
vmovlps(dst, src1, src2); vmovlps(dst, src1, src2);
@ -102,8 +102,8 @@ void SharedTurboAssembler::Movlps(XMMRegister dst, XMMRegister src1,
movlps(dst, src2); movlps(dst, src2);
} }
} }
void SharedTurboAssembler::Blendvpd(XMMRegister dst, XMMRegister src1, void SharedMacroAssemblerBase::Blendvpd(XMMRegister dst, XMMRegister src1,
XMMRegister src2, XMMRegister mask) { XMMRegister src2, XMMRegister mask) {
if (CpuFeatures::IsSupported(AVX)) { if (CpuFeatures::IsSupported(AVX)) {
CpuFeatureScope scope(this, AVX); CpuFeatureScope scope(this, AVX);
vblendvpd(dst, src1, src2, mask); vblendvpd(dst, src1, src2, mask);
@ -115,8 +115,8 @@ void SharedTurboAssembler::Blendvpd(XMMRegister dst, XMMRegister src1,
} }
} }
void SharedTurboAssembler::Blendvps(XMMRegister dst, XMMRegister src1, void SharedMacroAssemblerBase::Blendvps(XMMRegister dst, XMMRegister src1,
XMMRegister src2, XMMRegister mask) { XMMRegister src2, XMMRegister mask) {
if (CpuFeatures::IsSupported(AVX)) { if (CpuFeatures::IsSupported(AVX)) {
CpuFeatureScope scope(this, AVX); CpuFeatureScope scope(this, AVX);
vblendvps(dst, src1, src2, mask); vblendvps(dst, src1, src2, mask);
@ -128,8 +128,8 @@ void SharedTurboAssembler::Blendvps(XMMRegister dst, XMMRegister src1,
} }
} }
void SharedTurboAssembler::Pblendvb(XMMRegister dst, XMMRegister src1, void SharedMacroAssemblerBase::Pblendvb(XMMRegister dst, XMMRegister src1,
XMMRegister src2, XMMRegister mask) { XMMRegister src2, XMMRegister mask) {
if (CpuFeatures::IsSupported(AVX)) { if (CpuFeatures::IsSupported(AVX)) {
CpuFeatureScope scope(this, AVX); CpuFeatureScope scope(this, AVX);
vpblendvb(dst, src1, src2, mask); vpblendvb(dst, src1, src2, mask);
@ -141,8 +141,8 @@ void SharedTurboAssembler::Pblendvb(XMMRegister dst, XMMRegister src1,
} }
} }
void SharedTurboAssembler::Shufps(XMMRegister dst, XMMRegister src1, void SharedMacroAssemblerBase::Shufps(XMMRegister dst, XMMRegister src1,
XMMRegister src2, uint8_t imm8) { XMMRegister src2, uint8_t imm8) {
if (CpuFeatures::IsSupported(AVX)) { if (CpuFeatures::IsSupported(AVX)) {
CpuFeatureScope avx_scope(this, AVX); CpuFeatureScope avx_scope(this, AVX);
vshufps(dst, src1, src2, imm8); vshufps(dst, src1, src2, imm8);
@ -154,8 +154,8 @@ void SharedTurboAssembler::Shufps(XMMRegister dst, XMMRegister src1,
} }
} }
void SharedTurboAssembler::F64x2ExtractLane(DoubleRegister dst, XMMRegister src, void SharedMacroAssemblerBase::F64x2ExtractLane(DoubleRegister dst,
uint8_t lane) { XMMRegister src, uint8_t lane) {
ASM_CODE_COMMENT(this); ASM_CODE_COMMENT(this);
if (lane == 0) { if (lane == 0) {
if (dst != src) { if (dst != src) {
@ -173,8 +173,10 @@ void SharedTurboAssembler::F64x2ExtractLane(DoubleRegister dst, XMMRegister src,
} }
} }
void SharedTurboAssembler::F64x2ReplaceLane(XMMRegister dst, XMMRegister src, void SharedMacroAssemblerBase::F64x2ReplaceLane(XMMRegister dst,
DoubleRegister rep, uint8_t lane) { XMMRegister src,
DoubleRegister rep,
uint8_t lane) {
ASM_CODE_COMMENT(this); ASM_CODE_COMMENT(this);
if (CpuFeatures::IsSupported(AVX)) { if (CpuFeatures::IsSupported(AVX)) {
CpuFeatureScope scope(this, AVX); CpuFeatureScope scope(this, AVX);
@ -197,8 +199,8 @@ void SharedTurboAssembler::F64x2ReplaceLane(XMMRegister dst, XMMRegister src,
} }
} }
void SharedTurboAssembler::F32x4Min(XMMRegister dst, XMMRegister lhs, void SharedMacroAssemblerBase::F32x4Min(XMMRegister dst, XMMRegister lhs,
XMMRegister rhs, XMMRegister scratch) { XMMRegister rhs, XMMRegister scratch) {
ASM_CODE_COMMENT(this); ASM_CODE_COMMENT(this);
// The minps instruction doesn't propagate NaNs and +0's in its first // The minps instruction doesn't propagate NaNs and +0's in its first
// operand. Perform minps in both orders, merge the results, and adjust. // operand. Perform minps in both orders, merge the results, and adjust.
@ -226,8 +228,8 @@ void SharedTurboAssembler::F32x4Min(XMMRegister dst, XMMRegister lhs,
Andnps(dst, dst, scratch); Andnps(dst, dst, scratch);
} }
void SharedTurboAssembler::F32x4Max(XMMRegister dst, XMMRegister lhs, void SharedMacroAssemblerBase::F32x4Max(XMMRegister dst, XMMRegister lhs,
XMMRegister rhs, XMMRegister scratch) { XMMRegister rhs, XMMRegister scratch) {
ASM_CODE_COMMENT(this); ASM_CODE_COMMENT(this);
// The maxps instruction doesn't propagate NaNs and +0's in its first // The maxps instruction doesn't propagate NaNs and +0's in its first
// operand. Perform maxps in both orders, merge the results, and adjust. // operand. Perform maxps in both orders, merge the results, and adjust.
@ -258,8 +260,8 @@ void SharedTurboAssembler::F32x4Max(XMMRegister dst, XMMRegister lhs,
Andnps(dst, dst, scratch); Andnps(dst, dst, scratch);
} }
void SharedTurboAssembler::F64x2Min(XMMRegister dst, XMMRegister lhs, void SharedMacroAssemblerBase::F64x2Min(XMMRegister dst, XMMRegister lhs,
XMMRegister rhs, XMMRegister scratch) { XMMRegister rhs, XMMRegister scratch) {
ASM_CODE_COMMENT(this); ASM_CODE_COMMENT(this);
if (CpuFeatures::IsSupported(AVX)) { if (CpuFeatures::IsSupported(AVX)) {
CpuFeatureScope scope(this, AVX); CpuFeatureScope scope(this, AVX);
@ -296,8 +298,8 @@ void SharedTurboAssembler::F64x2Min(XMMRegister dst, XMMRegister lhs,
} }
} }
void SharedTurboAssembler::F64x2Max(XMMRegister dst, XMMRegister lhs, void SharedMacroAssemblerBase::F64x2Max(XMMRegister dst, XMMRegister lhs,
XMMRegister rhs, XMMRegister scratch) { XMMRegister rhs, XMMRegister scratch) {
ASM_CODE_COMMENT(this); ASM_CODE_COMMENT(this);
if (CpuFeatures::IsSupported(AVX)) { if (CpuFeatures::IsSupported(AVX)) {
CpuFeatureScope scope(this, AVX); CpuFeatureScope scope(this, AVX);
@ -336,7 +338,7 @@ void SharedTurboAssembler::F64x2Max(XMMRegister dst, XMMRegister lhs,
} }
} }
void SharedTurboAssembler::F32x4Splat(XMMRegister dst, DoubleRegister src) { void SharedMacroAssemblerBase::F32x4Splat(XMMRegister dst, DoubleRegister src) {
ASM_CODE_COMMENT(this); ASM_CODE_COMMENT(this);
if (CpuFeatures::IsSupported(AVX2)) { if (CpuFeatures::IsSupported(AVX2)) {
CpuFeatureScope avx2_scope(this, AVX2); CpuFeatureScope avx2_scope(this, AVX2);
@ -354,8 +356,8 @@ void SharedTurboAssembler::F32x4Splat(XMMRegister dst, DoubleRegister src) {
} }
} }
void SharedTurboAssembler::F32x4ExtractLane(FloatRegister dst, XMMRegister src, void SharedMacroAssemblerBase::F32x4ExtractLane(FloatRegister dst,
uint8_t lane) { XMMRegister src, uint8_t lane) {
ASM_CODE_COMMENT(this); ASM_CODE_COMMENT(this);
DCHECK_LT(lane, 4); DCHECK_LT(lane, 4);
// These instructions are shorter than insertps, but will leave junk in // These instructions are shorter than insertps, but will leave junk in
@ -376,8 +378,8 @@ void SharedTurboAssembler::F32x4ExtractLane(FloatRegister dst, XMMRegister src,
} }
} }
void SharedTurboAssembler::S128Store32Lane(Operand dst, XMMRegister src, void SharedMacroAssemblerBase::S128Store32Lane(Operand dst, XMMRegister src,
uint8_t laneidx) { uint8_t laneidx) {
ASM_CODE_COMMENT(this); ASM_CODE_COMMENT(this);
if (laneidx == 0) { if (laneidx == 0) {
Movss(dst, src); Movss(dst, src);
@ -388,8 +390,8 @@ void SharedTurboAssembler::S128Store32Lane(Operand dst, XMMRegister src,
} }
template <typename Op> template <typename Op>
void SharedTurboAssembler::I8x16SplatPreAvx2(XMMRegister dst, Op src, void SharedMacroAssemblerBase::I8x16SplatPreAvx2(XMMRegister dst, Op src,
XMMRegister scratch) { XMMRegister scratch) {
ASM_CODE_COMMENT(this); ASM_CODE_COMMENT(this);
DCHECK(!CpuFeatures::IsSupported(AVX2)); DCHECK(!CpuFeatures::IsSupported(AVX2));
CpuFeatureScope ssse3_scope(this, SSSE3); CpuFeatureScope ssse3_scope(this, SSSE3);
@ -398,8 +400,8 @@ void SharedTurboAssembler::I8x16SplatPreAvx2(XMMRegister dst, Op src,
Pshufb(dst, scratch); Pshufb(dst, scratch);
} }
void SharedTurboAssembler::I8x16Splat(XMMRegister dst, Register src, void SharedMacroAssemblerBase::I8x16Splat(XMMRegister dst, Register src,
XMMRegister scratch) { XMMRegister scratch) {
ASM_CODE_COMMENT(this); ASM_CODE_COMMENT(this);
if (CpuFeatures::IsSupported(AVX2)) { if (CpuFeatures::IsSupported(AVX2)) {
CpuFeatureScope avx2_scope(this, AVX2); CpuFeatureScope avx2_scope(this, AVX2);
@ -410,8 +412,8 @@ void SharedTurboAssembler::I8x16Splat(XMMRegister dst, Register src,
} }
} }
void SharedTurboAssembler::I8x16Splat(XMMRegister dst, Operand src, void SharedMacroAssemblerBase::I8x16Splat(XMMRegister dst, Operand src,
XMMRegister scratch) { XMMRegister scratch) {
ASM_CODE_COMMENT(this); ASM_CODE_COMMENT(this);
DCHECK_OPERAND_IS_NOT_REG(src); DCHECK_OPERAND_IS_NOT_REG(src);
if (CpuFeatures::IsSupported(AVX2)) { if (CpuFeatures::IsSupported(AVX2)) {
@ -422,9 +424,9 @@ void SharedTurboAssembler::I8x16Splat(XMMRegister dst, Operand src,
} }
} }
void SharedTurboAssembler::I8x16Shl(XMMRegister dst, XMMRegister src1, void SharedMacroAssemblerBase::I8x16Shl(XMMRegister dst, XMMRegister src1,
uint8_t src2, Register tmp1, uint8_t src2, Register tmp1,
XMMRegister tmp2) { XMMRegister tmp2) {
ASM_CODE_COMMENT(this); ASM_CODE_COMMENT(this);
DCHECK_NE(dst, tmp2); DCHECK_NE(dst, tmp2);
// Perform 16-bit shift, then mask away low bits. // Perform 16-bit shift, then mask away low bits.
@ -444,9 +446,9 @@ void SharedTurboAssembler::I8x16Shl(XMMRegister dst, XMMRegister src1,
Pand(dst, tmp2); Pand(dst, tmp2);
} }
void SharedTurboAssembler::I8x16Shl(XMMRegister dst, XMMRegister src1, void SharedMacroAssemblerBase::I8x16Shl(XMMRegister dst, XMMRegister src1,
Register src2, Register tmp1, Register src2, Register tmp1,
XMMRegister tmp2, XMMRegister tmp3) { XMMRegister tmp2, XMMRegister tmp3) {
ASM_CODE_COMMENT(this); ASM_CODE_COMMENT(this);
DCHECK(!AreAliased(dst, tmp2, tmp3)); DCHECK(!AreAliased(dst, tmp2, tmp3));
DCHECK(!AreAliased(src1, tmp2, tmp3)); DCHECK(!AreAliased(src1, tmp2, tmp3));
@ -471,8 +473,8 @@ void SharedTurboAssembler::I8x16Shl(XMMRegister dst, XMMRegister src1,
Psllw(dst, dst, tmp3); Psllw(dst, dst, tmp3);
} }
void SharedTurboAssembler::I8x16ShrS(XMMRegister dst, XMMRegister src1, void SharedMacroAssemblerBase::I8x16ShrS(XMMRegister dst, XMMRegister src1,
uint8_t src2, XMMRegister tmp) { uint8_t src2, XMMRegister tmp) {
ASM_CODE_COMMENT(this); ASM_CODE_COMMENT(this);
// Unpack bytes into words, do word (16-bit) shifts, and repack. // Unpack bytes into words, do word (16-bit) shifts, and repack.
DCHECK_NE(dst, tmp); DCHECK_NE(dst, tmp);
@ -485,9 +487,9 @@ void SharedTurboAssembler::I8x16ShrS(XMMRegister dst, XMMRegister src1,
Packsswb(dst, tmp); Packsswb(dst, tmp);
} }
void SharedTurboAssembler::I8x16ShrS(XMMRegister dst, XMMRegister src1, void SharedMacroAssemblerBase::I8x16ShrS(XMMRegister dst, XMMRegister src1,
Register src2, Register tmp1, Register src2, Register tmp1,
XMMRegister tmp2, XMMRegister tmp3) { XMMRegister tmp2, XMMRegister tmp3) {
ASM_CODE_COMMENT(this); ASM_CODE_COMMENT(this);
DCHECK(!AreAliased(dst, tmp2, tmp3)); DCHECK(!AreAliased(dst, tmp2, tmp3));
DCHECK_NE(src1, tmp2); DCHECK_NE(src1, tmp2);
@ -506,9 +508,9 @@ void SharedTurboAssembler::I8x16ShrS(XMMRegister dst, XMMRegister src1,
Packsswb(dst, tmp2); Packsswb(dst, tmp2);
} }
void SharedTurboAssembler::I8x16ShrU(XMMRegister dst, XMMRegister src1, void SharedMacroAssemblerBase::I8x16ShrU(XMMRegister dst, XMMRegister src1,
uint8_t src2, Register tmp1, uint8_t src2, Register tmp1,
XMMRegister tmp2) { XMMRegister tmp2) {
ASM_CODE_COMMENT(this); ASM_CODE_COMMENT(this);
DCHECK_NE(dst, tmp2); DCHECK_NE(dst, tmp2);
if (!CpuFeatures::IsSupported(AVX) && (dst != src1)) { if (!CpuFeatures::IsSupported(AVX) && (dst != src1)) {
@ -528,9 +530,9 @@ void SharedTurboAssembler::I8x16ShrU(XMMRegister dst, XMMRegister src1,
Pand(dst, tmp2); Pand(dst, tmp2);
} }
void SharedTurboAssembler::I8x16ShrU(XMMRegister dst, XMMRegister src1, void SharedMacroAssemblerBase::I8x16ShrU(XMMRegister dst, XMMRegister src1,
Register src2, Register tmp1, Register src2, Register tmp1,
XMMRegister tmp2, XMMRegister tmp3) { XMMRegister tmp2, XMMRegister tmp3) {
ASM_CODE_COMMENT(this); ASM_CODE_COMMENT(this);
DCHECK(!AreAliased(dst, tmp2, tmp3)); DCHECK(!AreAliased(dst, tmp2, tmp3));
DCHECK_NE(src1, tmp2); DCHECK_NE(src1, tmp2);
@ -550,14 +552,14 @@ void SharedTurboAssembler::I8x16ShrU(XMMRegister dst, XMMRegister src1,
} }
template <typename Op> template <typename Op>
void SharedTurboAssembler::I16x8SplatPreAvx2(XMMRegister dst, Op src) { void SharedMacroAssemblerBase::I16x8SplatPreAvx2(XMMRegister dst, Op src) {
DCHECK(!CpuFeatures::IsSupported(AVX2)); DCHECK(!CpuFeatures::IsSupported(AVX2));
Movd(dst, src); Movd(dst, src);
Pshuflw(dst, dst, uint8_t{0x0}); Pshuflw(dst, dst, uint8_t{0x0});
Punpcklqdq(dst, dst); Punpcklqdq(dst, dst);
} }
void SharedTurboAssembler::I16x8Splat(XMMRegister dst, Register src) { void SharedMacroAssemblerBase::I16x8Splat(XMMRegister dst, Register src) {
ASM_CODE_COMMENT(this); ASM_CODE_COMMENT(this);
if (CpuFeatures::IsSupported(AVX2)) { if (CpuFeatures::IsSupported(AVX2)) {
CpuFeatureScope avx2_scope(this, AVX2); CpuFeatureScope avx2_scope(this, AVX2);
@ -568,7 +570,7 @@ void SharedTurboAssembler::I16x8Splat(XMMRegister dst, Register src) {
} }
} }
void SharedTurboAssembler::I16x8Splat(XMMRegister dst, Operand src) { void SharedMacroAssemblerBase::I16x8Splat(XMMRegister dst, Operand src) {
ASM_CODE_COMMENT(this); ASM_CODE_COMMENT(this);
DCHECK_OPERAND_IS_NOT_REG(src); DCHECK_OPERAND_IS_NOT_REG(src);
if (CpuFeatures::IsSupported(AVX2)) { if (CpuFeatures::IsSupported(AVX2)) {
@ -579,18 +581,20 @@ void SharedTurboAssembler::I16x8Splat(XMMRegister dst, Operand src) {
} }
} }
void SharedTurboAssembler::I16x8ExtMulLow(XMMRegister dst, XMMRegister src1, void SharedMacroAssemblerBase::I16x8ExtMulLow(XMMRegister dst, XMMRegister src1,
XMMRegister src2, XMMRegister scratch, XMMRegister src2,
bool is_signed) { XMMRegister scratch,
bool is_signed) {
ASM_CODE_COMMENT(this); ASM_CODE_COMMENT(this);
is_signed ? Pmovsxbw(scratch, src1) : Pmovzxbw(scratch, src1); is_signed ? Pmovsxbw(scratch, src1) : Pmovzxbw(scratch, src1);
is_signed ? Pmovsxbw(dst, src2) : Pmovzxbw(dst, src2); is_signed ? Pmovsxbw(dst, src2) : Pmovzxbw(dst, src2);
Pmullw(dst, scratch); Pmullw(dst, scratch);
} }
void SharedTurboAssembler::I16x8ExtMulHighS(XMMRegister dst, XMMRegister src1, void SharedMacroAssemblerBase::I16x8ExtMulHighS(XMMRegister dst,
XMMRegister src2, XMMRegister src1,
XMMRegister scratch) { XMMRegister src2,
XMMRegister scratch) {
ASM_CODE_COMMENT(this); ASM_CODE_COMMENT(this);
if (CpuFeatures::IsSupported(AVX)) { if (CpuFeatures::IsSupported(AVX)) {
CpuFeatureScope avx_scope(this, AVX); CpuFeatureScope avx_scope(this, AVX);
@ -612,9 +616,10 @@ void SharedTurboAssembler::I16x8ExtMulHighS(XMMRegister dst, XMMRegister src1,
} }
} }
void SharedTurboAssembler::I16x8ExtMulHighU(XMMRegister dst, XMMRegister src1, void SharedMacroAssemblerBase::I16x8ExtMulHighU(XMMRegister dst,
XMMRegister src2, XMMRegister src1,
XMMRegister scratch) { XMMRegister src2,
XMMRegister scratch) {
ASM_CODE_COMMENT(this); ASM_CODE_COMMENT(this);
// The logic here is slightly complicated to handle all the cases of register // The logic here is slightly complicated to handle all the cases of register
// aliasing. This allows flexibility for callers in TurboFan and Liftoff. // aliasing. This allows flexibility for callers in TurboFan and Liftoff.
@ -662,8 +667,8 @@ void SharedTurboAssembler::I16x8ExtMulHighU(XMMRegister dst, XMMRegister src1,
} }
} }
void SharedTurboAssembler::I16x8SConvertI8x16High(XMMRegister dst, void SharedMacroAssemblerBase::I16x8SConvertI8x16High(XMMRegister dst,
XMMRegister src) { XMMRegister src) {
ASM_CODE_COMMENT(this); ASM_CODE_COMMENT(this);
if (CpuFeatures::IsSupported(AVX)) { if (CpuFeatures::IsSupported(AVX)) {
CpuFeatureScope avx_scope(this, AVX); CpuFeatureScope avx_scope(this, AVX);
@ -685,9 +690,9 @@ void SharedTurboAssembler::I16x8SConvertI8x16High(XMMRegister dst,
} }
} }
void SharedTurboAssembler::I16x8UConvertI8x16High(XMMRegister dst, void SharedMacroAssemblerBase::I16x8UConvertI8x16High(XMMRegister dst,
XMMRegister src, XMMRegister src,
XMMRegister scratch) { XMMRegister scratch) {
ASM_CODE_COMMENT(this); ASM_CODE_COMMENT(this);
if (CpuFeatures::IsSupported(AVX)) { if (CpuFeatures::IsSupported(AVX)) {
CpuFeatureScope avx_scope(this, AVX); CpuFeatureScope avx_scope(this, AVX);
@ -711,9 +716,10 @@ void SharedTurboAssembler::I16x8UConvertI8x16High(XMMRegister dst,
} }
} }
void SharedTurboAssembler::I16x8Q15MulRSatS(XMMRegister dst, XMMRegister src1, void SharedMacroAssemblerBase::I16x8Q15MulRSatS(XMMRegister dst,
XMMRegister src2, XMMRegister src1,
XMMRegister scratch) { XMMRegister src2,
XMMRegister scratch) {
ASM_CODE_COMMENT(this); ASM_CODE_COMMENT(this);
// k = i16x8.splat(0x8000) // k = i16x8.splat(0x8000)
Pcmpeqd(scratch, scratch); Pcmpeqd(scratch, scratch);
@ -729,9 +735,9 @@ void SharedTurboAssembler::I16x8Q15MulRSatS(XMMRegister dst, XMMRegister src1,
Pxor(dst, scratch); Pxor(dst, scratch);
} }
void SharedTurboAssembler::I16x8DotI8x16I7x16S(XMMRegister dst, void SharedMacroAssemblerBase::I16x8DotI8x16I7x16S(XMMRegister dst,
XMMRegister src1, XMMRegister src1,
XMMRegister src2) { XMMRegister src2) {
ASM_CODE_COMMENT(this); ASM_CODE_COMMENT(this);
if (CpuFeatures::IsSupported(AVX)) { if (CpuFeatures::IsSupported(AVX)) {
CpuFeatureScope avx_scope(this, AVX); CpuFeatureScope avx_scope(this, AVX);
@ -744,7 +750,7 @@ void SharedTurboAssembler::I16x8DotI8x16I7x16S(XMMRegister dst,
} }
} }
void SharedTurboAssembler::I32x4DotI8x16I7x16AddS( void SharedMacroAssemblerBase::I32x4DotI8x16I7x16AddS(
XMMRegister dst, XMMRegister src1, XMMRegister src2, XMMRegister src3, XMMRegister dst, XMMRegister src1, XMMRegister src2, XMMRegister src3,
XMMRegister scratch, XMMRegister splat_reg) { XMMRegister scratch, XMMRegister splat_reg) {
ASM_CODE_COMMENT(this); ASM_CODE_COMMENT(this);
@ -768,9 +774,9 @@ void SharedTurboAssembler::I32x4DotI8x16I7x16AddS(
} }
} }
void SharedTurboAssembler::I32x4ExtAddPairwiseI16x8U(XMMRegister dst, void SharedMacroAssemblerBase::I32x4ExtAddPairwiseI16x8U(XMMRegister dst,
XMMRegister src, XMMRegister src,
XMMRegister tmp) { XMMRegister tmp) {
ASM_CODE_COMMENT(this); ASM_CODE_COMMENT(this);
if (CpuFeatures::IsSupported(AVX)) { if (CpuFeatures::IsSupported(AVX)) {
CpuFeatureScope avx_scope(this, AVX); CpuFeatureScope avx_scope(this, AVX);
@ -812,9 +818,10 @@ void SharedTurboAssembler::I32x4ExtAddPairwiseI16x8U(XMMRegister dst,
// 1. Multiply low word into scratch. // 1. Multiply low word into scratch.
// 2. Multiply high word (can be signed or unsigned) into dst. // 2. Multiply high word (can be signed or unsigned) into dst.
// 3. Unpack and interleave scratch and dst into dst. // 3. Unpack and interleave scratch and dst into dst.
void SharedTurboAssembler::I32x4ExtMul(XMMRegister dst, XMMRegister src1, void SharedMacroAssemblerBase::I32x4ExtMul(XMMRegister dst, XMMRegister src1,
XMMRegister src2, XMMRegister scratch, XMMRegister src2,
bool low, bool is_signed) { XMMRegister scratch, bool low,
bool is_signed) {
ASM_CODE_COMMENT(this); ASM_CODE_COMMENT(this);
if (CpuFeatures::IsSupported(AVX)) { if (CpuFeatures::IsSupported(AVX)) {
CpuFeatureScope avx_scope(this, AVX); CpuFeatureScope avx_scope(this, AVX);
@ -830,8 +837,8 @@ void SharedTurboAssembler::I32x4ExtMul(XMMRegister dst, XMMRegister src1,
} }
} }
void SharedTurboAssembler::I32x4SConvertI16x8High(XMMRegister dst, void SharedMacroAssemblerBase::I32x4SConvertI16x8High(XMMRegister dst,
XMMRegister src) { XMMRegister src) {
ASM_CODE_COMMENT(this); ASM_CODE_COMMENT(this);
if (CpuFeatures::IsSupported(AVX)) { if (CpuFeatures::IsSupported(AVX)) {
CpuFeatureScope avx_scope(this, AVX); CpuFeatureScope avx_scope(this, AVX);
@ -853,9 +860,9 @@ void SharedTurboAssembler::I32x4SConvertI16x8High(XMMRegister dst,
} }
} }
void SharedTurboAssembler::I32x4UConvertI16x8High(XMMRegister dst, void SharedMacroAssemblerBase::I32x4UConvertI16x8High(XMMRegister dst,
XMMRegister src, XMMRegister src,
XMMRegister scratch) { XMMRegister scratch) {
ASM_CODE_COMMENT(this); ASM_CODE_COMMENT(this);
if (CpuFeatures::IsSupported(AVX)) { if (CpuFeatures::IsSupported(AVX)) {
CpuFeatureScope avx_scope(this, AVX); CpuFeatureScope avx_scope(this, AVX);
@ -879,8 +886,8 @@ void SharedTurboAssembler::I32x4UConvertI16x8High(XMMRegister dst,
} }
} }
void SharedTurboAssembler::I64x2Neg(XMMRegister dst, XMMRegister src, void SharedMacroAssemblerBase::I64x2Neg(XMMRegister dst, XMMRegister src,
XMMRegister scratch) { XMMRegister scratch) {
ASM_CODE_COMMENT(this); ASM_CODE_COMMENT(this);
if (CpuFeatures::IsSupported(AVX)) { if (CpuFeatures::IsSupported(AVX)) {
CpuFeatureScope scope(this, AVX); CpuFeatureScope scope(this, AVX);
@ -896,8 +903,8 @@ void SharedTurboAssembler::I64x2Neg(XMMRegister dst, XMMRegister src,
} }
} }
void SharedTurboAssembler::I64x2Abs(XMMRegister dst, XMMRegister src, void SharedMacroAssemblerBase::I64x2Abs(XMMRegister dst, XMMRegister src,
XMMRegister scratch) { XMMRegister scratch) {
ASM_CODE_COMMENT(this); ASM_CODE_COMMENT(this);
if (CpuFeatures::IsSupported(AVX)) { if (CpuFeatures::IsSupported(AVX)) {
CpuFeatureScope avx_scope(this, AVX); CpuFeatureScope avx_scope(this, AVX);
@ -917,8 +924,8 @@ void SharedTurboAssembler::I64x2Abs(XMMRegister dst, XMMRegister src,
} }
} }
void SharedTurboAssembler::I64x2GtS(XMMRegister dst, XMMRegister src0, void SharedMacroAssemblerBase::I64x2GtS(XMMRegister dst, XMMRegister src0,
XMMRegister src1, XMMRegister scratch) { XMMRegister src1, XMMRegister scratch) {
ASM_CODE_COMMENT(this); ASM_CODE_COMMENT(this);
if (CpuFeatures::IsSupported(AVX)) { if (CpuFeatures::IsSupported(AVX)) {
CpuFeatureScope avx_scope(this, AVX); CpuFeatureScope avx_scope(this, AVX);
@ -951,8 +958,8 @@ void SharedTurboAssembler::I64x2GtS(XMMRegister dst, XMMRegister src0,
} }
} }
void SharedTurboAssembler::I64x2GeS(XMMRegister dst, XMMRegister src0, void SharedMacroAssemblerBase::I64x2GeS(XMMRegister dst, XMMRegister src0,
XMMRegister src1, XMMRegister scratch) { XMMRegister src1, XMMRegister scratch) {
ASM_CODE_COMMENT(this); ASM_CODE_COMMENT(this);
if (CpuFeatures::IsSupported(AVX)) { if (CpuFeatures::IsSupported(AVX)) {
CpuFeatureScope avx_scope(this, AVX); CpuFeatureScope avx_scope(this, AVX);
@ -986,8 +993,8 @@ void SharedTurboAssembler::I64x2GeS(XMMRegister dst, XMMRegister src0,
} }
} }
void SharedTurboAssembler::I64x2ShrS(XMMRegister dst, XMMRegister src, void SharedMacroAssemblerBase::I64x2ShrS(XMMRegister dst, XMMRegister src,
uint8_t shift, XMMRegister xmm_tmp) { uint8_t shift, XMMRegister xmm_tmp) {
ASM_CODE_COMMENT(this); ASM_CODE_COMMENT(this);
DCHECK_GT(64, shift); DCHECK_GT(64, shift);
DCHECK_NE(xmm_tmp, dst); DCHECK_NE(xmm_tmp, dst);
@ -1019,10 +1026,10 @@ void SharedTurboAssembler::I64x2ShrS(XMMRegister dst, XMMRegister src,
Psubq(dst, xmm_tmp); Psubq(dst, xmm_tmp);
} }
void SharedTurboAssembler::I64x2ShrS(XMMRegister dst, XMMRegister src, void SharedMacroAssemblerBase::I64x2ShrS(XMMRegister dst, XMMRegister src,
Register shift, XMMRegister xmm_tmp, Register shift, XMMRegister xmm_tmp,
XMMRegister xmm_shift, XMMRegister xmm_shift,
Register tmp_shift) { Register tmp_shift) {
ASM_CODE_COMMENT(this); ASM_CODE_COMMENT(this);
DCHECK_NE(xmm_tmp, dst); DCHECK_NE(xmm_tmp, dst);
DCHECK_NE(xmm_tmp, src); DCHECK_NE(xmm_tmp, src);
@ -1049,9 +1056,9 @@ void SharedTurboAssembler::I64x2ShrS(XMMRegister dst, XMMRegister src,
Psubq(dst, xmm_tmp); Psubq(dst, xmm_tmp);
} }
void SharedTurboAssembler::I64x2Mul(XMMRegister dst, XMMRegister lhs, void SharedMacroAssemblerBase::I64x2Mul(XMMRegister dst, XMMRegister lhs,
XMMRegister rhs, XMMRegister tmp1, XMMRegister rhs, XMMRegister tmp1,
XMMRegister tmp2) { XMMRegister tmp2) {
ASM_CODE_COMMENT(this); ASM_CODE_COMMENT(this);
DCHECK(!AreAliased(dst, tmp1, tmp2)); DCHECK(!AreAliased(dst, tmp1, tmp2));
DCHECK(!AreAliased(lhs, tmp1, tmp2)); DCHECK(!AreAliased(lhs, tmp1, tmp2));
@ -1099,9 +1106,10 @@ void SharedTurboAssembler::I64x2Mul(XMMRegister dst, XMMRegister lhs,
// 2. Unpack src1, src0 into even-number elements of dst. // 2. Unpack src1, src0 into even-number elements of dst.
// 3. Multiply 1. with 2. // 3. Multiply 1. with 2.
// For non-AVX, use non-destructive pshufd instead of punpckldq/punpckhdq. // For non-AVX, use non-destructive pshufd instead of punpckldq/punpckhdq.
void SharedTurboAssembler::I64x2ExtMul(XMMRegister dst, XMMRegister src1, void SharedMacroAssemblerBase::I64x2ExtMul(XMMRegister dst, XMMRegister src1,
XMMRegister src2, XMMRegister scratch, XMMRegister src2,
bool low, bool is_signed) { XMMRegister scratch, bool low,
bool is_signed) {
ASM_CODE_COMMENT(this); ASM_CODE_COMMENT(this);
if (CpuFeatures::IsSupported(AVX)) { if (CpuFeatures::IsSupported(AVX)) {
CpuFeatureScope avx_scope(this, AVX); CpuFeatureScope avx_scope(this, AVX);
@ -1130,8 +1138,8 @@ void SharedTurboAssembler::I64x2ExtMul(XMMRegister dst, XMMRegister src1,
} }
} }
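
The numbered comment before I64x2ExtMul describes the lane shuffling; the end result is the Wasm extended multiply: take the low (or high) two 32-bit lanes of each operand, widen to 64 bits, and multiply at full precision. A scalar reference model, assuming i64x2.extmul_low_i32x4_s semantics (names are mine):

#include <cstdint>
#include <cstdio>

// Widening multiply of the two low 32-bit lanes; the 64-bit product of two
// 32-bit values never overflows, which is the point of the instruction.
void ExtMulLowS_ref(int64_t dst[2], const int32_t a[4], const int32_t b[4]) {
  dst[0] = static_cast<int64_t>(a[0]) * b[0];
  dst[1] = static_cast<int64_t>(a[1]) * b[1];
}

int main() {
  int32_t a[4] = {1 << 30, -3, 0, 0}, b[4] = {4, 5, 0, 0};
  int64_t d[2];
  ExtMulLowS_ref(d, a, b);
  std::printf("%lld %lld\n", (long long)d[0], (long long)d[1]);  // 2^32, -15
}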
void SharedTurboAssembler::I64x2SConvertI32x4High(XMMRegister dst, void SharedMacroAssemblerBase::I64x2SConvertI32x4High(XMMRegister dst,
XMMRegister src) { XMMRegister src) {
ASM_CODE_COMMENT(this); ASM_CODE_COMMENT(this);
if (CpuFeatures::IsSupported(AVX)) { if (CpuFeatures::IsSupported(AVX)) {
CpuFeatureScope avx_scope(this, AVX); CpuFeatureScope avx_scope(this, AVX);
@ -1148,9 +1156,9 @@ void SharedTurboAssembler::I64x2SConvertI32x4High(XMMRegister dst,
} }
} }
void SharedTurboAssembler::I64x2UConvertI32x4High(XMMRegister dst, void SharedMacroAssemblerBase::I64x2UConvertI32x4High(XMMRegister dst,
XMMRegister src, XMMRegister src,
XMMRegister scratch) { XMMRegister scratch) {
ASM_CODE_COMMENT(this); ASM_CODE_COMMENT(this);
if (CpuFeatures::IsSupported(AVX)) { if (CpuFeatures::IsSupported(AVX)) {
CpuFeatureScope avx_scope(this, AVX); CpuFeatureScope avx_scope(this, AVX);
@ -1170,8 +1178,8 @@ void SharedTurboAssembler::I64x2UConvertI32x4High(XMMRegister dst,
} }
} }
void SharedTurboAssembler::S128Not(XMMRegister dst, XMMRegister src, void SharedMacroAssemblerBase::S128Not(XMMRegister dst, XMMRegister src,
XMMRegister scratch) { XMMRegister scratch) {
ASM_CODE_COMMENT(this); ASM_CODE_COMMENT(this);
if (dst == src) { if (dst == src) {
Pcmpeqd(scratch, scratch); Pcmpeqd(scratch, scratch);
@ -1182,9 +1190,9 @@ void SharedTurboAssembler::S128Not(XMMRegister dst, XMMRegister src,
} }
} }
void SharedTurboAssembler::S128Select(XMMRegister dst, XMMRegister mask, void SharedMacroAssemblerBase::S128Select(XMMRegister dst, XMMRegister mask,
XMMRegister src1, XMMRegister src2, XMMRegister src1, XMMRegister src2,
XMMRegister scratch) { XMMRegister scratch) {
ASM_CODE_COMMENT(this); ASM_CODE_COMMENT(this);
// v128.select = v128.or(v128.and(v1, c), v128.andnot(v2, c)). // v128.select = v128.or(v128.and(v1, c), v128.andnot(v2, c)).
// pandn(x, y) = !x & y, so we have to flip the mask and input. // pandn(x, y) = !x & y, so we have to flip the mask and input.
@ -1203,8 +1211,8 @@ void SharedTurboAssembler::S128Select(XMMRegister dst, XMMRegister mask,
} }
} }
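
The select comment above is the whole story: v128.select keeps bits of src1 where the mask bit is set and bits of src2 where it is clear, and because pandn computes ~x & y the non-AVX path has to pass the mask as the negated operand. A one-line scalar model (illustrative, not V8 code):

#include <cassert>
#include <cstdint>

// v128.select per bit: (src1 & mask) | (src2 & ~mask); the second term is
// exactly pandn(mask, src2).
uint64_t Select_ref(uint64_t mask, uint64_t src1, uint64_t src2) {
  return (src1 & mask) | (src2 & ~mask);
}

int main() { assert(Select_ref(0xFF00, 0xAAAA, 0x5555) == 0xAA55); }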
void SharedTurboAssembler::S128Load8Splat(XMMRegister dst, Operand src, void SharedMacroAssemblerBase::S128Load8Splat(XMMRegister dst, Operand src,
XMMRegister scratch) { XMMRegister scratch) {
ASM_CODE_COMMENT(this); ASM_CODE_COMMENT(this);
// The trap handler uses the current pc to create a landing pad, so that it can // The trap handler uses the current pc to create a landing pad, so that it can
// determine if a trap occurred in Wasm code due to an OOB load. Make sure the // determine if a trap occurred in Wasm code due to an OOB load. Make sure the
@ -1226,8 +1234,8 @@ void SharedTurboAssembler::S128Load8Splat(XMMRegister dst, Operand src,
} }
} }
void SharedTurboAssembler::S128Load16Splat(XMMRegister dst, Operand src, void SharedMacroAssemblerBase::S128Load16Splat(XMMRegister dst, Operand src,
XMMRegister scratch) { XMMRegister scratch) {
ASM_CODE_COMMENT(this); ASM_CODE_COMMENT(this);
// The trap handler uses the current pc to creating a landing, so that it can // The trap handler uses the current pc to creating a landing, so that it can
// determine if a trap occured in Wasm code due to a OOB load. Make sure the // determine if a trap occured in Wasm code due to a OOB load. Make sure the
@ -1248,7 +1256,7 @@ void SharedTurboAssembler::S128Load16Splat(XMMRegister dst, Operand src,
} }
} }
void SharedTurboAssembler::S128Load32Splat(XMMRegister dst, Operand src) { void SharedMacroAssemblerBase::S128Load32Splat(XMMRegister dst, Operand src) {
ASM_CODE_COMMENT(this); ASM_CODE_COMMENT(this);
// The trap handler uses the current pc to create a landing pad, so that it can // The trap handler uses the current pc to create a landing pad, so that it can
// determine if a trap occurred in Wasm code due to an OOB load. Make sure the // determine if a trap occurred in Wasm code due to an OOB load. Make sure the
@ -1262,8 +1270,8 @@ void SharedTurboAssembler::S128Load32Splat(XMMRegister dst, Operand src) {
} }
} }
void SharedTurboAssembler::S128Store64Lane(Operand dst, XMMRegister src, void SharedMacroAssemblerBase::S128Store64Lane(Operand dst, XMMRegister src,
uint8_t laneidx) { uint8_t laneidx) {
ASM_CODE_COMMENT(this); ASM_CODE_COMMENT(this);
if (laneidx == 0) { if (laneidx == 0) {
Movlps(dst, src); Movlps(dst, src);
@ -1342,27 +1350,27 @@ void SharedTurboAssembler::S128Store64Lane(Operand dst, XMMRegister src,
sub##ps_or_pd(dst, tmp); \ sub##ps_or_pd(dst, tmp); \
} }
void SharedTurboAssembler::F32x4Qfma(XMMRegister dst, XMMRegister src1, void SharedMacroAssemblerBase::F32x4Qfma(XMMRegister dst, XMMRegister src1,
XMMRegister src2, XMMRegister src3, XMMRegister src2, XMMRegister src3,
XMMRegister tmp) { XMMRegister tmp) {
QFMA(ps) QFMA(ps)
} }
void SharedTurboAssembler::F32x4Qfms(XMMRegister dst, XMMRegister src1, void SharedMacroAssemblerBase::F32x4Qfms(XMMRegister dst, XMMRegister src1,
XMMRegister src2, XMMRegister src3, XMMRegister src2, XMMRegister src3,
XMMRegister tmp) { XMMRegister tmp) {
QFMS(ps) QFMS(ps)
} }
void SharedTurboAssembler::F64x2Qfma(XMMRegister dst, XMMRegister src1, void SharedMacroAssemblerBase::F64x2Qfma(XMMRegister dst, XMMRegister src1,
XMMRegister src2, XMMRegister src3, XMMRegister src2, XMMRegister src3,
XMMRegister tmp) { XMMRegister tmp) {
QFMA(pd); QFMA(pd);
} }
void SharedTurboAssembler::F64x2Qfms(XMMRegister dst, XMMRegister src1, void SharedMacroAssemblerBase::F64x2Qfms(XMMRegister dst, XMMRegister src1,
XMMRegister src2, XMMRegister src3, XMMRegister src2, XMMRegister src3,
XMMRegister tmp) { XMMRegister tmp) {
QFMS(pd); QFMS(pd);
} }
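
Qfma/Qfms implement the relaxed-SIMD fused multiply-add and multiply-subtract: with FMA3 the multiply and add round once, while the mul + add/sub fallback rounds twice, and the two can legitimately differ in the last bit (the "q" historically stood for quasi-fused). A scalar demonstration of that difference, unrelated to V8 (compile with -ffp-contract=off so the compiler does not itself fuse the second expression):

#include <cmath>
#include <cstdio>

int main() {
  double a = 1.0 + 0x1p-52, b = 1.0 - 0x1p-52, c = -1.0;
  double fused = std::fma(a, b, c);  // a*b + c with a single rounding
  double unfused = a * b + c;        // a*b rounds to 1.0 first
  std::printf("fused=%g unfused=%g\n", fused, unfused);  // ~ -4.9e-32 vs 0
}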


@ -8,7 +8,7 @@
#include "src/base/macros.h" #include "src/base/macros.h"
#include "src/codegen/cpu-features.h" #include "src/codegen/cpu-features.h"
#include "src/codegen/external-reference.h" #include "src/codegen/external-reference.h"
#include "src/codegen/turbo-assembler.h" #include "src/codegen/macro-assembler-base.h"
#if V8_TARGET_ARCH_IA32 #if V8_TARGET_ARCH_IA32
#include "src/codegen/ia32/register-ia32.h" #include "src/codegen/ia32/register-ia32.h"
@ -30,15 +30,15 @@ constexpr int kStackSavedSavedFPSize = 2 * kDoubleSize;
constexpr int kStackSavedSavedFPSize = kDoubleSize; constexpr int kStackSavedSavedFPSize = kDoubleSize;
#endif // V8_ENABLE_WEBASSEMBLY #endif // V8_ENABLE_WEBASSEMBLY
// Base class for SharedTurboAssemblerBase. This class contains macro-assembler // Base class for SharedMacroAssembler. This class contains macro-assembler
// functions that can be shared across ia32 and x64 without any template // functions that can be shared across ia32 and x64 without any template
// machinery, i.e. does not require the CRTP pattern that // machinery, i.e. does not require the CRTP pattern that
// SharedTurboAssemblerBase exposes. This allows us to keep the bulk of // SharedMacroAssembler exposes. This allows us to keep the bulk of
// definition inside a separate source file, rather than putting everything // definition inside a separate source file, rather than putting everything
// inside this header. // inside this header.
class V8_EXPORT_PRIVATE SharedTurboAssembler : public TurboAssemblerBase { class V8_EXPORT_PRIVATE SharedMacroAssemblerBase : public MacroAssemblerBase {
public: public:
using TurboAssemblerBase::TurboAssemblerBase; using MacroAssemblerBase::MacroAssemblerBase;
void Move(Register dst, uint32_t src); void Move(Register dst, uint32_t src);
// Move if registers are not identical. // Move if registers are not identical.
@ -530,41 +530,41 @@ class V8_EXPORT_PRIVATE SharedTurboAssembler : public TurboAssemblerBase {
void I16x8SplatPreAvx2(XMMRegister dst, Op src); void I16x8SplatPreAvx2(XMMRegister dst, Op src);
}; };
// Common base class template shared by ia32 and x64 TurboAssembler. This uses // Common base class template shared by ia32 and x64 MacroAssembler. This uses
// the Curiously Recurring Template Pattern (CRTP), where Impl is the actual // the Curiously Recurring Template Pattern (CRTP), where Impl is the actual
// class (subclass of SharedTurboAssemblerBase instantiated with the actual // class (subclass of SharedMacroAssembler instantiated with the actual
// class). This allows static polymorphism, where member functions can be moved // class). This allows static polymorphism, where member functions can be moved
// into SharedTurboAssembler, and we can also call into member functions // into SharedMacroAssemblerBase, and we can also call into member functions
// defined in ia32 or x64 specific TurboAssembler from within this template // defined in ia32 or x64 specific MacroAssembler from within this template
// class, via Impl. // class, via Impl.
// //
// Note: all member functions must be defined in this header file so that the // Note: all member functions must be defined in this header file so that the
// compiler can generate code for the function definitions. See // compiler can generate code for the function definitions. See
// https://isocpp.org/wiki/faq/templates#templates-defn-vs-decl for rationale. // https://isocpp.org/wiki/faq/templates#templates-defn-vs-decl for rationale.
// If a function does not need polymorphism, move it into SharedTurboAssembler, // If a function does not need polymorphism, move it into
// and define it outside of this header. // SharedMacroAssemblerBase, and define it outside of this header.
template <typename Impl> template <typename Impl>
class V8_EXPORT_PRIVATE SharedTurboAssemblerBase : public SharedTurboAssembler { class V8_EXPORT_PRIVATE SharedMacroAssembler : public SharedMacroAssemblerBase {
using SharedTurboAssembler::SharedTurboAssembler; using SharedMacroAssemblerBase::SharedMacroAssemblerBase;
public: public:
void Abspd(XMMRegister dst, XMMRegister src, Register tmp) { void Abspd(XMMRegister dst, XMMRegister src, Register tmp) {
FloatUnop(dst, src, tmp, &SharedTurboAssembler::Andps, FloatUnop(dst, src, tmp, &SharedMacroAssemblerBase::Andps,
ExternalReference::address_of_double_abs_constant()); ExternalReference::address_of_double_abs_constant());
} }
void Absps(XMMRegister dst, XMMRegister src, Register tmp) { void Absps(XMMRegister dst, XMMRegister src, Register tmp) {
FloatUnop(dst, src, tmp, &SharedTurboAssembler::Andps, FloatUnop(dst, src, tmp, &SharedMacroAssemblerBase::Andps,
ExternalReference::address_of_float_abs_constant()); ExternalReference::address_of_float_abs_constant());
} }
void Negpd(XMMRegister dst, XMMRegister src, Register tmp) { void Negpd(XMMRegister dst, XMMRegister src, Register tmp) {
FloatUnop(dst, src, tmp, &SharedTurboAssembler::Xorps, FloatUnop(dst, src, tmp, &SharedMacroAssemblerBase::Xorps,
ExternalReference::address_of_double_neg_constant()); ExternalReference::address_of_double_neg_constant());
} }
void Negps(XMMRegister dst, XMMRegister src, Register tmp) { void Negps(XMMRegister dst, XMMRegister src, Register tmp) {
FloatUnop(dst, src, tmp, &SharedTurboAssembler::Xorps, FloatUnop(dst, src, tmp, &SharedMacroAssemblerBase::Xorps,
ExternalReference::address_of_float_neg_constant()); ExternalReference::address_of_float_neg_constant());
} }
#undef FLOAT_UNOP #undef FLOAT_UNOP
@ -975,15 +975,16 @@ class V8_EXPORT_PRIVATE SharedTurboAssemblerBase : public SharedTurboAssembler {
return impl()->ExternalReferenceAsOperand(reference, scratch); return impl()->ExternalReferenceAsOperand(reference, scratch);
} }
using FloatInstruction = void (SharedTurboAssembler::*)(XMMRegister, using FloatInstruction = void (SharedMacroAssemblerBase::*)(XMMRegister,
XMMRegister, Operand); XMMRegister,
Operand);
void FloatUnop(XMMRegister dst, XMMRegister src, Register tmp, void FloatUnop(XMMRegister dst, XMMRegister src, Register tmp,
FloatInstruction op, ExternalReference ext) { FloatInstruction op, ExternalReference ext) {
if (!CpuFeatures::IsSupported(AVX) && (dst != src)) { if (!CpuFeatures::IsSupported(AVX) && (dst != src)) {
movaps(dst, src); movaps(dst, src);
src = dst; src = dst;
} }
SharedTurboAssembler* assm = this; SharedMacroAssemblerBase* assm = this;
(assm->*op)(dst, src, ExternalReferenceAsOperand(ext, tmp)); (assm->*op)(dst, src, ExternalReferenceAsOperand(ext, tmp));
} }
}; };
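
For readers new to the CRTP arrangement described above: the base class is templated on its own subclass, and impl() is a static downcast, so "shared" code can call arch-specific members with no virtual dispatch. A stripped-down sketch of the shape (illustrative names, not the real hierarchy):

#include <cstdio>

template <typename Impl>
class SharedPart {
 public:
  // Resolved at compile time; no vtable involved.
  void Shared() { impl()->ArchSpecific(); }

 private:
  Impl* impl() { return static_cast<Impl*>(this); }
};

class X64Like : public SharedPart<X64Like> {
 public:
  void ArchSpecific() { std::puts("arch-specific code"); }
};

int main() { X64Like().Shared(); }

The (assm->*op)(...) call in FloatUnop above is a separate, ordinary pointer-to-member-function invocation; it is not part of the CRTP machinery.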

File diff suppressed because it is too large


@ -55,10 +55,10 @@ class StackArgumentsAccessor {
DISALLOW_IMPLICIT_CONSTRUCTORS(StackArgumentsAccessor); DISALLOW_IMPLICIT_CONSTRUCTORS(StackArgumentsAccessor);
}; };
class V8_EXPORT_PRIVATE TurboAssembler class V8_EXPORT_PRIVATE MacroAssembler
: public SharedTurboAssemblerBase<TurboAssembler> { : public SharedMacroAssembler<MacroAssembler> {
public: public:
using SharedTurboAssemblerBase<TurboAssembler>::SharedTurboAssemblerBase; using SharedMacroAssembler<MacroAssembler>::SharedMacroAssembler;
void PushReturnAddressFrom(Register src) { pushq(src); } void PushReturnAddressFrom(Register src) { pushq(src); }
void PopReturnAddressTo(Register dst) { popq(dst); } void PopReturnAddressTo(Register dst) { popq(dst); }
@ -653,23 +653,6 @@ class V8_EXPORT_PRIVATE TurboAssembler
IsolateRootLocation isolateRootLocation = IsolateRootLocation isolateRootLocation =
IsolateRootLocation::kInRootRegister); IsolateRootLocation::kInRootRegister);
protected:
static const int kSmiShift = kSmiTagSize + kSmiShiftSize;
// Returns a register holding the smi value. The register MUST NOT be
// modified. It may be the "smi 1 constant" register.
Register GetSmiConstant(Smi value);
// Drops arguments assuming that the return address was already popped.
void DropArguments(Register count, ArgumentsCountType type = kCountIsInteger,
ArgumentsCountMode mode = kCountExcludesReceiver);
};
// MacroAssembler implements a collection of frequently used macros.
class V8_EXPORT_PRIVATE MacroAssembler : public TurboAssembler {
public:
using TurboAssembler::TurboAssembler;
// Loads and stores the value of an external reference. // Loads and stores the value of an external reference.
// Special case code for load and store to take advantage of // Special case code for load and store to take advantage of
// load_rax/store_rax if possible/necessary. // load_rax/store_rax if possible/necessary.
@ -781,7 +764,6 @@ class V8_EXPORT_PRIVATE MacroAssembler : public TurboAssembler {
// --------------------------------------------------------------------------- // ---------------------------------------------------------------------------
// Macro instructions. // Macro instructions.
using TurboAssembler::Cmp;
void Cmp(Register dst, Handle<Object> source); void Cmp(Register dst, Handle<Object> source);
void Cmp(Operand dst, Handle<Object> source); void Cmp(Operand dst, Handle<Object> source);
@ -945,6 +927,17 @@ class V8_EXPORT_PRIVATE MacroAssembler : public TurboAssembler {
// In-place weak references. // In-place weak references.
void LoadWeakValue(Register in_out, Label* target_if_cleared); void LoadWeakValue(Register in_out, Label* target_if_cleared);
protected:
static const int kSmiShift = kSmiTagSize + kSmiShiftSize;
// Returns a register holding the smi value. The register MUST NOT be
// modified. It may be the "smi 1 constant" register.
Register GetSmiConstant(Smi value);
// Drops arguments assuming that the return address was already popped.
void DropArguments(Register count, ArgumentsCountType type = kCountIsInteger,
ArgumentsCountMode mode = kCountExcludesReceiver);
private: private:
// Helper functions for generating invokes. // Helper functions for generating invokes.
void InvokePrologue(Register expected_parameter_count, void InvokePrologue(Register expected_parameter_count,


@ -29,7 +29,7 @@ namespace v8 {
namespace internal { namespace internal {
namespace compiler { namespace compiler {
#define __ tasm()-> #define __ masm()->
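
The __ define is the code generator's usual shorthand: every __ foo(...) below now expands to masm()->foo(...) (previously tasm()->...). A self-contained toy showing the expansion (illustrative types, not V8's):

#include <cstdio>

struct Masm {
  void add(int a, int b) { std::printf("add %d, %d\n", a, b); }
};

Masm* masm() { static Masm m; return &m; }

#define __ masm()->

int main() {
  __ add(1, 2);  // the preprocessor turns this into masm()->add(1, 2);
}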
// Adds Arm-specific methods to convert InstructionOperands. // Adds Arm-specific methods to convert InstructionOperands.
class ArmOperandConverter final : public InstructionOperandConverter { class ArmOperandConverter final : public InstructionOperandConverter {
@ -415,7 +415,7 @@ Condition FlagsConditionToCondition(FlagsCondition condition) {
do { \ do { \
/* TODO(bmeurer): We should really get rid of this special instruction, */ \ /* TODO(bmeurer): We should really get rid of this special instruction, */ \
/* and generate a CallAddress instruction instead. */ \ /* and generate a CallAddress instruction instead. */ \
FrameScope scope(tasm(), StackFrame::MANUAL); \ FrameScope scope(masm(), StackFrame::MANUAL); \
__ PrepareCallCFunction(0, 2); \ __ PrepareCallCFunction(0, 2); \
__ MovToFloatParameters(i.InputDoubleRegister(0), \ __ MovToFloatParameters(i.InputDoubleRegister(0), \
i.InputDoubleRegister(1)); \ i.InputDoubleRegister(1)); \
@ -429,7 +429,7 @@ Condition FlagsConditionToCondition(FlagsCondition condition) {
do { \ do { \
/* TODO(bmeurer): We should really get rid of this special instruction, */ \ /* TODO(bmeurer): We should really get rid of this special instruction, */ \
/* and generate a CallAddress instruction instead. */ \ /* and generate a CallAddress instruction instead. */ \
FrameScope scope(tasm(), StackFrame::MANUAL); \ FrameScope scope(masm(), StackFrame::MANUAL); \
__ PrepareCallCFunction(0, 1); \ __ PrepareCallCFunction(0, 1); \
__ MovToFloatParameter(i.InputDoubleRegister(0)); \ __ MovToFloatParameter(i.InputDoubleRegister(0)); \
__ CallCFunction(ExternalReference::ieee754_##name##_function(), 0, 1); \ __ CallCFunction(ExternalReference::ieee754_##name##_function(), 0, 1); \
@ -473,7 +473,7 @@ Condition FlagsConditionToCondition(FlagsCondition condition) {
if (instr->InputAt(1)->IsImmediate()) { \ if (instr->InputAt(1)->IsImmediate()) { \
__ asm_imm(dt, dst, src, i.InputInt##width(1)); \ __ asm_imm(dt, dst, src, i.InputInt##width(1)); \
} else { \ } else { \
UseScratchRegisterScope temps(tasm()); \ UseScratchRegisterScope temps(masm()); \
Simd128Register tmp = temps.AcquireQ(); \ Simd128Register tmp = temps.AcquireQ(); \
Register shift = temps.Acquire(); \ Register shift = temps.Acquire(); \
constexpr int mask = (1 << width) - 1; \ constexpr int mask = (1 << width) - 1; \
@ -493,7 +493,7 @@ Condition FlagsConditionToCondition(FlagsCondition condition) {
if (instr->InputAt(1)->IsImmediate()) { \ if (instr->InputAt(1)->IsImmediate()) { \
__ asm_imm(dt, dst, src, i.InputInt##width(1)); \ __ asm_imm(dt, dst, src, i.InputInt##width(1)); \
} else { \ } else { \
UseScratchRegisterScope temps(tasm()); \ UseScratchRegisterScope temps(masm()); \
Simd128Register tmp = temps.AcquireQ(); \ Simd128Register tmp = temps.AcquireQ(); \
Register shift = temps.Acquire(); \ Register shift = temps.Acquire(); \
constexpr int mask = (1 << width) - 1; \ constexpr int mask = (1 << width) - 1; \
@ -518,20 +518,20 @@ void CodeGenerator::AssemblePrepareTailCall() {
namespace { namespace {
void FlushPendingPushRegisters(TurboAssembler* tasm, void FlushPendingPushRegisters(MacroAssembler* masm,
FrameAccessState* frame_access_state, FrameAccessState* frame_access_state,
ZoneVector<Register>* pending_pushes) { ZoneVector<Register>* pending_pushes) {
switch (pending_pushes->size()) { switch (pending_pushes->size()) {
case 0: case 0:
break; break;
case 1: case 1:
tasm->push((*pending_pushes)[0]); masm->push((*pending_pushes)[0]);
break; break;
case 2: case 2:
tasm->Push((*pending_pushes)[0], (*pending_pushes)[1]); masm->Push((*pending_pushes)[0], (*pending_pushes)[1]);
break; break;
case 3: case 3:
tasm->Push((*pending_pushes)[0], (*pending_pushes)[1], masm->Push((*pending_pushes)[0], (*pending_pushes)[1],
(*pending_pushes)[2]); (*pending_pushes)[2]);
break; break;
default: default:
@ -542,7 +542,7 @@ void FlushPendingPushRegisters(TurboAssembler* tasm,
} }
void AdjustStackPointerForTailCall( void AdjustStackPointerForTailCall(
TurboAssembler* tasm, FrameAccessState* state, int new_slot_above_sp, MacroAssembler* masm, FrameAccessState* state, int new_slot_above_sp,
ZoneVector<Register>* pending_pushes = nullptr, ZoneVector<Register>* pending_pushes = nullptr,
bool allow_shrinkage = true) { bool allow_shrinkage = true) {
int current_sp_offset = state->GetSPToFPSlotCount() + int current_sp_offset = state->GetSPToFPSlotCount() +
@ -550,15 +550,15 @@ void AdjustStackPointerForTailCall(
int stack_slot_delta = new_slot_above_sp - current_sp_offset; int stack_slot_delta = new_slot_above_sp - current_sp_offset;
if (stack_slot_delta > 0) { if (stack_slot_delta > 0) {
if (pending_pushes != nullptr) { if (pending_pushes != nullptr) {
FlushPendingPushRegisters(tasm, state, pending_pushes); FlushPendingPushRegisters(masm, state, pending_pushes);
} }
tasm->AllocateStackSpace(stack_slot_delta * kSystemPointerSize); masm->AllocateStackSpace(stack_slot_delta * kSystemPointerSize);
state->IncreaseSPDelta(stack_slot_delta); state->IncreaseSPDelta(stack_slot_delta);
} else if (allow_shrinkage && stack_slot_delta < 0) { } else if (allow_shrinkage && stack_slot_delta < 0) {
if (pending_pushes != nullptr) { if (pending_pushes != nullptr) {
FlushPendingPushRegisters(tasm, state, pending_pushes); FlushPendingPushRegisters(masm, state, pending_pushes);
} }
tasm->add(sp, sp, Operand(-stack_slot_delta * kSystemPointerSize)); masm->add(sp, sp, Operand(-stack_slot_delta * kSystemPointerSize));
state->IncreaseSPDelta(stack_slot_delta); state->IncreaseSPDelta(stack_slot_delta);
} }
} }
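
The adjustment above boils down to: compare where sp is to where the tail call needs it, then grow the frame for a positive slot delta or, when allowed, shrink it for a negative one. A scalar sketch of just that arithmetic (hypothetical helper mirroring the code, not part of V8):

#include <cstdio>

constexpr int kSystemPointerSize = 4;  // 32-bit arm

// Negative result: allocate that many bytes (sp moves down); positive:
// release them; zero: sp is already where the tail call expects it.
int StackAdjustmentBytes(int current_sp_offset, int new_slot_above_sp,
                         bool allow_shrinkage) {
  int stack_slot_delta = new_slot_above_sp - current_sp_offset;
  if (stack_slot_delta > 0) return -stack_slot_delta * kSystemPointerSize;
  if (allow_shrinkage && stack_slot_delta < 0)
    return -stack_slot_delta * kSystemPointerSize;
  return 0;
}

int main() {
  std::printf("%d\n", StackAdjustmentBytes(2, 5, true));   // -12: grow
  std::printf("%d\n", StackAdjustmentBytes(5, 2, true));   // 12: shrink
  std::printf("%d\n", StackAdjustmentBytes(5, 2, false));  // 0: left alone
}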
@ -601,7 +601,7 @@ void CodeGenerator::AssembleTailCallBeforeGap(Instruction* instr,
LocationOperand::cast(move->destination())); LocationOperand::cast(move->destination()));
InstructionOperand source(move->source()); InstructionOperand source(move->source());
AdjustStackPointerForTailCall( AdjustStackPointerForTailCall(
tasm(), frame_access_state(), masm(), frame_access_state(),
destination_location.index() - pending_pushes.size(), destination_location.index() - pending_pushes.size(),
&pending_pushes); &pending_pushes);
// Pushes of non-register data types are not supported. // Pushes of non-register data types are not supported.
@ -611,26 +611,26 @@ void CodeGenerator::AssembleTailCallBeforeGap(Instruction* instr,
// TODO(arm): We can push more than 3 registers at once. Add support in // TODO(arm): We can push more than 3 registers at once. Add support in
// the macro-assembler for pushing a list of registers. // the macro-assembler for pushing a list of registers.
if (pending_pushes.size() == 3) { if (pending_pushes.size() == 3) {
FlushPendingPushRegisters(tasm(), frame_access_state(), FlushPendingPushRegisters(masm(), frame_access_state(),
&pending_pushes); &pending_pushes);
} }
move->Eliminate(); move->Eliminate();
} }
FlushPendingPushRegisters(tasm(), frame_access_state(), &pending_pushes); FlushPendingPushRegisters(masm(), frame_access_state(), &pending_pushes);
} }
AdjustStackPointerForTailCall(tasm(), frame_access_state(), AdjustStackPointerForTailCall(masm(), frame_access_state(),
first_unused_slot_offset, nullptr, false); first_unused_slot_offset, nullptr, false);
} }
void CodeGenerator::AssembleTailCallAfterGap(Instruction* instr, void CodeGenerator::AssembleTailCallAfterGap(Instruction* instr,
int first_unused_slot_offset) { int first_unused_slot_offset) {
AdjustStackPointerForTailCall(tasm(), frame_access_state(), AdjustStackPointerForTailCall(masm(), frame_access_state(),
first_unused_slot_offset); first_unused_slot_offset);
} }
// Check that {kJavaScriptCallCodeStartRegister} is correct. // Check that {kJavaScriptCallCodeStartRegister} is correct.
void CodeGenerator::AssembleCodeStartRegisterCheck() { void CodeGenerator::AssembleCodeStartRegisterCheck() {
UseScratchRegisterScope temps(tasm()); UseScratchRegisterScope temps(masm());
Register scratch = temps.Acquire(); Register scratch = temps.Acquire();
__ ComputeCodeStartAddress(scratch); __ ComputeCodeStartAddress(scratch);
__ cmp(scratch, kJavaScriptCallCodeStartRegister); __ cmp(scratch, kJavaScriptCallCodeStartRegister);
@ -645,7 +645,7 @@ void CodeGenerator::AssembleCodeStartRegisterCheck() {
// 2. test kMarkedForDeoptimizationBit in those flags; and // 2. test kMarkedForDeoptimizationBit in those flags; and
// 3. if it is not zero then it jumps to the builtin. // 3. if it is not zero then it jumps to the builtin.
void CodeGenerator::BailoutIfDeoptimized() { void CodeGenerator::BailoutIfDeoptimized() {
UseScratchRegisterScope temps(tasm()); UseScratchRegisterScope temps(masm());
Register scratch = temps.Acquire(); Register scratch = temps.Acquire();
int offset = InstructionStream::kCodeOffset - InstructionStream::kHeaderSize; int offset = InstructionStream::kCodeOffset - InstructionStream::kHeaderSize;
__ ldr(scratch, MemOperand(kJavaScriptCallCodeStartRegister, offset)); __ ldr(scratch, MemOperand(kJavaScriptCallCodeStartRegister, offset));
@ -747,7 +747,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
case kArchCallJSFunction: { case kArchCallJSFunction: {
Register func = i.InputRegister(0); Register func = i.InputRegister(0);
if (v8_flags.debug_code) { if (v8_flags.debug_code) {
UseScratchRegisterScope temps(tasm()); UseScratchRegisterScope temps(masm());
Register scratch = temps.Acquire(); Register scratch = temps.Acquire();
// Check the function's context matches the context argument. // Check the function's context matches the context argument.
__ ldr(scratch, FieldMemOperand(func, JSFunction::kContextOffset)); __ ldr(scratch, FieldMemOperand(func, JSFunction::kContextOffset));
@ -858,7 +858,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
{ {
// We don't actually want to generate a pile of code for this, so just // We don't actually want to generate a pile of code for this, so just
// claim there is a stack frame, without generating one. // claim there is a stack frame, without generating one.
FrameScope scope(tasm(), StackFrame::NO_FRAME_TYPE); FrameScope scope(masm(), StackFrame::NO_FRAME_TYPE);
__ Call(isolate()->builtins()->code_handle(Builtin::kAbortCSADcheck), __ Call(isolate()->builtins()->code_handle(Builtin::kAbortCSADcheck),
RelocInfo::CODE_TARGET); RelocInfo::CODE_TARGET);
} }
@ -1069,7 +1069,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
i.InputRegister(2), i.OutputSBit()); i.InputRegister(2), i.OutputSBit());
break; break;
case kArmMls: { case kArmMls: {
CpuFeatureScope scope(tasm(), ARMv7); CpuFeatureScope scope(masm(), ARMv7);
__ mls(i.OutputRegister(), i.InputRegister(0), i.InputRegister(1), __ mls(i.OutputRegister(), i.InputRegister(0), i.InputRegister(1),
i.InputRegister(2)); i.InputRegister(2));
DCHECK_EQ(LeaveCC, i.OutputSBit()); DCHECK_EQ(LeaveCC, i.OutputSBit());
@ -1093,13 +1093,13 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
i.InputRegister(1), i.OutputSBit()); i.InputRegister(1), i.OutputSBit());
break; break;
case kArmSdiv: { case kArmSdiv: {
CpuFeatureScope scope(tasm(), SUDIV); CpuFeatureScope scope(masm(), SUDIV);
__ sdiv(i.OutputRegister(), i.InputRegister(0), i.InputRegister(1)); __ sdiv(i.OutputRegister(), i.InputRegister(0), i.InputRegister(1));
DCHECK_EQ(LeaveCC, i.OutputSBit()); DCHECK_EQ(LeaveCC, i.OutputSBit());
break; break;
} }
case kArmUdiv: { case kArmUdiv: {
CpuFeatureScope scope(tasm(), SUDIV); CpuFeatureScope scope(masm(), SUDIV);
__ udiv(i.OutputRegister(), i.InputRegister(0), i.InputRegister(1)); __ udiv(i.OutputRegister(), i.InputRegister(0), i.InputRegister(1));
DCHECK_EQ(LeaveCC, i.OutputSBit()); DCHECK_EQ(LeaveCC, i.OutputSBit());
break; break;
@ -1127,20 +1127,20 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
i.OutputSBit()); i.OutputSBit());
break; break;
case kArmBfc: { case kArmBfc: {
CpuFeatureScope scope(tasm(), ARMv7); CpuFeatureScope scope(masm(), ARMv7);
__ bfc(i.OutputRegister(), i.InputInt8(1), i.InputInt8(2)); __ bfc(i.OutputRegister(), i.InputInt8(1), i.InputInt8(2));
DCHECK_EQ(LeaveCC, i.OutputSBit()); DCHECK_EQ(LeaveCC, i.OutputSBit());
break; break;
} }
case kArmUbfx: { case kArmUbfx: {
CpuFeatureScope scope(tasm(), ARMv7); CpuFeatureScope scope(masm(), ARMv7);
__ ubfx(i.OutputRegister(), i.InputRegister(0), i.InputInt8(1), __ ubfx(i.OutputRegister(), i.InputRegister(0), i.InputInt8(1),
i.InputInt8(2)); i.InputInt8(2));
DCHECK_EQ(LeaveCC, i.OutputSBit()); DCHECK_EQ(LeaveCC, i.OutputSBit());
break; break;
} }
case kArmSbfx: { case kArmSbfx: {
CpuFeatureScope scope(tasm(), ARMv7); CpuFeatureScope scope(masm(), ARMv7);
__ sbfx(i.OutputRegister(), i.InputRegister(0), i.InputInt8(1), __ sbfx(i.OutputRegister(), i.InputRegister(0), i.InputInt8(1),
i.InputInt8(2)); i.InputInt8(2));
DCHECK_EQ(LeaveCC, i.OutputSBit()); DCHECK_EQ(LeaveCC, i.OutputSBit());
@ -1183,7 +1183,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
DCHECK_EQ(LeaveCC, i.OutputSBit()); DCHECK_EQ(LeaveCC, i.OutputSBit());
break; break;
case kArmRbit: { case kArmRbit: {
CpuFeatureScope scope(tasm(), ARMv7); CpuFeatureScope scope(masm(), ARMv7);
__ rbit(i.OutputRegister(), i.InputRegister(0)); __ rbit(i.OutputRegister(), i.InputRegister(0));
DCHECK_EQ(LeaveCC, i.OutputSBit()); DCHECK_EQ(LeaveCC, i.OutputSBit());
break; break;
@ -1378,7 +1378,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
case kArmVmodF64: { case kArmVmodF64: {
// TODO(bmeurer): We should really get rid of this special instruction, // TODO(bmeurer): We should really get rid of this special instruction,
// and generate a CallAddress instruction instead. // and generate a CallAddress instruction instead.
FrameScope scope(tasm(), StackFrame::MANUAL); FrameScope scope(masm(), StackFrame::MANUAL);
__ PrepareCallCFunction(0, 2); __ PrepareCallCFunction(0, 2);
__ MovToFloatParameters(i.InputDoubleRegister(0), __ MovToFloatParameters(i.InputDoubleRegister(0),
i.InputDoubleRegister(1)); i.InputDoubleRegister(1));
@ -1398,7 +1398,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
__ vneg(i.OutputDoubleRegister(), i.InputDoubleRegister(0)); __ vneg(i.OutputDoubleRegister(), i.InputDoubleRegister(0));
break; break;
case kArmVrintmF32: { case kArmVrintmF32: {
CpuFeatureScope scope(tasm(), ARMv8); CpuFeatureScope scope(masm(), ARMv8);
if (instr->InputAt(0)->IsSimd128Register()) { if (instr->InputAt(0)->IsSimd128Register()) {
__ vrintm(NeonS32, i.OutputSimd128Register(), __ vrintm(NeonS32, i.OutputSimd128Register(),
i.InputSimd128Register(0)); i.InputSimd128Register(0));
@ -1408,12 +1408,12 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
break; break;
} }
case kArmVrintmF64: { case kArmVrintmF64: {
CpuFeatureScope scope(tasm(), ARMv8); CpuFeatureScope scope(masm(), ARMv8);
__ vrintm(i.OutputDoubleRegister(), i.InputDoubleRegister(0)); __ vrintm(i.OutputDoubleRegister(), i.InputDoubleRegister(0));
break; break;
} }
case kArmVrintpF32: { case kArmVrintpF32: {
CpuFeatureScope scope(tasm(), ARMv8); CpuFeatureScope scope(masm(), ARMv8);
if (instr->InputAt(0)->IsSimd128Register()) { if (instr->InputAt(0)->IsSimd128Register()) {
__ vrintp(NeonS32, i.OutputSimd128Register(), __ vrintp(NeonS32, i.OutputSimd128Register(),
i.InputSimd128Register(0)); i.InputSimd128Register(0));
@ -1423,12 +1423,12 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
break; break;
} }
case kArmVrintpF64: { case kArmVrintpF64: {
CpuFeatureScope scope(tasm(), ARMv8); CpuFeatureScope scope(masm(), ARMv8);
__ vrintp(i.OutputDoubleRegister(), i.InputDoubleRegister(0)); __ vrintp(i.OutputDoubleRegister(), i.InputDoubleRegister(0));
break; break;
} }
case kArmVrintzF32: { case kArmVrintzF32: {
CpuFeatureScope scope(tasm(), ARMv8); CpuFeatureScope scope(masm(), ARMv8);
if (instr->InputAt(0)->IsSimd128Register()) { if (instr->InputAt(0)->IsSimd128Register()) {
__ vrintz(NeonS32, i.OutputSimd128Register(), __ vrintz(NeonS32, i.OutputSimd128Register(),
i.InputSimd128Register(0)); i.InputSimd128Register(0));
@ -1438,17 +1438,17 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
break; break;
} }
case kArmVrintzF64: { case kArmVrintzF64: {
CpuFeatureScope scope(tasm(), ARMv8); CpuFeatureScope scope(masm(), ARMv8);
__ vrintz(i.OutputDoubleRegister(), i.InputDoubleRegister(0)); __ vrintz(i.OutputDoubleRegister(), i.InputDoubleRegister(0));
break; break;
} }
case kArmVrintaF64: { case kArmVrintaF64: {
CpuFeatureScope scope(tasm(), ARMv8); CpuFeatureScope scope(masm(), ARMv8);
__ vrinta(i.OutputDoubleRegister(), i.InputDoubleRegister(0)); __ vrinta(i.OutputDoubleRegister(), i.InputDoubleRegister(0));
break; break;
} }
case kArmVrintnF32: { case kArmVrintnF32: {
CpuFeatureScope scope(tasm(), ARMv8); CpuFeatureScope scope(masm(), ARMv8);
if (instr->InputAt(0)->IsSimd128Register()) { if (instr->InputAt(0)->IsSimd128Register()) {
__ vrintn(NeonS32, i.OutputSimd128Register(), __ vrintn(NeonS32, i.OutputSimd128Register(),
i.InputSimd128Register(0)); i.InputSimd128Register(0));
@ -1458,7 +1458,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
break; break;
} }
case kArmVrintnF64: { case kArmVrintnF64: {
CpuFeatureScope scope(tasm(), ARMv8); CpuFeatureScope scope(masm(), ARMv8);
__ vrintn(i.OutputDoubleRegister(), i.InputDoubleRegister(0)); __ vrintn(i.OutputDoubleRegister(), i.InputDoubleRegister(0));
break; break;
} }
@ -1473,7 +1473,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
break; break;
} }
case kArmVcvtF32S32: { case kArmVcvtF32S32: {
UseScratchRegisterScope temps(tasm()); UseScratchRegisterScope temps(masm());
SwVfpRegister scratch = temps.AcquireS(); SwVfpRegister scratch = temps.AcquireS();
__ vmov(scratch, i.InputRegister(0)); __ vmov(scratch, i.InputRegister(0));
__ vcvt_f32_s32(i.OutputFloatRegister(), scratch); __ vcvt_f32_s32(i.OutputFloatRegister(), scratch);
@ -1481,7 +1481,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
break; break;
} }
case kArmVcvtF32U32: { case kArmVcvtF32U32: {
UseScratchRegisterScope temps(tasm()); UseScratchRegisterScope temps(masm());
SwVfpRegister scratch = temps.AcquireS(); SwVfpRegister scratch = temps.AcquireS();
__ vmov(scratch, i.InputRegister(0)); __ vmov(scratch, i.InputRegister(0));
__ vcvt_f32_u32(i.OutputFloatRegister(), scratch); __ vcvt_f32_u32(i.OutputFloatRegister(), scratch);
@ -1489,7 +1489,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
break; break;
} }
case kArmVcvtF64S32: { case kArmVcvtF64S32: {
UseScratchRegisterScope temps(tasm()); UseScratchRegisterScope temps(masm());
SwVfpRegister scratch = temps.AcquireS(); SwVfpRegister scratch = temps.AcquireS();
__ vmov(scratch, i.InputRegister(0)); __ vmov(scratch, i.InputRegister(0));
__ vcvt_f64_s32(i.OutputDoubleRegister(), scratch); __ vcvt_f64_s32(i.OutputDoubleRegister(), scratch);
@ -1497,7 +1497,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
break; break;
} }
case kArmVcvtF64U32: { case kArmVcvtF64U32: {
UseScratchRegisterScope temps(tasm()); UseScratchRegisterScope temps(masm());
SwVfpRegister scratch = temps.AcquireS(); SwVfpRegister scratch = temps.AcquireS();
__ vmov(scratch, i.InputRegister(0)); __ vmov(scratch, i.InputRegister(0));
__ vcvt_f64_u32(i.OutputDoubleRegister(), scratch); __ vcvt_f64_u32(i.OutputDoubleRegister(), scratch);
@ -1505,7 +1505,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
break; break;
} }
case kArmVcvtS32F32: { case kArmVcvtS32F32: {
UseScratchRegisterScope temps(tasm()); UseScratchRegisterScope temps(masm());
SwVfpRegister scratch = temps.AcquireS(); SwVfpRegister scratch = temps.AcquireS();
__ vcvt_s32_f32(scratch, i.InputFloatRegister(0)); __ vcvt_s32_f32(scratch, i.InputFloatRegister(0));
__ vmov(i.OutputRegister(), scratch); __ vmov(i.OutputRegister(), scratch);
@ -1520,7 +1520,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
break; break;
} }
case kArmVcvtU32F32: { case kArmVcvtU32F32: {
UseScratchRegisterScope temps(tasm()); UseScratchRegisterScope temps(masm());
SwVfpRegister scratch = temps.AcquireS(); SwVfpRegister scratch = temps.AcquireS();
__ vcvt_u32_f32(scratch, i.InputFloatRegister(0)); __ vcvt_u32_f32(scratch, i.InputFloatRegister(0));
__ vmov(i.OutputRegister(), scratch); __ vmov(i.OutputRegister(), scratch);
@ -1535,7 +1535,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
break; break;
} }
case kArmVcvtS32F64: { case kArmVcvtS32F64: {
UseScratchRegisterScope temps(tasm()); UseScratchRegisterScope temps(masm());
SwVfpRegister scratch = temps.AcquireS(); SwVfpRegister scratch = temps.AcquireS();
__ vcvt_s32_f64(scratch, i.InputDoubleRegister(0)); __ vcvt_s32_f64(scratch, i.InputDoubleRegister(0));
__ vmov(i.OutputRegister(), scratch); __ vmov(i.OutputRegister(), scratch);
@ -1543,7 +1543,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
break; break;
} }
case kArmVcvtU32F64: { case kArmVcvtU32F64: {
UseScratchRegisterScope temps(tasm()); UseScratchRegisterScope temps(masm());
SwVfpRegister scratch = temps.AcquireS(); SwVfpRegister scratch = temps.AcquireS();
__ vcvt_u32_f64(scratch, i.InputDoubleRegister(0)); __ vcvt_u32_f64(scratch, i.InputDoubleRegister(0));
__ vmov(i.OutputRegister(), scratch); __ vmov(i.OutputRegister(), scratch);
@ -1762,7 +1762,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
__ vldr(i.OutputFloatRegister(), MemOperand(fp, offset)); __ vldr(i.OutputFloatRegister(), MemOperand(fp, offset));
} else { } else {
DCHECK_EQ(MachineRepresentation::kSimd128, op->representation()); DCHECK_EQ(MachineRepresentation::kSimd128, op->representation());
UseScratchRegisterScope temps(tasm()); UseScratchRegisterScope temps(masm());
Register scratch = temps.Acquire(); Register scratch = temps.Acquire();
__ add(scratch, fp, Operand(offset)); __ add(scratch, fp, Operand(offset));
__ vld1(Neon8, NeonListOperand(i.OutputSimd128Register()), __ vld1(Neon8, NeonListOperand(i.OutputSimd128Register()),
@ -1899,7 +1899,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
} }
#undef ASSEMBLE_F64X2_ARITHMETIC_BINOP #undef ASSEMBLE_F64X2_ARITHMETIC_BINOP
case kArmF64x2Eq: { case kArmF64x2Eq: {
UseScratchRegisterScope temps(tasm()); UseScratchRegisterScope temps(masm());
Register scratch = temps.Acquire(); Register scratch = temps.Acquire();
__ mov(scratch, Operand(0)); __ mov(scratch, Operand(0));
__ VFPCompareAndSetFlags(i.InputSimd128Register(0).low(), __ VFPCompareAndSetFlags(i.InputSimd128Register(0).low(),
@ -1915,7 +1915,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
break; break;
} }
case kArmF64x2Ne: { case kArmF64x2Ne: {
UseScratchRegisterScope temps(tasm()); UseScratchRegisterScope temps(masm());
Register scratch = temps.Acquire(); Register scratch = temps.Acquire();
__ mov(scratch, Operand(0)); __ mov(scratch, Operand(0));
__ VFPCompareAndSetFlags(i.InputSimd128Register(0).low(), __ VFPCompareAndSetFlags(i.InputSimd128Register(0).low(),
@ -1931,7 +1931,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
break; break;
} }
case kArmF64x2Lt: { case kArmF64x2Lt: {
UseScratchRegisterScope temps(tasm()); UseScratchRegisterScope temps(masm());
Register scratch = temps.Acquire(); Register scratch = temps.Acquire();
__ VFPCompareAndSetFlags(i.InputSimd128Register(0).low(), __ VFPCompareAndSetFlags(i.InputSimd128Register(0).low(),
i.InputSimd128Register(1).low()); i.InputSimd128Register(1).low());
@ -1947,7 +1947,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
break; break;
} }
case kArmF64x2Le: { case kArmF64x2Le: {
UseScratchRegisterScope temps(tasm()); UseScratchRegisterScope temps(masm());
Register scratch = temps.Acquire(); Register scratch = temps.Acquire();
__ VFPCompareAndSetFlags(i.InputSimd128Register(0).low(), __ VFPCompareAndSetFlags(i.InputSimd128Register(0).low(),
i.InputSimd128Register(1).low()); i.InputSimd128Register(1).low());
@ -1989,7 +1989,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
break; break;
} }
case kArmF64x2Ceil: { case kArmF64x2Ceil: {
CpuFeatureScope scope(tasm(), ARMv8); CpuFeatureScope scope(masm(), ARMv8);
Simd128Register dst = i.OutputSimd128Register(); Simd128Register dst = i.OutputSimd128Register();
Simd128Register src = i.InputSimd128Register(0); Simd128Register src = i.InputSimd128Register(0);
__ vrintp(dst.low(), src.low()); __ vrintp(dst.low(), src.low());
@ -1997,7 +1997,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
break; break;
} }
case kArmF64x2Floor: { case kArmF64x2Floor: {
CpuFeatureScope scope(tasm(), ARMv8); CpuFeatureScope scope(masm(), ARMv8);
Simd128Register dst = i.OutputSimd128Register(); Simd128Register dst = i.OutputSimd128Register();
Simd128Register src = i.InputSimd128Register(0); Simd128Register src = i.InputSimd128Register(0);
__ vrintm(dst.low(), src.low()); __ vrintm(dst.low(), src.low());
@ -2005,7 +2005,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
break; break;
} }
case kArmF64x2Trunc: { case kArmF64x2Trunc: {
CpuFeatureScope scope(tasm(), ARMv8); CpuFeatureScope scope(masm(), ARMv8);
Simd128Register dst = i.OutputSimd128Register(); Simd128Register dst = i.OutputSimd128Register();
Simd128Register src = i.InputSimd128Register(0); Simd128Register src = i.InputSimd128Register(0);
__ vrintz(dst.low(), src.low()); __ vrintz(dst.low(), src.low());
@ -2013,7 +2013,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
break; break;
} }
case kArmF64x2NearestInt: { case kArmF64x2NearestInt: {
CpuFeatureScope scope(tasm(), ARMv8); CpuFeatureScope scope(masm(), ARMv8);
Simd128Register dst = i.OutputSimd128Register(); Simd128Register dst = i.OutputSimd128Register();
Simd128Register src = i.InputSimd128Register(0); Simd128Register src = i.InputSimd128Register(0);
__ vrintn(dst.low(), src.low()); __ vrintn(dst.low(), src.low());
@ -2060,7 +2060,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
break; break;
} }
case kArmI64x2Mul: { case kArmI64x2Mul: {
UseScratchRegisterScope temps(tasm()); UseScratchRegisterScope temps(masm());
QwNeonRegister dst = i.OutputSimd128Register(); QwNeonRegister dst = i.OutputSimd128Register();
QwNeonRegister left = i.InputSimd128Register(0); QwNeonRegister left = i.InputSimd128Register(0);
QwNeonRegister right = i.InputSimd128Register(1); QwNeonRegister right = i.InputSimd128Register(1);
@ -2447,7 +2447,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
} }
case kArmI32x4BitMask: { case kArmI32x4BitMask: {
Register dst = i.OutputRegister(); Register dst = i.OutputRegister();
UseScratchRegisterScope temps(tasm()); UseScratchRegisterScope temps(masm());
Simd128Register src = i.InputSimd128Register(0); Simd128Register src = i.InputSimd128Register(0);
Simd128Register tmp = temps.AcquireQ(); Simd128Register tmp = temps.AcquireQ();
Simd128Register mask = i.TempSimd128Register(0); Simd128Register mask = i.TempSimd128Register(0);
@ -2468,7 +2468,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
Simd128Register lhs = i.InputSimd128Register(0); Simd128Register lhs = i.InputSimd128Register(0);
Simd128Register rhs = i.InputSimd128Register(1); Simd128Register rhs = i.InputSimd128Register(1);
Simd128Register tmp1 = i.TempSimd128Register(0); Simd128Register tmp1 = i.TempSimd128Register(0);
UseScratchRegisterScope temps(tasm()); UseScratchRegisterScope temps(masm());
Simd128Register scratch = temps.AcquireQ(); Simd128Register scratch = temps.AcquireQ();
__ vmull(NeonS16, tmp1, lhs.low(), rhs.low()); __ vmull(NeonS16, tmp1, lhs.low(), rhs.low());
__ vmull(NeonS16, scratch, lhs.high(), rhs.high()); __ vmull(NeonS16, scratch, lhs.high(), rhs.high());
@ -2650,7 +2650,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
break; break;
} }
case kArmI16x8BitMask: { case kArmI16x8BitMask: {
UseScratchRegisterScope temps(tasm()); UseScratchRegisterScope temps(masm());
Register dst = i.OutputRegister(); Register dst = i.OutputRegister();
Simd128Register src = i.InputSimd128Register(0); Simd128Register src = i.InputSimd128Register(0);
Simd128Register tmp = temps.AcquireQ(); Simd128Register tmp = temps.AcquireQ();
@ -2805,7 +2805,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
break; break;
} }
case kArmI8x16BitMask: { case kArmI8x16BitMask: {
UseScratchRegisterScope temps(tasm()); UseScratchRegisterScope temps(masm());
Register dst = i.OutputRegister(); Register dst = i.OutputRegister();
Simd128Register src = i.InputSimd128Register(0); Simd128Register src = i.InputSimd128Register(0);
Simd128Register tmp = temps.AcquireQ(); Simd128Register tmp = temps.AcquireQ();
@ -2906,7 +2906,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
Simd128Register dst = i.OutputSimd128Register(), Simd128Register dst = i.OutputSimd128Register(),
src1 = i.InputSimd128Register(1); src1 = i.InputSimd128Register(1);
DCHECK(dst == i.InputSimd128Register(0)); DCHECK(dst == i.InputSimd128Register(0));
UseScratchRegisterScope temps(tasm()); UseScratchRegisterScope temps(masm());
Simd128Register scratch = temps.AcquireQ(); Simd128Register scratch = temps.AcquireQ();
// src0 = [0, 1, 2, 3], src1 = [4, 5, 6, 7] // src0 = [0, 1, 2, 3], src1 = [4, 5, 6, 7]
__ vmov(scratch, src1); __ vmov(scratch, src1);
@ -2917,7 +2917,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
Simd128Register dst = i.OutputSimd128Register(), Simd128Register dst = i.OutputSimd128Register(),
src1 = i.InputSimd128Register(1); src1 = i.InputSimd128Register(1);
DCHECK(dst == i.InputSimd128Register(0)); DCHECK(dst == i.InputSimd128Register(0));
UseScratchRegisterScope temps(tasm()); UseScratchRegisterScope temps(masm());
Simd128Register scratch = temps.AcquireQ(); Simd128Register scratch = temps.AcquireQ();
// src0 = [4, 5, 6, 7], src1 = [0, 1, 2, 3] (flipped from UnzipLeft). // src0 = [4, 5, 6, 7], src1 = [0, 1, 2, 3] (flipped from UnzipLeft).
__ vmov(scratch, src1); __ vmov(scratch, src1);
@ -2928,7 +2928,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
Simd128Register dst = i.OutputSimd128Register(), Simd128Register dst = i.OutputSimd128Register(),
src1 = i.InputSimd128Register(1); src1 = i.InputSimd128Register(1);
DCHECK(dst == i.InputSimd128Register(0)); DCHECK(dst == i.InputSimd128Register(0));
UseScratchRegisterScope temps(tasm()); UseScratchRegisterScope temps(masm());
Simd128Register scratch = temps.AcquireQ(); Simd128Register scratch = temps.AcquireQ();
// src0 = [0, 1, 2, 3], src1 = [4, 5, 6, 7] // src0 = [0, 1, 2, 3], src1 = [4, 5, 6, 7]
__ vmov(scratch, src1); __ vmov(scratch, src1);
@ -2961,7 +2961,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
case kArmS32x4TransposeRight: { case kArmS32x4TransposeRight: {
Simd128Register dst = i.OutputSimd128Register(), Simd128Register dst = i.OutputSimd128Register(),
src1 = i.InputSimd128Register(1); src1 = i.InputSimd128Register(1);
UseScratchRegisterScope temps(tasm()); UseScratchRegisterScope temps(masm());
Simd128Register scratch = temps.AcquireQ(); Simd128Register scratch = temps.AcquireQ();
DCHECK(dst == i.InputSimd128Register(0)); DCHECK(dst == i.InputSimd128Register(0));
// src0 = [4, 5, 6, 7], src1 = [0, 1, 2, 3] (flipped from TransposeLeft). // src0 = [4, 5, 6, 7], src1 = [0, 1, 2, 3] (flipped from TransposeLeft).
@ -2990,7 +2990,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
case kArmS16x8UnzipLeft: { case kArmS16x8UnzipLeft: {
Simd128Register dst = i.OutputSimd128Register(), Simd128Register dst = i.OutputSimd128Register(),
src1 = i.InputSimd128Register(1); src1 = i.InputSimd128Register(1);
UseScratchRegisterScope temps(tasm()); UseScratchRegisterScope temps(masm());
Simd128Register scratch = temps.AcquireQ(); Simd128Register scratch = temps.AcquireQ();
DCHECK(dst == i.InputSimd128Register(0)); DCHECK(dst == i.InputSimd128Register(0));
// src0 = [0, 1, 2, 3, ... 7], src1 = [8, 9, 10, 11, ... 15] // src0 = [0, 1, 2, 3, ... 7], src1 = [8, 9, 10, 11, ... 15]
@ -3001,7 +3001,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
case kArmS16x8UnzipRight: { case kArmS16x8UnzipRight: {
Simd128Register dst = i.OutputSimd128Register(), Simd128Register dst = i.OutputSimd128Register(),
src1 = i.InputSimd128Register(1); src1 = i.InputSimd128Register(1);
UseScratchRegisterScope temps(tasm()); UseScratchRegisterScope temps(masm());
Simd128Register scratch = temps.AcquireQ(); Simd128Register scratch = temps.AcquireQ();
DCHECK(dst == i.InputSimd128Register(0)); DCHECK(dst == i.InputSimd128Register(0));
// src0 = [8, 9, 10, 11, ... 15], src1 = [0, 1, 2, 3, ... 7] (flipped). // src0 = [8, 9, 10, 11, ... 15], src1 = [0, 1, 2, 3, ... 7] (flipped).
@ -3012,7 +3012,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
case kArmS16x8TransposeLeft: { case kArmS16x8TransposeLeft: {
Simd128Register dst = i.OutputSimd128Register(), Simd128Register dst = i.OutputSimd128Register(),
src1 = i.InputSimd128Register(1); src1 = i.InputSimd128Register(1);
UseScratchRegisterScope temps(tasm()); UseScratchRegisterScope temps(masm());
Simd128Register scratch = temps.AcquireQ(); Simd128Register scratch = temps.AcquireQ();
DCHECK(dst == i.InputSimd128Register(0)); DCHECK(dst == i.InputSimd128Register(0));
// src0 = [0, 1, 2, 3, ... 7], src1 = [8, 9, 10, 11, ... 15] // src0 = [0, 1, 2, 3, ... 7], src1 = [8, 9, 10, 11, ... 15]
@ -3023,7 +3023,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
case kArmS16x8TransposeRight: { case kArmS16x8TransposeRight: {
Simd128Register dst = i.OutputSimd128Register(), Simd128Register dst = i.OutputSimd128Register(),
src1 = i.InputSimd128Register(1); src1 = i.InputSimd128Register(1);
UseScratchRegisterScope temps(tasm()); UseScratchRegisterScope temps(masm());
Simd128Register scratch = temps.AcquireQ(); Simd128Register scratch = temps.AcquireQ();
DCHECK(dst == i.InputSimd128Register(0)); DCHECK(dst == i.InputSimd128Register(0));
// src0 = [8, 9, 10, 11, ... 15], src1 = [0, 1, 2, 3, ... 7] (flipped). // src0 = [8, 9, 10, 11, ... 15], src1 = [0, 1, 2, 3, ... 7] (flipped).
@ -3052,7 +3052,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
case kArmS8x16UnzipLeft: { case kArmS8x16UnzipLeft: {
Simd128Register dst = i.OutputSimd128Register(), Simd128Register dst = i.OutputSimd128Register(),
src1 = i.InputSimd128Register(1); src1 = i.InputSimd128Register(1);
UseScratchRegisterScope temps(tasm()); UseScratchRegisterScope temps(masm());
Simd128Register scratch = temps.AcquireQ(); Simd128Register scratch = temps.AcquireQ();
DCHECK(dst == i.InputSimd128Register(0)); DCHECK(dst == i.InputSimd128Register(0));
// src0 = [0, 1, 2, 3, ... 15], src1 = [16, 17, 18, 19, ... 31] // src0 = [0, 1, 2, 3, ... 15], src1 = [16, 17, 18, 19, ... 31]
@ -3063,7 +3063,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
case kArmS8x16UnzipRight: { case kArmS8x16UnzipRight: {
Simd128Register dst = i.OutputSimd128Register(), Simd128Register dst = i.OutputSimd128Register(),
src1 = i.InputSimd128Register(1); src1 = i.InputSimd128Register(1);
UseScratchRegisterScope temps(tasm()); UseScratchRegisterScope temps(masm());
Simd128Register scratch = temps.AcquireQ(); Simd128Register scratch = temps.AcquireQ();
DCHECK(dst == i.InputSimd128Register(0)); DCHECK(dst == i.InputSimd128Register(0));
// src0 = [16, 17, 18, 19, ... 31], src1 = [0, 1, 2, 3, ... 15] (flipped). // src0 = [16, 17, 18, 19, ... 31], src1 = [0, 1, 2, 3, ... 15] (flipped).
@ -3074,7 +3074,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
case kArmS8x16TransposeLeft: { case kArmS8x16TransposeLeft: {
Simd128Register dst = i.OutputSimd128Register(), Simd128Register dst = i.OutputSimd128Register(),
src1 = i.InputSimd128Register(1); src1 = i.InputSimd128Register(1);
UseScratchRegisterScope temps(tasm()); UseScratchRegisterScope temps(masm());
Simd128Register scratch = temps.AcquireQ(); Simd128Register scratch = temps.AcquireQ();
DCHECK(dst == i.InputSimd128Register(0)); DCHECK(dst == i.InputSimd128Register(0));
// src0 = [0, 1, 2, 3, ... 15], src1 = [16, 17, 18, 19, ... 31] // src0 = [0, 1, 2, 3, ... 15], src1 = [16, 17, 18, 19, ... 31]
@ -3085,7 +3085,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
 case kArmS8x16TransposeRight: {
 Simd128Register dst = i.OutputSimd128Register(),
 src1 = i.InputSimd128Register(1);
-UseScratchRegisterScope temps(tasm());
+UseScratchRegisterScope temps(masm());
 Simd128Register scratch = temps.AcquireQ();
 DCHECK(dst == i.InputSimd128Register(0));
 // src0 = [16, 17, 18, 19, ... 31], src1 = [0, 1, 2, 3, ... 15] (flipped).
@@ -3112,7 +3112,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
 src0 = i.InputSimd128Register(0),
 src1 = i.InputSimd128Register(1);
 DwVfpRegister table_base = src0.low();
-UseScratchRegisterScope temps(tasm());
+UseScratchRegisterScope temps(masm());
 Simd128Register scratch = temps.AcquireQ();
 // If unary shuffle, table is src0 (2 d-registers), otherwise src0 and
 // src1. They must be consecutive.
@@ -3163,7 +3163,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
 }
 case kArmV128AnyTrue: {
 const QwNeonRegister& src = i.InputSimd128Register(0);
-UseScratchRegisterScope temps(tasm());
+UseScratchRegisterScope temps(masm());
 DwVfpRegister scratch = temps.AcquireD();
 __ vpmax(NeonU32, scratch, src.low(), src.high());
 __ vpmax(NeonU32, scratch, scratch, scratch);
@@ -3178,7 +3178,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
 }
 case kArmI32x4AllTrue: {
 const QwNeonRegister& src = i.InputSimd128Register(0);
-UseScratchRegisterScope temps(tasm());
+UseScratchRegisterScope temps(masm());
 DwVfpRegister scratch = temps.AcquireD();
 __ vpmin(NeonU32, scratch, src.low(), src.high());
 __ vpmin(NeonU32, scratch, scratch, scratch);
@@ -3189,7 +3189,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
 }
 case kArmI16x8AllTrue: {
 const QwNeonRegister& src = i.InputSimd128Register(0);
-UseScratchRegisterScope temps(tasm());
+UseScratchRegisterScope temps(masm());
 DwVfpRegister scratch = temps.AcquireD();
 __ vpmin(NeonU16, scratch, src.low(), src.high());
 __ vpmin(NeonU16, scratch, scratch, scratch);
@@ -3201,7 +3201,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
 }
 case kArmI8x16AllTrue: {
 const QwNeonRegister& src = i.InputSimd128Register(0);
-UseScratchRegisterScope temps(tasm());
+UseScratchRegisterScope temps(masm());
 DwVfpRegister scratch = temps.AcquireD();
 __ vpmin(NeonU8, scratch, src.low(), src.high());
 __ vpmin(NeonU8, scratch, scratch, scratch);
@@ -3747,7 +3747,7 @@ void CodeGenerator::AssembleConstructFrame() {
 // exception unconditionally. Thereby we can avoid the integer overflow
 // check in the condition code.
 if (required_slots * kSystemPointerSize < v8_flags.stack_size * KB) {
-UseScratchRegisterScope temps(tasm());
+UseScratchRegisterScope temps(masm());
 Register scratch = temps.Acquire();
 __ ldr(scratch, FieldMemOperand(
 kWasmInstanceRegister,
@@ -3873,8 +3873,8 @@ void CodeGenerator::AssembleReturn(InstructionOperand* additional_pop_count) {
 __ cmp(argc_reg, Operand(parameter_slots));
 __ mov(argc_reg, Operand(parameter_slots), LeaveCC, lt);
 }
-__ DropArguments(argc_reg, TurboAssembler::kCountIsInteger,
-TurboAssembler::kCountIncludesReceiver);
+__ DropArguments(argc_reg, MacroAssembler::kCountIsInteger,
+MacroAssembler::kCountIncludesReceiver);
 } else if (additional_pop_count->IsImmediate()) {
 DCHECK_EQ(Constant::kInt32, g.ToConstant(additional_pop_count).type());
 int additional_count = g.ToConstant(additional_pop_count).ToInt32();
@@ -3944,7 +3944,7 @@ void CodeGenerator::AssembleMove(InstructionOperand* source,
 } else if (source->IsDoubleRegister()) {
 __ vstr(g.ToDoubleRegister(source), dst);
 } else {
-UseScratchRegisterScope temps(tasm());
+UseScratchRegisterScope temps(masm());
 Register temp = temps.Acquire();
 QwNeonRegister src = g.ToSimd128Register(source);
 __ add(temp, dst.rn(), Operand(dst.offset()));
@@ -3965,7 +3965,7 @@ void CodeGenerator::AssembleMove(InstructionOperand* source,
 } else if (source->IsDoubleStackSlot()) {
 __ vldr(g.ToDoubleRegister(destination), src);
 } else {
-UseScratchRegisterScope temps(tasm());
+UseScratchRegisterScope temps(masm());
 Register temp = temps.Acquire();
 QwNeonRegister dst = g.ToSimd128Register(destination);
 __ add(temp, src.rn(), Operand(src.offset()));
@@ -3976,7 +3976,7 @@ void CodeGenerator::AssembleMove(InstructionOperand* source,
 case MoveType::kStackToStack: {
 MemOperand src = g.ToMemOperand(source);
 MemOperand dst = g.ToMemOperand(destination);
-UseScratchRegisterScope temps(tasm());
+UseScratchRegisterScope temps(masm());
 if (source->IsStackSlot() || source->IsFloatStackSlot()) {
 SwVfpRegister temp = temps.AcquireS();
 __ vldr(temp, src);
@@ -4014,27 +4014,27 @@ void CodeGenerator::AssembleMove(InstructionOperand* source,
 Constant src = g.ToConstant(source);
 MemOperand dst = g.ToMemOperand(destination);
 if (destination->IsStackSlot()) {
-UseScratchRegisterScope temps(tasm());
+UseScratchRegisterScope temps(masm());
 // Acquire a S register instead of a general purpose register in case
 // `vstr` needs one to compute the address of `dst`.
 SwVfpRegister s_temp = temps.AcquireS();
 {
 // TODO(arm): This sequence could be optimized further if necessary by
 // writing the constant directly into `s_temp`.
-UseScratchRegisterScope temps(tasm());
+UseScratchRegisterScope temps(masm());
 Register temp = temps.Acquire();
 MoveConstantToRegister(temp, src);
 __ vmov(s_temp, temp);
 }
 __ vstr(s_temp, dst);
 } else if (destination->IsFloatStackSlot()) {
-UseScratchRegisterScope temps(tasm());
+UseScratchRegisterScope temps(masm());
 SwVfpRegister temp = temps.AcquireS();
 __ vmov(temp, Float32::FromBits(src.ToFloat32AsInt()));
 __ vstr(temp, dst);
 } else {
 DCHECK(destination->IsDoubleStackSlot());
-UseScratchRegisterScope temps(tasm());
+UseScratchRegisterScope temps(masm());
 DwVfpRegister temp = temps.AcquireD();
 // TODO(arm): Look into optimizing this further if possible. Supporting
 // the NEON version of VMOV may help.
@@ -4060,7 +4060,7 @@ AllocatedOperand CodeGenerator::Push(InstructionOperand* source) {
 __ push(g.ToRegister(source));
 frame_access_state()->IncreaseSPDelta(new_slots);
 } else if (source->IsStackSlot()) {
-UseScratchRegisterScope temps(tasm());
+UseScratchRegisterScope temps(masm());
 Register scratch = temps.Acquire();
 __ ldr(scratch, g.ToMemOperand(source));
 __ push(scratch);
@@ -4083,7 +4083,7 @@ void CodeGenerator::Pop(InstructionOperand* dest, MachineRepresentation rep) {
 if (dest->IsRegister()) {
 __ pop(g.ToRegister(dest));
 } else if (dest->IsStackSlot()) {
-UseScratchRegisterScope temps(tasm());
+UseScratchRegisterScope temps(masm());
 Register scratch = temps.Acquire();
 __ pop(scratch);
 __ str(scratch, g.ToMemOperand(dest));
@@ -4110,7 +4110,7 @@ void CodeGenerator::PopTempStackSlots() {
 void CodeGenerator::MoveToTempLocation(InstructionOperand* source,
 MachineRepresentation rep) {
 // Must be kept in sync with {MoveTempLocationTo}.
-move_cycle_.temps.emplace(tasm());
+move_cycle_.temps.emplace(masm());
 auto& temps = *move_cycle_.temps;
 // Temporarily exclude the reserved scratch registers while we pick a
 // location to resolve the cycle. Re-include them immediately afterwards so
@@ -4184,7 +4184,7 @@ void CodeGenerator::SetPendingMove(MoveOperands* move) {
 InstructionOperand& destination = move->destination();
 MoveType::Type move_type =
 MoveType::InferMove(&move->source(), &move->destination());
-UseScratchRegisterScope temps(tasm());
+UseScratchRegisterScope temps(masm());
 if (move_type == MoveType::kStackToStack) {
 if (source.IsStackSlot() || source.IsFloatStackSlot()) {
 SwVfpRegister temp = temps.AcquireS();
@@ -4224,7 +4224,7 @@ void CodeGenerator::AssembleSwap(InstructionOperand* source,
 DCHECK(destination->IsFloatRegister());
 // GapResolver may give us reg codes that don't map to actual
 // s-registers. Generate code to work around those cases.
-UseScratchRegisterScope temps(tasm());
+UseScratchRegisterScope temps(masm());
 LowDwVfpRegister temp = temps.AcquireLowD();
 int src_code = LocationOperand::cast(source)->register_code();
 int dst_code = LocationOperand::cast(destination)->register_code();
@@ -4241,20 +4241,20 @@ void CodeGenerator::AssembleSwap(InstructionOperand* source,
 MemOperand dst = g.ToMemOperand(destination);
 if (source->IsRegister()) {
 Register src = g.ToRegister(source);
-UseScratchRegisterScope temps(tasm());
+UseScratchRegisterScope temps(masm());
 SwVfpRegister temp = temps.AcquireS();
 __ vmov(temp, src);
 __ ldr(src, dst);
 __ vstr(temp, dst);
 } else if (source->IsFloatRegister()) {
 int src_code = LocationOperand::cast(source)->register_code();
-UseScratchRegisterScope temps(tasm());
+UseScratchRegisterScope temps(masm());
 LowDwVfpRegister temp = temps.AcquireLowD();
 __ VmovExtended(temp.low().code(), src_code);
 __ VmovExtended(src_code, dst);
 __ vstr(temp.low(), dst);
 } else if (source->IsDoubleRegister()) {
-UseScratchRegisterScope temps(tasm());
+UseScratchRegisterScope temps(masm());
 DwVfpRegister temp = temps.AcquireD();
 DwVfpRegister src = g.ToDoubleRegister(source);
 __ Move(temp, src);
@@ -4262,7 +4262,7 @@ void CodeGenerator::AssembleSwap(InstructionOperand* source,
 __ vstr(temp, dst);
 } else {
 QwNeonRegister src = g.ToSimd128Register(source);
-UseScratchRegisterScope temps(tasm());
+UseScratchRegisterScope temps(masm());
 Register temp = temps.Acquire();
 QwNeonRegister temp_q = temps.AcquireQ();
 __ Move(temp_q, src);
@@ -4276,7 +4276,7 @@ void CodeGenerator::AssembleSwap(InstructionOperand* source,
 MemOperand src = g.ToMemOperand(source);
 MemOperand dst = g.ToMemOperand(destination);
 if (source->IsStackSlot() || source->IsFloatStackSlot()) {
-UseScratchRegisterScope temps(tasm());
+UseScratchRegisterScope temps(masm());
 SwVfpRegister temp_0 = temps.AcquireS();
 SwVfpRegister temp_1 = temps.AcquireS();
 __ vldr(temp_0, dst);
@@ -4284,7 +4284,7 @@ void CodeGenerator::AssembleSwap(InstructionOperand* source,
 __ vstr(temp_0, src);
 __ vstr(temp_1, dst);
 } else if (source->IsDoubleStackSlot()) {
-UseScratchRegisterScope temps(tasm());
+UseScratchRegisterScope temps(masm());
 LowDwVfpRegister temp = temps.AcquireLowD();
 if (temps.CanAcquireD()) {
 DwVfpRegister temp_0 = temp;
@@ -4317,7 +4317,7 @@ void CodeGenerator::AssembleSwap(InstructionOperand* source,
 MemOperand dst0 = dst;
 MemOperand src1(src.rn(), src.offset() + kDoubleSize);
 MemOperand dst1(dst.rn(), dst.offset() + kDoubleSize);
-UseScratchRegisterScope temps(tasm());
+UseScratchRegisterScope temps(masm());
 DwVfpRegister temp_0 = temps.AcquireD();
 DwVfpRegister temp_1 = temps.AcquireD();
 __ vldr(temp_0, dst0);


@@ -397,7 +397,7 @@ void EmitLoad(InstructionSelector* selector, InstructionCode opcode,
 if (int_matcher.HasResolvedValue()) {
 ptrdiff_t const delta =
 int_matcher.ResolvedValue() +
-TurboAssemblerBase::RootRegisterOffsetForExternalReference(
+MacroAssemblerBase::RootRegisterOffsetForExternalReference(
 selector->isolate(), m.ResolvedValue());
 input_count = 1;
 inputs[0] = g.UseImmediate(static_cast<int32_t>(delta));
@@ -753,7 +753,7 @@ void VisitStoreCommon(InstructionSelector* selector, Node* node,
 if (int_matcher.HasResolvedValue()) {
 ptrdiff_t const delta =
 int_matcher.ResolvedValue() +
-TurboAssemblerBase::RootRegisterOffsetForExternalReference(
+MacroAssemblerBase::RootRegisterOffsetForExternalReference(
 selector->isolate(), m.ResolvedValue());
 int input_count = 2;
 InstructionOperand inputs[2];


@@ -24,7 +24,7 @@ namespace v8 {
 namespace internal {
 namespace compiler {
-#define __ tasm()->
+#define __ masm()->
 // Adds Arm64-specific methods to convert InstructionOperands.
 class Arm64OperandConverter final : public InstructionOperandConverter {
@@ -238,13 +238,13 @@ class Arm64OperandConverter final : public InstructionOperandConverter {
 UNREACHABLE();
 }
-MemOperand ToMemOperand(InstructionOperand* op, TurboAssembler* tasm) const {
+MemOperand ToMemOperand(InstructionOperand* op, MacroAssembler* masm) const {
 DCHECK_NOT_NULL(op);
 DCHECK(op->IsStackSlot() || op->IsFPStackSlot());
-return SlotToMemOperand(AllocatedOperand::cast(op)->index(), tasm);
+return SlotToMemOperand(AllocatedOperand::cast(op)->index(), masm);
 }
-MemOperand SlotToMemOperand(int slot, TurboAssembler* tasm) const {
+MemOperand SlotToMemOperand(int slot, MacroAssembler* masm) const {
 FrameOffset offset = frame_access_state()->GetFrameOffset(slot);
 if (offset.from_frame_pointer()) {
 int from_sp = offset.offset() + frame_access_state()->GetSPToFPOffset();
@@ -294,7 +294,7 @@ class OutOfLineRecordWrite final : public OutOfLineCode {
 : SaveFPRegsMode::kIgnore;
 if (must_save_lr_) {
 // We need to save and restore lr if the frame was elided.
-__ Push<TurboAssembler::kSignLR>(lr, padreg);
+__ Push<MacroAssembler::kSignLR>(lr, padreg);
 unwinding_info_writer_->MarkLinkRegisterOnTopOfStack(__ pc_offset(), sp);
 }
 if (mode_ == RecordWriteMode::kValueIsEphemeronKey) {
@@ -311,7 +311,7 @@ class OutOfLineRecordWrite final : public OutOfLineCode {
 __ CallRecordWriteStubSaveRegisters(object_, offset_, save_fp_mode);
 }
 if (must_save_lr_) {
-__ Pop<TurboAssembler::kAuthLR>(padreg, lr);
+__ Pop<MacroAssembler::kAuthLR>(padreg, lr);
 unwinding_info_writer_->MarkPopLinkRegisterFromTopOfStack(__ pc_offset());
 }
 }
@@ -459,14 +459,14 @@ void EmitOOLTrapIfNeeded(Zone* zone, CodeGenerator* codegen,
 // Handles unary ops that work for float (scalar), double (scalar), or NEON.
 template <typename Fn>
-void EmitFpOrNeonUnop(TurboAssembler* tasm, Fn fn, Instruction* instr,
+void EmitFpOrNeonUnop(MacroAssembler* masm, Fn fn, Instruction* instr,
 Arm64OperandConverter i, VectorFormat scalar,
 VectorFormat vector) {
 VectorFormat f = instr->InputAt(0)->IsSimd128Register() ? vector : scalar;
 VRegister output = VRegister::Create(i.OutputDoubleRegister().code(), f);
 VRegister input = VRegister::Create(i.InputDoubleRegister(0).code(), f);
-(tasm->*fn)(output, input);
+(masm->*fn)(output, input);
 }
 } // namespace
@@ -539,13 +539,13 @@ void EmitFpOrNeonUnop(TurboAssembler* tasm, Fn fn, Instruction* instr,
 #define ASSEMBLE_IEEE754_BINOP(name) \
 do { \
-FrameScope scope(tasm(), StackFrame::MANUAL); \
+FrameScope scope(masm(), StackFrame::MANUAL); \
 __ CallCFunction(ExternalReference::ieee754_##name##_function(), 0, 2); \
 } while (0)
 #define ASSEMBLE_IEEE754_UNOP(name) \
 do { \
-FrameScope scope(tasm(), StackFrame::MANUAL); \
+FrameScope scope(masm(), StackFrame::MANUAL); \
 __ CallCFunction(ExternalReference::ieee754_##name##_function(), 0, 1); \
 } while (0)
@@ -558,7 +558,7 @@ void EmitFpOrNeonUnop(TurboAssembler* tasm, Fn fn, Instruction* instr,
 __ asm_imm(i.OutputSimd128Register().format(), \
 i.InputSimd128Register(0).format(), i.InputInt##width(1)); \
 } else { \
-UseScratchRegisterScope temps(tasm()); \
+UseScratchRegisterScope temps(masm()); \
 VRegister tmp = temps.AcquireQ(); \
 Register shift = temps.Acquire##gp(); \
 constexpr int mask = (1 << width) - 1; \
@@ -578,7 +578,7 @@ void EmitFpOrNeonUnop(TurboAssembler* tasm, Fn fn, Instruction* instr,
 __ asm_imm(i.OutputSimd128Register().format(), \
 i.InputSimd128Register(0).format(), i.InputInt##width(1)); \
 } else { \
-UseScratchRegisterScope temps(tasm()); \
+UseScratchRegisterScope temps(masm()); \
 VRegister tmp = temps.AcquireQ(); \
 Register shift = temps.Acquire##gp(); \
 constexpr int mask = (1 << width) - 1; \
@@ -592,7 +592,7 @@ void EmitFpOrNeonUnop(TurboAssembler* tasm, Fn fn, Instruction* instr,
 void CodeGenerator::AssembleDeconstructFrame() {
 __ Mov(sp, fp);
-__ Pop<TurboAssembler::kAuthLR>(fp, lr);
+__ Pop<MacroAssembler::kAuthLR>(fp, lr);
 unwinding_info_writer_.MarkFrameDeconstructed(__ pc_offset());
 }
@@ -606,7 +606,7 @@ void CodeGenerator::AssemblePrepareTailCall() {
 namespace {
-void AdjustStackPointerForTailCall(TurboAssembler* tasm,
+void AdjustStackPointerForTailCall(MacroAssembler* masm,
 FrameAccessState* state,
 int new_slot_above_sp,
 bool allow_shrinkage = true) {
@@ -615,10 +615,10 @@ void AdjustStackPointerForTailCall(TurboAssembler* tasm,
 int stack_slot_delta = new_slot_above_sp - current_sp_offset;
 DCHECK_EQ(stack_slot_delta % 2, 0);
 if (stack_slot_delta > 0) {
-tasm->Claim(stack_slot_delta);
+masm->Claim(stack_slot_delta);
 state->IncreaseSPDelta(stack_slot_delta);
 } else if (allow_shrinkage && stack_slot_delta < 0) {
-tasm->Drop(-stack_slot_delta);
+masm->Drop(-stack_slot_delta);
 state->IncreaseSPDelta(stack_slot_delta);
 }
 }
@@ -627,14 +627,14 @@ void AdjustStackPointerForTailCall(TurboAssembler* tasm,
 void CodeGenerator::AssembleTailCallBeforeGap(Instruction* instr,
 int first_unused_slot_offset) {
-AdjustStackPointerForTailCall(tasm(), frame_access_state(),
+AdjustStackPointerForTailCall(masm(), frame_access_state(),
 first_unused_slot_offset, false);
 }
 void CodeGenerator::AssembleTailCallAfterGap(Instruction* instr,
 int first_unused_slot_offset) {
 DCHECK_EQ(first_unused_slot_offset % 2, 0);
-AdjustStackPointerForTailCall(tasm(), frame_access_state(),
+AdjustStackPointerForTailCall(masm(), frame_access_state(),
 first_unused_slot_offset);
 DCHECK(instr->IsTailCall());
 InstructionOperandConverter g(this, instr);
@@ -646,7 +646,7 @@ void CodeGenerator::AssembleTailCallAfterGap(Instruction* instr,
 // Check that {kJavaScriptCallCodeStartRegister} is correct.
 void CodeGenerator::AssembleCodeStartRegisterCheck() {
-UseScratchRegisterScope temps(tasm());
+UseScratchRegisterScope temps(masm());
 Register scratch = temps.AcquireX();
 __ ComputeCodeStartAddress(scratch);
 __ cmp(scratch, kJavaScriptCallCodeStartRegister);
@@ -705,7 +705,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
 __ Jump(wasm_code, constant.rmode());
 } else {
 Register target = i.InputRegister(0);
-UseScratchRegisterScope temps(tasm());
+UseScratchRegisterScope temps(masm());
 temps.Exclude(x17);
 __ Mov(x17, target);
 __ Jump(x17);
@@ -737,7 +737,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
 DCHECK_IMPLIES(
 instr->HasCallDescriptorFlag(CallDescriptor::kFixedTargetRegister),
 reg == kJavaScriptCallCodeStartRegister);
-UseScratchRegisterScope temps(tasm());
+UseScratchRegisterScope temps(masm());
 temps.Exclude(x17);
 __ Mov(x17, reg);
 __ Jump(x17);
@@ -750,7 +750,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
 Register func = i.InputRegister(0);
 if (v8_flags.debug_code) {
 // Check the function's context matches the context argument.
-UseScratchRegisterScope scope(tasm());
+UseScratchRegisterScope scope(masm());
 Register temp = scope.AcquireX();
 __ LoadTaggedPointerField(
 temp, FieldMemOperand(func, JSFunction::kContextOffset));
@@ -860,7 +860,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
 {
 // We don't actually want to generate a pile of code for this, so just
 // claim there is a stack frame, without generating one.
-FrameScope scope(tasm(), StackFrame::NO_FRAME_TYPE);
+FrameScope scope(masm(), StackFrame::NO_FRAME_TYPE);
 __ Call(BUILTIN_CODE(isolate(), AbortCSADcheck),
 RelocInfo::CODE_TARGET);
 }
@@ -1051,39 +1051,39 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
 ASSEMBLE_IEEE754_UNOP(tanh);
 break;
 case kArm64Float32RoundDown:
-EmitFpOrNeonUnop(tasm(), &TurboAssembler::Frintm, instr, i, kFormatS,
+EmitFpOrNeonUnop(masm(), &MacroAssembler::Frintm, instr, i, kFormatS,
 kFormat4S);
 break;
 case kArm64Float64RoundDown:
-EmitFpOrNeonUnop(tasm(), &TurboAssembler::Frintm, instr, i, kFormatD,
+EmitFpOrNeonUnop(masm(), &MacroAssembler::Frintm, instr, i, kFormatD,
 kFormat2D);
 break;
 case kArm64Float32RoundUp:
-EmitFpOrNeonUnop(tasm(), &TurboAssembler::Frintp, instr, i, kFormatS,
+EmitFpOrNeonUnop(masm(), &MacroAssembler::Frintp, instr, i, kFormatS,
 kFormat4S);
 break;
 case kArm64Float64RoundUp:
-EmitFpOrNeonUnop(tasm(), &TurboAssembler::Frintp, instr, i, kFormatD,
+EmitFpOrNeonUnop(masm(), &MacroAssembler::Frintp, instr, i, kFormatD,
 kFormat2D);
 break;
 case kArm64Float64RoundTiesAway:
-EmitFpOrNeonUnop(tasm(), &TurboAssembler::Frinta, instr, i, kFormatD,
+EmitFpOrNeonUnop(masm(), &MacroAssembler::Frinta, instr, i, kFormatD,
 kFormat2D);
 break;
 case kArm64Float32RoundTruncate:
-EmitFpOrNeonUnop(tasm(), &TurboAssembler::Frintz, instr, i, kFormatS,
+EmitFpOrNeonUnop(masm(), &MacroAssembler::Frintz, instr, i, kFormatS,
 kFormat4S);
 break;
 case kArm64Float64RoundTruncate:
-EmitFpOrNeonUnop(tasm(), &TurboAssembler::Frintz, instr, i, kFormatD,
+EmitFpOrNeonUnop(masm(), &MacroAssembler::Frintz, instr, i, kFormatD,
 kFormat2D);
 break;
 case kArm64Float32RoundTiesEven:
-EmitFpOrNeonUnop(tasm(), &TurboAssembler::Frintn, instr, i, kFormatS,
+EmitFpOrNeonUnop(masm(), &MacroAssembler::Frintn, instr, i, kFormatS,
 kFormat4S);
 break;
 case kArm64Float64RoundTiesEven:
-EmitFpOrNeonUnop(tasm(), &TurboAssembler::Frintn, instr, i, kFormatD,
+EmitFpOrNeonUnop(masm(), &MacroAssembler::Frintn, instr, i, kFormatD,
 kFormat2D);
 break;
 case kArm64Add:
@@ -1314,14 +1314,14 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
 __ Udiv(i.OutputRegister32(), i.InputRegister32(0), i.InputRegister32(1));
 break;
 case kArm64Imod: {
-UseScratchRegisterScope scope(tasm());
+UseScratchRegisterScope scope(masm());
 Register temp = scope.AcquireX();
 __ Sdiv(temp, i.InputRegister(0), i.InputRegister(1));
 __ Msub(i.OutputRegister(), temp, i.InputRegister(1), i.InputRegister(0));
 break;
 }
 case kArm64Imod32: {
-UseScratchRegisterScope scope(tasm());
+UseScratchRegisterScope scope(masm());
 Register temp = scope.AcquireW();
 __ Sdiv(temp, i.InputRegister32(0), i.InputRegister32(1));
 __ Msub(i.OutputRegister32(), temp, i.InputRegister32(1),
@@ -1329,14 +1329,14 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
 break;
 }
 case kArm64Umod: {
-UseScratchRegisterScope scope(tasm());
+UseScratchRegisterScope scope(masm());
 Register temp = scope.AcquireX();
 __ Udiv(temp, i.InputRegister(0), i.InputRegister(1));
 __ Msub(i.OutputRegister(), temp, i.InputRegister(1), i.InputRegister(0));
 break;
 }
 case kArm64Umod32: {
-UseScratchRegisterScope scope(tasm());
+UseScratchRegisterScope scope(masm());
 Register temp = scope.AcquireW();
 __ Udiv(temp, i.InputRegister32(0), i.InputRegister32(1));
 __ Msub(i.OutputRegister32(), temp, i.InputRegister32(1),
@@ -1650,7 +1650,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
 break;
 case kArm64Float64Mod: {
 // TODO(turbofan): implement directly.
-FrameScope scope(tasm(), StackFrame::MANUAL);
+FrameScope scope(masm(), StackFrame::MANUAL);
 DCHECK_EQ(d0, i.InputDoubleRegister(0));
 DCHECK_EQ(d1, i.InputDoubleRegister(1));
 DCHECK_EQ(d0, i.OutputDoubleRegister());
@@ -2369,7 +2369,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
 SIMD_BINOP_LANE_SIZE_CASE(kArm64IAdd, Add);
 SIMD_BINOP_LANE_SIZE_CASE(kArm64ISub, Sub);
 case kArm64I64x2Mul: {
-UseScratchRegisterScope scope(tasm());
+UseScratchRegisterScope scope(masm());
 VRegister dst = i.OutputSimd128Register();
 VRegister src1 = i.InputSimd128Register(0);
 VRegister src2 = i.InputSimd128Register(1);
@@ -2470,7 +2470,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
 SIMD_BINOP_LANE_SIZE_CASE(kArm64IGtU, Cmhi);
 SIMD_BINOP_LANE_SIZE_CASE(kArm64IGeU, Cmhs);
 case kArm64I32x4BitMask: {
-UseScratchRegisterScope scope(tasm());
+UseScratchRegisterScope scope(masm());
 Register dst = i.OutputRegister32();
 VRegister src = i.InputSimd128Register(0);
 VRegister tmp = scope.AcquireQ();
@@ -2486,7 +2486,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
 break;
 }
 case kArm64I32x4DotI16x8S: {
-UseScratchRegisterScope scope(tasm());
+UseScratchRegisterScope scope(masm());
 VRegister lhs = i.InputSimd128Register(0);
 VRegister rhs = i.InputSimd128Register(1);
 VRegister tmp1 = scope.AcquireV(kFormat4S);
@@ -2497,7 +2497,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
 break;
 }
 case kArm64I16x8DotI8x16S: {
-UseScratchRegisterScope scope(tasm());
+UseScratchRegisterScope scope(masm());
 VRegister lhs = i.InputSimd128Register(0);
 VRegister rhs = i.InputSimd128Register(1);
 VRegister tmp1 = scope.AcquireV(kFormat8H);
@@ -2515,7 +2515,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
 i.InputSimd128Register(1).V16B());
 } else {
-UseScratchRegisterScope scope(tasm());
+UseScratchRegisterScope scope(masm());
 VRegister lhs = i.InputSimd128Register(0);
 VRegister rhs = i.InputSimd128Register(1);
 VRegister tmp1 = scope.AcquireV(kFormat8H);
@@ -2553,7 +2553,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
 VRegister dst = i.OutputSimd128Register(),
 src0 = i.InputSimd128Register(0),
 src1 = i.InputSimd128Register(1);
-UseScratchRegisterScope scope(tasm());
+UseScratchRegisterScope scope(masm());
 VRegister temp = scope.AcquireV(kFormat4S);
 if (dst == src1) {
 __ Mov(temp, src1.V4S());
@@ -2574,7 +2574,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
 VRegister dst = i.OutputSimd128Register(),
 src0 = i.InputSimd128Register(0),
 src1 = i.InputSimd128Register(1);
-UseScratchRegisterScope scope(tasm());
+UseScratchRegisterScope scope(masm());
 VRegister temp = scope.AcquireV(kFormat4S);
 if (dst == src1) {
 __ Mov(temp, src1.V4S());
@@ -2588,7 +2588,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
 SIMD_BINOP_LANE_SIZE_CASE(kArm64ISubSatU, Uqsub);
 SIMD_BINOP_CASE(kArm64I16x8Q15MulRSatS, Sqrdmulh, 8H);
 case kArm64I16x8BitMask: {
-UseScratchRegisterScope scope(tasm());
+UseScratchRegisterScope scope(masm());
 Register dst = i.OutputRegister32();
 VRegister src = i.InputSimd128Register(0);
 VRegister tmp = scope.AcquireQ();
@@ -2615,7 +2615,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
 VRegister dst = i.OutputSimd128Register(),
 src0 = i.InputSimd128Register(0),
 src1 = i.InputSimd128Register(1);
-UseScratchRegisterScope scope(tasm());
+UseScratchRegisterScope scope(masm());
 VRegister temp = scope.AcquireV(kFormat8H);
 if (dst == src1) {
 __ Mov(temp, src1.V8H());
@@ -2633,7 +2633,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
 VRegister dst = i.OutputSimd128Register(),
 src0 = i.InputSimd128Register(0),
 src1 = i.InputSimd128Register(1);
-UseScratchRegisterScope scope(tasm());
+UseScratchRegisterScope scope(masm());
 VRegister temp = scope.AcquireV(kFormat8H);
 if (dst == src1) {
 __ Mov(temp, src1.V8H());
@@ -2644,7 +2644,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
 break;
 }
 case kArm64I8x16BitMask: {
-UseScratchRegisterScope scope(tasm());
+UseScratchRegisterScope scope(masm());
 Register dst = i.OutputRegister32();
 VRegister src = i.InputSimd128Register(0);
 VRegister tmp = scope.AcquireQ();
@@ -2733,7 +2733,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
 src1 = i.InputSimd128Register(1).V4S();
 // Check for in-place shuffles.
 // If dst == src0 == src1, then the shuffle is unary and we only use src0.
-UseScratchRegisterScope scope(tasm());
+UseScratchRegisterScope scope(masm());
 VRegister temp = scope.AcquireV(kFormat4S);
 if (dst == src0) {
 __ Mov(temp, src0);
@@ -2799,7 +2799,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
 DCHECK_EQ(0, (imm1 | imm2) & (src0 == src1 ? 0xF0F0F0F0F0F0F0F0
 : 0xE0E0E0E0E0E0E0E0));
-UseScratchRegisterScope scope(tasm());
+UseScratchRegisterScope scope(masm());
 VRegister temp = scope.AcquireV(kFormat16B);
 __ Movi(temp, imm2, imm1);
@@ -2878,7 +2878,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
 break;
 }
 case kArm64V128AnyTrue: {
-UseScratchRegisterScope scope(tasm());
+UseScratchRegisterScope scope(masm());
 // For AnyTrue, the format does not matter; also, we would like to avoid
 // an expensive horizontal reduction.
 VRegister temp = scope.AcquireV(kFormat4S);
@@ -2891,7 +2891,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
 }
 #define SIMD_REDUCE_OP_CASE(Op, Instr, format, FORMAT) \
 case Op: { \
-UseScratchRegisterScope scope(tasm()); \
+UseScratchRegisterScope scope(masm()); \
 VRegister temp = scope.AcquireV(format); \
 __ Instr(temp, i.InputSimd128Register(0).V##FORMAT()); \
 __ Umov(i.OutputRegister32(), temp, 0); \
@@ -3045,7 +3045,7 @@ void CodeGenerator::AssembleArchBinarySearchSwitch(Instruction* instr) {
 void CodeGenerator::AssembleArchTableSwitch(Instruction* instr) {
 Arm64OperandConverter i(this, instr);
-UseScratchRegisterScope scope(tasm());
+UseScratchRegisterScope scope(masm());
 Register input = i.InputRegister32(0);
 Register temp = scope.AcquireX();
 size_t const case_count = instr->InputCount() - 2;
@@ -3066,7 +3066,7 @@ void CodeGenerator::AssembleArchTableSwitch(Instruction* instr) {
 {
 const size_t instruction_count =
 case_count * instructions_per_case + instructions_per_jump_target;
-TurboAssembler::BlockPoolsScope block_pools(tasm(),
+MacroAssembler::BlockPoolsScope block_pools(masm(),
 instruction_count * kInstrSize);
 __ Bind(&table);
 for (size_t index = 0; index < case_count; ++index) {
@@ -3125,10 +3125,10 @@ void CodeGenerator::AssembleConstructFrame() {
 DCHECK_EQ(required_slots % 2, 1);
 __ Prologue();
 // Update required_slots count since we have just claimed one extra slot.
-static_assert(TurboAssembler::kExtraSlotClaimedByPrologue == 1);
-required_slots -= TurboAssembler::kExtraSlotClaimedByPrologue;
+static_assert(MacroAssembler::kExtraSlotClaimedByPrologue == 1);
+required_slots -= MacroAssembler::kExtraSlotClaimedByPrologue;
 } else {
-__ Push<TurboAssembler::kSignLR>(lr, fp);
+__ Push<MacroAssembler::kSignLR>(lr, fp);
 __ Mov(fp, sp);
 }
 unwinding_info_writer_.MarkFrameConstructed(__ pc_offset());
@@ -3151,7 +3151,7 @@ void CodeGenerator::AssembleConstructFrame() {
 // One unoptimized frame slot has already been claimed when the actual
 // arguments count was pushed.
 required_slots -=
-unoptimized_frame_slots - TurboAssembler::kExtraSlotClaimedByPrologue;
+unoptimized_frame_slots - MacroAssembler::kExtraSlotClaimedByPrologue;
 }
 #if V8_ENABLE_WEBASSEMBLY
@@ -3165,7 +3165,7 @@ void CodeGenerator::AssembleConstructFrame() {
 // exception unconditionally. Thereby we can avoid the integer overflow
 // check in the condition code.
 if (required_slots * kSystemPointerSize < v8_flags.stack_size * KB) {
-UseScratchRegisterScope scope(tasm());
+UseScratchRegisterScope scope(masm());
 Register scratch = scope.AcquireX();
 __ Ldr(scratch, FieldMemOperand(
 kWasmInstanceRegister,
@@ -3178,7 +3178,7 @@ void CodeGenerator::AssembleConstructFrame() {
 {
 // Finish the frame that hasn't been fully built yet.
-UseScratchRegisterScope temps(tasm());
+UseScratchRegisterScope temps(masm());
 Register scratch = temps.AcquireX();
 __ Mov(scratch,
 StackFrame::TypeToMarker(info()->GetOutputStackFrameType()));
@@ -3209,7 +3209,7 @@ void CodeGenerator::AssembleConstructFrame() {
 __ Claim(required_slots);
 break;
 case CallDescriptor::kCallCodeObject: {
-UseScratchRegisterScope temps(tasm());
+UseScratchRegisterScope temps(masm());
 Register scratch = temps.AcquireX();
 __ Mov(scratch,
 StackFrame::TypeToMarker(info()->GetOutputStackFrameType()));
@@ -3225,7 +3225,7 @@ void CodeGenerator::AssembleConstructFrame() {
 }
 #if V8_ENABLE_WEBASSEMBLY
 case CallDescriptor::kCallWasmFunction: {
-UseScratchRegisterScope temps(tasm());
+UseScratchRegisterScope temps(masm());
 Register scratch = temps.AcquireX();
 __ Mov(scratch,
 StackFrame::TypeToMarker(info()->GetOutputStackFrameType()));
@@ -3235,7 +3235,7 @@ void CodeGenerator::AssembleConstructFrame() {
 }
 case CallDescriptor::kCallWasmImportWrapper:
 case CallDescriptor::kCallWasmCapiFunction: {
-UseScratchRegisterScope temps(tasm());
+UseScratchRegisterScope temps(masm());
 Register scratch = temps.AcquireX();
 __ Mov(scratch,
 StackFrame::TypeToMarker(info()->GetOutputStackFrameType()));
@@ -3254,7 +3254,7 @@ void CodeGenerator::AssembleConstructFrame() {
 case CallDescriptor::kCallAddress:
 #if V8_ENABLE_WEBASSEMBLY
 if (info()->GetOutputStackFrameType() == StackFrame::C_WASM_ENTRY) {
-UseScratchRegisterScope temps(tasm());
+UseScratchRegisterScope temps(masm());
 Register scratch = temps.AcquireX();
 __ Mov(scratch, StackFrame::TypeToMarker(StackFrame::C_WASM_ENTRY));
 __ Push(scratch, padreg);
@@ -3392,7 +3392,7 @@ void CodeGenerator::PrepareForDeoptimizationExits(
 }
 // Emit the jumps to deoptimization entries.
-UseScratchRegisterScope scope(tasm());
+UseScratchRegisterScope scope(masm());
 Register scratch = scope.AcquireX();
 static_assert(static_cast<int>(kFirstDeoptimizeKind) == 0);
 for (int i = 0; i < kDeoptimizeKindCount; i++) {
@@ -3417,9 +3417,9 @@ AllocatedOperand CodeGenerator::Push(InstructionOperand* source) {
 __ Push(padreg, g.ToRegister(source));
 frame_access_state()->IncreaseSPDelta(new_slots);
 } else if (source->IsStackSlot()) {
-UseScratchRegisterScope temps(tasm());
+UseScratchRegisterScope temps(masm());
 Register scratch = temps.AcquireX();
-__ Ldr(scratch, g.ToMemOperand(source, tasm()));
+__ Ldr(scratch, g.ToMemOperand(source, masm()));
 __ Push(padreg, scratch);
 frame_access_state()->IncreaseSPDelta(new_slots);
 } else {
@@ -3440,10 +3440,10 @@ void CodeGenerator::Pop(InstructionOperand* dest, MachineRepresentation rep) {
 if (dest->IsRegister()) {
 __ Pop(g.ToRegister(dest), padreg);
 } else if (dest->IsStackSlot()) {
-UseScratchRegisterScope temps(tasm());
+UseScratchRegisterScope temps(masm());
 Register scratch = temps.AcquireX();
 __ Pop(scratch, padreg);
-__ Str(scratch, g.ToMemOperand(dest, tasm()));
+__ Str(scratch, g.ToMemOperand(dest, masm()));
 } else {
 int last_frame_slot_id =
 frame_access_state_->frame()->GetTotalFrameSlotCount() - 1;
@@ -3468,7 +3468,7 @@ void CodeGenerator::MoveToTempLocation(InstructionOperand* source,
 MachineRepresentation rep) {
 // Must be kept in sync with {MoveTempLocationTo}.
 DCHECK(!source->IsImmediate());
-move_cycle_.temps.emplace(tasm());
+move_cycle_.temps.emplace(masm());
 auto& temps = *move_cycle_.temps;
 // Temporarily exclude the reserved scratch registers while we pick one to
 // resolve the move cycle. Re-include them immediately afterwards as they
@@ -3506,7 +3506,7 @@ void CodeGenerator::MoveToTempLocation(InstructionOperand* source,
 scratch_reg.code());
 Arm64OperandConverter g(this, nullptr);
 if (source->IsStackSlot()) {
-__ Ldr(g.ToDoubleRegister(&scratch), g.ToMemOperand(source, tasm()));
+__ Ldr(g.ToDoubleRegister(&scratch), g.ToMemOperand(source, masm()));
 } else {
 DCHECK(source->IsRegister());
 __ fmov(g.ToDoubleRegister(&scratch), g.ToRegister(source));
@@ -3535,7 +3535,7 @@ void CodeGenerator::MoveTempLocationTo(InstructionOperand* dest,
 move_cycle_.scratch_reg->code());
 Arm64OperandConverter g(this, nullptr);
 if (dest->IsStackSlot()) {
-__ Str(g.ToDoubleRegister(&scratch), g.ToMemOperand(dest, tasm()));
+__ Str(g.ToDoubleRegister(&scratch), g.ToMemOperand(dest, masm()));
 } else {
 DCHECK(dest->IsRegister());
 __ fmov(g.ToRegister(dest), g.ToDoubleRegister(&scratch));
@@ -3557,9 +3557,9 @@ void CodeGenerator::SetPendingMove(MoveOperands* move) {
 auto move_type = MoveType::InferMove(&move->source(), &move->destination());
 if (move_type == MoveType::kStackToStack) {
 Arm64OperandConverter g(this, nullptr);
-MemOperand src = g.ToMemOperand(&move->source(), tasm());
-MemOperand dst = g.ToMemOperand(&move->destination(), tasm());
-UseScratchRegisterScope temps(tasm());
+MemOperand src = g.ToMemOperand(&move->source(), masm());
+MemOperand dst = g.ToMemOperand(&move->destination(), masm());
+UseScratchRegisterScope temps(masm());
 if (move->source().IsSimd128StackSlot()) {
 VRegister temp = temps.AcquireQ();
 move_cycle_.scratch_fp_regs.set(temp);
@@ -3574,11 +3574,11 @@ void CodeGenerator::SetPendingMove(MoveOperands* move) {
 // Offset doesn't fit into the immediate field so the assembler will emit
 // two instructions and use a second temp register.
 if ((src.IsImmediateOffset() &&
-!tasm()->IsImmLSScaled(src_offset, src_size) &&
-!tasm()->IsImmLSUnscaled(src_offset)) ||
+!masm()->IsImmLSScaled(src_offset, src_size) &&
+!masm()->IsImmLSUnscaled(src_offset)) ||
 (dst.IsImmediateOffset() &&
-!tasm()->IsImmLSScaled(dst_offset, dst_size) &&
-!tasm()->IsImmLSUnscaled(dst_offset))) {
+!masm()->IsImmLSScaled(dst_offset, dst_size) &&
+!masm()->IsImmLSUnscaled(dst_offset))) {
 Register temp = temps.AcquireX();
 move_cycle_.scratch_regs.set(temp);
 }
@@ -3627,7 +3627,7 @@ void CodeGenerator::AssembleMove(InstructionOperand* source,
 }
 return;
 case MoveType::kRegisterToStack: {
-MemOperand dst = g.ToMemOperand(destination, tasm());
+MemOperand dst = g.ToMemOperand(destination, masm());
 if (source->IsRegister()) {
 __ Str(g.ToRegister(source), dst);
 } else {
@@ -3642,7 +3642,7 @@ void CodeGenerator::AssembleMove(InstructionOperand* source,
 return;
 }
 case MoveType::kStackToRegister: {
-MemOperand src = g.ToMemOperand(source, tasm());
+MemOperand src = g.ToMemOperand(source, masm());
 if (destination->IsRegister()) {
 __ Ldr(g.ToRegister(destination), src);
 } else {
@@ -3657,15 +3657,15 @@ void CodeGenerator::AssembleMove(InstructionOperand* source,
 return;
 }
 case MoveType::kStackToStack: {
-MemOperand src = g.ToMemOperand(source, tasm());
-MemOperand dst = g.ToMemOperand(destination, tasm());
+MemOperand src = g.ToMemOperand(source, masm());
+MemOperand dst = g.ToMemOperand(destination, masm());
 if (source->IsSimd128StackSlot()) {
-UseScratchRegisterScope scope(tasm());
+UseScratchRegisterScope scope(masm());
 VRegister temp = scope.AcquireQ();
 __ Ldr(temp, src);
 __ Str(temp, dst);
 } else {
-UseScratchRegisterScope scope(tasm());
+UseScratchRegisterScope scope(masm());
 Register temp = scope.AcquireX();
 __ Ldr(temp, src);
 __ Str(temp, dst);
@@ -3689,9 +3689,9 @@ void CodeGenerator::AssembleMove(InstructionOperand* source,
 }
 case MoveType::kConstantToStack: {
 Constant src = g.ToConstant(source);
-MemOperand dst = g.ToMemOperand(destination, tasm());
+MemOperand dst = g.ToMemOperand(destination, masm());
 if (destination->IsStackSlot()) {
-UseScratchRegisterScope scope(tasm());
+UseScratchRegisterScope scope(masm());
 Register temp = scope.AcquireX();
 MoveConstantToRegister(temp, src);
 __ Str(temp, dst);
@@ -3699,7 +3699,7 @@ void CodeGenerator::AssembleMove(InstructionOperand* source,
 if (base::bit_cast<int32_t>(src.ToFloat32()) == 0) {
 __ Str(wzr, dst);
 } else {
-UseScratchRegisterScope scope(tasm());
+UseScratchRegisterScope scope(masm());
 VRegister temp = scope.AcquireS();
 __ Fmov(temp, src.ToFloat32());
 __ Str(temp, dst);
@@ -3709,7 +3709,7 @@ void CodeGenerator::AssembleMove(InstructionOperand* source,
 if (src.ToFloat64().AsUint64() == 0) {
 __ Str(xzr, dst);
 } else {
-UseScratchRegisterScope scope(tasm());
+UseScratchRegisterScope scope(masm());
 VRegister temp = scope.AcquireD();
 __ Fmov(temp, src.ToFloat64().value());
 __ Str(temp, dst);
@@ -3740,8 +3740,8 @@ void CodeGenerator::AssembleSwap(InstructionOperand* source,
 }
 return;
 case MoveType::kRegisterToStack: {
-UseScratchRegisterScope scope(tasm());
-MemOperand dst = g.ToMemOperand(destination, tasm());
+UseScratchRegisterScope scope(masm());
+MemOperand dst = g.ToMemOperand(destination, masm());
 if (source->IsRegister()) {
 Register temp = scope.AcquireX();
 Register src = g.ToRegister(source);
@@ -3749,7 +3749,7 @@ void CodeGenerator::AssembleSwap(InstructionOperand* source,
 __ Ldr(src, dst);
 __ Str(temp, dst);
 } else {
-UseScratchRegisterScope scope(tasm());
+UseScratchRegisterScope scope(masm());
 VRegister src = g.ToDoubleRegister(source);
 if (source->IsFloatRegister() || source->IsDoubleRegister()) {
 VRegister temp = scope.AcquireD();
@@ -3767,9 +3767,9 @@ void CodeGenerator::AssembleSwap(InstructionOperand* source,
 return;
 }
 case MoveType::kStackToStack: {
-UseScratchRegisterScope scope(tasm());
-MemOperand src = g.ToMemOperand(source, tasm());
-MemOperand dst = g.ToMemOperand(destination, tasm());
+UseScratchRegisterScope scope(masm());
+MemOperand src = g.ToMemOperand(source, masm());
+MemOperand dst = g.ToMemOperand(destination, masm());
 VRegister temp_0 = scope.AcquireD();
 VRegister temp_1 = scope.AcquireD();
 if (source->IsSimd128StackSlot()) {


@ -623,7 +623,7 @@ void EmitLoad(InstructionSelector* selector, Node* node, InstructionCode opcode,
selector->CanAddressRelativeToRootsRegister(m.ResolvedValue())) { selector->CanAddressRelativeToRootsRegister(m.ResolvedValue())) {
ptrdiff_t const delta = ptrdiff_t const delta =
g.GetIntegerConstantValue(index) + g.GetIntegerConstantValue(index) +
TurboAssemblerBase::RootRegisterOffsetForExternalReference( MacroAssemblerBase::RootRegisterOffsetForExternalReference(
selector->isolate(), m.ResolvedValue()); selector->isolate(), m.ResolvedValue());
input_count = 1; input_count = 1;
// Check that the delta is a 32-bit integer due to the limitations of // Check that the delta is a 32-bit integer due to the limitations of
@ -988,7 +988,7 @@ void InstructionSelector::VisitStore(Node* node) {
CanAddressRelativeToRootsRegister(m.ResolvedValue())) { CanAddressRelativeToRootsRegister(m.ResolvedValue())) {
ptrdiff_t const delta = ptrdiff_t const delta =
g.GetIntegerConstantValue(index) + g.GetIntegerConstantValue(index) +
TurboAssemblerBase::RootRegisterOffsetForExternalReference( MacroAssemblerBase::RootRegisterOffsetForExternalReference(
isolate(), m.ResolvedValue()); isolate(), m.ResolvedValue());
if (is_int32(delta)) { if (is_int32(delta)) {
input_count = 2; input_count = 2;
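
Note (sketch under the hunk's own names): both selector changes fold an external reference into a root-register-relative displacement. The shape of the computation:

  ptrdiff_t const delta =
      g.GetIntegerConstantValue(index) +
      MacroAssemblerBase::RootRegisterOffsetForExternalReference(
          isolate(), m.ResolvedValue());
  if (is_int32(delta)) {
    // The full address fits in a 32-bit displacement off kRootRegister,
    // so no extra base register is needed.
  }
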


@ -266,14 +266,14 @@ class OutOfLineCode : public ZoneObject {
Label* entry() { return &entry_; } Label* entry() { return &entry_; }
Label* exit() { return &exit_; } Label* exit() { return &exit_; }
const Frame* frame() const { return frame_; } const Frame* frame() const { return frame_; }
TurboAssembler* tasm() { return tasm_; } MacroAssembler* masm() { return masm_; }
OutOfLineCode* next() const { return next_; } OutOfLineCode* next() const { return next_; }
private: private:
Label entry_; Label entry_;
Label exit_; Label exit_;
const Frame* const frame_; const Frame* const frame_;
TurboAssembler* const tasm_; MacroAssembler* const masm_;
OutOfLineCode* const next_; OutOfLineCode* const next_;
}; };
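
Note (hypothetical subclass, for illustration only): out-of-line stubs pick up the rename through this accessor. Generate() and RecordComment() are real; OutOfLineTrap is invented here:

  class OutOfLineTrap final : public OutOfLineCode {
   public:
    explicit OutOfLineTrap(CodeGenerator* gen) : OutOfLineCode(gen) {}
    void Generate() final {
      // Emits through the merged assembler; this call site would have
      // read tasm()-> before this CL.
      masm()->RecordComment("-- out-of-line trap --");
    }
  };
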


@ -64,7 +64,7 @@ CodeGenerator::CodeGenerator(
current_block_(RpoNumber::Invalid()), current_block_(RpoNumber::Invalid()),
start_source_position_(start_source_position), start_source_position_(start_source_position),
current_source_position_(SourcePosition::Unknown()), current_source_position_(SourcePosition::Unknown()),
tasm_(isolate, options, CodeObjectRequired::kNo, masm_(isolate, options, CodeObjectRequired::kNo,
#if V8_ENABLE_WEBASSEMBLY #if V8_ENABLE_WEBASSEMBLY
buffer_cache ? buffer_cache->GetAssemblerBuffer( buffer_cache ? buffer_cache->GetAssemblerBuffer(
AssemblerBase::kDefaultBufferSize) AssemblerBase::kDefaultBufferSize)
@ -98,15 +98,15 @@ CodeGenerator::CodeGenerator(
} }
CreateFrameAccessState(frame); CreateFrameAccessState(frame);
CHECK_EQ(info->is_osr(), osr_helper_.has_value()); CHECK_EQ(info->is_osr(), osr_helper_.has_value());
tasm_.set_jump_optimization_info(jump_opt); masm_.set_jump_optimization_info(jump_opt);
CodeKind code_kind = info->code_kind(); CodeKind code_kind = info->code_kind();
if (code_kind == CodeKind::WASM_FUNCTION || if (code_kind == CodeKind::WASM_FUNCTION ||
code_kind == CodeKind::WASM_TO_CAPI_FUNCTION || code_kind == CodeKind::WASM_TO_CAPI_FUNCTION ||
code_kind == CodeKind::WASM_TO_JS_FUNCTION || code_kind == CodeKind::WASM_TO_JS_FUNCTION ||
code_kind == CodeKind::JS_TO_WASM_FUNCTION) { code_kind == CodeKind::JS_TO_WASM_FUNCTION) {
tasm_.set_abort_hard(true); masm_.set_abort_hard(true);
} }
tasm_.set_builtin(builtin); masm_.set_builtin(builtin);
} }
bool CodeGenerator::wasm_runtime_exception_support() const { bool CodeGenerator::wasm_runtime_exception_support() const {
@ -173,19 +173,19 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleDeoptimizerCall(
Label* jump_deoptimization_entry_label = Label* jump_deoptimization_entry_label =
&jump_deoptimization_entry_labels_[static_cast<int>(deopt_kind)]; &jump_deoptimization_entry_labels_[static_cast<int>(deopt_kind)];
if (info()->source_positions()) { if (info()->source_positions()) {
tasm()->RecordDeoptReason(deoptimization_reason, exit->node_id(), masm()->RecordDeoptReason(deoptimization_reason, exit->node_id(),
exit->pos(), deoptimization_id); exit->pos(), deoptimization_id);
} }
if (deopt_kind == DeoptimizeKind::kLazy) { if (deopt_kind == DeoptimizeKind::kLazy) {
++lazy_deopt_count_; ++lazy_deopt_count_;
tasm()->BindExceptionHandler(exit->label()); masm()->BindExceptionHandler(exit->label());
} else { } else {
++eager_deopt_count_; ++eager_deopt_count_;
tasm()->bind(exit->label()); masm()->bind(exit->label());
} }
Builtin target = Deoptimizer::GetDeoptimizationEntry(deopt_kind); Builtin target = Deoptimizer::GetDeoptimizationEntry(deopt_kind);
tasm()->CallForDeoptimization(target, deoptimization_id, exit->label(), masm()->CallForDeoptimization(target, deoptimization_id, exit->label(),
deopt_kind, exit->continue_label(), deopt_kind, exit->continue_label(),
jump_deoptimization_entry_label); jump_deoptimization_entry_label);
@ -195,7 +195,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleDeoptimizerCall(
} }
void CodeGenerator::MaybeEmitOutOfLineConstantPool() { void CodeGenerator::MaybeEmitOutOfLineConstantPool() {
tasm()->MaybeEmitOutOfLineConstantPool(); masm()->MaybeEmitOutOfLineConstantPool();
} }
void CodeGenerator::AssembleCode() { void CodeGenerator::AssembleCode() {
@ -204,27 +204,27 @@ void CodeGenerator::AssembleCode() {
// Open a frame scope to indicate that there is a frame on the stack. The // Open a frame scope to indicate that there is a frame on the stack. The
// MANUAL indicates that the scope shouldn't actually generate code to set up // MANUAL indicates that the scope shouldn't actually generate code to set up
// the frame (that is done in AssemblePrologue). // the frame (that is done in AssemblePrologue).
FrameScope frame_scope(tasm(), StackFrame::MANUAL); FrameScope frame_scope(masm(), StackFrame::MANUAL);
if (info->source_positions()) { if (info->source_positions()) {
AssembleSourcePosition(start_source_position()); AssembleSourcePosition(start_source_position());
} }
offsets_info_.code_start_register_check = tasm()->pc_offset(); offsets_info_.code_start_register_check = masm()->pc_offset();
tasm()->CodeEntry(); masm()->CodeEntry();
// Check that {kJavaScriptCallCodeStartRegister} has been set correctly. // Check that {kJavaScriptCallCodeStartRegister} has been set correctly.
if (v8_flags.debug_code && info->called_with_code_start_register()) { if (v8_flags.debug_code && info->called_with_code_start_register()) {
tasm()->RecordComment("-- Prologue: check code start register --"); masm()->RecordComment("-- Prologue: check code start register --");
AssembleCodeStartRegisterCheck(); AssembleCodeStartRegisterCheck();
} }
offsets_info_.deopt_check = tasm()->pc_offset(); offsets_info_.deopt_check = masm()->pc_offset();
// We want to bailout only from JS functions, which are the only ones // We want to bailout only from JS functions, which are the only ones
// that are optimized. // that are optimized.
if (info->IsOptimizing()) { if (info->IsOptimizing()) {
DCHECK(linkage()->GetIncomingDescriptor()->IsJSFunctionCall()); DCHECK(linkage()->GetIncomingDescriptor()->IsJSFunctionCall());
tasm()->RecordComment("-- Prologue: check for deoptimization --"); masm()->RecordComment("-- Prologue: check for deoptimization --");
BailoutIfDeoptimized(); BailoutIfDeoptimized();
} }
@ -258,22 +258,22 @@ void CodeGenerator::AssembleCode() {
instr_starts_.assign(instructions()->instructions().size(), {}); instr_starts_.assign(instructions()->instructions().size(), {});
} }
// Assemble instructions in assembly order. // Assemble instructions in assembly order.
offsets_info_.blocks_start = tasm()->pc_offset(); offsets_info_.blocks_start = masm()->pc_offset();
for (const InstructionBlock* block : instructions()->ao_blocks()) { for (const InstructionBlock* block : instructions()->ao_blocks()) {
// Align loop headers on vendor recommended boundaries. // Align loop headers on vendor recommended boundaries.
if (!tasm()->jump_optimization_info()) { if (!masm()->jump_optimization_info()) {
if (block->ShouldAlignLoopHeader()) { if (block->ShouldAlignLoopHeader()) {
tasm()->LoopHeaderAlign(); masm()->LoopHeaderAlign();
} else if (block->ShouldAlignCodeTarget()) { } else if (block->ShouldAlignCodeTarget()) {
tasm()->CodeTargetAlign(); masm()->CodeTargetAlign();
} }
} }
if (info->trace_turbo_json()) { if (info->trace_turbo_json()) {
block_starts_[block->rpo_number().ToInt()] = tasm()->pc_offset(); block_starts_[block->rpo_number().ToInt()] = masm()->pc_offset();
} }
// Bind a label for a block. // Bind a label for a block.
current_block_ = block->rpo_number(); current_block_ = block->rpo_number();
unwinding_info_writer_.BeginInstructionBlock(tasm()->pc_offset(), block); unwinding_info_writer_.BeginInstructionBlock(masm()->pc_offset(), block);
if (v8_flags.code_comments) { if (v8_flags.code_comments) {
std::ostringstream buffer; std::ostringstream buffer;
buffer << "-- B" << block->rpo_number().ToInt() << " start"; buffer << "-- B" << block->rpo_number().ToInt() << " start";
@ -289,12 +289,12 @@ void CodeGenerator::AssembleCode() {
buffer << " (in loop " << block->loop_header().ToInt() << ")"; buffer << " (in loop " << block->loop_header().ToInt() << ")";
} }
buffer << " --"; buffer << " --";
tasm()->RecordComment(buffer.str().c_str()); masm()->RecordComment(buffer.str().c_str());
} }
frame_access_state()->MarkHasFrame(block->needs_frame()); frame_access_state()->MarkHasFrame(block->needs_frame());
tasm()->bind(GetLabel(current_block_)); masm()->bind(GetLabel(current_block_));
if (block->must_construct_frame()) { if (block->must_construct_frame()) {
AssembleConstructFrame(); AssembleConstructFrame();
@ -303,7 +303,7 @@ void CodeGenerator::AssembleCode() {
// using the roots. // using the roots.
// TODO(mtrofin): investigate how we can avoid doing this repeatedly. // TODO(mtrofin): investigate how we can avoid doing this repeatedly.
if (linkage()->GetIncomingDescriptor()->InitializeRootRegister()) { if (linkage()->GetIncomingDescriptor()->InitializeRootRegister()) {
tasm()->InitializeRootRegister(); masm()->InitializeRootRegister();
} }
} }
#ifdef V8_TARGET_ARCH_RISCV64 #ifdef V8_TARGET_ARCH_RISCV64
@ -312,10 +312,10 @@ void CodeGenerator::AssembleCode() {
// back between blocks. the Rvv instruction may get an incorrect vtype. so // back between blocks. the Rvv instruction may get an incorrect vtype. so
// here VectorUnit needs to be cleared to ensure that the vtype is correct // here VectorUnit needs to be cleared to ensure that the vtype is correct
// within the block. // within the block.
tasm()->VU.clear(); masm()->VU.clear();
#endif #endif
if (V8_EMBEDDED_CONSTANT_POOL_BOOL && !block->needs_frame()) { if (V8_EMBEDDED_CONSTANT_POOL_BOOL && !block->needs_frame()) {
ConstantPoolUnavailableScope constant_pool_unavailable(tasm()); ConstantPoolUnavailableScope constant_pool_unavailable(masm());
result_ = AssembleBlock(block); result_ = AssembleBlock(block);
} else { } else {
result_ = AssembleBlock(block); result_ = AssembleBlock(block);
@ -325,29 +325,29 @@ void CodeGenerator::AssembleCode() {
} }
// Assemble all out-of-line code. // Assemble all out-of-line code.
offsets_info_.out_of_line_code = tasm()->pc_offset(); offsets_info_.out_of_line_code = masm()->pc_offset();
if (ools_) { if (ools_) {
tasm()->RecordComment("-- Out of line code --"); masm()->RecordComment("-- Out of line code --");
for (OutOfLineCode* ool = ools_; ool; ool = ool->next()) { for (OutOfLineCode* ool = ools_; ool; ool = ool->next()) {
tasm()->bind(ool->entry()); masm()->bind(ool->entry());
ool->Generate(); ool->Generate();
if (ool->exit()->is_bound()) tasm()->jmp(ool->exit()); if (ool->exit()->is_bound()) masm()->jmp(ool->exit());
} }
} }
// This nop operation is needed to ensure that the trampoline is not // This nop operation is needed to ensure that the trampoline is not
// confused with the pc of the call before deoptimization. // confused with the pc of the call before deoptimization.
// The test regress/regress-259 is an example of where we need it. // The test regress/regress-259 is an example of where we need it.
tasm()->nop(); masm()->nop();
// For some targets, we must make sure that constant and veneer pools are // For some targets, we must make sure that constant and veneer pools are
// emitted before emitting the deoptimization exits. // emitted before emitting the deoptimization exits.
PrepareForDeoptimizationExits(&deoptimization_exits_); PrepareForDeoptimizationExits(&deoptimization_exits_);
deopt_exit_start_offset_ = tasm()->pc_offset(); deopt_exit_start_offset_ = masm()->pc_offset();
// Assemble deoptimization exits. // Assemble deoptimization exits.
offsets_info_.deoptimization_exits = tasm()->pc_offset(); offsets_info_.deoptimization_exits = masm()->pc_offset();
int last_updated = 0; int last_updated = 0;
// We sort the deoptimization exits here so that the lazy ones will be visited // We sort the deoptimization exits here so that the lazy ones will be visited
// last. We need this as lazy deopts might need additional instructions. // last. We need this as lazy deopts might need additional instructions.
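
Note (illustration only, not the CL's code): a comparator matching the comment above would keep eager exits ahead of lazy ones while preserving emission order within each class:

  std::stable_sort(deoptimization_exits_.begin(), deoptimization_exits_.end(),
                   [](const DeoptimizationExit* a, const DeoptimizationExit* b) {
                     return a->kind() != DeoptimizeKind::kLazy &&
                            b->kind() == DeoptimizeKind::kLazy;
                   });
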
@ -367,7 +367,7 @@ void CodeGenerator::AssembleCode() {
{ {
#ifdef V8_TARGET_ARCH_PPC64 #ifdef V8_TARGET_ARCH_PPC64
v8::internal::Assembler::BlockTrampolinePoolScope block_trampoline_pool( v8::internal::Assembler::BlockTrampolinePoolScope block_trampoline_pool(
tasm()); masm());
#endif #endif
for (DeoptimizationExit* exit : deoptimization_exits_) { for (DeoptimizationExit* exit : deoptimization_exits_) {
if (exit->emitted()) continue; if (exit->emitted()) continue;
@ -388,19 +388,19 @@ void CodeGenerator::AssembleCode() {
} }
} }
offsets_info_.pools = tasm()->pc_offset(); offsets_info_.pools = masm()->pc_offset();
// TODO(jgruber): Move all inlined metadata generation into a new, // TODO(jgruber): Move all inlined metadata generation into a new,
// architecture-independent version of FinishCode. Currently, this includes // architecture-independent version of FinishCode. Currently, this includes
// the safepoint table, handler table, constant pool, and code comments, in // the safepoint table, handler table, constant pool, and code comments, in
// that order. // that order.
FinishCode(); FinishCode();
offsets_info_.jump_tables = tasm()->pc_offset(); offsets_info_.jump_tables = masm()->pc_offset();
// Emit the jump tables. // Emit the jump tables.
if (jump_tables_) { if (jump_tables_) {
tasm()->Align(kSystemPointerSize); masm()->Align(kSystemPointerSize);
for (JumpTable* table = jump_tables_; table; table = table->next()) { for (JumpTable* table = jump_tables_; table; table = table->next()) {
tasm()->bind(table->label()); masm()->bind(table->label());
AssembleJumpTable(table->targets(), table->target_count()); AssembleJumpTable(table->targets(), table->target_count());
} }
} }
@ -408,24 +408,24 @@ void CodeGenerator::AssembleCode() {
// The LinuxPerfJitLogger logs code up until here, excluding the safepoint // The LinuxPerfJitLogger logs code up until here, excluding the safepoint
// table. Resolve the unwinding info now so it is aware of the same code // table. Resolve the unwinding info now so it is aware of the same code
// size as reported by perf. // size as reported by perf.
unwinding_info_writer_.Finish(tasm()->pc_offset()); unwinding_info_writer_.Finish(masm()->pc_offset());
// Final alignment before starting on the metadata section. // Final alignment before starting on the metadata section.
tasm()->Align(InstructionStream::kMetadataAlignment); masm()->Align(InstructionStream::kMetadataAlignment);
safepoints()->Emit(tasm(), frame()->GetTotalFrameSlotCount()); safepoints()->Emit(masm(), frame()->GetTotalFrameSlotCount());
// Emit the exception handler table. // Emit the exception handler table.
if (!handlers_.empty()) { if (!handlers_.empty()) {
handler_table_offset_ = HandlerTable::EmitReturnTableStart(tasm()); handler_table_offset_ = HandlerTable::EmitReturnTableStart(masm());
for (size_t i = 0; i < handlers_.size(); ++i) { for (size_t i = 0; i < handlers_.size(); ++i) {
HandlerTable::EmitReturnEntry(tasm(), handlers_[i].pc_offset, HandlerTable::EmitReturnEntry(masm(), handlers_[i].pc_offset,
handlers_[i].handler->pos()); handlers_[i].handler->pos());
} }
} }
tasm()->MaybeEmitOutOfLineConstantPool(); masm()->MaybeEmitOutOfLineConstantPool();
tasm()->FinalizeJumpOptimizationInfo(); masm()->FinalizeJumpOptimizationInfo();
result_ = kSuccess; result_ = kSuccess;
} }
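
Note: taken together, the pc-offset checkpoints renamed throughout AssembleCode record the emission order of the generated code:

  // offsets_info_.code_start_register_check   prologue register check
  // offsets_info_.deopt_check                 bailout-if-deoptimized check
  // offsets_info_.blocks_start                instruction blocks
  // offsets_info_.out_of_line_code            out-of-line stubs
  // offsets_info_.deoptimization_exits        deopt exits (lazy ones last)
  // offsets_info_.pools                       safepoint/handler/constant pools
  // offsets_info_.jump_tables                 jump tables
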
@ -435,7 +435,7 @@ void CodeGenerator::AssembleArchBinarySearchSwitchRange(
std::pair<int32_t, Label*>* end) { std::pair<int32_t, Label*>* end) {
if (end - begin < kBinarySearchSwitchMinimalCases) { if (end - begin < kBinarySearchSwitchMinimalCases) {
while (begin != end) { while (begin != end) {
tasm()->JumpIfEqual(input, begin->first, begin->second); masm()->JumpIfEqual(input, begin->first, begin->second);
++begin; ++begin;
} }
AssembleArchJumpRegardlessOfAssemblyOrder(def_block); AssembleArchJumpRegardlessOfAssemblyOrder(def_block);
@ -443,9 +443,9 @@ void CodeGenerator::AssembleArchBinarySearchSwitchRange(
} }
auto middle = begin + (end - begin) / 2; auto middle = begin + (end - begin) / 2;
Label less_label; Label less_label;
tasm()->JumpIfLessThan(input, middle->first, &less_label); masm()->JumpIfLessThan(input, middle->first, &less_label);
AssembleArchBinarySearchSwitchRange(input, def_block, middle, end); AssembleArchBinarySearchSwitchRange(input, def_block, middle, end);
tasm()->bind(&less_label); masm()->bind(&less_label);
AssembleArchBinarySearchSwitchRange(input, def_block, begin, middle); AssembleArchBinarySearchSwitchRange(input, def_block, begin, middle);
} }
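
Note (sketch, assuming kBinarySearchSwitchMinimalCases is 4): for four sorted cases c0 < c1 < c2 < c3, the recursion above emits one split followed by two linear tails:

  //   JumpIfLessThan input, c2, less   ; split at the middle case
  //   JumpIfEqual    input, c2, L2     ; upper half, linear
  //   JumpIfEqual    input, c3, L3
  //   jmp default
  // less:
  //   JumpIfEqual    input, c0, L0     ; lower half, linear
  //   JumpIfEqual    input, c1, L1
  //   jmp default
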
@ -469,7 +469,7 @@ base::OwnedVector<byte> CodeGenerator::GetProtectedInstructionsData() {
MaybeHandle<Code> CodeGenerator::FinalizeCode() { MaybeHandle<Code> CodeGenerator::FinalizeCode() {
if (result_ != kSuccess) { if (result_ != kSuccess) {
tasm()->AbortedCodeGeneration(); masm()->AbortedCodeGeneration();
return {}; return {};
} }
@ -482,11 +482,11 @@ MaybeHandle<Code> CodeGenerator::FinalizeCode() {
// Allocate and install the code. // Allocate and install the code.
CodeDesc desc; CodeDesc desc;
tasm()->GetCode(isolate(), &desc, safepoints(), handler_table_offset_); masm()->GetCode(isolate(), &desc, safepoints(), handler_table_offset_);
#if defined(V8_OS_WIN64) #if defined(V8_OS_WIN64)
if (Builtins::IsBuiltinId(info_->builtin())) { if (Builtins::IsBuiltinId(info_->builtin())) {
isolate_->SetBuiltinUnwindData(info_->builtin(), tasm()->GetUnwindInfo()); isolate_->SetBuiltinUnwindData(info_->builtin(), masm()->GetUnwindInfo());
} }
#endif // V8_OS_WIN64 #endif // V8_OS_WIN64
@ -508,7 +508,7 @@ MaybeHandle<Code> CodeGenerator::FinalizeCode() {
Handle<Code> code; Handle<Code> code;
if (!maybe_code.ToHandle(&code)) { if (!maybe_code.ToHandle(&code)) {
tasm()->AbortedCodeGeneration(); masm()->AbortedCodeGeneration();
return {}; return {};
} }
@ -527,7 +527,7 @@ bool CodeGenerator::IsNextInAssemblyOrder(RpoNumber block) const {
} }
void CodeGenerator::RecordSafepoint(ReferenceMap* references) { void CodeGenerator::RecordSafepoint(ReferenceMap* references) {
auto safepoint = safepoints()->DefineSafepoint(tasm()); auto safepoint = safepoints()->DefineSafepoint(masm());
int frame_header_offset = frame()->GetFixedSlotCount(); int frame_header_offset = frame()->GetFixedSlotCount();
for (const InstructionOperand& operand : references->reference_operands()) { for (const InstructionOperand& operand : references->reference_operands()) {
if (operand.IsStackSlot()) { if (operand.IsStackSlot()) {
@ -558,7 +558,7 @@ bool CodeGenerator::IsMaterializableFromRoot(Handle<HeapObject> object,
CodeGenerator::CodeGenResult CodeGenerator::AssembleBlock( CodeGenerator::CodeGenResult CodeGenerator::AssembleBlock(
const InstructionBlock* block) { const InstructionBlock* block) {
if (block->IsHandler()) { if (block->IsHandler()) {
tasm()->ExceptionHandler(); masm()->ExceptionHandler();
} }
for (int i = block->code_start(); i < block->code_end(); ++i) { for (int i = block->code_start(); i < block->code_end(); ++i) {
CodeGenResult result = AssembleInstruction(i, block); CodeGenResult result = AssembleInstruction(i, block);
@ -718,7 +718,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleInstruction(
int instruction_index, const InstructionBlock* block) { int instruction_index, const InstructionBlock* block) {
Instruction* instr = instructions()->InstructionAt(instruction_index); Instruction* instr = instructions()->InstructionAt(instruction_index);
if (info()->trace_turbo_json()) { if (info()->trace_turbo_json()) {
instr_starts_[instruction_index].gap_pc_offset = tasm()->pc_offset(); instr_starts_[instruction_index].gap_pc_offset = masm()->pc_offset();
} }
int first_unused_stack_slot; int first_unused_stack_slot;
FlagsMode mode = FlagsModeField::decode(instr->opcode()); FlagsMode mode = FlagsModeField::decode(instr->opcode());
@ -738,14 +738,14 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleInstruction(
AssembleDeconstructFrame(); AssembleDeconstructFrame();
} }
if (info()->trace_turbo_json()) { if (info()->trace_turbo_json()) {
instr_starts_[instruction_index].arch_instr_pc_offset = tasm()->pc_offset(); instr_starts_[instruction_index].arch_instr_pc_offset = masm()->pc_offset();
} }
// Assemble architecture-specific code for the instruction. // Assemble architecture-specific code for the instruction.
CodeGenResult result = AssembleArchInstruction(instr); CodeGenResult result = AssembleArchInstruction(instr);
if (result != kSuccess) return result; if (result != kSuccess) return result;
if (info()->trace_turbo_json()) { if (info()->trace_turbo_json()) {
instr_starts_[instruction_index].condition_pc_offset = tasm()->pc_offset(); instr_starts_[instruction_index].condition_pc_offset = masm()->pc_offset();
} }
FlagsCondition condition = FlagsConditionField::decode(instr->opcode()); FlagsCondition condition = FlagsConditionField::decode(instr->opcode());
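
Note: under --trace-turbo-json, the three per-instruction checkpoints touched above land in this order:

  // gap_pc_offset           before the instruction's gap moves
  // arch_instr_pc_offset    before AssembleArchInstruction(instr)
  // condition_pc_offset     after it, before flags/branch handling
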
@ -779,7 +779,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleInstruction(
branch.false_label = exit->continue_label(); branch.false_label = exit->continue_label();
branch.fallthru = true; branch.fallthru = true;
AssembleArchDeoptBranch(instr, &branch); AssembleArchDeoptBranch(instr, &branch);
tasm()->bind(exit->continue_label()); masm()->bind(exit->continue_label());
break; break;
} }
case kFlags_set: { case kFlags_set: {
@ -818,7 +818,7 @@ void CodeGenerator::AssembleSourcePosition(SourcePosition source_position) {
if (source_position == current_source_position_) return; if (source_position == current_source_position_) return;
current_source_position_ = source_position; current_source_position_ = source_position;
if (!source_position.IsKnown()) return; if (!source_position.IsKnown()) return;
source_position_table_builder_.AddPosition(tasm()->pc_offset(), source_position_table_builder_.AddPosition(masm()->pc_offset(),
source_position, false); source_position, false);
if (v8_flags.code_comments) { if (v8_flags.code_comments) {
OptimizedCompilationInfo* info = this->info(); OptimizedCompilationInfo* info = this->info();
@ -833,8 +833,8 @@ void CodeGenerator::AssembleSourcePosition(SourcePosition source_position) {
buffer << "-- "; buffer << "-- ";
// Turbolizer only needs the source position, as it can reconstruct // Turbolizer only needs the source position, as it can reconstruct
// the inlining stack from other information. // the inlining stack from other information.
if (info->trace_turbo_json() || !tasm()->isolate() || if (info->trace_turbo_json() || !masm()->isolate() ||
tasm()->isolate()->concurrent_recompilation_enabled()) { masm()->isolate()->concurrent_recompilation_enabled()) {
buffer << source_position; buffer << source_position;
} else { } else {
AllowGarbageCollection allocation; AllowGarbageCollection allocation;
@ -843,7 +843,7 @@ void CodeGenerator::AssembleSourcePosition(SourcePosition source_position) {
buffer << source_position.InliningStack(info); buffer << source_position.InliningStack(info);
} }
buffer << " --"; buffer << " --";
tasm()->RecordComment(buffer.str().c_str()); masm()->RecordComment(buffer.str().c_str());
} }
} }
@ -981,7 +981,7 @@ void CodeGenerator::RecordCallPosition(Instruction* instr) {
RpoNumber handler_rpo = i.InputRpo(instr->InputCount() - 1); RpoNumber handler_rpo = i.InputRpo(instr->InputCount() - 1);
DCHECK(instructions()->InstructionBlockAt(handler_rpo)->IsHandler()); DCHECK(instructions()->InstructionBlockAt(handler_rpo)->IsHandler());
handlers_.push_back( handlers_.push_back(
{GetLabel(handler_rpo), tasm()->pc_offset_for_safepoint()}); {GetLabel(handler_rpo), masm()->pc_offset_for_safepoint()});
} }
if (needs_frame_state) { if (needs_frame_state) {
@ -991,7 +991,7 @@ void CodeGenerator::RecordCallPosition(Instruction* instr) {
size_t frame_state_offset = 1; size_t frame_state_offset = 1;
FrameStateDescriptor* descriptor = FrameStateDescriptor* descriptor =
GetDeoptimizationEntry(instr, frame_state_offset).descriptor(); GetDeoptimizationEntry(instr, frame_state_offset).descriptor();
int pc_offset = tasm()->pc_offset_for_safepoint(); int pc_offset = masm()->pc_offset_for_safepoint();
BuildTranslation(instr, pc_offset, frame_state_offset, 0, BuildTranslation(instr, pc_offset, frame_state_offset, 0,
descriptor->state_combine()); descriptor->state_combine());
} }
@ -1325,7 +1325,7 @@ void CodeGenerator::AddTranslationForOperand(Instruction* instr,
} }
void CodeGenerator::MarkLazyDeoptSite() { void CodeGenerator::MarkLazyDeoptSite() {
last_lazy_deopt_pc_ = tasm()->pc_offset(); last_lazy_deopt_pc_ = masm()->pc_offset();
} }
DeoptimizationExit* CodeGenerator::AddDeoptimizationExit( DeoptimizationExit* CodeGenerator::AddDeoptimizationExit(
@ -1336,7 +1336,7 @@ DeoptimizationExit* CodeGenerator::AddDeoptimizationExit(
} }
OutOfLineCode::OutOfLineCode(CodeGenerator* gen) OutOfLineCode::OutOfLineCode(CodeGenerator* gen)
: frame_(gen->frame()), tasm_(gen->tasm()), next_(gen->ools_) { : frame_(gen->frame()), masm_(gen->masm()), next_(gen->ools_) {
gen->ools_ = this; gen->ools_ = this;
} }


@ -188,7 +188,7 @@ class V8_EXPORT_PRIVATE CodeGenerator final : public GapResolver::Assembler {
void RecordSafepoint(ReferenceMap* references); void RecordSafepoint(ReferenceMap* references);
Zone* zone() const { return zone_; } Zone* zone() const { return zone_; }
TurboAssembler* tasm() { return &tasm_; } MacroAssembler* masm() { return &masm_; }
SafepointTableBuilder* safepoint_table_builder() { return &safepoints_; } SafepointTableBuilder* safepoint_table_builder() { return &safepoints_; }
size_t handler_table_offset() const { return handler_table_offset_; } size_t handler_table_offset() const { return handler_table_offset_; }
@ -448,7 +448,7 @@ class V8_EXPORT_PRIVATE CodeGenerator final : public GapResolver::Assembler {
RpoNumber current_block_; RpoNumber current_block_;
SourcePosition start_source_position_; SourcePosition start_source_position_;
SourcePosition current_source_position_; SourcePosition current_source_position_;
TurboAssembler tasm_; MacroAssembler masm_;
GapResolver resolver_; GapResolver resolver_;
SafepointTableBuilder safepoints_; SafepointTableBuilder safepoints_;
ZoneVector<HandlerInfo> handlers_; ZoneVector<HandlerInfo> handlers_;


@ -29,7 +29,7 @@ namespace v8 {
namespace internal { namespace internal {
namespace compiler { namespace compiler {
#define __ tasm()-> #define __ masm()->
#define kScratchDoubleReg xmm0 #define kScratchDoubleReg xmm0
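
Note: the "__" shorthand is why most of this file's hunks are one-token renames; every emission site funnels through the macro. For example:

  __ mov(eax, Immediate(0));  // now expands to masm()->mov(eax, Immediate(0))
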
@ -202,11 +202,11 @@ class IA32OperandConverter : public InstructionOperandConverter {
void MoveInstructionOperandToRegister(Register destination, void MoveInstructionOperandToRegister(Register destination,
InstructionOperand* op) { InstructionOperand* op) {
if (op->IsImmediate() || op->IsConstant()) { if (op->IsImmediate() || op->IsConstant()) {
gen_->tasm()->mov(destination, ToImmediate(op)); gen_->masm()->mov(destination, ToImmediate(op));
} else if (op->IsRegister()) { } else if (op->IsRegister()) {
gen_->tasm()->Move(destination, ToRegister(op)); gen_->masm()->Move(destination, ToRegister(op));
} else { } else {
gen_->tasm()->mov(destination, ToOperand(op)); gen_->masm()->mov(destination, ToOperand(op));
} }
} }
}; };
@ -475,7 +475,7 @@ class OutOfLineRecordWrite final : public OutOfLineCode {
XMMRegister src0 = i.InputSimd128Register(0); \ XMMRegister src0 = i.InputSimd128Register(0); \
Operand src1 = i.InputOperand(instr->InputCount() == 2 ? 1 : 0); \ Operand src1 = i.InputOperand(instr->InputCount() == 2 ? 1 : 0); \
if (CpuFeatures::IsSupported(AVX)) { \ if (CpuFeatures::IsSupported(AVX)) { \
CpuFeatureScope avx_scope(tasm(), AVX); \ CpuFeatureScope avx_scope(masm(), AVX); \
__ v##opcode(i.OutputSimd128Register(), src0, src1); \ __ v##opcode(i.OutputSimd128Register(), src0, src1); \
} else { \ } else { \
DCHECK_EQ(i.OutputSimd128Register(), src0); \ DCHECK_EQ(i.OutputSimd128Register(), src0); \
@ -485,11 +485,11 @@ class OutOfLineRecordWrite final : public OutOfLineCode {
#define ASSEMBLE_SIMD_IMM_SHUFFLE(opcode, SSELevel, imm) \ #define ASSEMBLE_SIMD_IMM_SHUFFLE(opcode, SSELevel, imm) \
if (CpuFeatures::IsSupported(AVX)) { \ if (CpuFeatures::IsSupported(AVX)) { \
CpuFeatureScope avx_scope(tasm(), AVX); \ CpuFeatureScope avx_scope(masm(), AVX); \
__ v##opcode(i.OutputSimd128Register(), i.InputSimd128Register(0), \ __ v##opcode(i.OutputSimd128Register(), i.InputSimd128Register(0), \
i.InputOperand(1), imm); \ i.InputOperand(1), imm); \
} else { \ } else { \
CpuFeatureScope sse_scope(tasm(), SSELevel); \ CpuFeatureScope sse_scope(masm(), SSELevel); \
DCHECK_EQ(i.OutputSimd128Register(), i.InputSimd128Register(0)); \ DCHECK_EQ(i.OutputSimd128Register(), i.InputSimd128Register(0)); \
__ opcode(i.OutputSimd128Register(), i.InputOperand(1), imm); \ __ opcode(i.OutputSimd128Register(), i.InputOperand(1), imm); \
} }
@ -532,26 +532,25 @@ class OutOfLineRecordWrite final : public OutOfLineCode {
int8_t laneidx = i.InputInt8(1); \ int8_t laneidx = i.InputInt8(1); \
if (HasAddressingMode(instr)) { \ if (HasAddressingMode(instr)) { \
if (CpuFeatures::IsSupported(AVX)) { \ if (CpuFeatures::IsSupported(AVX)) { \
CpuFeatureScope avx_scope(tasm(), AVX); \ CpuFeatureScope avx_scope(masm(), AVX); \
__ v##OPCODE(dst, src, i.MemoryOperand(2), laneidx); \ __ v##OPCODE(dst, src, i.MemoryOperand(2), laneidx); \
} else { \ } else { \
DCHECK_EQ(dst, src); \ DCHECK_EQ(dst, src); \
CpuFeatureScope sse_scope(tasm(), CPU_FEATURE); \ CpuFeatureScope sse_scope(masm(), CPU_FEATURE); \
__ OPCODE(dst, i.MemoryOperand(2), laneidx); \ __ OPCODE(dst, i.MemoryOperand(2), laneidx); \
} \ } \
} else { \ } else { \
if (CpuFeatures::IsSupported(AVX)) { \ if (CpuFeatures::IsSupported(AVX)) { \
CpuFeatureScope avx_scope(tasm(), AVX); \ CpuFeatureScope avx_scope(masm(), AVX); \
__ v##OPCODE(dst, src, i.InputOperand(2), laneidx); \ __ v##OPCODE(dst, src, i.InputOperand(2), laneidx); \
} else { \ } else { \
DCHECK_EQ(dst, src); \ DCHECK_EQ(dst, src); \
CpuFeatureScope sse_scope(tasm(), CPU_FEATURE); \ CpuFeatureScope sse_scope(masm(), CPU_FEATURE); \
__ OPCODE(dst, i.InputOperand(2), laneidx); \ __ OPCODE(dst, i.InputOperand(2), laneidx); \
} \ } \
} \ } \
} while (false) } while (false)
void CodeGenerator::AssembleDeconstructFrame() { void CodeGenerator::AssembleDeconstructFrame() {
__ mov(esp, ebp); __ mov(esp, ebp);
__ pop(ebp); __ pop(ebp);
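
Note (sketch instantiated from the macros above): the SIMD macros all expand to the same AVX-versus-SSE dispatch, e.g. for a packed signed minimum:

  if (CpuFeatures::IsSupported(AVX)) {
    CpuFeatureScope avx_scope(masm(), AVX);
    __ vpminsd(dst, src1, src2);  // AVX form: three operands, dst may differ
  } else {
    DCHECK_EQ(dst, src1);         // SSE form: two operands, dst doubles as src1
    CpuFeatureScope sse_scope(masm(), SSE4_1);
    __ pminsd(dst, src2);
  }
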
@ -566,7 +565,7 @@ void CodeGenerator::AssemblePrepareTailCall() {
namespace { namespace {
void AdjustStackPointerForTailCall(TurboAssembler* tasm, void AdjustStackPointerForTailCall(MacroAssembler* masm,
FrameAccessState* state, FrameAccessState* state,
int new_slot_above_sp, int new_slot_above_sp,
bool allow_shrinkage = true) { bool allow_shrinkage = true) {
@ -574,10 +573,10 @@ void AdjustStackPointerForTailCall(TurboAssembler* tasm,
StandardFrameConstants::kFixedSlotCountAboveFp; StandardFrameConstants::kFixedSlotCountAboveFp;
int stack_slot_delta = new_slot_above_sp - current_sp_offset; int stack_slot_delta = new_slot_above_sp - current_sp_offset;
if (stack_slot_delta > 0) { if (stack_slot_delta > 0) {
tasm->AllocateStackSpace(stack_slot_delta * kSystemPointerSize); masm->AllocateStackSpace(stack_slot_delta * kSystemPointerSize);
state->IncreaseSPDelta(stack_slot_delta); state->IncreaseSPDelta(stack_slot_delta);
} else if (allow_shrinkage && stack_slot_delta < 0) { } else if (allow_shrinkage && stack_slot_delta < 0) {
tasm->add(esp, Immediate(-stack_slot_delta * kSystemPointerSize)); masm->add(esp, Immediate(-stack_slot_delta * kSystemPointerSize));
state->IncreaseSPDelta(stack_slot_delta); state->IncreaseSPDelta(stack_slot_delta);
} }
} }
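
Note (hypothetical numbers): with current_sp_offset at 4 and new_slot_above_sp at 6, the helper grows the stack by two slots:

  AdjustStackPointerForTailCall(masm(), frame_access_state(),
                                /*new_slot_above_sp=*/6);
  // stack_slot_delta == 2, so the ia32 variant above emits
  // masm->AllocateStackSpace(2 * kSystemPointerSize).
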
@ -617,7 +616,7 @@ void CodeGenerator::AssembleTailCallBeforeGap(Instruction* instr,
LocationOperand destination_location( LocationOperand destination_location(
LocationOperand::cast(move->destination())); LocationOperand::cast(move->destination()));
InstructionOperand source(move->source()); InstructionOperand source(move->source());
AdjustStackPointerForTailCall(tasm(), frame_access_state(), AdjustStackPointerForTailCall(masm(), frame_access_state(),
destination_location.index()); destination_location.index());
if (source.IsStackSlot()) { if (source.IsStackSlot()) {
LocationOperand source_location(LocationOperand::cast(source)); LocationOperand source_location(LocationOperand::cast(source));
@ -635,13 +634,13 @@ void CodeGenerator::AssembleTailCallBeforeGap(Instruction* instr,
move->Eliminate(); move->Eliminate();
} }
} }
AdjustStackPointerForTailCall(tasm(), frame_access_state(), AdjustStackPointerForTailCall(masm(), frame_access_state(),
first_unused_slot_offset, false); first_unused_slot_offset, false);
} }
void CodeGenerator::AssembleTailCallAfterGap(Instruction* instr, void CodeGenerator::AssembleTailCallAfterGap(Instruction* instr,
int first_unused_slot_offset) { int first_unused_slot_offset) {
AdjustStackPointerForTailCall(tasm(), frame_access_state(), AdjustStackPointerForTailCall(masm(), frame_access_state(),
first_unused_slot_offset); first_unused_slot_offset);
} }
@ -884,7 +883,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
{ {
// We don't actually want to generate a pile of code for this, so just // We don't actually want to generate a pile of code for this, so just
// claim there is a stack frame, without generating one. // claim there is a stack frame, without generating one.
FrameScope scope(tasm(), StackFrame::NO_FRAME_TYPE); FrameScope scope(masm(), StackFrame::NO_FRAME_TYPE);
__ Call(isolate()->builtins()->code_handle(Builtin::kAbortCSADcheck), __ Call(isolate()->builtins()->code_handle(Builtin::kAbortCSADcheck),
RelocInfo::CODE_TARGET); RelocInfo::CODE_TARGET);
} }
@ -1262,7 +1261,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
__ Sqrtss(i.OutputDoubleRegister(), i.InputOperand(0)); __ Sqrtss(i.OutputDoubleRegister(), i.InputOperand(0));
break; break;
case kIA32Float32Round: { case kIA32Float32Round: {
CpuFeatureScope sse_scope(tasm(), SSE4_1); CpuFeatureScope sse_scope(masm(), SSE4_1);
RoundingMode const mode = RoundingMode const mode =
static_cast<RoundingMode>(MiscField::decode(instr->opcode())); static_cast<RoundingMode>(MiscField::decode(instr->opcode()));
__ Roundss(i.OutputDoubleRegister(), i.InputDoubleRegister(0), mode); __ Roundss(i.OutputDoubleRegister(), i.InputDoubleRegister(0), mode);
@ -2112,12 +2111,12 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
} }
case kIA32Insertps: { case kIA32Insertps: {
if (CpuFeatures::IsSupported(AVX)) { if (CpuFeatures::IsSupported(AVX)) {
CpuFeatureScope avx_scope(tasm(), AVX); CpuFeatureScope avx_scope(masm(), AVX);
__ vinsertps(i.OutputSimd128Register(), i.InputSimd128Register(0), __ vinsertps(i.OutputSimd128Register(), i.InputSimd128Register(0),
i.InputOperand(2), i.InputInt8(1) << 4); i.InputOperand(2), i.InputInt8(1) << 4);
} else { } else {
DCHECK_EQ(i.OutputSimd128Register(), i.InputSimd128Register(0)); DCHECK_EQ(i.OutputSimd128Register(), i.InputSimd128Register(0));
CpuFeatureScope sse_scope(tasm(), SSE4_1); CpuFeatureScope sse_scope(masm(), SSE4_1);
__ insertps(i.OutputSimd128Register(), i.InputOperand(2), __ insertps(i.OutputSimd128Register(), i.InputOperand(2),
i.InputInt8(1) << 4); i.InputInt8(1) << 4);
} }
@ -2315,12 +2314,12 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
XMMRegister src1 = i.InputSimd128Register(0); XMMRegister src1 = i.InputSimd128Register(0);
XMMRegister src2 = i.InputSimd128Register(1); XMMRegister src2 = i.InputSimd128Register(1);
if (CpuFeatures::IsSupported(AVX)) { if (CpuFeatures::IsSupported(AVX)) {
CpuFeatureScope avx_scope(tasm(), AVX); CpuFeatureScope avx_scope(masm(), AVX);
__ vpminsd(kScratchDoubleReg, src1, src2); __ vpminsd(kScratchDoubleReg, src1, src2);
__ vpcmpeqd(dst, kScratchDoubleReg, src2); __ vpcmpeqd(dst, kScratchDoubleReg, src2);
} else { } else {
DCHECK_EQ(dst, src1); DCHECK_EQ(dst, src1);
CpuFeatureScope sse_scope(tasm(), SSE4_1); CpuFeatureScope sse_scope(masm(), SSE4_1);
__ pminsd(dst, src2); __ pminsd(dst, src2);
__ pcmpeqd(dst, src2); __ pcmpeqd(dst, src2);
} }
@ -2328,7 +2327,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
} }
case kSSEI32x4UConvertF32x4: { case kSSEI32x4UConvertF32x4: {
DCHECK_EQ(i.OutputSimd128Register(), i.InputSimd128Register(0)); DCHECK_EQ(i.OutputSimd128Register(), i.InputSimd128Register(0));
CpuFeatureScope sse_scope(tasm(), SSE4_1); CpuFeatureScope sse_scope(masm(), SSE4_1);
XMMRegister dst = i.OutputSimd128Register(); XMMRegister dst = i.OutputSimd128Register();
XMMRegister tmp = i.TempSimd128Register(0); XMMRegister tmp = i.TempSimd128Register(0);
// NAN->0, negative->0 // NAN->0, negative->0
@ -2356,7 +2355,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
} }
case kAVXI32x4UConvertF32x4: { case kAVXI32x4UConvertF32x4: {
DCHECK_EQ(i.OutputSimd128Register(), i.InputSimd128Register(0)); DCHECK_EQ(i.OutputSimd128Register(), i.InputSimd128Register(0));
CpuFeatureScope avx_scope(tasm(), AVX); CpuFeatureScope avx_scope(masm(), AVX);
XMMRegister dst = i.OutputSimd128Register(); XMMRegister dst = i.OutputSimd128Register();
XMMRegister tmp = i.TempSimd128Register(0); XMMRegister tmp = i.TempSimd128Register(0);
// NAN->0, negative->0 // NAN->0, negative->0
@ -2406,7 +2405,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
} }
case kSSEI32x4GtU: { case kSSEI32x4GtU: {
DCHECK_EQ(i.OutputSimd128Register(), i.InputSimd128Register(0)); DCHECK_EQ(i.OutputSimd128Register(), i.InputSimd128Register(0));
CpuFeatureScope sse_scope(tasm(), SSE4_1); CpuFeatureScope sse_scope(masm(), SSE4_1);
XMMRegister dst = i.OutputSimd128Register(); XMMRegister dst = i.OutputSimd128Register();
Operand src = i.InputOperand(1); Operand src = i.InputOperand(1);
__ pmaxud(dst, src); __ pmaxud(dst, src);
@ -2416,7 +2415,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
break; break;
} }
case kAVXI32x4GtU: { case kAVXI32x4GtU: {
CpuFeatureScope avx_scope(tasm(), AVX); CpuFeatureScope avx_scope(masm(), AVX);
XMMRegister dst = i.OutputSimd128Register(); XMMRegister dst = i.OutputSimd128Register();
XMMRegister src1 = i.InputSimd128Register(0); XMMRegister src1 = i.InputSimd128Register(0);
Operand src2 = i.InputOperand(1); Operand src2 = i.InputOperand(1);
@ -2428,7 +2427,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
} }
case kSSEI32x4GeU: { case kSSEI32x4GeU: {
DCHECK_EQ(i.OutputSimd128Register(), i.InputSimd128Register(0)); DCHECK_EQ(i.OutputSimd128Register(), i.InputSimd128Register(0));
CpuFeatureScope sse_scope(tasm(), SSE4_1); CpuFeatureScope sse_scope(masm(), SSE4_1);
XMMRegister dst = i.OutputSimd128Register(); XMMRegister dst = i.OutputSimd128Register();
Operand src = i.InputOperand(1); Operand src = i.InputOperand(1);
__ pminud(dst, src); __ pminud(dst, src);
@ -2436,7 +2435,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
break; break;
} }
case kAVXI32x4GeU: { case kAVXI32x4GeU: {
CpuFeatureScope avx_scope(tasm(), AVX); CpuFeatureScope avx_scope(masm(), AVX);
XMMRegister src1 = i.InputSimd128Register(0); XMMRegister src1 = i.InputSimd128Register(0);
Operand src2 = i.InputOperand(1); Operand src2 = i.InputOperand(1);
__ vpminud(kScratchDoubleReg, src1, src2); __ vpminud(kScratchDoubleReg, src1, src2);
@ -2552,7 +2551,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
break; break;
} }
case kAVXI16x8Ne: { case kAVXI16x8Ne: {
CpuFeatureScope avx_scope(tasm(), AVX); CpuFeatureScope avx_scope(masm(), AVX);
__ vpcmpeqw(i.OutputSimd128Register(), i.InputSimd128Register(0), __ vpcmpeqw(i.OutputSimd128Register(), i.InputSimd128Register(0),
i.InputOperand(1)); i.InputOperand(1));
__ vpcmpeqw(kScratchDoubleReg, kScratchDoubleReg, kScratchDoubleReg); __ vpcmpeqw(kScratchDoubleReg, kScratchDoubleReg, kScratchDoubleReg);
@ -2574,7 +2573,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
break; break;
} }
case kAVXI16x8GeS: { case kAVXI16x8GeS: {
CpuFeatureScope avx_scope(tasm(), AVX); CpuFeatureScope avx_scope(masm(), AVX);
XMMRegister src1 = i.InputSimd128Register(0); XMMRegister src1 = i.InputSimd128Register(0);
Operand src2 = i.InputOperand(1); Operand src2 = i.InputOperand(1);
__ vpminsw(kScratchDoubleReg, src1, src2); __ vpminsw(kScratchDoubleReg, src1, src2);
@ -2621,7 +2620,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
} }
case kSSEI16x8GtU: { case kSSEI16x8GtU: {
DCHECK_EQ(i.OutputSimd128Register(), i.InputSimd128Register(0)); DCHECK_EQ(i.OutputSimd128Register(), i.InputSimd128Register(0));
CpuFeatureScope sse_scope(tasm(), SSE4_1); CpuFeatureScope sse_scope(masm(), SSE4_1);
XMMRegister dst = i.OutputSimd128Register(); XMMRegister dst = i.OutputSimd128Register();
Operand src = i.InputOperand(1); Operand src = i.InputOperand(1);
__ pmaxuw(dst, src); __ pmaxuw(dst, src);
@ -2631,7 +2630,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
break; break;
} }
case kAVXI16x8GtU: { case kAVXI16x8GtU: {
CpuFeatureScope avx_scope(tasm(), AVX); CpuFeatureScope avx_scope(masm(), AVX);
XMMRegister dst = i.OutputSimd128Register(); XMMRegister dst = i.OutputSimd128Register();
XMMRegister src1 = i.InputSimd128Register(0); XMMRegister src1 = i.InputSimd128Register(0);
Operand src2 = i.InputOperand(1); Operand src2 = i.InputOperand(1);
@ -2643,7 +2642,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
} }
case kSSEI16x8GeU: { case kSSEI16x8GeU: {
DCHECK_EQ(i.OutputSimd128Register(), i.InputSimd128Register(0)); DCHECK_EQ(i.OutputSimd128Register(), i.InputSimd128Register(0));
CpuFeatureScope sse_scope(tasm(), SSE4_1); CpuFeatureScope sse_scope(masm(), SSE4_1);
XMMRegister dst = i.OutputSimd128Register(); XMMRegister dst = i.OutputSimd128Register();
Operand src = i.InputOperand(1); Operand src = i.InputOperand(1);
__ pminuw(dst, src); __ pminuw(dst, src);
@ -2651,7 +2650,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
break; break;
} }
case kAVXI16x8GeU: { case kAVXI16x8GeU: {
CpuFeatureScope avx_scope(tasm(), AVX); CpuFeatureScope avx_scope(masm(), AVX);
XMMRegister src1 = i.InputSimd128Register(0); XMMRegister src1 = i.InputSimd128Register(0);
Operand src2 = i.InputOperand(1); Operand src2 = i.InputOperand(1);
__ vpminuw(kScratchDoubleReg, src1, src2); __ vpminuw(kScratchDoubleReg, src1, src2);
@ -2844,7 +2843,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
break; break;
} }
case kAVXI8x16Ne: { case kAVXI8x16Ne: {
CpuFeatureScope avx_scope(tasm(), AVX); CpuFeatureScope avx_scope(masm(), AVX);
__ vpcmpeqb(i.OutputSimd128Register(), i.InputSimd128Register(0), __ vpcmpeqb(i.OutputSimd128Register(), i.InputSimd128Register(0),
i.InputOperand(1)); i.InputOperand(1));
__ vpcmpeqb(kScratchDoubleReg, kScratchDoubleReg, kScratchDoubleReg); __ vpcmpeqb(kScratchDoubleReg, kScratchDoubleReg, kScratchDoubleReg);
@ -2859,7 +2858,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
} }
case kSSEI8x16GeS: { case kSSEI8x16GeS: {
DCHECK_EQ(i.OutputSimd128Register(), i.InputSimd128Register(0)); DCHECK_EQ(i.OutputSimd128Register(), i.InputSimd128Register(0));
CpuFeatureScope sse_scope(tasm(), SSE4_1); CpuFeatureScope sse_scope(masm(), SSE4_1);
XMMRegister dst = i.OutputSimd128Register(); XMMRegister dst = i.OutputSimd128Register();
Operand src = i.InputOperand(1); Operand src = i.InputOperand(1);
__ pminsb(dst, src); __ pminsb(dst, src);
@ -2867,7 +2866,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
break; break;
} }
case kAVXI8x16GeS: { case kAVXI8x16GeS: {
CpuFeatureScope avx_scope(tasm(), AVX); CpuFeatureScope avx_scope(masm(), AVX);
XMMRegister src1 = i.InputSimd128Register(0); XMMRegister src1 = i.InputSimd128Register(0);
Operand src2 = i.InputOperand(1); Operand src2 = i.InputOperand(1);
__ vpminsb(kScratchDoubleReg, src1, src2); __ vpminsb(kScratchDoubleReg, src1, src2);
@ -2925,7 +2924,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
break; break;
} }
case kAVXI8x16GtU: { case kAVXI8x16GtU: {
CpuFeatureScope avx_scope(tasm(), AVX); CpuFeatureScope avx_scope(masm(), AVX);
XMMRegister dst = i.OutputSimd128Register(); XMMRegister dst = i.OutputSimd128Register();
XMMRegister src1 = i.InputSimd128Register(0); XMMRegister src1 = i.InputSimd128Register(0);
Operand src2 = i.InputOperand(1); Operand src2 = i.InputOperand(1);
@ -2944,7 +2943,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
break; break;
} }
case kAVXI8x16GeU: { case kAVXI8x16GeU: {
CpuFeatureScope avx_scope(tasm(), AVX); CpuFeatureScope avx_scope(masm(), AVX);
XMMRegister src1 = i.InputSimd128Register(0); XMMRegister src1 = i.InputSimd128Register(0);
Operand src2 = i.InputOperand(1); Operand src2 = i.InputOperand(1);
__ vpminub(kScratchDoubleReg, src1, src2); __ vpminub(kScratchDoubleReg, src1, src2);
@ -3183,7 +3182,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
XMMRegister src = i.InputSimd128Register(0); XMMRegister src = i.InputSimd128Register(0);
uint8_t lane = i.InputUint8(1) & 0xf; uint8_t lane = i.InputUint8(1) & 0xf;
if (CpuFeatures::IsSupported(AVX)) { if (CpuFeatures::IsSupported(AVX)) {
CpuFeatureScope avx_scope(tasm(), AVX); CpuFeatureScope avx_scope(masm(), AVX);
if (lane < 8) { if (lane < 8) {
__ vpunpcklbw(dst, src, src); __ vpunpcklbw(dst, src, src);
} else { } else {
@ -3234,7 +3233,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
ASSEMBLE_SIMD_PUNPCK_SHUFFLE(punpcklbw); ASSEMBLE_SIMD_PUNPCK_SHUFFLE(punpcklbw);
break; break;
case kSSES16x8UnzipHigh: { case kSSES16x8UnzipHigh: {
CpuFeatureScope sse_scope(tasm(), SSE4_1); CpuFeatureScope sse_scope(masm(), SSE4_1);
XMMRegister dst = i.OutputSimd128Register(); XMMRegister dst = i.OutputSimd128Register();
XMMRegister src2 = dst; XMMRegister src2 = dst;
DCHECK_EQ(dst, i.InputSimd128Register(0)); DCHECK_EQ(dst, i.InputSimd128Register(0));
@ -3248,7 +3247,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
break; break;
} }
case kAVXS16x8UnzipHigh: { case kAVXS16x8UnzipHigh: {
CpuFeatureScope avx_scope(tasm(), AVX); CpuFeatureScope avx_scope(masm(), AVX);
XMMRegister dst = i.OutputSimd128Register(); XMMRegister dst = i.OutputSimd128Register();
XMMRegister src2 = dst; XMMRegister src2 = dst;
if (instr->InputCount() == 2) { if (instr->InputCount() == 2) {
@ -3260,7 +3259,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
break; break;
} }
case kSSES16x8UnzipLow: { case kSSES16x8UnzipLow: {
CpuFeatureScope sse_scope(tasm(), SSE4_1); CpuFeatureScope sse_scope(masm(), SSE4_1);
XMMRegister dst = i.OutputSimd128Register(); XMMRegister dst = i.OutputSimd128Register();
XMMRegister src2 = dst; XMMRegister src2 = dst;
DCHECK_EQ(dst, i.InputSimd128Register(0)); DCHECK_EQ(dst, i.InputSimd128Register(0));
@ -3274,7 +3273,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
break; break;
} }
case kAVXS16x8UnzipLow: { case kAVXS16x8UnzipLow: {
CpuFeatureScope avx_scope(tasm(), AVX); CpuFeatureScope avx_scope(masm(), AVX);
XMMRegister dst = i.OutputSimd128Register(); XMMRegister dst = i.OutputSimd128Register();
XMMRegister src2 = dst; XMMRegister src2 = dst;
__ vpxor(kScratchDoubleReg, kScratchDoubleReg, kScratchDoubleReg); __ vpxor(kScratchDoubleReg, kScratchDoubleReg, kScratchDoubleReg);
@ -3301,7 +3300,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
break; break;
} }
case kAVXS8x16UnzipHigh: { case kAVXS8x16UnzipHigh: {
CpuFeatureScope avx_scope(tasm(), AVX); CpuFeatureScope avx_scope(masm(), AVX);
XMMRegister dst = i.OutputSimd128Register(); XMMRegister dst = i.OutputSimd128Register();
XMMRegister src2 = dst; XMMRegister src2 = dst;
if (instr->InputCount() == 2) { if (instr->InputCount() == 2) {
@ -3328,7 +3327,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
break; break;
} }
case kAVXS8x16UnzipLow: { case kAVXS8x16UnzipLow: {
CpuFeatureScope avx_scope(tasm(), AVX); CpuFeatureScope avx_scope(masm(), AVX);
XMMRegister dst = i.OutputSimd128Register(); XMMRegister dst = i.OutputSimd128Register();
XMMRegister src2 = dst; XMMRegister src2 = dst;
if (instr->InputCount() == 2) { if (instr->InputCount() == 2) {
@ -3357,7 +3356,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
break; break;
} }
case kAVXS8x16TransposeLow: { case kAVXS8x16TransposeLow: {
CpuFeatureScope avx_scope(tasm(), AVX); CpuFeatureScope avx_scope(masm(), AVX);
XMMRegister dst = i.OutputSimd128Register(); XMMRegister dst = i.OutputSimd128Register();
if (instr->InputCount() == 1) { if (instr->InputCount() == 1) {
__ vpsllw(kScratchDoubleReg, i.InputSimd128Register(0), 8); __ vpsllw(kScratchDoubleReg, i.InputSimd128Register(0), 8);
@ -3387,7 +3386,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
break; break;
} }
case kAVXS8x16TransposeHigh: { case kAVXS8x16TransposeHigh: {
CpuFeatureScope avx_scope(tasm(), AVX); CpuFeatureScope avx_scope(masm(), AVX);
XMMRegister dst = i.OutputSimd128Register(); XMMRegister dst = i.OutputSimd128Register();
if (instr->InputCount() == 1) { if (instr->InputCount() == 1) {
__ vpsrlw(dst, i.InputSimd128Register(0), 8); __ vpsrlw(dst, i.InputSimd128Register(0), 8);
@ -3423,7 +3422,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
case kAVXS8x4Reverse: case kAVXS8x4Reverse:
case kAVXS8x8Reverse: { case kAVXS8x8Reverse: {
DCHECK_EQ(1, instr->InputCount()); DCHECK_EQ(1, instr->InputCount());
CpuFeatureScope avx_scope(tasm(), AVX); CpuFeatureScope avx_scope(masm(), AVX);
XMMRegister dst = i.OutputSimd128Register(); XMMRegister dst = i.OutputSimd128Register();
XMMRegister src = dst; XMMRegister src = dst;
if (arch_opcode != kAVXS8x2Reverse) { if (arch_opcode != kAVXS8x2Reverse) {
@ -4205,8 +4204,8 @@ void CodeGenerator::AssembleReturn(InstructionOperand* additional_pop_count) {
__ j(greater, &mismatch_return, Label::kNear); __ j(greater, &mismatch_return, Label::kNear);
__ Ret(parameter_slots * kSystemPointerSize, scratch_reg); __ Ret(parameter_slots * kSystemPointerSize, scratch_reg);
__ bind(&mismatch_return); __ bind(&mismatch_return);
__ DropArguments(argc_reg, scratch_reg, TurboAssembler::kCountIsInteger, __ DropArguments(argc_reg, scratch_reg, MacroAssembler::kCountIsInteger,
TurboAssembler::kCountIncludesReceiver); MacroAssembler::kCountIncludesReceiver);
// We use a return instead of a jump for better return address prediction. // We use a return instead of a jump for better return address prediction.
__ Ret(); __ Ret();
} else if (additional_pop_count->IsImmediate()) { } else if (additional_pop_count->IsImmediate()) {


@ -18,7 +18,7 @@
#include "src/codegen/ia32/assembler-ia32.h" #include "src/codegen/ia32/assembler-ia32.h"
#include "src/codegen/ia32/register-ia32.h" #include "src/codegen/ia32/register-ia32.h"
#include "src/codegen/machine-type.h" #include "src/codegen/machine-type.h"
#include "src/codegen/turbo-assembler.h" #include "src/codegen/macro-assembler-base.h"
#include "src/common/globals.h" #include "src/common/globals.h"
#include "src/compiler/backend/instruction-codes.h" #include "src/compiler/backend/instruction-codes.h"
#include "src/compiler/backend/instruction-selector-impl.h" #include "src/compiler/backend/instruction-selector-impl.h"
@ -208,7 +208,7 @@ class IA32OperandGenerator final : public OperandGenerator {
m.object().ResolvedValue())) { m.object().ResolvedValue())) {
ptrdiff_t const delta = ptrdiff_t const delta =
m.index().ResolvedValue() + m.index().ResolvedValue() +
TurboAssemblerBase::RootRegisterOffsetForExternalReference( MacroAssemblerBase::RootRegisterOffsetForExternalReference(
selector()->isolate(), m.object().ResolvedValue()); selector()->isolate(), m.object().ResolvedValue());
if (is_int32(delta)) { if (is_int32(delta)) {
inputs[(*input_count)++] = TempImmediate(static_cast<int32_t>(delta)); inputs[(*input_count)++] = TempImmediate(static_cast<int32_t>(delta));


@ -451,7 +451,7 @@ bool InstructionSelector::CanAddressRelativeToRootsRegister(
// 3. IsAddressableThroughRootRegister: Is the target address guaranteed to // 3. IsAddressableThroughRootRegister: Is the target address guaranteed to
// have a fixed root-relative offset? If so, we can ignore 2. // have a fixed root-relative offset? If so, we can ignore 2.
const bool this_root_relative_offset_is_constant = const bool this_root_relative_offset_is_constant =
TurboAssemblerBase::IsAddressableThroughRootRegister(isolate(), MacroAssemblerBase::IsAddressableThroughRootRegister(isolate(),
reference); reference);
return this_root_relative_offset_is_constant; return this_root_relative_offset_is_constant;
} }
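
Note (sketch combining this predicate with the offset helper renamed above): a selector typically uses it as a guard before folding the reference:

  if (CanAddressRelativeToRootsRegister(reference)) {
    ptrdiff_t const delta =
        MacroAssemblerBase::RootRegisterOffsetForExternalReference(
            isolate(), reference);
    // 64-bit targets additionally require is_int32(delta) before folding.
  }
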


@ -23,7 +23,7 @@ namespace v8 {
namespace internal { namespace internal {
namespace compiler { namespace compiler {
#define __ tasm()-> #define __ masm()->
// TODO(LOONG_dev): consider renaming these macros. // TODO(LOONG_dev): consider renaming these macros.
#define TRACE_MSG(msg) \ #define TRACE_MSG(msg) \
@ -450,8 +450,8 @@ FPUCondition FlagsConditionToConditionCmpFPU(bool* predicate,
#define ASSEMBLE_IEEE754_BINOP(name) \ #define ASSEMBLE_IEEE754_BINOP(name) \
do { \ do { \
FrameScope scope(tasm(), StackFrame::MANUAL); \ FrameScope scope(masm(), StackFrame::MANUAL); \
UseScratchRegisterScope temps(tasm()); \ UseScratchRegisterScope temps(masm()); \
Register scratch = temps.Acquire(); \ Register scratch = temps.Acquire(); \
__ PrepareCallCFunction(0, 2, scratch); \ __ PrepareCallCFunction(0, 2, scratch); \
__ CallCFunction(ExternalReference::ieee754_##name##_function(), 0, 2); \ __ CallCFunction(ExternalReference::ieee754_##name##_function(), 0, 2); \
@ -459,8 +459,8 @@ FPUCondition FlagsConditionToConditionCmpFPU(bool* predicate,
#define ASSEMBLE_IEEE754_UNOP(name) \ #define ASSEMBLE_IEEE754_UNOP(name) \
do { \ do { \
FrameScope scope(tasm(), StackFrame::MANUAL); \ FrameScope scope(masm(), StackFrame::MANUAL); \
UseScratchRegisterScope temps(tasm()); \ UseScratchRegisterScope temps(masm()); \
Register scratch = temps.Acquire(); \ Register scratch = temps.Acquire(); \
__ PrepareCallCFunction(0, 1, scratch); \ __ PrepareCallCFunction(0, 1, scratch); \
__ CallCFunction(ExternalReference::ieee754_##name##_function(), 0, 1); \ __ CallCFunction(ExternalReference::ieee754_##name##_function(), 0, 1); \
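
Note (abbreviated sketch): both IEEE754 macros expand to the same C-call shape; instantiated for name = pow (ExternalReference::ieee754_pow_function exists), the binop variant reads:

  FrameScope scope(masm(), StackFrame::MANUAL);
  UseScratchRegisterScope temps(masm());
  Register scratch = temps.Acquire();
  __ PrepareCallCFunction(0, 2, scratch);  // 0 GP and 2 FP parameters
  __ CallCFunction(ExternalReference::ieee754_pow_function(), 0, 2);
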
@ -487,7 +487,7 @@ void CodeGenerator::AssemblePrepareTailCall() {
namespace { namespace {
void AdjustStackPointerForTailCall(TurboAssembler* tasm, void AdjustStackPointerForTailCall(MacroAssembler* masm,
FrameAccessState* state, FrameAccessState* state,
int new_slot_above_sp, int new_slot_above_sp,
bool allow_shrinkage = true) { bool allow_shrinkage = true) {
@ -495,10 +495,10 @@ void AdjustStackPointerForTailCall(TurboAssembler* tasm,
StandardFrameConstants::kFixedSlotCountAboveFp; StandardFrameConstants::kFixedSlotCountAboveFp;
int stack_slot_delta = new_slot_above_sp - current_sp_offset; int stack_slot_delta = new_slot_above_sp - current_sp_offset;
if (stack_slot_delta > 0) { if (stack_slot_delta > 0) {
tasm->Sub_d(sp, sp, stack_slot_delta * kSystemPointerSize); masm->Sub_d(sp, sp, stack_slot_delta * kSystemPointerSize);
state->IncreaseSPDelta(stack_slot_delta); state->IncreaseSPDelta(stack_slot_delta);
} else if (allow_shrinkage && stack_slot_delta < 0) { } else if (allow_shrinkage && stack_slot_delta < 0) {
tasm->Add_d(sp, sp, -stack_slot_delta * kSystemPointerSize); masm->Add_d(sp, sp, -stack_slot_delta * kSystemPointerSize);
state->IncreaseSPDelta(stack_slot_delta); state->IncreaseSPDelta(stack_slot_delta);
} }
} }
@ -507,19 +507,19 @@ void AdjustStackPointerForTailCall(TurboAssembler* tasm,
void CodeGenerator::AssembleTailCallBeforeGap(Instruction* instr, void CodeGenerator::AssembleTailCallBeforeGap(Instruction* instr,
int first_unused_slot_offset) { int first_unused_slot_offset) {
AdjustStackPointerForTailCall(tasm(), frame_access_state(), AdjustStackPointerForTailCall(masm(), frame_access_state(),
first_unused_slot_offset, false); first_unused_slot_offset, false);
} }
void CodeGenerator::AssembleTailCallAfterGap(Instruction* instr, void CodeGenerator::AssembleTailCallAfterGap(Instruction* instr,
int first_unused_slot_offset) { int first_unused_slot_offset) {
AdjustStackPointerForTailCall(tasm(), frame_access_state(), AdjustStackPointerForTailCall(masm(), frame_access_state(),
first_unused_slot_offset); first_unused_slot_offset);
} }
// Check that {kJavaScriptCallCodeStartRegister} is correct. // Check that {kJavaScriptCallCodeStartRegister} is correct.
void CodeGenerator::AssembleCodeStartRegisterCheck() { void CodeGenerator::AssembleCodeStartRegisterCheck() {
UseScratchRegisterScope temps(tasm()); UseScratchRegisterScope temps(masm());
Register scratch = temps.Acquire(); Register scratch = temps.Acquire();
__ ComputeCodeStartAddress(scratch); __ ComputeCodeStartAddress(scratch);
__ Assert(eq, AbortReason::kWrongFunctionCodeStart, __ Assert(eq, AbortReason::kWrongFunctionCodeStart,
@ -534,7 +534,7 @@ void CodeGenerator::AssembleCodeStartRegisterCheck() {
// 2. test kMarkedForDeoptimizationBit in those flags; and // 2. test kMarkedForDeoptimizationBit in those flags; and
// 3. if it is not zero then it jumps to the builtin. // 3. if it is not zero then it jumps to the builtin.
void CodeGenerator::BailoutIfDeoptimized() { void CodeGenerator::BailoutIfDeoptimized() {
UseScratchRegisterScope temps(tasm()); UseScratchRegisterScope temps(masm());
Register scratch = temps.Acquire(); Register scratch = temps.Acquire();
int offset = InstructionStream::kCodeOffset - InstructionStream::kHeaderSize; int offset = InstructionStream::kCodeOffset - InstructionStream::kHeaderSize;
__ Ld_d(scratch, MemOperand(kJavaScriptCallCodeStartRegister, offset)); __ Ld_d(scratch, MemOperand(kJavaScriptCallCodeStartRegister, offset));
@ -628,7 +628,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
case kArchCallJSFunction: { case kArchCallJSFunction: {
Register func = i.InputRegister(0); Register func = i.InputRegister(0);
if (v8_flags.debug_code) { if (v8_flags.debug_code) {
UseScratchRegisterScope temps(tasm()); UseScratchRegisterScope temps(masm());
Register scratch = temps.Acquire(); Register scratch = temps.Acquire();
// Check the function's context matches the context argument. // Check the function's context matches the context argument.
__ Ld_d(scratch, FieldMemOperand(func, JSFunction::kContextOffset)); __ Ld_d(scratch, FieldMemOperand(func, JSFunction::kContextOffset));
@ -642,7 +642,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
break; break;
} }
case kArchPrepareCallCFunction: { case kArchPrepareCallCFunction: {
UseScratchRegisterScope temps(tasm()); UseScratchRegisterScope temps(masm());
Register scratch = temps.Acquire(); Register scratch = temps.Acquire();
int const num_gp_parameters = ParamField::decode(instr->opcode()); int const num_gp_parameters = ParamField::decode(instr->opcode());
int const num_fp_parameters = FPParamField::decode(instr->opcode()); int const num_fp_parameters = FPParamField::decode(instr->opcode());
@ -749,7 +749,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
{ {
// We don't actually want to generate a pile of code for this, so just // We don't actually want to generate a pile of code for this, so just
// claim there is a stack frame, without generating one. // claim there is a stack frame, without generating one.
FrameScope scope(tasm(), StackFrame::NO_FRAME_TYPE); FrameScope scope(masm(), StackFrame::NO_FRAME_TYPE);
__ Call(isolate()->builtins()->code_handle(Builtin::kAbortCSADcheck), __ Call(isolate()->builtins()->code_handle(Builtin::kAbortCSADcheck),
RelocInfo::CODE_TARGET); RelocInfo::CODE_TARGET);
} }
@ -829,7 +829,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
} else { } else {
DCHECK_EQ(kArchAtomicStoreWithWriteBarrier, arch_opcode); DCHECK_EQ(kArchAtomicStoreWithWriteBarrier, arch_opcode);
DCHECK_EQ(addressing_mode, kMode_MRI); DCHECK_EQ(addressing_mode, kMode_MRI);
UseScratchRegisterScope temps(tasm()); UseScratchRegisterScope temps(masm());
Register scratch = temps.Acquire(); Register scratch = temps.Acquire();
__ Add_d(scratch, object, Operand(i.InputInt64(1))); __ Add_d(scratch, object, Operand(i.InputInt64(1)));
__ amswap_db_d(zero_reg, value, scratch); __ amswap_db_d(zero_reg, value, scratch);
@ -843,7 +843,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
break; break;
} }
case kArchStackSlot: { case kArchStackSlot: {
UseScratchRegisterScope temps(tasm()); UseScratchRegisterScope temps(masm());
Register scratch = temps.Acquire(); Register scratch = temps.Acquire();
FrameOffset offset = FrameOffset offset =
frame_access_state()->GetFrameOffset(i.InputInt32(0)); frame_access_state()->GetFrameOffset(i.InputInt32(0));
@ -1225,8 +1225,8 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
break; break;
case kLoong64Float64Mod: { case kLoong64Float64Mod: {
// TODO(turbofan): implement directly. // TODO(turbofan): implement directly.
FrameScope scope(tasm(), StackFrame::MANUAL); FrameScope scope(masm(), StackFrame::MANUAL);
UseScratchRegisterScope temps(tasm()); UseScratchRegisterScope temps(masm());
Register scratch = temps.Acquire(); Register scratch = temps.Acquire();
__ PrepareCallCFunction(0, 2, scratch); __ PrepareCallCFunction(0, 2, scratch);
__ CallCFunction(ExternalReference::mod_two_doubles_operation(), 0, 2); __ CallCFunction(ExternalReference::mod_two_doubles_operation(), 0, 2);
@ -1363,7 +1363,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
__ ftintrz_w_s(scratch_d, i.InputDoubleRegister(0)); __ ftintrz_w_s(scratch_d, i.InputDoubleRegister(0));
__ movfr2gr_s(i.OutputRegister(), scratch_d); __ movfr2gr_s(i.OutputRegister(), scratch_d);
if (set_overflow_to_min_i32) { if (set_overflow_to_min_i32) {
UseScratchRegisterScope temps(tasm()); UseScratchRegisterScope temps(masm());
Register scratch = temps.Acquire(); Register scratch = temps.Acquire();
// Avoid INT32_MAX as an overflow indicator and use INT32_MIN instead, // Avoid INT32_MAX as an overflow indicator and use INT32_MIN instead,
// because INT32_MIN allows easier out-of-bounds detection. // because INT32_MIN allows easier out-of-bounds detection.
@ -1392,7 +1392,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
break; break;
} }
case kLoong64Float64ToInt64: { case kLoong64Float64ToInt64: {
UseScratchRegisterScope temps(tasm()); UseScratchRegisterScope temps(masm());
Register scratch = temps.Acquire(); Register scratch = temps.Acquire();
FPURegister scratch_d = kScratchDoubleReg; FPURegister scratch_d = kScratchDoubleReg;
@ -1438,7 +1438,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
bool set_overflow_to_min_i32 = MiscField::decode(instr->opcode()); bool set_overflow_to_min_i32 = MiscField::decode(instr->opcode());
__ Ftintrz_uw_s(i.OutputRegister(), i.InputDoubleRegister(0), scratch); __ Ftintrz_uw_s(i.OutputRegister(), i.InputDoubleRegister(0), scratch);
if (set_overflow_to_min_i32) { if (set_overflow_to_min_i32) {
UseScratchRegisterScope temps(tasm()); UseScratchRegisterScope temps(masm());
Register scratch = temps.Acquire(); Register scratch = temps.Acquire();
// Avoid UINT32_MAX as an overflow indicator and use 0 instead, // Avoid UINT32_MAX as an overflow indicator and use 0 instead,
// because 0 allows easier out-of-bounds detection. // because 0 allows easier out-of-bounds detection.
@ -1863,11 +1863,11 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
<< "\""; \ << "\""; \
UNIMPLEMENTED(); UNIMPLEMENTED();
void AssembleBranchToLabels(CodeGenerator* gen, TurboAssembler* tasm, void AssembleBranchToLabels(CodeGenerator* gen, MacroAssembler* masm,
Instruction* instr, FlagsCondition condition, Instruction* instr, FlagsCondition condition,
Label* tlabel, Label* flabel, bool fallthru) { Label* tlabel, Label* flabel, bool fallthru) {
#undef __ #undef __
#define __ tasm-> #define __ masm->
Loong64OperandConverter i(gen, instr); Loong64OperandConverter i(gen, instr);
// LOONG64 does not have condition code flags, so compare and branch are // LOONG64 does not have condition code flags, so compare and branch are
@ -1882,7 +1882,7 @@ void AssembleBranchToLabels(CodeGenerator* gen, TurboAssembler* tasm,
__ Branch(tlabel, cc, t8, Operand(zero_reg)); __ Branch(tlabel, cc, t8, Operand(zero_reg));
} else if (instr->arch_opcode() == kLoong64Add_d || } else if (instr->arch_opcode() == kLoong64Add_d ||
instr->arch_opcode() == kLoong64Sub_d) { instr->arch_opcode() == kLoong64Sub_d) {
UseScratchRegisterScope temps(tasm); UseScratchRegisterScope temps(masm);
Register scratch = temps.Acquire(); Register scratch = temps.Acquire();
Register scratch2 = temps.Acquire(); Register scratch2 = temps.Acquire();
Condition cc = FlagsConditionToConditionOvf(condition); Condition cc = FlagsConditionToConditionOvf(condition);
@ -1941,7 +1941,7 @@ void AssembleBranchToLabels(CodeGenerator* gen, TurboAssembler* tasm,
} }
if (!fallthru) __ Branch(flabel); // no fallthru to flabel. if (!fallthru) __ Branch(flabel); // no fallthru to flabel.
#undef __ #undef __
#define __ tasm()-> #define __ masm()->
} }
// Assembles branches after an instruction. // Assembles branches after an instruction.
@ -1949,7 +1949,7 @@ void CodeGenerator::AssembleArchBranch(Instruction* instr, BranchInfo* branch) {
Label* tlabel = branch->true_label; Label* tlabel = branch->true_label;
Label* flabel = branch->false_label; Label* flabel = branch->false_label;
AssembleBranchToLabels(this, tasm(), instr, branch->condition, tlabel, flabel, AssembleBranchToLabels(this, masm(), instr, branch->condition, tlabel, flabel,
branch->fallthru); branch->fallthru);
} }
@ -2014,7 +2014,7 @@ void CodeGenerator::AssembleArchTrap(Instruction* instr,
}; };
auto ool = zone()->New<OutOfLineTrap>(this, instr); auto ool = zone()->New<OutOfLineTrap>(this, instr);
Label* tlabel = ool->entry(); Label* tlabel = ool->entry();
AssembleBranchToLabels(this, tasm(), instr, condition, tlabel, nullptr, true); AssembleBranchToLabels(this, masm(), instr, condition, tlabel, nullptr, true);
} }
#endif // V8_ENABLE_WEBASSEMBLY #endif // V8_ENABLE_WEBASSEMBLY
@ -2041,7 +2041,7 @@ void CodeGenerator::AssembleArchBoolean(Instruction* instr,
return; return;
} else if (instr->arch_opcode() == kLoong64Add_d || } else if (instr->arch_opcode() == kLoong64Add_d ||
instr->arch_opcode() == kLoong64Sub_d) { instr->arch_opcode() == kLoong64Sub_d) {
UseScratchRegisterScope temps(tasm()); UseScratchRegisterScope temps(masm());
Register scratch = temps.Acquire(); Register scratch = temps.Acquire();
Condition cc = FlagsConditionToConditionOvf(condition); Condition cc = FlagsConditionToConditionOvf(condition);
// Check for overflow creates 1 or 0 for result. // Check for overflow creates 1 or 0 for result.
@ -2289,7 +2289,7 @@ void CodeGenerator::AssembleConstructFrame() {
// exception unconditionally. Thereby we can avoid the integer overflow // exception unconditionally. Thereby we can avoid the integer overflow
// check in the condition code. // check in the condition code.
if (required_slots * kSystemPointerSize < v8_flags.stack_size * KB) { if (required_slots * kSystemPointerSize < v8_flags.stack_size * KB) {
UseScratchRegisterScope temps(tasm()); UseScratchRegisterScope temps(masm());
Register scratch = temps.Acquire(); Register scratch = temps.Acquire();
__ Ld_d(scratch, FieldMemOperand( __ Ld_d(scratch, FieldMemOperand(
kWasmInstanceRegister, kWasmInstanceRegister,
@ -2444,7 +2444,7 @@ AllocatedOperand CodeGenerator::Push(InstructionOperand* source) {
__ Push(g.ToRegister(source)); __ Push(g.ToRegister(source));
frame_access_state()->IncreaseSPDelta(new_slots); frame_access_state()->IncreaseSPDelta(new_slots);
} else if (source->IsStackSlot()) { } else if (source->IsStackSlot()) {
UseScratchRegisterScope temps(tasm()); UseScratchRegisterScope temps(masm());
Register scratch = temps.Acquire(); Register scratch = temps.Acquire();
__ Ld_d(scratch, g.ToMemOperand(source)); __ Ld_d(scratch, g.ToMemOperand(source));
__ Push(scratch); __ Push(scratch);
@ -2467,7 +2467,7 @@ void CodeGenerator::Pop(InstructionOperand* dest, MachineRepresentation rep) {
if (dest->IsRegister()) { if (dest->IsRegister()) {
__ Pop(g.ToRegister(dest)); __ Pop(g.ToRegister(dest));
} else if (dest->IsStackSlot()) { } else if (dest->IsStackSlot()) {
UseScratchRegisterScope temps(tasm()); UseScratchRegisterScope temps(masm());
Register scratch = temps.Acquire(); Register scratch = temps.Acquire();
__ Pop(scratch); __ Pop(scratch);
__ St_d(scratch, g.ToMemOperand(dest)); __ St_d(scratch, g.ToMemOperand(dest));
@ -2495,7 +2495,7 @@ void CodeGenerator::MoveToTempLocation(InstructionOperand* source,
MachineRepresentation rep) { MachineRepresentation rep) {
// Must be kept in sync with {MoveTempLocationTo}. // Must be kept in sync with {MoveTempLocationTo}.
DCHECK(!source->IsImmediate()); DCHECK(!source->IsImmediate());
move_cycle_.temps.emplace(tasm()); move_cycle_.temps.emplace(masm());
auto& temps = *move_cycle_.temps; auto& temps = *move_cycle_.temps;
// Temporarily exclude the reserved scratch registers while we pick one to // Temporarily exclude the reserved scratch registers while we pick one to
// resolve the move cycle. Re-include them immediately afterwards as they // resolve the move cycle. Re-include them immediately afterwards as they
@ -2585,7 +2585,7 @@ void CodeGenerator::MoveTempLocationTo(InstructionOperand* dest,
void CodeGenerator::SetPendingMove(MoveOperands* move) { void CodeGenerator::SetPendingMove(MoveOperands* move) {
InstructionOperand* src = &move->source(); InstructionOperand* src = &move->source();
InstructionOperand* dst = &move->destination(); InstructionOperand* dst = &move->destination();
UseScratchRegisterScope temps(tasm()); UseScratchRegisterScope temps(masm());
if (src->IsConstant() || (src->IsStackSlot() && dst->IsStackSlot())) { if (src->IsConstant() || (src->IsStackSlot() && dst->IsStackSlot())) {
Register temp = temps.Acquire(); Register temp = temps.Acquire();
move_cycle_.scratch_regs.set(temp); move_cycle_.scratch_regs.set(temp);
@ -2642,7 +2642,7 @@ void CodeGenerator::AssembleMove(InstructionOperand* source,
if (destination->IsRegister()) { if (destination->IsRegister()) {
__ Ld_d(g.ToRegister(destination), src); __ Ld_d(g.ToRegister(destination), src);
} else { } else {
UseScratchRegisterScope temps(tasm()); UseScratchRegisterScope temps(masm());
Register scratch = temps.Acquire(); Register scratch = temps.Acquire();
__ Ld_d(scratch, src); __ Ld_d(scratch, src);
__ St_d(scratch, g.ToMemOperand(destination)); __ St_d(scratch, g.ToMemOperand(destination));
@ -2650,7 +2650,7 @@ void CodeGenerator::AssembleMove(InstructionOperand* source,
} else if (source->IsConstant()) { } else if (source->IsConstant()) {
Constant src = g.ToConstant(source); Constant src = g.ToConstant(source);
if (destination->IsRegister() || destination->IsStackSlot()) { if (destination->IsRegister() || destination->IsStackSlot()) {
UseScratchRegisterScope temps(tasm()); UseScratchRegisterScope temps(masm());
Register scratch = temps.Acquire(); Register scratch = temps.Acquire();
Register dst = Register dst =
destination->IsRegister() ? g.ToRegister(destination) : scratch; destination->IsRegister() ? g.ToRegister(destination) : scratch;
@ -2697,7 +2697,7 @@ void CodeGenerator::AssembleMove(InstructionOperand* source,
if (base::bit_cast<int32_t>(src.ToFloat32()) == 0) { if (base::bit_cast<int32_t>(src.ToFloat32()) == 0) {
__ St_d(zero_reg, dst); __ St_d(zero_reg, dst);
} else { } else {
UseScratchRegisterScope temps(tasm()); UseScratchRegisterScope temps(masm());
Register scratch = temps.Acquire(); Register scratch = temps.Acquire();
__ li(scratch, Operand(base::bit_cast<int32_t>(src.ToFloat32()))); __ li(scratch, Operand(base::bit_cast<int32_t>(src.ToFloat32())));
__ St_d(scratch, dst); __ St_d(scratch, dst);
@ -2748,7 +2748,7 @@ void CodeGenerator::AssembleSwap(InstructionOperand* source,
// Dispatch on the source and destination operand kinds. Not all // Dispatch on the source and destination operand kinds. Not all
// combinations are possible. // combinations are possible.
if (source->IsRegister()) { if (source->IsRegister()) {
UseScratchRegisterScope temps(tasm()); UseScratchRegisterScope temps(masm());
Register scratch = temps.Acquire(); Register scratch = temps.Acquire();
// Register-register. // Register-register.
Register src = g.ToRegister(source); Register src = g.ToRegister(source);
@ -2770,7 +2770,7 @@ void CodeGenerator::AssembleSwap(InstructionOperand* source,
// Since the Ld instruction may need a scratch reg, // Since the Ld instruction may need a scratch reg,
// we should not use both of the two scratch registers in // we should not use both of the two scratch registers in
// UseScratchRegisterScope here. // UseScratchRegisterScope here.
UseScratchRegisterScope temps(tasm()); UseScratchRegisterScope temps(masm());
Register scratch = temps.Acquire(); Register scratch = temps.Acquire();
FPURegister scratch_d = kScratchDoubleReg; FPURegister scratch_d = kScratchDoubleReg;
MemOperand src = g.ToMemOperand(source); MemOperand src = g.ToMemOperand(source);
@ -2796,7 +2796,7 @@ void CodeGenerator::AssembleSwap(InstructionOperand* source,
} }
} else if (source->IsFPStackSlot()) { } else if (source->IsFPStackSlot()) {
DCHECK(destination->IsFPStackSlot()); DCHECK(destination->IsFPStackSlot());
UseScratchRegisterScope temps(tasm()); UseScratchRegisterScope temps(masm());
Register scratch = temps.Acquire(); Register scratch = temps.Acquire();
FPURegister scratch_d = kScratchDoubleReg; FPURegister scratch_d = kScratchDoubleReg;
MemOperand src = g.ToMemOperand(source); MemOperand src = g.ToMemOperand(source);
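
Note: the churn in this file is almost entirely mechanical: the tasm() accessor becomes masm(), and helpers take a MacroAssembler* instead of a TurboAssembler*. For readers unfamiliar with the scratch-register idiom these hunks keep touching, here is a minimal standalone sketch (simplified signatures, not the V8 class definitions; the actual register pool is architecture-specific):

    // RAII scope that lends out temporaries from the assembler's pool of
    // scratch registers and returns them when the scope is destroyed.
    void EmitStackSlotAddress(MacroAssembler* masm, int32_t frame_offset) {
      UseScratchRegisterScope temps(masm);  // borrow from masm's scratch pool
      Register scratch = temps.Acquire();   // take one temporary
      masm->Add_d(scratch, fp, frame_offset);  // scratch = fp + offset
    }  // scratch is handed back here

Because the scope borrows from the assembler instance itself, renaming the accessor is the only change these call sites need.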


@@ -360,7 +360,7 @@ void EmitLoad(InstructionSelector* selector, Node* node, InstructionCode opcode,
       selector->CanAddressRelativeToRootsRegister(m.ResolvedValue())) {
     ptrdiff_t const delta =
         g.GetIntegerConstantValue(index) +
-        TurboAssemblerBase::RootRegisterOffsetForExternalReference(
+        MacroAssemblerBase::RootRegisterOffsetForExternalReference(
             selector->isolate(), m.ResolvedValue());
     // Check that the delta is a 32-bit integer due to the limitations of
     // immediate operands.
@@ -560,7 +560,7 @@ void InstructionSelector::VisitStore(Node* node) {
       CanAddressRelativeToRootsRegister(m.ResolvedValue())) {
     ptrdiff_t const delta =
         g.GetIntegerConstantValue(index) +
-        TurboAssemblerBase::RootRegisterOffsetForExternalReference(
+        MacroAssemblerBase::RootRegisterOffsetForExternalReference(
             isolate(), m.ResolvedValue());
     // Check that the delta is a 32-bit integer due to the limitations of
     // immediate operands.
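
Note: RootRegisterOffsetForExternalReference moved with its class from TurboAssemblerBase to MacroAssemblerBase; the logic is untouched. As a hedged sketch of what the selector is deciding here (same names as above, assuming the helper returns the byte offset of the reference relative to the V8 root register):

    // If the external reference is addressable off the root register and the
    // combined displacement fits a signed 32-bit immediate, the access can be
    // emitted as [kRootRegister + delta] with no extra address computation.
    ptrdiff_t const delta =
        g.GetIntegerConstantValue(index) +
        MacroAssemblerBase::RootRegisterOffsetForExternalReference(
            selector->isolate(), m.ResolvedValue());
    if (is_int32(delta)) {
      // fold delta into the addressing mode of the load/store
    }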

File diff suppressed because it is too large.


@@ -775,7 +775,7 @@ int PrepareForTailCallLatency() {
 int AssertLatency() { return 1; }
 int PrepareCallCFunctionLatency() {
-  int frame_alignment = TurboAssembler::ActivationFrameAlignment();
+  int frame_alignment = MacroAssembler::ActivationFrameAlignment();
   if (frame_alignment > kSystemPointerSize) {
     return 1 + DsubuLatency(false) + AndLatency(false) + 1;
   } else {
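
Note: these latency functions model the instruction sequences the (now renamed) MacroAssembler emits. A hedged reading of the "1 + Dsubu + And + 1" figure, assuming PrepareCallCFunction aligns the stack the usual way on MIPS64:

    // Sketch: when the C ABI requires more alignment than one pointer slot,
    // the assembler must save sp, subtract the argument area, mask sp down
    // to the alignment, and spill the saved sp so it can be restored after
    // the call -- one move, one Dsubu, one And, one store.
    int PrepareCallCFunctionLatencySketch(int frame_alignment) {
      if (frame_alignment > kSystemPointerSize) {
        return 1 + DsubuLatency(false) + AndLatency(false) + 1;
      }
      return DsubuLatency(false);  // assumption: only the stack adjustment
    }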


@@ -23,7 +23,7 @@ namespace v8 {
 namespace internal {
 namespace compiler {
-#define __ tasm()->
+#define __ masm()->
 #define kScratchReg r11
@@ -170,7 +170,7 @@ class OutOfLineRecordWrite final : public OutOfLineCode {
   }
   void Generate() final {
-    ConstantPoolUnavailableScope constant_pool_unavailable(tasm());
+    ConstantPoolUnavailableScope constant_pool_unavailable(masm());
     if (COMPRESS_POINTERS_BOOL) {
       __ DecompressTaggedPointer(value_, value_);
     }
@@ -409,7 +409,7 @@ Condition FlagsConditionToCondition(FlagsCondition condition, ArchOpcode op) {
 #define ASSEMBLE_FLOAT_MODULO() \
   do { \
-    FrameScope scope(tasm(), StackFrame::MANUAL); \
+    FrameScope scope(masm(), StackFrame::MANUAL); \
     __ PrepareCallCFunction(0, 2, kScratchReg); \
     __ MovToFloatParameters(i.InputDoubleRegister(0), \
                             i.InputDoubleRegister(1)); \
@@ -422,7 +422,7 @@ Condition FlagsConditionToCondition(FlagsCondition condition, ArchOpcode op) {
   do { \
     /* TODO(bmeurer): We should really get rid of this special instruction, */ \
     /* and generate a CallAddress instruction instead. */ \
-    FrameScope scope(tasm(), StackFrame::MANUAL); \
+    FrameScope scope(masm(), StackFrame::MANUAL); \
     __ PrepareCallCFunction(0, 1, kScratchReg); \
     __ MovToFloatParameter(i.InputDoubleRegister(0)); \
     __ CallCFunction(ExternalReference::ieee754_##name##_function(), 0, 1); \
@@ -435,7 +435,7 @@ Condition FlagsConditionToCondition(FlagsCondition condition, ArchOpcode op) {
   do { \
     /* TODO(bmeurer): We should really get rid of this special instruction, */ \
     /* and generate a CallAddress instruction instead. */ \
-    FrameScope scope(tasm(), StackFrame::MANUAL); \
+    FrameScope scope(masm(), StackFrame::MANUAL); \
     __ PrepareCallCFunction(0, 2, kScratchReg); \
     __ MovToFloatParameters(i.InputDoubleRegister(0), \
                             i.InputDoubleRegister(1)); \
@@ -680,20 +680,20 @@ void CodeGenerator::AssemblePrepareTailCall() {
 namespace {
-void FlushPendingPushRegisters(TurboAssembler* tasm,
+void FlushPendingPushRegisters(MacroAssembler* masm,
                                FrameAccessState* frame_access_state,
                                ZoneVector<Register>* pending_pushes) {
   switch (pending_pushes->size()) {
     case 0:
       break;
     case 1:
-      tasm->Push((*pending_pushes)[0]);
+      masm->Push((*pending_pushes)[0]);
       break;
     case 2:
-      tasm->Push((*pending_pushes)[0], (*pending_pushes)[1]);
+      masm->Push((*pending_pushes)[0], (*pending_pushes)[1]);
       break;
     case 3:
-      tasm->Push((*pending_pushes)[0], (*pending_pushes)[1],
+      masm->Push((*pending_pushes)[0], (*pending_pushes)[1],
                  (*pending_pushes)[2]);
       break;
     default:
@@ -704,7 +704,7 @@ void FlushPendingPushRegisters(TurboAssembler* tasm,
 }
 void AdjustStackPointerForTailCall(
-    TurboAssembler* tasm, FrameAccessState* state, int new_slot_above_sp,
+    MacroAssembler* masm, FrameAccessState* state, int new_slot_above_sp,
     ZoneVector<Register>* pending_pushes = nullptr,
     bool allow_shrinkage = true) {
   int current_sp_offset = state->GetSPToFPSlotCount() +
@@ -712,15 +712,15 @@ void AdjustStackPointerForTailCall(
   int stack_slot_delta = new_slot_above_sp - current_sp_offset;
   if (stack_slot_delta > 0) {
     if (pending_pushes != nullptr) {
-      FlushPendingPushRegisters(tasm, state, pending_pushes);
+      FlushPendingPushRegisters(masm, state, pending_pushes);
     }
-    tasm->AddS64(sp, sp, Operand(-stack_slot_delta * kSystemPointerSize), r0);
+    masm->AddS64(sp, sp, Operand(-stack_slot_delta * kSystemPointerSize), r0);
     state->IncreaseSPDelta(stack_slot_delta);
   } else if (allow_shrinkage && stack_slot_delta < 0) {
     if (pending_pushes != nullptr) {
-      FlushPendingPushRegisters(tasm, state, pending_pushes);
+      FlushPendingPushRegisters(masm, state, pending_pushes);
     }
-    tasm->AddS64(sp, sp, Operand(-stack_slot_delta * kSystemPointerSize), r0);
+    masm->AddS64(sp, sp, Operand(-stack_slot_delta * kSystemPointerSize), r0);
     state->IncreaseSPDelta(stack_slot_delta);
   }
 }
@@ -742,7 +742,7 @@ void CodeGenerator::AssembleTailCallBeforeGap(Instruction* instr,
           LocationOperand::cast(move->destination()));
       InstructionOperand source(move->source());
       AdjustStackPointerForTailCall(
-          tasm(), frame_access_state(),
+          masm(), frame_access_state(),
           destination_location.index() - pending_pushes.size(),
           &pending_pushes);
       // Pushes of non-register data types are not supported.
@@ -752,20 +752,20 @@ void CodeGenerator::AssembleTailCallBeforeGap(Instruction* instr,
       // TODO(arm): We can push more than 3 registers at once. Add support in
       // the macro-assembler for pushing a list of registers.
       if (pending_pushes.size() == 3) {
-        FlushPendingPushRegisters(tasm(), frame_access_state(),
+        FlushPendingPushRegisters(masm(), frame_access_state(),
                                   &pending_pushes);
       }
       move->Eliminate();
     }
-    FlushPendingPushRegisters(tasm(), frame_access_state(), &pending_pushes);
+    FlushPendingPushRegisters(masm(), frame_access_state(), &pending_pushes);
   }
-  AdjustStackPointerForTailCall(tasm(), frame_access_state(),
+  AdjustStackPointerForTailCall(masm(), frame_access_state(),
                                 first_unused_slot_offset, nullptr, false);
 }
 void CodeGenerator::AssembleTailCallAfterGap(Instruction* instr,
                                              int first_unused_slot_offset) {
-  AdjustStackPointerForTailCall(tasm(), frame_access_state(),
+  AdjustStackPointerForTailCall(masm(), frame_access_state(),
                                 first_unused_slot_offset);
 }
@@ -810,7 +810,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
   switch (opcode) {
     case kArchCallCodeObject: {
       v8::internal::Assembler::BlockTrampolinePoolScope block_trampoline_pool(
-          tasm());
+          masm());
       if (HasRegisterInput(instr, 0)) {
         Register reg = i.InputRegister(0);
         DCHECK_IMPLIES(
@@ -883,7 +883,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
       } else {
         // We cannot use the constant pool to load the target since
         // we've already restored the caller's frame.
-        ConstantPoolUnavailableScope constant_pool_unavailable(tasm());
+        ConstantPoolUnavailableScope constant_pool_unavailable(masm());
         __ Jump(i.InputCode(0), RelocInfo::CODE_TARGET);
       }
       DCHECK_EQ(LeaveRC, i.OutputRCBit());
@@ -904,7 +904,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
     }
     case kArchCallJSFunction: {
       v8::internal::Assembler::BlockTrampolinePoolScope block_trampoline_pool(
-          tasm());
+          masm());
       Register func = i.InputRegister(0);
       if (v8_flags.debug_code) {
         // Check the function's context matches the context argument.
@@ -1058,7 +1058,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
       {
         // We don't actually want to generate a pile of code for this, so just
        // claim there is a stack frame, without generating one.
-        FrameScope scope(tasm(), StackFrame::NO_FRAME_TYPE);
+        FrameScope scope(masm(), StackFrame::NO_FRAME_TYPE);
         __ Call(isolate()->builtins()->code_handle(Builtin::kAbortCSADcheck),
                 RelocInfo::CODE_TARGET);
       }
@@ -3320,7 +3320,7 @@ void CodeGenerator::AssembleReturn(InstructionOperand* additional_pop_count) {
     AssembleDeconstructFrame();
   }
   // Constant pool is unavailable since the frame has been destructed
-  ConstantPoolUnavailableScope constant_pool_unavailable(tasm());
+  ConstantPoolUnavailableScope constant_pool_unavailable(masm());
   if (drop_jsargs) {
     // We must pop all arguments from the stack (including the receiver).
     // The number of arguments without the receiver is
@@ -3334,8 +3334,8 @@ void CodeGenerator::AssembleReturn(InstructionOperand* additional_pop_count) {
       __ mov(argc_reg, Operand(parameter_slots));
       __ bind(&skip);
     }
-    __ DropArguments(argc_reg, TurboAssembler::kCountIsInteger,
-                     TurboAssembler::kCountIncludesReceiver);
+    __ DropArguments(argc_reg, MacroAssembler::kCountIsInteger,
+                     MacroAssembler::kCountIncludesReceiver);
   } else if (additional_pop_count->IsImmediate()) {
     int additional_count = g.ToConstant(additional_pop_count).ToInt32();
     __ Drop(parameter_slots + additional_count);
@@ -3391,7 +3391,7 @@ void CodeGenerator::Pop(InstructionOperand* dest, MachineRepresentation rep) {
   frame_access_state()->IncreaseSPDelta(-new_slots);
   PPCOperandConverter g(this, nullptr);
   if (dest->IsFloatStackSlot() || dest->IsDoubleStackSlot()) {
-    UseScratchRegisterScope temps(tasm());
+    UseScratchRegisterScope temps(masm());
     Register scratch = temps.Acquire();
     __ Pop(scratch);
     __ StoreU64(scratch, g.ToMemOperand(dest), r0);
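
Note: besides the rename, nothing changes in the PPC tail-call path. The pending-push machinery above batches register pushes; a hedged sketch of how a caller drives it (hypothetical registers, same helper names as above):

    // Collect registers that still have to be pushed for the tail call and
    // flush them up to three at a time; one multi-register Push is cheaper
    // than three single pushes.
    ZoneVector<Register> pending_pushes(zone());
    pending_pushes.push_back(r3);
    pending_pushes.push_back(r4);
    pending_pushes.push_back(r5);
    FlushPendingPushRegisters(masm(), frame_access_state(), &pending_pushes);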


@@ -19,7 +19,7 @@ namespace v8 {
 namespace internal {
 namespace compiler {
-#define __ tasm()->
+#define __ masm()->
 // TODO(plind): consider renaming these macros.
 #define TRACE_MSG(msg) \
@@ -334,7 +334,7 @@ FPUCondition FlagsConditionToConditionCmpFPU(bool* predicate,
 #define ASSEMBLE_ATOMIC64_LOGIC_BINOP(bin_instr, external) \
   do { \
-    FrameScope scope(tasm(), StackFrame::MANUAL); \
+    FrameScope scope(masm(), StackFrame::MANUAL); \
     __ AddWord(a0, i.InputRegister(0), i.InputRegister(1)); \
     __ PushCallerSaved(SaveFPRegsMode::kIgnore, a0, a1); \
     __ PrepareCallCFunction(3, 0, kScratchReg); \
@@ -344,7 +344,7 @@ FPUCondition FlagsConditionToConditionCmpFPU(bool* predicate,
 #define ASSEMBLE_ATOMIC64_ARITH_BINOP(bin_instr, external) \
   do { \
-    FrameScope scope(tasm(), StackFrame::MANUAL); \
+    FrameScope scope(masm(), StackFrame::MANUAL); \
     __ AddWord(a0, i.InputRegister(0), i.InputRegister(1)); \
     __ PushCallerSaved(SaveFPRegsMode::kIgnore, a0, a1); \
     __ PrepareCallCFunction(3, 0, kScratchReg); \
@@ -473,7 +473,7 @@ FPUCondition FlagsConditionToConditionCmpFPU(bool* predicate,
 #define ASSEMBLE_IEEE754_BINOP(name) \
   do { \
-    FrameScope scope(tasm(), StackFrame::MANUAL); \
+    FrameScope scope(masm(), StackFrame::MANUAL); \
     __ PrepareCallCFunction(0, 2, kScratchReg); \
     __ MovToFloatParameters(i.InputDoubleRegister(0), \
                             i.InputDoubleRegister(1)); \
@@ -484,7 +484,7 @@ FPUCondition FlagsConditionToConditionCmpFPU(bool* predicate,
 #define ASSEMBLE_IEEE754_UNOP(name) \
   do { \
-    FrameScope scope(tasm(), StackFrame::MANUAL); \
+    FrameScope scope(masm(), StackFrame::MANUAL); \
     __ PrepareCallCFunction(0, 1, kScratchReg); \
     __ MovToFloatParameter(i.InputDoubleRegister(0)); \
     __ CallCFunction(ExternalReference::ieee754_##name##_function(), 0, 1); \
@@ -582,7 +582,7 @@ void CodeGenerator::AssembleArchSelect(Instruction* instr,
 namespace {
-void AdjustStackPointerForTailCall(TurboAssembler* tasm,
+void AdjustStackPointerForTailCall(MacroAssembler* masm,
                                    FrameAccessState* state,
                                    int new_slot_above_sp,
                                    bool allow_shrinkage = true) {
@@ -590,10 +590,10 @@ void AdjustStackPointerForTailCall(TurboAssembler* tasm,
                               StandardFrameConstants::kFixedSlotCountAboveFp;
   int stack_slot_delta = new_slot_above_sp - current_sp_offset;
   if (stack_slot_delta > 0) {
-    tasm->SubWord(sp, sp, stack_slot_delta * kSystemPointerSize);
+    masm->SubWord(sp, sp, stack_slot_delta * kSystemPointerSize);
     state->IncreaseSPDelta(stack_slot_delta);
   } else if (allow_shrinkage && stack_slot_delta < 0) {
-    tasm->AddWord(sp, sp, -stack_slot_delta * kSystemPointerSize);
+    masm->AddWord(sp, sp, -stack_slot_delta * kSystemPointerSize);
     state->IncreaseSPDelta(stack_slot_delta);
   }
 }
@@ -602,13 +602,13 @@ void AdjustStackPointerForTailCall(TurboAssembler* tasm,
 void CodeGenerator::AssembleTailCallBeforeGap(Instruction* instr,
                                               int first_unused_slot_offset) {
-  AdjustStackPointerForTailCall(tasm(), frame_access_state(),
+  AdjustStackPointerForTailCall(masm(), frame_access_state(),
                                 first_unused_slot_offset, false);
 }
 void CodeGenerator::AssembleTailCallAfterGap(Instruction* instr,
                                              int first_unused_slot_offset) {
-  AdjustStackPointerForTailCall(tasm(), frame_access_state(),
+  AdjustStackPointerForTailCall(masm(), frame_access_state(),
                                 first_unused_slot_offset);
 }
@@ -829,7 +829,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
       {
         // We don't actually want to generate a pile of code for this, so just
         // claim there is a stack frame, without generating one.
-        FrameScope scope(tasm(), StackFrame::NO_FRAME_TYPE);
+        FrameScope scope(masm(), StackFrame::NO_FRAME_TYPE);
         __ Call(isolate()->builtins()->code_handle(Builtin::kAbortCSADcheck),
                 RelocInfo::CODE_TARGET);
       }
@@ -1295,7 +1295,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
     case kRiscvModS: {
       // TODO(bmeurer): We should really get rid of this special instruction,
       // and generate a CallAddress instruction instead.
-      FrameScope scope(tasm(), StackFrame::MANUAL);
+      FrameScope scope(masm(), StackFrame::MANUAL);
       __ PrepareCallCFunction(0, 2, kScratchReg);
       __ MovToFloatParameters(i.InputDoubleRegister(0),
                               i.InputDoubleRegister(1));
@@ -1425,7 +1425,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
     case kRiscvModD: {
       // TODO(bmeurer): We should really get rid of this special instruction,
       // and generate a CallAddress instruction instead.
-      FrameScope scope(tasm(), StackFrame::MANUAL);
+      FrameScope scope(masm(), StackFrame::MANUAL);
       __ PrepareCallCFunction(0, 2, kScratchReg);
       __ MovToFloatParameters(i.InputDoubleRegister(0),
                               i.InputDoubleRegister(1));
@@ -1940,7 +1940,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
       break;
 #if V8_TARGET_ARCH_RISCV32
     case kRiscvWord32AtomicPairLoad: {
-      FrameScope scope(tasm(), StackFrame::MANUAL);
+      FrameScope scope(masm(), StackFrame::MANUAL);
       __ AddWord(a0, i.InputRegister(0), i.InputRegister(1));
       __ PushCallerSaved(SaveFPRegsMode::kIgnore, a0, a1);
       __ PrepareCallCFunction(1, 0, kScratchReg);
@@ -1949,7 +1949,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
       break;
     }
     case kRiscvWord32AtomicPairStore: {
-      FrameScope scope(tasm(), StackFrame::MANUAL);
+      FrameScope scope(masm(), StackFrame::MANUAL);
       __ AddWord(a0, i.InputRegister(0), i.InputRegister(1));
       __ PushCallerSaved(SaveFPRegsMode::kIgnore);
       __ PrepareCallCFunction(3, 0, kScratchReg);
@@ -1972,7 +1972,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
       ATOMIC64_BINOP_LOGIC_CASE(Or, OrPair, atomic_pair_or_function)
       ATOMIC64_BINOP_LOGIC_CASE(Xor, XorPair, atomic_pair_xor_function)
     case kRiscvWord32AtomicPairExchange: {
-      FrameScope scope(tasm(), StackFrame::MANUAL);
+      FrameScope scope(masm(), StackFrame::MANUAL);
       __ PushCallerSaved(SaveFPRegsMode::kIgnore, a0, a1);
       __ PrepareCallCFunction(3, 0, kScratchReg);
       __ AddWord(a0, i.InputRegister(0), i.InputRegister(1));
@@ -1982,7 +1982,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
       break;
     }
     case kRiscvWord32AtomicPairCompareExchange: {
-      FrameScope scope(tasm(), StackFrame::MANUAL);
+      FrameScope scope(masm(), StackFrame::MANUAL);
       __ PushCallerSaved(SaveFPRegsMode::kIgnore, a0, a1);
       __ PrepareCallCFunction(5, 0, kScratchReg);
       __ add(a0, i.InputRegister(0), i.InputRegister(1));
@@ -3711,11 +3711,11 @@ bool IsInludeEqual(Condition cc) {
   }
 }
-void AssembleBranchToLabels(CodeGenerator* gen, TurboAssembler* tasm,
+void AssembleBranchToLabels(CodeGenerator* gen, MacroAssembler* masm,
                             Instruction* instr, FlagsCondition condition,
                             Label* tlabel, Label* flabel, bool fallthru) {
 #undef __
-#define __ tasm->
+#define __ masm->
   RiscvOperandConverter i(gen, instr);
   // RISC-V does not have condition code flags, so compare and branch are
@@ -3806,7 +3806,7 @@ void AssembleBranchToLabels(CodeGenerator* gen, TurboAssembler* tasm,
   }
   if (!fallthru) __ Branch(flabel);  // no fallthru to flabel.
 #undef __
-#define __ tasm()->
+#define __ masm()->
 }
 // Assembles branches after an instruction.
@@ -3814,7 +3814,7 @@ void CodeGenerator::AssembleArchBranch(Instruction* instr, BranchInfo* branch) {
   Label* tlabel = branch->true_label;
   Label* flabel = branch->false_label;
-  AssembleBranchToLabels(this, tasm(), instr, branch->condition, tlabel, flabel,
+  AssembleBranchToLabels(this, masm(), instr, branch->condition, tlabel, flabel,
                          branch->fallthru);
 }
@@ -3878,7 +3878,7 @@ void CodeGenerator::AssembleArchTrap(Instruction* instr,
   };
   auto ool = zone()->New<OutOfLineTrap>(this, instr);
   Label* tlabel = ool->entry();
-  AssembleBranchToLabels(this, tasm(), instr, condition, tlabel, nullptr, true);
+  AssembleBranchToLabels(this, masm(), instr, condition, tlabel, nullptr, true);
 }
 // Assembles boolean materializations after an instruction.
@@ -4373,7 +4373,7 @@ void CodeGenerator::MoveToTempLocation(InstructionOperand* source,
                                        MachineRepresentation rep) {
   // Must be kept in sync with {MoveTempLocationTo}.
   DCHECK(!source->IsImmediate());
-  move_cycle_.temps.emplace(tasm());
+  move_cycle_.temps.emplace(masm());
   auto& temps = *move_cycle_.temps;
   // Temporarily exclude the reserved scratch registers while we pick one to
   // resolve the move cycle. Re-include them immediately afterwards as they
@@ -4419,7 +4419,7 @@ void CodeGenerator::MoveTempLocationTo(InstructionOperand* dest,
 void CodeGenerator::SetPendingMove(MoveOperands* move) {
   InstructionOperand* src = &move->source();
   InstructionOperand* dst = &move->destination();
-  UseScratchRegisterScope temps(tasm());
+  UseScratchRegisterScope temps(masm());
   if (src->IsConstant() && dst->IsFPLocationOperand()) {
     Register temp = temps.Acquire();
     move_cycle_.scratch_regs.set(temp);
@@ -4748,7 +4748,7 @@ void CodeGenerator::AssembleSwap(InstructionOperand* source,
       }
     }
 #endif
-    UseScratchRegisterScope scope(tasm());
+    UseScratchRegisterScope scope(masm());
     Register temp_0 = kScratchReg;
     Register temp_1 = kScratchReg2;
     __ LoadWord(temp_0, src);
@@ -4775,7 +4775,7 @@ AllocatedOperand CodeGenerator::Push(InstructionOperand* source) {
     __ Push(g.ToRegister(source));
     frame_access_state()->IncreaseSPDelta(new_slots);
   } else if (source->IsStackSlot()) {
-    UseScratchRegisterScope temps(tasm());
+    UseScratchRegisterScope temps(masm());
     Register scratch = temps.Acquire();
     __ LoadWord(scratch, g.ToMemOperand(source));
     __ Push(scratch);
@@ -4798,7 +4798,7 @@ void CodeGenerator::Pop(InstructionOperand* dest, MachineRepresentation rep) {
   if (dest->IsRegister()) {
     __ Pop(g.ToRegister(dest));
   } else if (dest->IsStackSlot()) {
-    UseScratchRegisterScope temps(tasm());
+    UseScratchRegisterScope temps(masm());
     Register scratch = temps.Acquire();
     __ Pop(scratch);
     __ StoreWord(scratch, g.ToMemOperand(dest));
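
Note: the RISC-V atomic-pair cases all follow one shape, which survives the rename unchanged. A hedged composite of that pattern (sketch only; 'external' stands in for the ExternalReference of the C helper, and the argument counts vary per case):

    FrameScope scope(masm(), StackFrame::MANUAL);  // claim a frame, build none
    __ AddWord(a0, i.InputRegister(0), i.InputRegister(1));  // effective addr
    __ PushCallerSaved(SaveFPRegsMode::kIgnore, a0, a1);  // don't save a0/a1;
                                                          // they carry results
    __ PrepareCallCFunction(3, 0, kScratchReg);
    __ CallCFunction(external, 3, 0);
    __ PopCallerSaved(SaveFPRegsMode::kIgnore, a0, a1);   // restore the rest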


@@ -744,7 +744,7 @@ int AssemblePopArgumentsAdoptFrameLatency() {
 int AssertLatency() { return 1; }
 int PrepareCallCFunctionLatency() {
-  int frame_alignment = TurboAssembler::ActivationFrameAlignment();
+  int frame_alignment = MacroAssembler::ActivationFrameAlignment();
   if (frame_alignment > kSystemPointerSize) {
     return 1 + Sub64Latency(false) + AndLatency(false) + 1;
   } else {


@@ -65,7 +65,7 @@ void EmitLoad(InstructionSelector* selector, Node* node, InstructionCode opcode,
       selector->CanAddressRelativeToRootsRegister(m.ResolvedValue())) {
     ptrdiff_t const delta =
         g.GetIntegerConstantValue(index) +
-        TurboAssemblerBase::RootRegisterOffsetForExternalReference(
+        MacroAssemblerBase::RootRegisterOffsetForExternalReference(
            selector->isolate(), m.ResolvedValue());
     // Check that the delta is a 32-bit integer due to the limitations of
     // immediate operands.


@@ -168,7 +168,7 @@ void EmitLoad(InstructionSelector* selector, Node* node, InstructionCode opcode,
       selector->CanAddressRelativeToRootsRegister(m.ResolvedValue())) {
     ptrdiff_t const delta =
         g.GetIntegerConstantValue(index) +
-        TurboAssemblerBase::RootRegisterOffsetForExternalReference(
+        MacroAssemblerBase::RootRegisterOffsetForExternalReference(
            selector->isolate(), m.ResolvedValue());
     // Check that the delta is a 32-bit integer due to the limitations of
     // immediate operands.


@@ -22,7 +22,7 @@ namespace v8 {
 namespace internal {
 namespace compiler {
-#define __ tasm()->
+#define __ masm()->
 #define kScratchReg ip
@@ -619,7 +619,7 @@ static inline int AssembleUnaryOp(Instruction* instr, _R _r, _M _m, _I _i) {
 #define ASSEMBLE_FLOAT_MODULO() \
   do { \
-    FrameScope scope(tasm(), StackFrame::MANUAL); \
+    FrameScope scope(masm(), StackFrame::MANUAL); \
     __ PrepareCallCFunction(0, 2, kScratchReg); \
     __ MovToFloatParameters(i.InputDoubleRegister(0), \
                             i.InputDoubleRegister(1)); \
@@ -631,7 +631,7 @@ static inline int AssembleUnaryOp(Instruction* instr, _R _r, _M _m, _I _i) {
   do { \
     /* TODO(bmeurer): We should really get rid of this special instruction, */ \
     /* and generate a CallAddress instruction instead. */ \
-    FrameScope scope(tasm(), StackFrame::MANUAL); \
+    FrameScope scope(masm(), StackFrame::MANUAL); \
     __ PrepareCallCFunction(0, 1, kScratchReg); \
     __ MovToFloatParameter(i.InputDoubleRegister(0)); \
     __ CallCFunction(ExternalReference::ieee754_##name##_function(), 0, 1); \
@@ -643,7 +643,7 @@ static inline int AssembleUnaryOp(Instruction* instr, _R _r, _M _m, _I _i) {
   do { \
     /* TODO(bmeurer): We should really get rid of this special instruction, */ \
     /* and generate a CallAddress instruction instead. */ \
-    FrameScope scope(tasm(), StackFrame::MANUAL); \
+    FrameScope scope(masm(), StackFrame::MANUAL); \
     __ PrepareCallCFunction(0, 2, kScratchReg); \
     __ MovToFloatParameters(i.InputDoubleRegister(0), \
                             i.InputDoubleRegister(1)); \
@@ -1021,20 +1021,20 @@ void CodeGenerator::AssemblePrepareTailCall() {
 namespace {
-void FlushPendingPushRegisters(TurboAssembler* tasm,
+void FlushPendingPushRegisters(MacroAssembler* masm,
                                FrameAccessState* frame_access_state,
                                ZoneVector<Register>* pending_pushes) {
   switch (pending_pushes->size()) {
     case 0:
       break;
     case 1:
-      tasm->Push((*pending_pushes)[0]);
+      masm->Push((*pending_pushes)[0]);
       break;
     case 2:
-      tasm->Push((*pending_pushes)[0], (*pending_pushes)[1]);
+      masm->Push((*pending_pushes)[0], (*pending_pushes)[1]);
       break;
     case 3:
-      tasm->Push((*pending_pushes)[0], (*pending_pushes)[1],
+      masm->Push((*pending_pushes)[0], (*pending_pushes)[1],
                  (*pending_pushes)[2]);
       break;
     default:
@@ -1045,7 +1045,7 @@ void FlushPendingPushRegisters(TurboAssembler* tasm,
 }
 void AdjustStackPointerForTailCall(
-    TurboAssembler* tasm, FrameAccessState* state, int new_slot_above_sp,
+    MacroAssembler* masm, FrameAccessState* state, int new_slot_above_sp,
     ZoneVector<Register>* pending_pushes = nullptr,
     bool allow_shrinkage = true) {
   int current_sp_offset = state->GetSPToFPSlotCount() +
@@ -1053,15 +1053,15 @@ void AdjustStackPointerForTailCall(
   int stack_slot_delta = new_slot_above_sp - current_sp_offset;
   if (stack_slot_delta > 0) {
     if (pending_pushes != nullptr) {
-      FlushPendingPushRegisters(tasm, state, pending_pushes);
+      FlushPendingPushRegisters(masm, state, pending_pushes);
     }
-    tasm->AddS64(sp, sp, Operand(-stack_slot_delta * kSystemPointerSize));
+    masm->AddS64(sp, sp, Operand(-stack_slot_delta * kSystemPointerSize));
     state->IncreaseSPDelta(stack_slot_delta);
   } else if (allow_shrinkage && stack_slot_delta < 0) {
     if (pending_pushes != nullptr) {
-      FlushPendingPushRegisters(tasm, state, pending_pushes);
+      FlushPendingPushRegisters(masm, state, pending_pushes);
     }
-    tasm->AddS64(sp, sp, Operand(-stack_slot_delta * kSystemPointerSize));
+    masm->AddS64(sp, sp, Operand(-stack_slot_delta * kSystemPointerSize));
     state->IncreaseSPDelta(stack_slot_delta);
   }
 }
@@ -1083,7 +1083,7 @@ void CodeGenerator::AssembleTailCallBeforeGap(Instruction* instr,
           LocationOperand::cast(move->destination()));
       InstructionOperand source(move->source());
       AdjustStackPointerForTailCall(
-          tasm(), frame_access_state(),
+          masm(), frame_access_state(),
          destination_location.index() - pending_pushes.size(),
          &pending_pushes);
       // Pushes of non-register data types are not supported.
@@ -1093,20 +1093,20 @@ void CodeGenerator::AssembleTailCallBeforeGap(Instruction* instr,
       // TODO(arm): We can push more than 3 registers at once. Add support in
       // the macro-assembler for pushing a list of registers.
       if (pending_pushes.size() == 3) {
-        FlushPendingPushRegisters(tasm(), frame_access_state(),
+        FlushPendingPushRegisters(masm(), frame_access_state(),
                                   &pending_pushes);
       }
       move->Eliminate();
     }
-    FlushPendingPushRegisters(tasm(), frame_access_state(), &pending_pushes);
+    FlushPendingPushRegisters(masm(), frame_access_state(), &pending_pushes);
   }
-  AdjustStackPointerForTailCall(tasm(), frame_access_state(),
+  AdjustStackPointerForTailCall(masm(), frame_access_state(),
                                 first_unused_slot_offset, nullptr, false);
 }
 void CodeGenerator::AssembleTailCallAfterGap(Instruction* instr,
                                              int first_unused_slot_offset) {
-  AdjustStackPointerForTailCall(tasm(), frame_access_state(),
+  AdjustStackPointerForTailCall(masm(), frame_access_state(),
                                 first_unused_slot_offset);
 }
@@ -1218,7 +1218,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
       } else {
         // We cannot use the constant pool to load the target since
         // we've already restored the caller's frame.
-        ConstantPoolUnavailableScope constant_pool_unavailable(tasm());
+        ConstantPoolUnavailableScope constant_pool_unavailable(masm());
         __ Jump(i.InputCode(0), RelocInfo::CODE_TARGET);
       }
       frame_access_state()->ClearSPDelta();
@@ -1351,7 +1351,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
       {
         // We don't actually want to generate a pile of code for this, so just
         // claim there is a stack frame, without generating one.
-        FrameScope scope(tasm(), StackFrame::NO_FRAME_TYPE);
+        FrameScope scope(masm(), StackFrame::NO_FRAME_TYPE);
         __ Call(isolate()->builtins()->code_handle(Builtin::kAbortCSADcheck),
                 RelocInfo::CODE_TARGET);
       }
@@ -3580,9 +3580,9 @@ void CodeGenerator::AssembleReturn(InstructionOperand* additional_pop_count) {
       __ mov(argc_reg, Operand(parameter_slots));
       __ bind(&skip);
     }
-    __ DropArguments(argc_reg, TurboAssembler::kCountIsInteger,
-                     TurboAssembler::kCountIncludesReceiver);
+    __ DropArguments(argc_reg, MacroAssembler::kCountIsInteger,
+                     MacroAssembler::kCountIncludesReceiver);
   } else if (additional_pop_count->IsImmediate()) {
     int additional_count = g.ToConstant(additional_pop_count).ToInt32();
     __ Drop(parameter_slots + additional_count);
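
Note: the only edits here that are more than a find-and-replace are the enum scopes: kCountIsInteger and kCountIncludesReceiver are now named through MacroAssembler. A hedged sketch of the return-path decision they belong to (simplified from the hunks above; the real code also computes max(argc, parameter_slots) first):

    if (drop_jsargs) {
      // Dynamic case: argc is live in a register and counts the receiver.
      __ DropArguments(argc_reg, MacroAssembler::kCountIsInteger,
                       MacroAssembler::kCountIncludesReceiver);
    } else if (additional_pop_count->IsImmediate()) {
      // Static case: the number of slots to drop is known at compile time.
      int additional_count = g.ToConstant(additional_pop_count).ToInt32();
      __ Drop(parameter_slots + additional_count);
    }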


@ -33,7 +33,7 @@ namespace v8 {
namespace internal { namespace internal {
namespace compiler { namespace compiler {
#define __ tasm()-> #define __ masm()->
// Adds X64 specific methods for decoding operands. // Adds X64 specific methods for decoding operands.
class X64OperandConverter : public InstructionOperandConverter { class X64OperandConverter : public InstructionOperandConverter {
@ -334,29 +334,29 @@ class OutOfLineRecordWrite final : public OutOfLineCode {
}; };
template <std::memory_order order> template <std::memory_order order>
int EmitStore(TurboAssembler* tasm, Operand operand, Register value, int EmitStore(MacroAssembler* masm, Operand operand, Register value,
MachineRepresentation rep) { MachineRepresentation rep) {
int store_instr_offset; int store_instr_offset;
if (order == std::memory_order_relaxed) { if (order == std::memory_order_relaxed) {
store_instr_offset = tasm->pc_offset(); store_instr_offset = masm->pc_offset();
switch (rep) { switch (rep) {
case MachineRepresentation::kWord8: case MachineRepresentation::kWord8:
tasm->movb(operand, value); masm->movb(operand, value);
break; break;
case MachineRepresentation::kWord16: case MachineRepresentation::kWord16:
tasm->movw(operand, value); masm->movw(operand, value);
break; break;
case MachineRepresentation::kWord32: case MachineRepresentation::kWord32:
tasm->movl(operand, value); masm->movl(operand, value);
break; break;
case MachineRepresentation::kWord64: case MachineRepresentation::kWord64:
tasm->movq(operand, value); masm->movq(operand, value);
break; break;
case MachineRepresentation::kTagged: case MachineRepresentation::kTagged:
tasm->StoreTaggedField(operand, value); masm->StoreTaggedField(operand, value);
break; break;
case MachineRepresentation::kSandboxedPointer: case MachineRepresentation::kSandboxedPointer:
tasm->StoreSandboxedPointerField(operand, value); masm->StoreSandboxedPointerField(operand, value);
break; break;
default: default:
UNREACHABLE(); UNREACHABLE();
@ -367,28 +367,28 @@ int EmitStore(TurboAssembler* tasm, Operand operand, Register value,
DCHECK_EQ(order, std::memory_order_seq_cst); DCHECK_EQ(order, std::memory_order_seq_cst);
switch (rep) { switch (rep) {
case MachineRepresentation::kWord8: case MachineRepresentation::kWord8:
tasm->movq(kScratchRegister, value); masm->movq(kScratchRegister, value);
store_instr_offset = tasm->pc_offset(); store_instr_offset = masm->pc_offset();
tasm->xchgb(kScratchRegister, operand); masm->xchgb(kScratchRegister, operand);
break; break;
case MachineRepresentation::kWord16: case MachineRepresentation::kWord16:
tasm->movq(kScratchRegister, value); masm->movq(kScratchRegister, value);
store_instr_offset = tasm->pc_offset(); store_instr_offset = masm->pc_offset();
tasm->xchgw(kScratchRegister, operand); masm->xchgw(kScratchRegister, operand);
break; break;
case MachineRepresentation::kWord32: case MachineRepresentation::kWord32:
tasm->movq(kScratchRegister, value); masm->movq(kScratchRegister, value);
store_instr_offset = tasm->pc_offset(); store_instr_offset = masm->pc_offset();
tasm->xchgl(kScratchRegister, operand); masm->xchgl(kScratchRegister, operand);
break; break;
case MachineRepresentation::kWord64: case MachineRepresentation::kWord64:
tasm->movq(kScratchRegister, value); masm->movq(kScratchRegister, value);
store_instr_offset = tasm->pc_offset(); store_instr_offset = masm->pc_offset();
tasm->xchgq(kScratchRegister, operand); masm->xchgq(kScratchRegister, operand);
break; break;
case MachineRepresentation::kTagged: case MachineRepresentation::kTagged:
store_instr_offset = tasm->pc_offset(); store_instr_offset = masm->pc_offset();
tasm->AtomicStoreTaggedField(operand, value); masm->AtomicStoreTaggedField(operand, value);
break; break;
default: default:
UNREACHABLE(); UNREACHABLE();
@ -397,29 +397,29 @@ int EmitStore(TurboAssembler* tasm, Operand operand, Register value,
} }
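
Note how the seq_cst cases differ from the relaxed ones: the value is first copied to kScratchRegister and then stored with xchg, whose implicit lock prefix provides the required total order on x64; the copy is needed because xchg also clobbers its register operand. The same lowering is visible from standard C++ (a standalone illustration, not V8 code; inspect the output of -O2 -S on x86-64):

#include <atomic>

void RelaxedStore(std::atomic<long>& slot, long v) {
  slot.store(v, std::memory_order_relaxed);  // compiles to a plain mov
}

void SeqCstStore(std::atomic<long>& slot, long v) {
  slot.store(v, std::memory_order_seq_cst);  // compiles to an xchg
}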
template <std::memory_order order> template <std::memory_order order>
int EmitStore(TurboAssembler* tasm, Operand operand, Immediate value, int EmitStore(MacroAssembler* masm, Operand operand, Immediate value,
MachineRepresentation rep); MachineRepresentation rep);
template <> template <>
int EmitStore<std::memory_order_relaxed>(TurboAssembler* tasm, Operand operand, int EmitStore<std::memory_order_relaxed>(MacroAssembler* masm, Operand operand,
Immediate value, Immediate value,
MachineRepresentation rep) { MachineRepresentation rep) {
int store_instr_offset = tasm->pc_offset(); int store_instr_offset = masm->pc_offset();
switch (rep) { switch (rep) {
case MachineRepresentation::kWord8: case MachineRepresentation::kWord8:
tasm->movb(operand, value); masm->movb(operand, value);
break; break;
case MachineRepresentation::kWord16: case MachineRepresentation::kWord16:
tasm->movw(operand, value); masm->movw(operand, value);
break; break;
case MachineRepresentation::kWord32: case MachineRepresentation::kWord32:
tasm->movl(operand, value); masm->movl(operand, value);
break; break;
case MachineRepresentation::kWord64: case MachineRepresentation::kWord64:
tasm->movq(operand, value); masm->movq(operand, value);
break; break;
case MachineRepresentation::kTagged: case MachineRepresentation::kTagged:
tasm->StoreTaggedField(operand, value); masm->StoreTaggedField(operand, value);
break; break;
default: default:
UNREACHABLE(); UNREACHABLE();
@ -509,7 +509,7 @@ void EmitOOLTrapIfNeeded(Zone* zone, CodeGenerator* codegen,
#endif // V8_ENABLE_WEBASSEMBLY #endif // V8_ENABLE_WEBASSEMBLY
#ifdef V8_IS_TSAN #ifdef V8_IS_TSAN
void EmitMemoryProbeForTrapHandlerIfNeeded(TurboAssembler* tasm, void EmitMemoryProbeForTrapHandlerIfNeeded(MacroAssembler* masm,
Register scratch, Operand operand, Register scratch, Operand operand,
StubCallMode mode, int size) { StubCallMode mode, int size) {
#if V8_ENABLE_WEBASSEMBLY && V8_TRAP_HANDLER_SUPPORTED #if V8_ENABLE_WEBASSEMBLY && V8_TRAP_HANDLER_SUPPORTED
@ -522,16 +522,16 @@ void EmitMemoryProbeForTrapHandlerIfNeeded(TurboAssembler* tasm,
mode == StubCallMode::kCallWasmRuntimeStub) { mode == StubCallMode::kCallWasmRuntimeStub) {
switch (size) { switch (size) {
case kInt8Size: case kInt8Size:
tasm->movb(scratch, operand); masm->movb(scratch, operand);
break; break;
case kInt16Size: case kInt16Size:
tasm->movw(scratch, operand); masm->movw(scratch, operand);
break; break;
case kInt32Size: case kInt32Size:
tasm->movl(scratch, operand); masm->movl(scratch, operand);
break; break;
case kInt64Size: case kInt64Size:
tasm->movq(scratch, operand); masm->movq(scratch, operand);
break; break;
default: default:
UNREACHABLE(); UNREACHABLE();
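
The probe exists because a wasm out-of-bounds access is only recoverable when the faulting instruction is one the trap handler knows about; a fault raised later, inside the TSAN stub, would not be. Touching the operand with an ordinary mov of the matching size first forces any fault to happen at a recognizable PC. The idea in isolation (a toy, not the V8 mechanism):

#include <cstddef>

inline void ProbeForRead(const volatile char* addr, size_t size) {
  (void)addr[0];         // any fault fires here, at a known instruction,
  (void)addr[size - 1];  // covering both ends of the access
}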
@ -569,14 +569,14 @@ class OutOfLineTSANStore : public OutOfLineCode {
// A direct call to a wasm runtime stub defined in this module. // A direct call to a wasm runtime stub defined in this module.
// Just encode the stub index. This will be patched when the code // Just encode the stub index. This will be patched when the code
// is added to the native module and copied into wasm code space. // is added to the native module and copied into wasm code space.
tasm()->CallTSANStoreStub(scratch0_, value_, save_fp_mode, size_, masm()->CallTSANStoreStub(scratch0_, value_, save_fp_mode, size_,
StubCallMode::kCallWasmRuntimeStub, StubCallMode::kCallWasmRuntimeStub,
memory_order_); memory_order_);
return; return;
} }
#endif // V8_ENABLE_WEBASSEMBLY #endif // V8_ENABLE_WEBASSEMBLY
tasm()->CallTSANStoreStub(scratch0_, value_, save_fp_mode, size_, masm()->CallTSANStoreStub(scratch0_, value_, save_fp_mode, size_,
StubCallMode::kCallBuiltinPointer, memory_order_); StubCallMode::kCallBuiltinPointer, memory_order_);
} }
@ -592,7 +592,7 @@ class OutOfLineTSANStore : public OutOfLineCode {
Zone* zone_; Zone* zone_;
}; };
void EmitTSANStoreOOL(Zone* zone, CodeGenerator* codegen, TurboAssembler* tasm, void EmitTSANStoreOOL(Zone* zone, CodeGenerator* codegen, MacroAssembler* masm,
Operand operand, Register value_reg, Operand operand, Register value_reg,
X64OperandConverter& i, StubCallMode mode, int size, X64OperandConverter& i, StubCallMode mode, int size,
std::memory_order order) { std::memory_order order) {
@ -606,45 +606,45 @@ void EmitTSANStoreOOL(Zone* zone, CodeGenerator* codegen, TurboAssembler* tasm,
Register scratch0 = i.TempRegister(0); Register scratch0 = i.TempRegister(0);
auto tsan_ool = zone->New<OutOfLineTSANStore>(codegen, operand, value_reg, auto tsan_ool = zone->New<OutOfLineTSANStore>(codegen, operand, value_reg,
scratch0, mode, size, order); scratch0, mode, size, order);
tasm->jmp(tsan_ool->entry()); masm->jmp(tsan_ool->entry());
tasm->bind(tsan_ool->exit()); masm->bind(tsan_ool->exit());
} }
template <std::memory_order order> template <std::memory_order order>
Register GetTSANValueRegister(TurboAssembler* tasm, Register value, Register GetTSANValueRegister(MacroAssembler* masm, Register value,
X64OperandConverter& i, X64OperandConverter& i,
MachineRepresentation rep) { MachineRepresentation rep) {
if (rep == MachineRepresentation::kSandboxedPointer) { if (rep == MachineRepresentation::kSandboxedPointer) {
// SandboxedPointers need to be encoded. // SandboxedPointers need to be encoded.
Register value_reg = i.TempRegister(1); Register value_reg = i.TempRegister(1);
tasm->movq(value_reg, value); masm->movq(value_reg, value);
tasm->EncodeSandboxedPointer(value_reg); masm->EncodeSandboxedPointer(value_reg);
return value_reg; return value_reg;
} }
return value; return value;
} }
template <std::memory_order order> template <std::memory_order order>
Register GetTSANValueRegister(TurboAssembler* tasm, Immediate value, Register GetTSANValueRegister(MacroAssembler* masm, Immediate value,
X64OperandConverter& i, X64OperandConverter& i,
MachineRepresentation rep); MachineRepresentation rep);
template <> template <>
Register GetTSANValueRegister<std::memory_order_relaxed>( Register GetTSANValueRegister<std::memory_order_relaxed>(
TurboAssembler* tasm, Immediate value, X64OperandConverter& i, MacroAssembler* masm, Immediate value, X64OperandConverter& i,
MachineRepresentation rep) { MachineRepresentation rep) {
Register value_reg = i.TempRegister(1); Register value_reg = i.TempRegister(1);
tasm->movq(value_reg, value); masm->movq(value_reg, value);
if (rep == MachineRepresentation::kSandboxedPointer) { if (rep == MachineRepresentation::kSandboxedPointer) {
// SandboxedPointers need to be encoded. // SandboxedPointers need to be encoded.
tasm->EncodeSandboxedPointer(value_reg); masm->EncodeSandboxedPointer(value_reg);
} }
return value_reg; return value_reg;
} }
template <std::memory_order order, typename ValueT> template <std::memory_order order, typename ValueT>
void EmitTSANAwareStore(Zone* zone, CodeGenerator* codegen, void EmitTSANAwareStore(Zone* zone, CodeGenerator* codegen,
TurboAssembler* tasm, Operand operand, ValueT value, MacroAssembler* masm, Operand operand, ValueT value,
X64OperandConverter& i, StubCallMode stub_call_mode, X64OperandConverter& i, StubCallMode stub_call_mode,
MachineRepresentation rep, Instruction* instr) { MachineRepresentation rep, Instruction* instr) {
// The FOR_TESTING code doesn't initialize the root register. We can't call // The FOR_TESTING code doesn't initialize the root register. We can't call
@ -654,17 +654,17 @@ void EmitTSANAwareStore(Zone* zone, CodeGenerator* codegen,
// path. It is not crucial, but it would be nice to remove this restriction. // path. It is not crucial, but it would be nice to remove this restriction.
if (codegen->code_kind() != CodeKind::FOR_TESTING) { if (codegen->code_kind() != CodeKind::FOR_TESTING) {
if (instr->HasMemoryAccessMode()) { if (instr->HasMemoryAccessMode()) {
EmitOOLTrapIfNeeded(zone, codegen, instr->opcode(), EmitOOLTrapIfNeeded(zone, codegen, instr->opcode(), instr,
instr, tasm->pc_offset()); masm->pc_offset());
} }
int size = ElementSizeInBytes(rep); int size = ElementSizeInBytes(rep);
EmitMemoryProbeForTrapHandlerIfNeeded(tasm, i.TempRegister(0), operand, EmitMemoryProbeForTrapHandlerIfNeeded(masm, i.TempRegister(0), operand,
stub_call_mode, size); stub_call_mode, size);
Register value_reg = GetTSANValueRegister<order>(tasm, value, i, rep); Register value_reg = GetTSANValueRegister<order>(masm, value, i, rep);
EmitTSANStoreOOL(zone, codegen, tasm, operand, value_reg, i, stub_call_mode, EmitTSANStoreOOL(zone, codegen, masm, operand, value_reg, i, stub_call_mode,
size, order); size, order);
} else { } else {
int store_instr_offset = EmitStore<order>(tasm, operand, value, rep); int store_instr_offset = EmitStore<order>(masm, operand, value, rep);
if (instr->HasMemoryAccessMode()) { if (instr->HasMemoryAccessMode()) {
EmitOOLTrapIfNeeded(zone, codegen, instr->opcode(), EmitOOLTrapIfNeeded(zone, codegen, instr->opcode(),
instr, store_instr_offset); instr, store_instr_offset);
@ -718,7 +718,7 @@ class OutOfLineTSANRelaxedLoad final : public OutOfLineCode {
}; };
void EmitTSANRelaxedLoadOOLIfNeeded(Zone* zone, CodeGenerator* codegen, void EmitTSANRelaxedLoadOOLIfNeeded(Zone* zone, CodeGenerator* codegen,
TurboAssembler* tasm, Operand operand, MacroAssembler* masm, Operand operand,
X64OperandConverter& i, StubCallMode mode, X64OperandConverter& i, StubCallMode mode,
int size) { int size) {
// The FOR_TESTING code doesn't initialize the root register. We can't call // The FOR_TESTING code doesn't initialize the root register. We can't call
@ -731,26 +731,26 @@ void EmitTSANRelaxedLoadOOLIfNeeded(Zone* zone, CodeGenerator* codegen,
Register scratch0 = i.TempRegister(0); Register scratch0 = i.TempRegister(0);
auto tsan_ool = zone->New<OutOfLineTSANRelaxedLoad>(codegen, operand, auto tsan_ool = zone->New<OutOfLineTSANRelaxedLoad>(codegen, operand,
scratch0, mode, size); scratch0, mode, size);
tasm->jmp(tsan_ool->entry()); masm->jmp(tsan_ool->entry());
tasm->bind(tsan_ool->exit()); masm->bind(tsan_ool->exit());
} }
#else #else
template <std::memory_order order, typename ValueT> template <std::memory_order order, typename ValueT>
void EmitTSANAwareStore(Zone* zone, CodeGenerator* codegen, void EmitTSANAwareStore(Zone* zone, CodeGenerator* codegen,
TurboAssembler* tasm, Operand operand, ValueT value, MacroAssembler* masm, Operand operand, ValueT value,
X64OperandConverter& i, StubCallMode stub_call_mode, X64OperandConverter& i, StubCallMode stub_call_mode,
MachineRepresentation rep, Instruction* instr) { MachineRepresentation rep, Instruction* instr) {
DCHECK(order == std::memory_order_relaxed || DCHECK(order == std::memory_order_relaxed ||
order == std::memory_order_seq_cst); order == std::memory_order_seq_cst);
int store_instr_off = EmitStore<order>(tasm, operand, value, rep); int store_instr_off = EmitStore<order>(masm, operand, value, rep);
if (instr->HasMemoryAccessMode()) { if (instr->HasMemoryAccessMode()) {
EmitOOLTrapIfNeeded(zone, codegen, instr->opcode(), instr, store_instr_off); EmitOOLTrapIfNeeded(zone, codegen, instr->opcode(), instr, store_instr_off);
} }
} }
void EmitTSANRelaxedLoadOOLIfNeeded(Zone* zone, CodeGenerator* codegen, void EmitTSANRelaxedLoadOOLIfNeeded(Zone* zone, CodeGenerator* codegen,
TurboAssembler* tasm, Operand operand, MacroAssembler* masm, Operand operand,
X64OperandConverter& i, StubCallMode mode, X64OperandConverter& i, StubCallMode mode,
int size) {} int size) {}
#endif // V8_IS_TSAN #endif // V8_IS_TSAN
@ -923,7 +923,7 @@ void EmitTSANRelaxedLoadOOLIfNeeded(Zone* zone, CodeGenerator* codegen,
#define ASSEMBLE_AVX_BINOP(asm_instr) \ #define ASSEMBLE_AVX_BINOP(asm_instr) \
do { \ do { \
CpuFeatureScope avx_scope(tasm(), AVX); \ CpuFeatureScope avx_scope(masm(), AVX); \
if (HasAddressingMode(instr)) { \ if (HasAddressingMode(instr)) { \
size_t index = 1; \ size_t index = 1; \
Operand right = i.MemoryOperand(&index); \ Operand right = i.MemoryOperand(&index); \
@ -983,7 +983,7 @@ void EmitTSANRelaxedLoadOOLIfNeeded(Zone* zone, CodeGenerator* codegen,
#define ASSEMBLE_SIMD_BINOP(opcode) \ #define ASSEMBLE_SIMD_BINOP(opcode) \
do { \ do { \
if (CpuFeatures::IsSupported(AVX)) { \ if (CpuFeatures::IsSupported(AVX)) { \
CpuFeatureScope avx_scope(tasm(), AVX); \ CpuFeatureScope avx_scope(masm(), AVX); \
__ v##opcode(i.OutputSimd128Register(), i.InputSimd128Register(0), \ __ v##opcode(i.OutputSimd128Register(), i.InputSimd128Register(0), \
i.InputSimd128Register(1)); \ i.InputSimd128Register(1)); \
} else { \ } else { \
@ -1015,7 +1015,7 @@ void EmitTSANRelaxedLoadOOLIfNeeded(Zone* zone, CodeGenerator* codegen,
XMMRegister dst = i.OutputSimd128Register(); \ XMMRegister dst = i.OutputSimd128Register(); \
byte input_index = instr->InputCount() == 2 ? 1 : 0; \ byte input_index = instr->InputCount() == 2 ? 1 : 0; \
if (CpuFeatures::IsSupported(AVX)) { \ if (CpuFeatures::IsSupported(AVX)) { \
CpuFeatureScope avx_scope(tasm(), AVX); \ CpuFeatureScope avx_scope(masm(), AVX); \
DCHECK(instr->InputAt(input_index)->IsSimd128Register()); \ DCHECK(instr->InputAt(input_index)->IsSimd128Register()); \
__ v##opcode(dst, i.InputSimd128Register(0), \ __ v##opcode(dst, i.InputSimd128Register(0), \
i.InputSimd128Register(input_index)); \ i.InputSimd128Register(input_index)); \
@ -1030,7 +1030,7 @@ void EmitTSANRelaxedLoadOOLIfNeeded(Zone* zone, CodeGenerator* codegen,
XMMRegister dst = i.OutputSimd128Register(); \ XMMRegister dst = i.OutputSimd128Register(); \
XMMRegister src = i.InputSimd128Register(0); \ XMMRegister src = i.InputSimd128Register(0); \
if (CpuFeatures::IsSupported(AVX)) { \ if (CpuFeatures::IsSupported(AVX)) { \
CpuFeatureScope avx_scope(tasm(), AVX); \ CpuFeatureScope avx_scope(masm(), AVX); \
DCHECK(instr->InputAt(1)->IsSimd128Register()); \ DCHECK(instr->InputAt(1)->IsSimd128Register()); \
__ v##opcode(dst, src, i.InputSimd128Register(1), imm); \ __ v##opcode(dst, src, i.InputSimd128Register(1), imm); \
} else { \ } else { \
@ -1061,7 +1061,7 @@ void EmitTSANRelaxedLoadOOLIfNeeded(Zone* zone, CodeGenerator* codegen,
XMMRegister dst = i.OutputSimd128Register(); \ XMMRegister dst = i.OutputSimd128Register(); \
if (HasImmediateInput(instr, 1)) { \ if (HasImmediateInput(instr, 1)) { \
if (CpuFeatures::IsSupported(AVX)) { \ if (CpuFeatures::IsSupported(AVX)) { \
CpuFeatureScope avx_scope(tasm(), AVX); \ CpuFeatureScope avx_scope(masm(), AVX); \
__ v##opcode(dst, i.InputSimd128Register(0), \ __ v##opcode(dst, i.InputSimd128Register(0), \
byte{i.InputInt##width(1)}); \ byte{i.InputInt##width(1)}); \
} else { \ } else { \
@ -1074,7 +1074,7 @@ void EmitTSANRelaxedLoadOOLIfNeeded(Zone* zone, CodeGenerator* codegen,
__ andq(kScratchRegister, Immediate(mask)); \ __ andq(kScratchRegister, Immediate(mask)); \
__ Movq(kScratchDoubleReg, kScratchRegister); \ __ Movq(kScratchDoubleReg, kScratchRegister); \
if (CpuFeatures::IsSupported(AVX)) { \ if (CpuFeatures::IsSupported(AVX)) { \
CpuFeatureScope avx_scope(tasm(), AVX); \ CpuFeatureScope avx_scope(masm(), AVX); \
__ v##opcode(dst, i.InputSimd128Register(0), kScratchDoubleReg); \ __ v##opcode(dst, i.InputSimd128Register(0), kScratchDoubleReg); \
} else { \ } else { \
DCHECK_EQ(dst, i.InputSimd128Register(0)); \ DCHECK_EQ(dst, i.InputSimd128Register(0)); \
@ -1102,13 +1102,13 @@ void EmitTSANRelaxedLoadOOLIfNeeded(Zone* zone, CodeGenerator* codegen,
EmitOOLTrapIfNeeded(zone(), this, opcode, instr, load_offset); \ EmitOOLTrapIfNeeded(zone(), this, opcode, instr, load_offset); \
} while (false) } while (false)
#define ASSEMBLE_SEQ_CST_STORE(rep) \ #define ASSEMBLE_SEQ_CST_STORE(rep) \
do { \ do { \
Register value = i.InputRegister(0); \ Register value = i.InputRegister(0); \
Operand operand = i.MemoryOperand(1); \ Operand operand = i.MemoryOperand(1); \
EmitTSANAwareStore<std::memory_order_seq_cst>( \ EmitTSANAwareStore<std::memory_order_seq_cst>( \
zone(), this, tasm(), operand, value, i, DetermineStubCallMode(), \ zone(), this, masm(), operand, value, i, DetermineStubCallMode(), rep, \
rep, instr); \ instr); \
} while (false) } while (false)
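
All of these ASSEMBLE_* helpers share one dispatch pattern: when AVX is available they open a CpuFeatureScope and emit the non-destructive three-operand v-prefixed form; otherwise they fall back to the destructive SSE form, which relies on the register allocator having placed the first input in the destination (hence the DCHECK_EQ in the SSE branches). The choice, reduced to a toy emitter (nothing here is V8 API):

struct ToyEmitter {
  bool has_avx;
  void vaddps(int dst, int src1, int src2) { /* AVX: dst = src1 + src2 */ }
  void addps(int dst, int src2) { /* SSE: dst = dst + src2 */ }
};

void EmitSimdAdd(ToyEmitter& e, int dst, int src1, int src2) {
  if (e.has_avx) {
    e.vaddps(dst, src1, src2);  // non-destructive, any dst
  } else {
    // Destructive form: correctness relies on dst == src1 here.
    e.addps(dst, src2);
  }
}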
void CodeGenerator::AssembleDeconstructFrame() { void CodeGenerator::AssembleDeconstructFrame() {
@ -1127,7 +1127,7 @@ void CodeGenerator::AssemblePrepareTailCall() {
namespace { namespace {
void AdjustStackPointerForTailCall(Instruction* instr, void AdjustStackPointerForTailCall(Instruction* instr,
TurboAssembler* assembler, Linkage* linkage, MacroAssembler* assembler, Linkage* linkage,
OptimizedCompilationInfo* info, OptimizedCompilationInfo* info,
FrameAccessState* state, FrameAccessState* state,
int new_slot_above_sp, int new_slot_above_sp,
@ -1163,7 +1163,7 @@ void AdjustStackPointerForTailCall(Instruction* instr,
} }
} }
void SetupSimdImmediateInRegister(TurboAssembler* assembler, uint32_t* imms, void SetupSimdImmediateInRegister(MacroAssembler* assembler, uint32_t* imms,
XMMRegister reg) { XMMRegister reg) {
assembler->Move(reg, make_uint64(imms[3], imms[2]), assembler->Move(reg, make_uint64(imms[3], imms[2]),
make_uint64(imms[1], imms[0])); make_uint64(imms[1], imms[0]));
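
SetupSimdImmediateInRegister packs the four 32-bit immediates into two 64-bit halves, with lane 0 in the lowest bits; the call pattern above implies make_uint64 places its first argument in the high 32 bits. A standalone equivalent of the packing:

#include <cstdint>

uint64_t MakeUint64(uint32_t high, uint32_t low) {
  return (static_cast<uint64_t>(high) << 32) | low;
}

// high half of the 128-bit constant: MakeUint64(imms[3], imms[2])
// low  half of the 128-bit constant: MakeUint64(imms[1], imms[0])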
@ -1186,7 +1186,7 @@ void CodeGenerator::AssembleTailCallBeforeGap(Instruction* instr,
LocationOperand destination_location( LocationOperand destination_location(
LocationOperand::cast(move->destination())); LocationOperand::cast(move->destination()));
InstructionOperand source(move->source()); InstructionOperand source(move->source());
AdjustStackPointerForTailCall(instr, tasm(), linkage(), info(), AdjustStackPointerForTailCall(instr, masm(), linkage(), info(),
frame_access_state(), frame_access_state(),
destination_location.index()); destination_location.index());
if (source.IsStackSlot()) { if (source.IsStackSlot()) {
@ -1205,14 +1205,14 @@ void CodeGenerator::AssembleTailCallBeforeGap(Instruction* instr,
move->Eliminate(); move->Eliminate();
} }
} }
AdjustStackPointerForTailCall(instr, tasm(), linkage(), info(), AdjustStackPointerForTailCall(instr, masm(), linkage(), info(),
frame_access_state(), first_unused_slot_offset, frame_access_state(), first_unused_slot_offset,
false); false);
} }
void CodeGenerator::AssembleTailCallAfterGap(Instruction* instr, void CodeGenerator::AssembleTailCallAfterGap(Instruction* instr,
int first_unused_slot_offset) { int first_unused_slot_offset) {
AdjustStackPointerForTailCall(instr, tasm(), linkage(), info(), AdjustStackPointerForTailCall(instr, masm(), linkage(), info(),
frame_access_state(), first_unused_slot_offset); frame_access_state(), first_unused_slot_offset);
} }
@ -1464,7 +1464,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
{ {
// We don't actually want to generate a pile of code for this, so just // We don't actually want to generate a pile of code for this, so just
// claim there is a stack frame, without generating one. // claim there is a stack frame, without generating one.
FrameScope scope(tasm(), StackFrame::NO_FRAME_TYPE); FrameScope scope(masm(), StackFrame::NO_FRAME_TYPE);
__ Call(BUILTIN_CODE(isolate(), AbortCSADcheck), __ Call(BUILTIN_CODE(isolate(), AbortCSADcheck),
RelocInfo::CODE_TARGET); RelocInfo::CODE_TARGET);
} }
@ -1561,12 +1561,12 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
DetermineStubCallMode()); DetermineStubCallMode());
if (arch_opcode == kArchStoreWithWriteBarrier) { if (arch_opcode == kArchStoreWithWriteBarrier) {
EmitTSANAwareStore<std::memory_order_relaxed>( EmitTSANAwareStore<std::memory_order_relaxed>(
zone(), this, tasm(), operand, value, i, DetermineStubCallMode(), zone(), this, masm(), operand, value, i, DetermineStubCallMode(),
MachineRepresentation::kTagged, instr); MachineRepresentation::kTagged, instr);
} else { } else {
DCHECK_EQ(arch_opcode, kArchAtomicStoreWithWriteBarrier); DCHECK_EQ(arch_opcode, kArchAtomicStoreWithWriteBarrier);
EmitTSANAwareStore<std::memory_order_seq_cst>( EmitTSANAwareStore<std::memory_order_seq_cst>(
zone(), this, tasm(), operand, value, i, DetermineStubCallMode(), zone(), this, masm(), operand, value, i, DetermineStubCallMode(),
MachineRepresentation::kTagged, instr); MachineRepresentation::kTagged, instr);
} }
if (mode > RecordWriteMode::kValueIsPointer) { if (mode > RecordWriteMode::kValueIsPointer) {
@ -1873,7 +1873,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
ASSEMBLE_SSE_UNOP(Cvtss2sd); ASSEMBLE_SSE_UNOP(Cvtss2sd);
break; break;
case kSSEFloat32Round: { case kSSEFloat32Round: {
CpuFeatureScope sse_scope(tasm(), SSE4_1); CpuFeatureScope sse_scope(masm(), SSE4_1);
RoundingMode const mode = RoundingMode const mode =
static_cast<RoundingMode>(MiscField::decode(instr->opcode())); static_cast<RoundingMode>(MiscField::decode(instr->opcode()));
__ Roundss(i.OutputDoubleRegister(), i.InputDoubleRegister(0), mode); __ Roundss(i.OutputDoubleRegister(), i.InputDoubleRegister(0), mode);
@ -1930,7 +1930,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
// The following 2 instructions implicitly use rax. // The following 2 instructions implicitly use rax.
__ fnstsw_ax(); __ fnstsw_ax();
if (CpuFeatures::IsSupported(SAHF)) { if (CpuFeatures::IsSupported(SAHF)) {
CpuFeatureScope sahf_scope(tasm(), SAHF); CpuFeatureScope sahf_scope(masm(), SAHF);
__ sahf(); __ sahf();
} else { } else {
__ shrl(rax, Immediate(8)); __ shrl(rax, Immediate(8));
@ -2066,7 +2066,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
ASSEMBLE_SSE_UNOP(Sqrtsd); ASSEMBLE_SSE_UNOP(Sqrtsd);
break; break;
case kSSEFloat64Round: { case kSSEFloat64Round: {
CpuFeatureScope sse_scope(tasm(), SSE4_1); CpuFeatureScope sse_scope(masm(), SSE4_1);
RoundingMode const mode = RoundingMode const mode =
static_cast<RoundingMode>(MiscField::decode(instr->opcode())); static_cast<RoundingMode>(MiscField::decode(instr->opcode()));
__ Roundsd(i.OutputDoubleRegister(), i.InputDoubleRegister(0), mode); __ Roundsd(i.OutputDoubleRegister(), i.InputDoubleRegister(0), mode);
@ -2389,7 +2389,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
} }
break; break;
case kAVXFloat32Cmp: { case kAVXFloat32Cmp: {
CpuFeatureScope avx_scope(tasm(), AVX); CpuFeatureScope avx_scope(masm(), AVX);
if (instr->InputAt(1)->IsFPRegister()) { if (instr->InputAt(1)->IsFPRegister()) {
__ vucomiss(i.InputDoubleRegister(0), i.InputDoubleRegister(1)); __ vucomiss(i.InputDoubleRegister(0), i.InputDoubleRegister(1));
} else { } else {
@ -2413,7 +2413,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
__ Movaps(i.OutputDoubleRegister(), i.OutputDoubleRegister()); __ Movaps(i.OutputDoubleRegister(), i.OutputDoubleRegister());
break; break;
case kAVXFloat64Cmp: { case kAVXFloat64Cmp: {
CpuFeatureScope avx_scope(tasm(), AVX); CpuFeatureScope avx_scope(masm(), AVX);
if (instr->InputAt(1)->IsFPRegister()) { if (instr->InputAt(1)->IsFPRegister()) {
__ vucomisd(i.InputDoubleRegister(0), i.InputDoubleRegister(1)); __ vucomisd(i.InputDoubleRegister(0), i.InputDoubleRegister(1));
} else { } else {
@ -2487,12 +2487,12 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
if (HasImmediateInput(instr, index)) { if (HasImmediateInput(instr, index)) {
Immediate value(Immediate(i.InputInt8(index))); Immediate value(Immediate(i.InputInt8(index)));
EmitTSANAwareStore<std::memory_order_relaxed>( EmitTSANAwareStore<std::memory_order_relaxed>(
zone(), this, tasm(), operand, value, i, DetermineStubCallMode(), zone(), this, masm(), operand, value, i, DetermineStubCallMode(),
MachineRepresentation::kWord8, instr); MachineRepresentation::kWord8, instr);
} else { } else {
Register value(i.InputRegister(index)); Register value(i.InputRegister(index));
EmitTSANAwareStore<std::memory_order_relaxed>( EmitTSANAwareStore<std::memory_order_relaxed>(
zone(), this, tasm(), operand, value, i, DetermineStubCallMode(), zone(), this, masm(), operand, value, i, DetermineStubCallMode(),
MachineRepresentation::kWord8, instr); MachineRepresentation::kWord8, instr);
} }
break; break;
@ -2522,12 +2522,12 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
if (HasImmediateInput(instr, index)) { if (HasImmediateInput(instr, index)) {
Immediate value(Immediate(i.InputInt16(index))); Immediate value(Immediate(i.InputInt16(index)));
EmitTSANAwareStore<std::memory_order_relaxed>( EmitTSANAwareStore<std::memory_order_relaxed>(
zone(), this, tasm(), operand, value, i, DetermineStubCallMode(), zone(), this, masm(), operand, value, i, DetermineStubCallMode(),
MachineRepresentation::kWord16, instr); MachineRepresentation::kWord16, instr);
} else { } else {
Register value(i.InputRegister(index)); Register value(i.InputRegister(index));
EmitTSANAwareStore<std::memory_order_relaxed>( EmitTSANAwareStore<std::memory_order_relaxed>(
zone(), this, tasm(), operand, value, i, DetermineStubCallMode(), zone(), this, masm(), operand, value, i, DetermineStubCallMode(),
MachineRepresentation::kWord16, instr); MachineRepresentation::kWord16, instr);
} }
break; break;
@ -2538,7 +2538,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
if (HasAddressingMode(instr)) { if (HasAddressingMode(instr)) {
Operand address(i.MemoryOperand()); Operand address(i.MemoryOperand());
__ movl(i.OutputRegister(), address); __ movl(i.OutputRegister(), address);
EmitTSANRelaxedLoadOOLIfNeeded(zone(), this, tasm(), address, i, EmitTSANRelaxedLoadOOLIfNeeded(zone(), this, masm(), address, i,
DetermineStubCallMode(), kInt32Size); DetermineStubCallMode(), kInt32Size);
} else { } else {
if (HasRegisterInput(instr, 0)) { if (HasRegisterInput(instr, 0)) {
@ -2554,12 +2554,12 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
if (HasImmediateInput(instr, index)) { if (HasImmediateInput(instr, index)) {
Immediate value(i.InputImmediate(index)); Immediate value(i.InputImmediate(index));
EmitTSANAwareStore<std::memory_order_relaxed>( EmitTSANAwareStore<std::memory_order_relaxed>(
zone(), this, tasm(), operand, value, i, DetermineStubCallMode(), zone(), this, masm(), operand, value, i, DetermineStubCallMode(),
MachineRepresentation::kWord32, instr); MachineRepresentation::kWord32, instr);
} else { } else {
Register value(i.InputRegister(index)); Register value(i.InputRegister(index));
EmitTSANAwareStore<std::memory_order_relaxed>( EmitTSANAwareStore<std::memory_order_relaxed>(
zone(), this, tasm(), operand, value, i, DetermineStubCallMode(), zone(), this, masm(), operand, value, i, DetermineStubCallMode(),
MachineRepresentation::kWord32, instr); MachineRepresentation::kWord32, instr);
} }
} }
@ -2572,7 +2572,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
CHECK(instr->HasOutput()); CHECK(instr->HasOutput());
Operand address(i.MemoryOperand()); Operand address(i.MemoryOperand());
__ DecompressTaggedSigned(i.OutputRegister(), address); __ DecompressTaggedSigned(i.OutputRegister(), address);
EmitTSANRelaxedLoadOOLIfNeeded(zone(), this, tasm(), address, i, EmitTSANRelaxedLoadOOLIfNeeded(zone(), this, masm(), address, i,
DetermineStubCallMode(), kTaggedSize); DetermineStubCallMode(), kTaggedSize);
break; break;
} }
@ -2580,7 +2580,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
CHECK(instr->HasOutput()); CHECK(instr->HasOutput());
Operand address(i.MemoryOperand()); Operand address(i.MemoryOperand());
__ DecompressTaggedPointer(i.OutputRegister(), address); __ DecompressTaggedPointer(i.OutputRegister(), address);
EmitTSANRelaxedLoadOOLIfNeeded(zone(), this, tasm(), address, i, EmitTSANRelaxedLoadOOLIfNeeded(zone(), this, masm(), address, i,
DetermineStubCallMode(), kTaggedSize); DetermineStubCallMode(), kTaggedSize);
break; break;
} }
@ -2588,7 +2588,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
CHECK(instr->HasOutput()); CHECK(instr->HasOutput());
Operand address(i.MemoryOperand()); Operand address(i.MemoryOperand());
__ DecompressAnyTagged(i.OutputRegister(), address); __ DecompressAnyTagged(i.OutputRegister(), address);
EmitTSANRelaxedLoadOOLIfNeeded(zone(), this, tasm(), address, i, EmitTSANRelaxedLoadOOLIfNeeded(zone(), this, masm(), address, i,
DetermineStubCallMode(), kTaggedSize); DetermineStubCallMode(), kTaggedSize);
break; break;
} }
@ -2599,12 +2599,12 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
if (HasImmediateInput(instr, index)) { if (HasImmediateInput(instr, index)) {
Immediate value(i.InputImmediate(index)); Immediate value(i.InputImmediate(index));
EmitTSANAwareStore<std::memory_order_relaxed>( EmitTSANAwareStore<std::memory_order_relaxed>(
zone(), this, tasm(), operand, value, i, DetermineStubCallMode(), zone(), this, masm(), operand, value, i, DetermineStubCallMode(),
MachineRepresentation::kTagged, instr); MachineRepresentation::kTagged, instr);
} else { } else {
Register value(i.InputRegister(index)); Register value(i.InputRegister(index));
EmitTSANAwareStore<std::memory_order_relaxed>( EmitTSANAwareStore<std::memory_order_relaxed>(
zone(), this, tasm(), operand, value, i, DetermineStubCallMode(), zone(), this, masm(), operand, value, i, DetermineStubCallMode(),
MachineRepresentation::kTagged, instr); MachineRepresentation::kTagged, instr);
} }
break; break;
@ -2615,7 +2615,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
Register dst = i.OutputRegister(); Register dst = i.OutputRegister();
__ movq(dst, address); __ movq(dst, address);
__ DecodeSandboxedPointer(dst); __ DecodeSandboxedPointer(dst);
EmitTSANRelaxedLoadOOLIfNeeded(zone(), this, tasm(), address, i, EmitTSANRelaxedLoadOOLIfNeeded(zone(), this, masm(), address, i,
DetermineStubCallMode(), DetermineStubCallMode(),
kSystemPointerSize); kSystemPointerSize);
break; break;
@ -2627,7 +2627,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
CHECK(!HasImmediateInput(instr, index)); CHECK(!HasImmediateInput(instr, index));
Register value(i.InputRegister(index)); Register value(i.InputRegister(index));
EmitTSANAwareStore<std::memory_order_relaxed>( EmitTSANAwareStore<std::memory_order_relaxed>(
zone(), this, tasm(), operand, value, i, DetermineStubCallMode(), zone(), this, masm(), operand, value, i, DetermineStubCallMode(),
MachineRepresentation::kSandboxedPointer, instr); MachineRepresentation::kSandboxedPointer, instr);
break; break;
} }
@ -2636,7 +2636,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
EmitOOLTrapIfNeeded(zone(), this, opcode, instr, __ pc_offset()); EmitOOLTrapIfNeeded(zone(), this, opcode, instr, __ pc_offset());
Operand address(i.MemoryOperand()); Operand address(i.MemoryOperand());
__ movq(i.OutputRegister(), address); __ movq(i.OutputRegister(), address);
EmitTSANRelaxedLoadOOLIfNeeded(zone(), this, tasm(), address, i, EmitTSANRelaxedLoadOOLIfNeeded(zone(), this, masm(), address, i,
DetermineStubCallMode(), kInt64Size); DetermineStubCallMode(), kInt64Size);
} else { } else {
size_t index = 0; size_t index = 0;
@ -2644,12 +2644,12 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
if (HasImmediateInput(instr, index)) { if (HasImmediateInput(instr, index)) {
Immediate value(i.InputImmediate(index)); Immediate value(i.InputImmediate(index));
EmitTSANAwareStore<std::memory_order_relaxed>( EmitTSANAwareStore<std::memory_order_relaxed>(
zone(), this, tasm(), operand, value, i, DetermineStubCallMode(), zone(), this, masm(), operand, value, i, DetermineStubCallMode(),
MachineRepresentation::kWord64, instr); MachineRepresentation::kWord64, instr);
} else { } else {
Register value(i.InputRegister(index)); Register value(i.InputRegister(index));
EmitTSANAwareStore<std::memory_order_relaxed>( EmitTSANAwareStore<std::memory_order_relaxed>(
zone(), this, tasm(), operand, value, i, DetermineStubCallMode(), zone(), this, masm(), operand, value, i, DetermineStubCallMode(),
MachineRepresentation::kWord64, instr); MachineRepresentation::kWord64, instr);
} }
} }
@ -3206,7 +3206,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
break; break;
} }
case kX64I64x2Eq: { case kX64I64x2Eq: {
CpuFeatureScope sse_scope(tasm(), SSE4_1); CpuFeatureScope sse_scope(masm(), SSE4_1);
ASSEMBLE_SIMD_BINOP(pcmpeqq); ASSEMBLE_SIMD_BINOP(pcmpeqq);
break; break;
} }
@ -3486,7 +3486,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
for (int j = 0; j < 4; j++) { for (int j = 0; j < 4; j++) {
imm[j] = i.InputUint32(j); imm[j] = i.InputUint32(j);
} }
SetupSimdImmediateInRegister(tasm(), imm, dst); SetupSimdImmediateInRegister(masm(), imm, dst);
break; break;
} }
case kX64S128Zero: { case kX64S128Zero: {
@ -3994,7 +3994,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
mask[j - 1] = i.InputUint32(j); mask[j - 1] = i.InputUint32(j);
} }
SetupSimdImmediateInRegister(tasm(), mask, tmp_simd); SetupSimdImmediateInRegister(masm(), mask, tmp_simd);
__ Pshufb(dst, tmp_simd); __ Pshufb(dst, tmp_simd);
} else { // two input operands } else { // two input operands
DCHECK_NE(tmp_simd, i.InputSimd128Register(1)); DCHECK_NE(tmp_simd, i.InputSimd128Register(1));
@ -4008,7 +4008,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
mask1[j - 2] |= (lane < kSimd128Size ? lane : 0x80) << k; mask1[j - 2] |= (lane < kSimd128Size ? lane : 0x80) << k;
} }
} }
SetupSimdImmediateInRegister(tasm(), mask1, tmp_simd); SetupSimdImmediateInRegister(masm(), mask1, tmp_simd);
__ Pshufb(kScratchDoubleReg, tmp_simd); __ Pshufb(kScratchDoubleReg, tmp_simd);
uint32_t mask2[4] = {}; uint32_t mask2[4] = {};
if (instr->InputAt(1)->IsSimd128Register()) { if (instr->InputAt(1)->IsSimd128Register()) {
@ -4024,7 +4024,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
mask2[j - 2] |= (lane >= kSimd128Size ? (lane & 0x0F) : 0x80) << k; mask2[j - 2] |= (lane >= kSimd128Size ? (lane & 0x0F) : 0x80) << k;
} }
} }
SetupSimdImmediateInRegister(tasm(), mask2, tmp_simd); SetupSimdImmediateInRegister(masm(), mask2, tmp_simd);
__ Pshufb(dst, tmp_simd); __ Pshufb(dst, tmp_simd);
__ Por(dst, kScratchDoubleReg); __ Por(dst, kScratchDoubleReg);
} }
@ -5057,8 +5057,8 @@ void CodeGenerator::AssembleReturn(InstructionOperand* additional_pop_count) {
__ j(greater, &mismatch_return, Label::kNear); __ j(greater, &mismatch_return, Label::kNear);
__ Ret(parameter_slots * kSystemPointerSize, scratch_reg); __ Ret(parameter_slots * kSystemPointerSize, scratch_reg);
__ bind(&mismatch_return); __ bind(&mismatch_return);
__ DropArguments(argc_reg, scratch_reg, TurboAssembler::kCountIsInteger, __ DropArguments(argc_reg, scratch_reg, MacroAssembler::kCountIsInteger,
TurboAssembler::kCountIncludesReceiver); MacroAssembler::kCountIncludesReceiver);
// We use a return instead of a jump for better return address prediction. // We use a return instead of a jump for better return address prediction.
__ Ret(); __ Ret();
} else if (additional_pop_count->IsImmediate()) { } else if (additional_pop_count->IsImmediate()) {
@ -5082,7 +5082,7 @@ void CodeGenerator::AssembleReturn(InstructionOperand* additional_pop_count) {
} }
} }
void CodeGenerator::FinishCode() { tasm()->PatchConstPool(); } void CodeGenerator::FinishCode() { masm()->PatchConstPool(); }
void CodeGenerator::PrepareForDeoptimizationExits( void CodeGenerator::PrepareForDeoptimizationExits(
ZoneDeque<DeoptimizationExit*>* exits) {} ZoneDeque<DeoptimizationExit*>* exits) {}


@ -224,7 +224,7 @@ class X64OperandGenerator final : public OperandGenerator {
m.object().ResolvedValue())) { m.object().ResolvedValue())) {
ptrdiff_t const delta = ptrdiff_t const delta =
m.index().ResolvedValue() + m.index().ResolvedValue() +
TurboAssemblerBase::RootRegisterOffsetForExternalReference( MacroAssemblerBase::RootRegisterOffsetForExternalReference(
selector()->isolate(), m.object().ResolvedValue()); selector()->isolate(), m.object().ResolvedValue());
if (is_int32(delta)) { if (is_int32(delta)) {
inputs[(*input_count)++] = TempImmediate(static_cast<int32_t>(delta)); inputs[(*input_count)++] = TempImmediate(static_cast<int32_t>(delta));
@ -2538,7 +2538,7 @@ void VisitWord64EqualImpl(InstructionSelector* selector, Node* node,
return VisitCompare( return VisitCompare(
selector, opcode, selector, opcode,
g.TempImmediate( g.TempImmediate(
TurboAssemblerBase::RootRegisterOffsetForRootIndex(root_index)), MacroAssemblerBase::RootRegisterOffsetForRootIndex(root_index)),
g.UseRegister(m.left().node()), cont); g.UseRegister(m.left().node()), cont);
} }
} }
@ -2576,7 +2576,7 @@ void VisitWord32EqualImpl(InstructionSelector* selector, Node* node,
return VisitCompare( return VisitCompare(
selector, opcode, selector, opcode,
g.TempImmediate( g.TempImmediate(
TurboAssemblerBase::RootRegisterOffsetForRootIndex(root_index)), MacroAssemblerBase::RootRegisterOffsetForRootIndex(root_index)),
g.UseRegister(left), cont); g.UseRegister(left), cont);
} }
} }


@ -84,7 +84,7 @@ BasicBlockProfilerData* BasicBlockInstrumentor::Instrument(
// PatchBasicBlockCountersReference). An important and subtle point: we // PatchBasicBlockCountersReference). An important and subtle point: we
// cannot use the root handle basic_block_counters_marker_handle() and must // cannot use the root handle basic_block_counters_marker_handle() and must
// create a new separate handle. Otherwise // create a new separate handle. Otherwise
// TurboAssemblerBase::IndirectLoadConstant would helpfully emit a // MacroAssemblerBase::IndirectLoadConstant would helpfully emit a
// root-relative load rather than putting this value in the constants table // root-relative load rather than putting this value in the constants table
// where we expect it to be for patching. // where we expect it to be for patching.
counters_array = graph->NewNode(common.HeapConstant(Handle<HeapObject>::New( counters_array = graph->NewNode(common.HeapConstant(Handle<HeapObject>::New(


@ -3473,10 +3473,10 @@ wasm::WasmCompilationResult Pipeline::GenerateCodeForWasmNativeStub(
CodeGenerator* code_generator = pipeline.code_generator(); CodeGenerator* code_generator = pipeline.code_generator();
wasm::WasmCompilationResult result; wasm::WasmCompilationResult result;
code_generator->tasm()->GetCode( code_generator->masm()->GetCode(
nullptr, &result.code_desc, code_generator->safepoint_table_builder(), nullptr, &result.code_desc, code_generator->safepoint_table_builder(),
static_cast<int>(code_generator->handler_table_offset())); static_cast<int>(code_generator->handler_table_offset()));
result.instr_buffer = code_generator->tasm()->ReleaseBuffer(); result.instr_buffer = code_generator->masm()->ReleaseBuffer();
result.source_positions = code_generator->GetSourcePositionTable(); result.source_positions = code_generator->GetSourcePositionTable();
result.protected_instructions_data = result.protected_instructions_data =
code_generator->GetProtectedInstructionsData(); code_generator->GetProtectedInstructionsData();
@ -3702,11 +3702,11 @@ void Pipeline::GenerateCodeForWasmFunction(
auto result = std::make_unique<wasm::WasmCompilationResult>(); auto result = std::make_unique<wasm::WasmCompilationResult>();
CodeGenerator* code_generator = pipeline.code_generator(); CodeGenerator* code_generator = pipeline.code_generator();
code_generator->tasm()->GetCode( code_generator->masm()->GetCode(
nullptr, &result->code_desc, code_generator->safepoint_table_builder(), nullptr, &result->code_desc, code_generator->safepoint_table_builder(),
static_cast<int>(code_generator->handler_table_offset())); static_cast<int>(code_generator->handler_table_offset()));
result->instr_buffer = code_generator->tasm()->ReleaseBuffer(); result->instr_buffer = code_generator->masm()->ReleaseBuffer();
result->frame_slot_count = code_generator->frame()->GetTotalFrameSlotCount(); result->frame_slot_count = code_generator->frame()->GetTotalFrameSlotCount();
result->tagged_parameter_slots = call_descriptor->GetTaggedParameterSlots(); result->tagged_parameter_slots = call_descriptor->GetTaggedParameterSlots();
result->source_positions = code_generator->GetSourcePositionTable(); result->source_positions = code_generator->GetSourcePositionTable();


@ -447,7 +447,7 @@ void InitUnwindingRecord(Record* record, size_t code_size_in_bytes) {
// Hardcoded thunk. // Hardcoded thunk.
AssemblerOptions options; AssemblerOptions options;
options.record_reloc_info_for_serialization = false; options.record_reloc_info_for_serialization = false;
TurboAssembler masm(nullptr, options, CodeObjectRequired::kNo, MacroAssembler masm(nullptr, options, CodeObjectRequired::kNo,
NewAssemblerBuffer(64)); NewAssemblerBuffer(64));
masm.Mov(x16, masm.Mov(x16,
Operand(reinterpret_cast<uint64_t>(&CRASH_HANDLER_FUNCTION_NAME))); Operand(reinterpret_cast<uint64_t>(&CRASH_HANDLER_FUNCTION_NAME)));


@ -215,12 +215,12 @@ class IsolateData final {
// runtime checks. // runtime checks.
void* embedder_data_[Internals::kNumIsolateDataSlots] = {}; void* embedder_data_[Internals::kNumIsolateDataSlots] = {};
// Stores the state of the caller for TurboAssembler::CallCFunction so that // Stores the state of the caller for MacroAssembler::CallCFunction so that
// the sampling CPU profiler can iterate the stack during such calls. These // the sampling CPU profiler can iterate the stack during such calls. These
// are stored on IsolateData so that they can be stored to with only one move // are stored on IsolateData so that they can be stored to with only one move
// instruction in compiled code. // instruction in compiled code.
// //
// The FP and PC that are saved right before TurboAssembler::CallCFunction. // The FP and PC that are saved right before MacroAssembler::CallCFunction.
Address fast_c_call_caller_fp_ = kNullAddress; Address fast_c_call_caller_fp_ = kNullAddress;
Address fast_c_call_caller_pc_ = kNullAddress; Address fast_c_call_caller_pc_ = kNullAddress;
// The address of the fast API callback right before it's executed from // The address of the fast API callback right before it's executed from
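
The protocol sketched in this comment keeps the compiled-code side to two plain stores: right before a fast C call, generated code writes the caller's FP and PC into these slots, and the sampling profiler, if it interrupts the C code, reads them to bridge from the C stack back to the managed frame. The handshake in standalone form (names are illustrative):

#include <cstdint>

struct FastCCallStateSketch {
  uintptr_t caller_fp = 0;  // 0 (kNullAddress) means no fast call active
  uintptr_t caller_pc = 0;
};

void CallCFunctionSketch(FastCCallStateSketch& s, uintptr_t fp, uintptr_t pc,
                         void (*target)()) {
  s.caller_pc = pc;  // one store,
  s.caller_fp = fp;  // and one more; cheap to emit inline
  target();          // the profiler may sample in here and read both slots
  s.caller_fp = 0;   // mark the fast call as finished
}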


@ -2302,7 +2302,7 @@ void Return::GenerateCode(MaglevAssembler* masm, const ProcessingState& state) {
__ LeaveFrame(StackFrame::MAGLEV); __ LeaveFrame(StackFrame::MAGLEV);
// Drop receiver + arguments according to dynamic arguments size. // Drop receiver + arguments according to dynamic arguments size.
__ DropArguments(params_size, TurboAssembler::kCountIncludesReceiver); __ DropArguments(params_size, MacroAssembler::kCountIncludesReceiver);
__ Ret(); __ Ret();
} }


@ -2342,8 +2342,8 @@ void Return::GenerateCode(MaglevAssembler* masm, const ProcessingState& state) {
__ bind(&drop_dynamic_arg_size); __ bind(&drop_dynamic_arg_size);
// Drop receiver + arguments according to dynamic arguments size. // Drop receiver + arguments according to dynamic arguments size.
__ DropArguments(actual_params_size, r9, TurboAssembler::kCountIsInteger, __ DropArguments(actual_params_size, r9, MacroAssembler::kCountIsInteger,
TurboAssembler::kCountIncludesReceiver); MacroAssembler::kCountIncludesReceiver);
__ Ret(); __ Ret();
} }


@ -814,7 +814,7 @@ Handle<HeapObject> RegExpMacroAssemblerARM64::GetCode(Handle<String> source) {
DCHECK_EQ(registers_to_retain.Count(), kNumCalleeSavedRegisters); DCHECK_EQ(registers_to_retain.Count(), kNumCalleeSavedRegisters);
__ PushCPURegList(registers_to_retain); __ PushCPURegList(registers_to_retain);
__ Push<TurboAssembler::kSignLR>(lr, fp); __ Push<MacroAssembler::kSignLR>(lr, fp);
__ PushCPURegList(argument_registers); __ PushCPURegList(argument_registers);
// Set frame pointer in place. // Set frame pointer in place.
@ -1125,7 +1125,7 @@ Handle<HeapObject> RegExpMacroAssemblerARM64::GetCode(Handle<String> source) {
// Set stack pointer back to first register to retain. // Set stack pointer back to first register to retain.
__ Mov(sp, fp); __ Mov(sp, fp);
__ Pop<TurboAssembler::kAuthLR>(fp, lr); __ Pop<MacroAssembler::kAuthLR>(fp, lr);
// Restore registers. // Restore registers.
__ PopCPURegList(registers_to_retain); __ PopCPURegList(registers_to_retain);
@ -1656,14 +1656,14 @@ void RegExpMacroAssemblerARM64::CallIf(Label* to, Condition condition) {
void RegExpMacroAssemblerARM64::RestoreLinkRegister() { void RegExpMacroAssemblerARM64::RestoreLinkRegister() {
// TODO(v8:10026): Remove when we stop compacting for code objects that are // TODO(v8:10026): Remove when we stop compacting for code objects that are
// active on the call stack. // active on the call stack.
__ Pop<TurboAssembler::kAuthLR>(padreg, lr); __ Pop<MacroAssembler::kAuthLR>(padreg, lr);
__ Add(lr, lr, Operand(masm_->CodeObject())); __ Add(lr, lr, Operand(masm_->CodeObject()));
} }
void RegExpMacroAssemblerARM64::SaveLinkRegister() { void RegExpMacroAssemblerARM64::SaveLinkRegister() {
__ Sub(lr, lr, Operand(masm_->CodeObject())); __ Sub(lr, lr, Operand(masm_->CodeObject()));
__ Push<TurboAssembler::kSignLR>(lr, padreg); __ Push<MacroAssembler::kSignLR>(lr, padreg);
} }
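
As the TODO above notes, these two helpers exist because code objects can still be moved while active on the call stack: SaveLinkRegister stores the return address relative to the code object, and RestoreLinkRegister rebases it against the (possibly relocated) code object. The arithmetic in isolation:

#include <cstdint>

uintptr_t SaveLinkRegisterSketch(uintptr_t lr, uintptr_t code_object) {
  return lr - code_object;  // mirrors: __ Sub(lr, lr, Operand(CodeObject()))
}

uintptr_t RestoreLinkRegisterSketch(uintptr_t saved_offset,
                                    uintptr_t code_object_after_gc) {
  // Mirrors: __ Add(lr, lr, Operand(CodeObject())). Still correct if the
  // code object moved, because only the offset was stored on the stack.
  return saved_offset + code_object_after_gc;
}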


@ -162,7 +162,7 @@ inline void I64BinopI(LiftoffAssembler* assm, LiftoffRegister dst,
LeaveCC, al); LeaveCC, al);
} }
template <void (TurboAssembler::*op)(Register, Register, Register, Register, template <void (MacroAssembler::*op)(Register, Register, Register, Register,
Register), Register),
bool is_left_shift> bool is_left_shift>
inline void I64Shiftop(LiftoffAssembler* assm, LiftoffRegister dst, inline void I64Shiftop(LiftoffAssembler* assm, LiftoffRegister dst,
@ -184,7 +184,7 @@ inline void I64Shiftop(LiftoffAssembler* assm, LiftoffRegister dst,
Register* later_src_reg = is_left_shift ? &src_low : &src_high; Register* later_src_reg = is_left_shift ? &src_low : &src_high;
if (*later_src_reg == clobbered_dst_reg) { if (*later_src_reg == clobbered_dst_reg) {
*later_src_reg = assm->GetUnusedRegister(kGpReg, pinned).gp(); *later_src_reg = assm->GetUnusedRegister(kGpReg, pinned).gp();
assm->TurboAssembler::Move(*later_src_reg, clobbered_dst_reg); assm->MacroAssembler::Move(*later_src_reg, clobbered_dst_reg);
} }
(assm->*op)(dst_low, dst_high, src_low, src_high, amount_capped); (assm->*op)(dst_low, dst_high, src_low, src_high, amount_capped);
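
I64Shiftop receives the emitting routine as a non-type template parameter, so each instantiation (LslPair, AsrPair, LsrPair at the call sites below) compiles to a direct call with no runtime dispatch. The technique in isolation:

struct ToyAsm {
  void LslPair(int dst_lo, int dst_hi) { /* emit a left-shift pair */ }
  void LsrPair(int dst_lo, int dst_hi) { /* emit a right-shift pair */ }
};

template <void (ToyAsm::*op)(int, int)>
void EmitPairShift(ToyAsm* assm, int lo, int hi) {
  (assm->*op)(lo, hi);  // target fixed at instantiation time
}

void Demo(ToyAsm* assm) {
  EmitPairShift<&ToyAsm::LslPair>(assm, 0, 1);
  EmitPairShift<&ToyAsm::LsrPair>(assm, 0, 1);
}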
@ -210,14 +210,14 @@ inline void EmitFloatMinOrMax(LiftoffAssembler* assm, RegisterType dst,
MinOrMax min_or_max) { MinOrMax min_or_max) {
DCHECK(RegisterType::kSizeInBytes == 4 || RegisterType::kSizeInBytes == 8); DCHECK(RegisterType::kSizeInBytes == 4 || RegisterType::kSizeInBytes == 8);
if (lhs == rhs) { if (lhs == rhs) {
assm->TurboAssembler::Move(dst, lhs); assm->MacroAssembler::Move(dst, lhs);
return; return;
} }
Label done, is_nan; Label done, is_nan;
if (min_or_max == MinOrMax::kMin) { if (min_or_max == MinOrMax::kMin) {
assm->TurboAssembler::FloatMin(dst, lhs, rhs, &is_nan); assm->MacroAssembler::FloatMin(dst, lhs, rhs, &is_nan);
} else { } else {
assm->TurboAssembler::FloatMax(dst, lhs, rhs, &is_nan); assm->MacroAssembler::FloatMax(dst, lhs, rhs, &is_nan);
} }
assm->b(&done); assm->b(&done);
assm->bind(&is_nan); assm->bind(&is_nan);
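
The is_nan label is needed because FloatMin/FloatMax branch out when either operand is NaN, and the caller then materializes a canonical quiet NaN, as wasm semantics require; a plain compare-and-select would propagate one of the operands instead. The required semantics written out in standalone C++ (f64 min shown; f32 and max are analogous):

#include <cmath>
#include <limits>

double WasmF64Min(double a, double b) {
  if (std::isnan(a) || std::isnan(b)) {
    return std::numeric_limits<double>::quiet_NaN();  // canonical NaN
  }
  if (a == 0.0 && b == 0.0) {
    return std::signbit(a) ? a : b;  // min(-0, +0) must be -0
  }
  return a < b ? a : b;
}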
@ -547,7 +547,7 @@ void LiftoffAssembler::PatchPrepareStackFrame(
bind(&continuation); bind(&continuation);
// Now allocate the stack space. Note that this might do more than just // Now allocate the stack space. Note that this might do more than just
// decrementing the SP; consult {TurboAssembler::AllocateStackSpace}. // decrementing the SP; consult {MacroAssembler::AllocateStackSpace}.
AllocateStackSpace(frame_size); AllocateStackSpace(frame_size);
// Jump back to the start of the function, from {pc_offset()} to // Jump back to the start of the function, from {pc_offset()} to
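
The "more than just decrementing the SP" caveat most plausibly refers to stack probing (an assumption; the helper itself is not shown in this diff): frames larger than a page are claimed one page at a time so that each OS guard page is touched in order. The pattern as a standalone sketch, with an assumed 4 KiB page:

constexpr int kStackPageSizeSketch = 4096;

template <typename SubSp, typename TouchStackTop>
void AllocateStackSpaceSketch(int bytes, SubSp sub_sp, TouchStackTop touch) {
  while (bytes >= kStackPageSizeSketch) {
    sub_sp(kStackPageSizeSketch);
    touch();  // e.g. a store to [sp], committing the page
    bytes -= kStackPageSizeSketch;
  }
  if (bytes > 0) sub_sp(bytes);
}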
@ -584,14 +584,14 @@ void LiftoffAssembler::LoadConstant(LiftoffRegister reg, WasmValue value,
RelocInfo::Mode rmode) { RelocInfo::Mode rmode) {
switch (value.type().kind()) { switch (value.type().kind()) {
case kI32: case kI32:
TurboAssembler::Move(reg.gp(), Operand(value.to_i32(), rmode)); MacroAssembler::Move(reg.gp(), Operand(value.to_i32(), rmode));
break; break;
case kI64: { case kI64: {
DCHECK(RelocInfo::IsNoInfo(rmode)); DCHECK(RelocInfo::IsNoInfo(rmode));
int32_t low_word = value.to_i64(); int32_t low_word = value.to_i64();
int32_t high_word = value.to_i64() >> 32; int32_t high_word = value.to_i64() >> 32;
TurboAssembler::Move(reg.low_gp(), Operand(low_word)); MacroAssembler::Move(reg.low_gp(), Operand(low_word));
TurboAssembler::Move(reg.high_gp(), Operand(high_word)); MacroAssembler::Move(reg.high_gp(), Operand(high_word));
break; break;
} }
case kF32: case kF32:
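
In the kI64 case above, the constant is split into two 32-bit halves for the register pair: truncation to int32_t keeps the low word and the arithmetic shift extracts the high word. A quick worked example:

#include <cstdint>
#include <cstdio>

int main() {
  const int64_t value = 0x11223344AABBCCDDLL;
  const int32_t low_word = static_cast<int32_t>(value);         // 0xAABBCCDD
  const int32_t high_word = static_cast<int32_t>(value >> 32);  // 0x11223344
  std::printf("%08x %08x\n", static_cast<uint32_t>(high_word),
              static_cast<uint32_t>(low_word));  // 11223344 aabbccdd
}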
@ -1450,7 +1450,7 @@ void LiftoffAssembler::MoveStackValue(uint32_t dst_offset, uint32_t src_offset,
void LiftoffAssembler::Move(Register dst, Register src, ValueKind kind) { void LiftoffAssembler::Move(Register dst, Register src, ValueKind kind) {
DCHECK_NE(dst, src); DCHECK_NE(dst, src);
DCHECK(kind == kI32 || is_reference(kind)); DCHECK(kind == kI32 || is_reference(kind));
TurboAssembler::Move(dst, src); MacroAssembler::Move(dst, src);
} }
void LiftoffAssembler::Move(DoubleRegister dst, DoubleRegister src, void LiftoffAssembler::Move(DoubleRegister dst, DoubleRegister src,
@ -1828,7 +1828,7 @@ bool LiftoffAssembler::emit_i64_remu(LiftoffRegister dst, LiftoffRegister lhs,
void LiftoffAssembler::emit_i64_shl(LiftoffRegister dst, LiftoffRegister src, void LiftoffAssembler::emit_i64_shl(LiftoffRegister dst, LiftoffRegister src,
Register amount) { Register amount) {
liftoff::I64Shiftop<&TurboAssembler::LslPair, true>(this, dst, src, amount); liftoff::I64Shiftop<&MacroAssembler::LslPair, true>(this, dst, src, amount);
} }
void LiftoffAssembler::emit_i64_shli(LiftoffRegister dst, LiftoffRegister src, void LiftoffAssembler::emit_i64_shli(LiftoffRegister dst, LiftoffRegister src,
@ -1843,7 +1843,7 @@ void LiftoffAssembler::emit_i64_shli(LiftoffRegister dst, LiftoffRegister src,
void LiftoffAssembler::emit_i64_sar(LiftoffRegister dst, LiftoffRegister src, void LiftoffAssembler::emit_i64_sar(LiftoffRegister dst, LiftoffRegister src,
Register amount) { Register amount) {
liftoff::I64Shiftop<&TurboAssembler::AsrPair, false>(this, dst, src, amount); liftoff::I64Shiftop<&MacroAssembler::AsrPair, false>(this, dst, src, amount);
} }
 void LiftoffAssembler::emit_i64_sari(LiftoffRegister dst, LiftoffRegister src,
@@ -1858,7 +1858,7 @@ void LiftoffAssembler::emit_i64_sari(LiftoffRegister dst, LiftoffRegister src,
 void LiftoffAssembler::emit_i64_shr(LiftoffRegister dst, LiftoffRegister src,
                                     Register amount) {
-  liftoff::I64Shiftop<&TurboAssembler::LsrPair, false>(this, dst, src, amount);
+  liftoff::I64Shiftop<&MacroAssembler::LsrPair, false>(this, dst, src, amount);
 }

 void LiftoffAssembler::emit_i64_shri(LiftoffRegister dst, LiftoffRegister src,
@@ -2085,7 +2085,7 @@ bool LiftoffAssembler::emit_type_conversion(WasmOpcode opcode,
                                             LiftoffRegister src, Label* trap) {
   switch (opcode) {
     case kExprI32ConvertI64:
-      TurboAssembler::Move(dst.gp(), src.low_gp());
+      MacroAssembler::Move(dst.gp(), src.low_gp());
       return true;
     case kExprI32SConvertF32: {
       UseScratchRegisterScope temps(this);
@@ -2272,7 +2272,7 @@ void LiftoffAssembler::emit_i64_signextend_i16(LiftoffRegister dst,
 void LiftoffAssembler::emit_i64_signextend_i32(LiftoffRegister dst,
                                                LiftoffRegister src) {
-  TurboAssembler::Move(dst.low_gp(), src.low_gp());
+  MacroAssembler::Move(dst.low_gp(), src.low_gp());
   mov(dst.high_gp(), Operand(src.low_gp(), ASR, 31));
 }
@@ -2472,7 +2472,7 @@ void LiftoffAssembler::LoadTransform(LiftoffRegister dst, Register src_addr,
     } else if (memtype == MachineType::Int64()) {
       vld1(Neon32, NeonListOperand(dst.low_fp()),
            NeonMemOperand(actual_src_addr));
-      TurboAssembler::Move(dst.high_fp(), dst.low_fp());
+      MacroAssembler::Move(dst.high_fp(), dst.low_fp());
     }
   }
 }
@@ -2484,13 +2484,13 @@ void LiftoffAssembler::LoadLane(LiftoffRegister dst, LiftoffRegister src,
   UseScratchRegisterScope temps(this);
   Register actual_src_addr = liftoff::CalculateActualAddress(
       this, &temps, addr, offset_reg, offset_imm);
-  TurboAssembler::Move(liftoff::GetSimd128Register(dst),
+  MacroAssembler::Move(liftoff::GetSimd128Register(dst),
                        liftoff::GetSimd128Register(src));
   *protected_load_pc = pc_offset();
   LoadStoreLaneParams load_params(type.mem_type().representation(), laneidx);
   NeonListOperand dst_op =
       NeonListOperand(load_params.low_op ? dst.low_fp() : dst.high_fp());
-  TurboAssembler::LoadLane(load_params.sz, dst_op, load_params.laneidx,
+  MacroAssembler::LoadLane(load_params.sz, dst_op, load_params.laneidx,
                            NeonMemOperand(actual_src_addr));
 }
@@ -2506,7 +2506,7 @@ void LiftoffAssembler::StoreLane(Register dst, Register offset,
   LoadStoreLaneParams store_params(type.mem_rep(), laneidx);
   NeonListOperand src_op =
       NeonListOperand(store_params.low_op ? src.low_fp() : src.high_fp());
-  TurboAssembler::StoreLane(store_params.sz, src_op, store_params.laneidx,
+  MacroAssembler::StoreLane(store_params.sz, src_op, store_params.laneidx,
                             NeonMemOperand(actual_dst_addr));
 }
@@ -2519,7 +2519,7 @@ void LiftoffAssembler::emit_i8x16_swizzle(LiftoffRegister dst,
   if (dst == lhs) {
     // dst will be overwritten, so keep the table somewhere else.
     QwNeonRegister tbl = temps.AcquireQ();
-    TurboAssembler::Move(tbl, liftoff::GetSimd128Register(lhs));
+    MacroAssembler::Move(tbl, liftoff::GetSimd128Register(lhs));
     table = NeonListOperand(tbl);
   }
@@ -2564,8 +2564,8 @@ void LiftoffAssembler::emit_s128_relaxed_laneselect(LiftoffRegister dst,
 void LiftoffAssembler::emit_f64x2_splat(LiftoffRegister dst,
                                         LiftoffRegister src) {
-  TurboAssembler::Move(dst.low_fp(), src.fp());
-  TurboAssembler::Move(dst.high_fp(), src.fp());
+  MacroAssembler::Move(dst.low_fp(), src.fp());
+  MacroAssembler::Move(dst.high_fp(), src.fp());
 }

 void LiftoffAssembler::emit_f64x2_extract_lane(LiftoffRegister dst,
@@ -4243,7 +4243,7 @@ void LiftoffAssembler::CallTrapCallbackForTesting() {
 void LiftoffAssembler::AssertUnreachable(AbortReason reason) {
   // Asserts unreachable within the wasm code.
-  TurboAssembler::AssertUnreachable(reason);
+  MacroAssembler::AssertUnreachable(reason);
 }

 void LiftoffAssembler::PushRegisters(LiftoffRegList regs) {

View File

@@ -357,7 +357,7 @@ void LiftoffAssembler::PatchPrepareStackFrame(
   bind(&continuation);

   // Now allocate the stack space. Note that this might do more than just
-  // decrementing the SP; consult {TurboAssembler::Claim}.
+  // decrementing the SP; consult {MacroAssembler::Claim}.
   Claim(frame_size, 1);

   // Jump back to the start of the function, from {pc_offset()} to
@@ -3252,7 +3252,7 @@ void LiftoffAssembler::CallTrapCallbackForTesting() {
 }

 void LiftoffAssembler::AssertUnreachable(AbortReason reason) {
-  TurboAssembler::AssertUnreachable(reason);
+  MacroAssembler::AssertUnreachable(reason);
 }

 void LiftoffAssembler::PushRegisters(LiftoffRegList regs) {
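
Aside: the {MacroAssembler::Claim} reference matters because on arm64 the stack pointer must stay 16-byte aligned, so claiming frame slots can subtract more than the raw byte count. A minimal sketch of that rounding, under the assumption of 8-byte slots (the helper name is illustrative, not V8 API):

    #include <cstdint>

    // Hypothetical model: claim `count` 8-byte stack slots while keeping
    // sp 16-byte aligned, as arm64 requires. The real Claim also emits
    // the actual subtraction instruction.
    uint64_t ClaimedSp(uint64_t sp, int count) {
      uint64_t bytes = static_cast<uint64_t>(count) * 8;
      bytes = (bytes + 15) & ~uint64_t{15};  // round up to a 16-byte multiple
      return sp - bytes;
    }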

View File

@@ -288,7 +288,7 @@ void LiftoffAssembler::PatchPrepareStackFrame(
   bind(&continuation);

   // Now allocate the stack space. Note that this might do more than just
-  // decrementing the SP; consult {TurboAssembler::AllocateStackSpace}.
+  // decrementing the SP; consult {MacroAssembler::AllocateStackSpace}.
   AllocateStackSpace(frame_size);

   // Jump back to the start of the function, from {pc_offset()} to
@@ -319,21 +319,21 @@ void LiftoffAssembler::LoadConstant(LiftoffRegister reg, WasmValue value,
                                     RelocInfo::Mode rmode) {
   switch (value.type().kind()) {
     case kI32:
-      TurboAssembler::Move(reg.gp(), Immediate(value.to_i32(), rmode));
+      MacroAssembler::Move(reg.gp(), Immediate(value.to_i32(), rmode));
       break;
     case kI64: {
       DCHECK(RelocInfo::IsNoInfo(rmode));
       int32_t low_word = value.to_i64();
       int32_t high_word = value.to_i64() >> 32;
-      TurboAssembler::Move(reg.low_gp(), Immediate(low_word));
-      TurboAssembler::Move(reg.high_gp(), Immediate(high_word));
+      MacroAssembler::Move(reg.low_gp(), Immediate(low_word));
+      MacroAssembler::Move(reg.high_gp(), Immediate(high_word));
       break;
     }
     case kF32:
-      TurboAssembler::Move(reg.fp(), value.to_f32_boxed().get_bits());
+      MacroAssembler::Move(reg.fp(), value.to_f32_boxed().get_bits());
       break;
     case kF64:
-      TurboAssembler::Move(reg.fp(), value.to_f64_boxed().get_bits());
+      MacroAssembler::Move(reg.fp(), value.to_f64_boxed().get_bits());
       break;
     default:
      UNREACHABLE();
@@ -1704,7 +1704,7 @@ inline LiftoffRegister ReplaceInPair(LiftoffRegister pair, Register old_reg,
 inline void Emit64BitShiftOperation(
     LiftoffAssembler* assm, LiftoffRegister dst, LiftoffRegister src,
-    Register amount, void (TurboAssembler::*emit_shift)(Register, Register)) {
+    Register amount, void (MacroAssembler::*emit_shift)(Register, Register)) {
   // Temporary registers cannot overlap with {dst}.
   LiftoffRegList pinned{dst};
@@ -1743,7 +1743,7 @@ inline void Emit64BitShiftOperation(
 void LiftoffAssembler::emit_i64_shl(LiftoffRegister dst, LiftoffRegister src,
                                     Register amount) {
   liftoff::Emit64BitShiftOperation(this, dst, src, amount,
-                                   &TurboAssembler::ShlPair_cl);
+                                   &MacroAssembler::ShlPair_cl);
 }

 void LiftoffAssembler::emit_i64_shli(LiftoffRegister dst, LiftoffRegister src,
@@ -1762,7 +1762,7 @@ void LiftoffAssembler::emit_i64_shli(LiftoffRegister dst, LiftoffRegister src,
 void LiftoffAssembler::emit_i64_sar(LiftoffRegister dst, LiftoffRegister src,
                                     Register amount) {
   liftoff::Emit64BitShiftOperation(this, dst, src, amount,
-                                   &TurboAssembler::SarPair_cl);
+                                   &MacroAssembler::SarPair_cl);
 }

 void LiftoffAssembler::emit_i64_sari(LiftoffRegister dst, LiftoffRegister src,
@@ -1781,7 +1781,7 @@ void LiftoffAssembler::emit_i64_sari(LiftoffRegister dst, LiftoffRegister src,
 void LiftoffAssembler::emit_i64_shr(LiftoffRegister dst, LiftoffRegister src,
                                     Register amount) {
   liftoff::Emit64BitShiftOperation(this, dst, src, amount,
-                                   &TurboAssembler::ShrPair_cl);
+                                   &MacroAssembler::ShrPair_cl);
 }

 void LiftoffAssembler::emit_i64_shri(LiftoffRegister dst, LiftoffRegister src,
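
Note: ia32 keeps a 64-bit wasm value in a pair of 32-bit registers, so the ShlPair_cl/SarPair_cl/ShrPair_cl helpers shift across the pair (the _cl suffix reflects that the variable shift amount must sit in the cl register). A plain C++ model of the left-shift case (illustrative, not the emitted code):

    #include <cstdint>

    // Model of a 64-bit left shift over a {high, low} register pair.
    void ShlPairModel(uint32_t& high, uint32_t& low, unsigned amount) {
      amount &= 63;  // wasm shift amounts wrap modulo 64
      uint64_t wide = (uint64_t{high} << 32) | low;
      wide <<= amount;
      high = static_cast<uint32_t>(wide >> 32);
      low = static_cast<uint32_t>(wide);
    }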
@@ -2025,10 +2025,10 @@ void LiftoffAssembler::emit_f32_copysign(DoubleRegister dst, DoubleRegister lhs,
 void LiftoffAssembler::emit_f32_abs(DoubleRegister dst, DoubleRegister src) {
   static constexpr uint32_t kSignBit = uint32_t{1} << 31;
   if (dst == src) {
-    TurboAssembler::Move(liftoff::kScratchDoubleReg, kSignBit - 1);
+    MacroAssembler::Move(liftoff::kScratchDoubleReg, kSignBit - 1);
     Andps(dst, liftoff::kScratchDoubleReg);
   } else {
-    TurboAssembler::Move(dst, kSignBit - 1);
+    MacroAssembler::Move(dst, kSignBit - 1);
     Andps(dst, src);
   }
 }
@@ -2036,10 +2036,10 @@ void LiftoffAssembler::emit_f32_abs(DoubleRegister dst, DoubleRegister src) {
 void LiftoffAssembler::emit_f32_neg(DoubleRegister dst, DoubleRegister src) {
   static constexpr uint32_t kSignBit = uint32_t{1} << 31;
   if (dst == src) {
-    TurboAssembler::Move(liftoff::kScratchDoubleReg, kSignBit);
+    MacroAssembler::Move(liftoff::kScratchDoubleReg, kSignBit);
     Xorps(dst, liftoff::kScratchDoubleReg);
   } else {
-    TurboAssembler::Move(dst, kSignBit);
+    MacroAssembler::Move(dst, kSignBit);
     Xorps(dst, src);
   }
 }
@@ -2162,10 +2162,10 @@ void LiftoffAssembler::emit_f64_max(DoubleRegister dst, DoubleRegister lhs,
 void LiftoffAssembler::emit_f64_abs(DoubleRegister dst, DoubleRegister src) {
   static constexpr uint64_t kSignBit = uint64_t{1} << 63;
   if (dst == src) {
-    TurboAssembler::Move(liftoff::kScratchDoubleReg, kSignBit - 1);
+    MacroAssembler::Move(liftoff::kScratchDoubleReg, kSignBit - 1);
     Andpd(dst, liftoff::kScratchDoubleReg);
   } else {
-    TurboAssembler::Move(dst, kSignBit - 1);
+    MacroAssembler::Move(dst, kSignBit - 1);
     Andpd(dst, src);
   }
 }
@@ -2173,10 +2173,10 @@ void LiftoffAssembler::emit_f64_abs(DoubleRegister dst, DoubleRegister src) {
 void LiftoffAssembler::emit_f64_neg(DoubleRegister dst, DoubleRegister src) {
   static constexpr uint64_t kSignBit = uint64_t{1} << 63;
   if (dst == src) {
-    TurboAssembler::Move(liftoff::kScratchDoubleReg, kSignBit);
+    MacroAssembler::Move(liftoff::kScratchDoubleReg, kSignBit);
     Xorpd(dst, liftoff::kScratchDoubleReg);
   } else {
-    TurboAssembler::Move(dst, kSignBit);
+    MacroAssembler::Move(dst, kSignBit);
     Xorpd(dst, src);
   }
 }
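
Background: these four emitters lean on the IEEE-754 bit layout. abs clears the top (sign) bit by ANDing with kSignBit - 1; neg flips it by XORing with kSignBit. A scalar model of the trick (illustrative; the real code applies it via Andps/Xorps and their double-precision twins):

    #include <cstdint>
    #include <cstring>

    float F32Abs(float x) {
      uint32_t bits;
      std::memcpy(&bits, &x, sizeof(bits));
      bits &= (uint32_t{1} << 31) - 1;  // clear the sign bit: |x|
      std::memcpy(&x, &bits, sizeof(bits));
      return x;
    }

    float F32Neg(float x) {
      uint32_t bits;
      std::memcpy(&bits, &x, sizeof(bits));
      bits ^= uint32_t{1} << 31;  // flip the sign bit: -x
      std::memcpy(&x, &bits, sizeof(bits));
      return x;
    }

This is total: NaN and infinities work too, because only the sign bit is touched.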
@@ -2739,7 +2739,7 @@ inline void EmitAnyTrue(LiftoffAssembler* assm, LiftoffRegister dst,
   assm->cmov(zero, dst.gp(), tmp);
 }

-template <void (SharedTurboAssembler::*pcmp)(XMMRegister, XMMRegister)>
+template <void (SharedMacroAssemblerBase::*pcmp)(XMMRegister, XMMRegister)>
 inline void EmitAllTrue(LiftoffAssembler* assm, LiftoffRegister dst,
                         LiftoffRegister src,
                         base::Optional<CpuFeature> feature = base::nullopt) {
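
Note: the pcmp template parameter selects the lane-width equality compare used to detect zero lanes. What the all_true family has to compute, per the wasm spec, is just this (illustrative C++, unrelated to the actual SSE sequence):

    #include <cstdint>

    // Model of wasm i8x16.all_true: 1 iff every byte lane is non-zero.
    int I8x16AllTrue(const uint8_t lanes[16]) {
      for (int i = 0; i < 16; ++i) {
        if (lanes[i] == 0) return 0;
      }
      return 1;
    }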
@@ -3279,14 +3279,14 @@ void LiftoffAssembler::emit_s128_const(LiftoffRegister dst,
                                        const uint8_t imms[16]) {
   uint64_t vals[2];
   memcpy(vals, imms, sizeof(vals));
-  TurboAssembler::Move(dst.fp(), vals[0]);
+  MacroAssembler::Move(dst.fp(), vals[0]);

   uint64_t high = vals[1];
   Register tmp = GetUnusedRegister(RegClass::kGpReg, {}).gp();
-  TurboAssembler::Move(tmp, Immediate(high & 0xffff'ffff));
+  MacroAssembler::Move(tmp, Immediate(high & 0xffff'ffff));
   Pinsrd(dst.fp(), tmp, 2);

-  TurboAssembler::Move(tmp, Immediate(high >> 32));
+  MacroAssembler::Move(tmp, Immediate(high >> 32));
   Pinsrd(dst.fp(), tmp, 3);
 }
@@ -3347,7 +3347,7 @@ void LiftoffAssembler::emit_v128_anytrue(LiftoffRegister dst,

 void LiftoffAssembler::emit_i8x16_alltrue(LiftoffRegister dst,
                                           LiftoffRegister src) {
-  liftoff::EmitAllTrue<&TurboAssembler::Pcmpeqb>(this, dst, src);
+  liftoff::EmitAllTrue<&MacroAssembler::Pcmpeqb>(this, dst, src);
 }

 void LiftoffAssembler::emit_i8x16_bitmask(LiftoffRegister dst,
@@ -3483,7 +3483,7 @@ void LiftoffAssembler::emit_i16x8_neg(LiftoffRegister dst,

 void LiftoffAssembler::emit_i16x8_alltrue(LiftoffRegister dst,
                                           LiftoffRegister src) {
-  liftoff::EmitAllTrue<&TurboAssembler::Pcmpeqw>(this, dst, src);
+  liftoff::EmitAllTrue<&MacroAssembler::Pcmpeqw>(this, dst, src);
 }

 void LiftoffAssembler::emit_i16x8_bitmask(LiftoffRegister dst,
@@ -3694,7 +3694,7 @@ void LiftoffAssembler::emit_i32x4_neg(LiftoffRegister dst,

 void LiftoffAssembler::emit_i32x4_alltrue(LiftoffRegister dst,
                                           LiftoffRegister src) {
-  liftoff::EmitAllTrue<&TurboAssembler::Pcmpeqd>(this, dst, src);
+  liftoff::EmitAllTrue<&MacroAssembler::Pcmpeqd>(this, dst, src);
 }

 void LiftoffAssembler::emit_i32x4_bitmask(LiftoffRegister dst,
@@ -3866,7 +3866,7 @@ void LiftoffAssembler::emit_i64x2_neg(LiftoffRegister dst,

 void LiftoffAssembler::emit_i64x2_alltrue(LiftoffRegister dst,
                                           LiftoffRegister src) {
-  liftoff::EmitAllTrue<&TurboAssembler::Pcmpeqq>(this, dst, src, SSE4_1);
+  liftoff::EmitAllTrue<&MacroAssembler::Pcmpeqq>(this, dst, src, SSE4_1);
 }

 void LiftoffAssembler::emit_i64x2_shl(LiftoffRegister dst, LiftoffRegister lhs,
@@ -4591,7 +4591,7 @@ void LiftoffAssembler::CallTrapCallbackForTesting() {
 }

 void LiftoffAssembler::AssertUnreachable(AbortReason reason) {
-  TurboAssembler::AssertUnreachable(reason);
+  MacroAssembler::AssertUnreachable(reason);
 }

 void LiftoffAssembler::PushRegisters(LiftoffRegList regs) {

View File

@@ -610,7 +610,7 @@ AssemblerOptions DefaultLiftoffOptions() { return AssemblerOptions{}; }
 }  // namespace

 LiftoffAssembler::LiftoffAssembler(std::unique_ptr<AssemblerBuffer> buffer)
-    : TurboAssembler(nullptr, DefaultLiftoffOptions(), CodeObjectRequired::kNo,
+    : MacroAssembler(nullptr, DefaultLiftoffOptions(), CodeObjectRequired::kNo,
                      std::move(buffer)) {
   set_abort_hard(true);  // Avoid calls to Abort.
 }

View File

@@ -98,7 +98,7 @@ class FreezeCacheState {
 #endif
 };

-class LiftoffAssembler : public TurboAssembler {
+class LiftoffAssembler : public MacroAssembler {
  public:
   // Each slot in our stack frame currently has exactly 8 bytes.
   static constexpr int kStackSlotSize = 8;

View File

@@ -222,7 +222,7 @@ void LiftoffAssembler::PatchPrepareStackFrame(
   // We can't run out of space, just pass anything big enough to not cause the
   // assembler to try to grow the buffer.
   constexpr int kAvailableSpace = 256;
-  TurboAssembler patching_assembler(
+  MacroAssembler patching_assembler(
       nullptr, AssemblerOptions{}, CodeObjectRequired::kNo,
       ExternalAssemblerBuffer(buffer_start_ + offset, kAvailableSpace));
@@ -313,16 +313,16 @@ void LiftoffAssembler::LoadConstant(LiftoffRegister reg, WasmValue value,
                                     RelocInfo::Mode rmode) {
   switch (value.type().kind()) {
     case kI32:
-      TurboAssembler::li(reg.gp(), Operand(value.to_i32(), rmode));
+      MacroAssembler::li(reg.gp(), Operand(value.to_i32(), rmode));
       break;
     case kI64:
-      TurboAssembler::li(reg.gp(), Operand(value.to_i64(), rmode));
+      MacroAssembler::li(reg.gp(), Operand(value.to_i64(), rmode));
       break;
     case kF32:
-      TurboAssembler::Move(reg.fp(), value.to_f32_boxed().get_bits());
+      MacroAssembler::Move(reg.fp(), value.to_f32_boxed().get_bits());
       break;
     case kF64:
-      TurboAssembler::Move(reg.fp(), value.to_f64_boxed().get_bits());
+      MacroAssembler::Move(reg.fp(), value.to_f64_boxed().get_bits());
       break;
     default:
       UNREACHABLE();
@@ -441,27 +441,27 @@ void LiftoffAssembler::Load(LiftoffRegister dst, Register src_addr,
       break;
     case LoadType::kI32Load16U:
     case LoadType::kI64Load16U:
-      TurboAssembler::Ld_hu(dst.gp(), src_op);
+      MacroAssembler::Ld_hu(dst.gp(), src_op);
       break;
     case LoadType::kI32Load16S:
     case LoadType::kI64Load16S:
-      TurboAssembler::Ld_h(dst.gp(), src_op);
+      MacroAssembler::Ld_h(dst.gp(), src_op);
       break;
     case LoadType::kI64Load32U:
-      TurboAssembler::Ld_wu(dst.gp(), src_op);
+      MacroAssembler::Ld_wu(dst.gp(), src_op);
       break;
     case LoadType::kI32Load:
     case LoadType::kI64Load32S:
-      TurboAssembler::Ld_w(dst.gp(), src_op);
+      MacroAssembler::Ld_w(dst.gp(), src_op);
       break;
     case LoadType::kI64Load:
-      TurboAssembler::Ld_d(dst.gp(), src_op);
+      MacroAssembler::Ld_d(dst.gp(), src_op);
       break;
     case LoadType::kF32Load:
-      TurboAssembler::Fld_s(dst.fp(), src_op);
+      MacroAssembler::Fld_s(dst.fp(), src_op);
       break;
     case LoadType::kF64Load:
-      TurboAssembler::Fld_d(dst.fp(), src_op);
+      MacroAssembler::Fld_d(dst.fp(), src_op);
       break;
     case LoadType::kS128Load:
       UNREACHABLE();
@@ -487,20 +487,20 @@ void LiftoffAssembler::Store(Register dst_addr, Register offset_reg,
       break;
     case StoreType::kI32Store16:
     case StoreType::kI64Store16:
-      TurboAssembler::St_h(src.gp(), dst_op);
+      MacroAssembler::St_h(src.gp(), dst_op);
       break;
     case StoreType::kI32Store:
     case StoreType::kI64Store32:
-      TurboAssembler::St_w(src.gp(), dst_op);
+      MacroAssembler::St_w(src.gp(), dst_op);
       break;
     case StoreType::kI64Store:
-      TurboAssembler::St_d(src.gp(), dst_op);
+      MacroAssembler::St_d(src.gp(), dst_op);
       break;
     case StoreType::kF32Store:
-      TurboAssembler::Fst_s(src.fp(), dst_op);
+      MacroAssembler::Fst_s(src.fp(), dst_op);
       break;
     case StoreType::kF64Store:
-      TurboAssembler::Fst_d(src.fp(), dst_op);
+      MacroAssembler::Fst_d(src.fp(), dst_op);
       break;
     case StoreType::kS128Store:
       UNREACHABLE();
@@ -887,14 +887,14 @@ void LiftoffAssembler::MoveStackValue(uint32_t dst_offset, uint32_t src_offset,
 void LiftoffAssembler::Move(Register dst, Register src, ValueKind kind) {
   DCHECK_NE(dst, src);
   // TODO(ksreten): Handle different sizes here.
-  TurboAssembler::Move(dst, src);
+  MacroAssembler::Move(dst, src);
 }

 void LiftoffAssembler::Move(DoubleRegister dst, DoubleRegister src,
                             ValueKind kind) {
   DCHECK_NE(dst, src);
   if (kind != kS128) {
-    TurboAssembler::Move(dst, src);
+    MacroAssembler::Move(dst, src);
   } else {
     UNREACHABLE();
   }
@@ -917,7 +917,7 @@ void LiftoffAssembler::Spill(int offset, LiftoffRegister reg, ValueKind kind) {
       Fst_s(reg.fp(), dst);
       break;
     case kF64:
-      TurboAssembler::Fst_d(reg.fp(), dst);
+      MacroAssembler::Fst_d(reg.fp(), dst);
       break;
     case kS128:
       UNREACHABLE();
@@ -934,7 +934,7 @@ void LiftoffAssembler::Spill(int offset, WasmValue value) {
     case kI32: {
       UseScratchRegisterScope temps(this);
       Register scratch = temps.Acquire();
-      TurboAssembler::li(scratch, Operand(value.to_i32()));
+      MacroAssembler::li(scratch, Operand(value.to_i32()));
       St_w(scratch, dst);
       break;
     }
@@ -943,7 +943,7 @@ void LiftoffAssembler::Spill(int offset, WasmValue value) {
     case kRefNull: {
      UseScratchRegisterScope temps(this);
       Register scratch = temps.Acquire();
-      TurboAssembler::li(scratch, value.to_i64());
+      MacroAssembler::li(scratch, value.to_i64());
       St_d(scratch, dst);
       break;
     }
@@ -971,7 +971,7 @@ void LiftoffAssembler::Fill(LiftoffRegister reg, int offset, ValueKind kind) {
       Fld_s(reg.fp(), src);
       break;
     case kF64:
-      TurboAssembler::Fld_d(reg.fp(), src);
+      MacroAssembler::Fld_d(reg.fp(), src);
       break;
     case kS128:
       UNREACHABLE();
@@ -1023,16 +1023,16 @@ void LiftoffAssembler::LoadSpillAddress(Register dst, int offset,
 }

 void LiftoffAssembler::emit_i64_clz(LiftoffRegister dst, LiftoffRegister src) {
-  TurboAssembler::Clz_d(dst.gp(), src.gp());
+  MacroAssembler::Clz_d(dst.gp(), src.gp());
 }

 void LiftoffAssembler::emit_i64_ctz(LiftoffRegister dst, LiftoffRegister src) {
-  TurboAssembler::Ctz_d(dst.gp(), src.gp());
+  MacroAssembler::Ctz_d(dst.gp(), src.gp());
 }

 bool LiftoffAssembler::emit_i64_popcnt(LiftoffRegister dst,
                                        LiftoffRegister src) {
-  TurboAssembler::Popcnt_d(dst.gp(), src.gp());
+  MacroAssembler::Popcnt_d(dst.gp(), src.gp());
   return true;
 }
@@ -1046,42 +1046,42 @@ void LiftoffAssembler::IncrementSmi(LiftoffRegister dst, int offset) {
 }

 void LiftoffAssembler::emit_i32_mul(Register dst, Register lhs, Register rhs) {
-  TurboAssembler::Mul_w(dst, lhs, rhs);
+  MacroAssembler::Mul_w(dst, lhs, rhs);
 }

 void LiftoffAssembler::emit_i32_divs(Register dst, Register lhs, Register rhs,
                                      Label* trap_div_by_zero,
                                      Label* trap_div_unrepresentable) {
-  TurboAssembler::Branch(trap_div_by_zero, eq, rhs, Operand(zero_reg));
+  MacroAssembler::Branch(trap_div_by_zero, eq, rhs, Operand(zero_reg));

   // Check if lhs == kMinInt and rhs == -1, since this case is unrepresentable.
-  TurboAssembler::li(kScratchReg, 1);
-  TurboAssembler::li(kScratchReg2, 1);
-  TurboAssembler::LoadZeroOnCondition(kScratchReg, lhs, Operand(kMinInt), eq);
-  TurboAssembler::LoadZeroOnCondition(kScratchReg2, rhs, Operand(-1), eq);
+  MacroAssembler::li(kScratchReg, 1);
+  MacroAssembler::li(kScratchReg2, 1);
+  MacroAssembler::LoadZeroOnCondition(kScratchReg, lhs, Operand(kMinInt), eq);
+  MacroAssembler::LoadZeroOnCondition(kScratchReg2, rhs, Operand(-1), eq);
   add_d(kScratchReg, kScratchReg, kScratchReg2);
-  TurboAssembler::Branch(trap_div_unrepresentable, eq, kScratchReg,
+  MacroAssembler::Branch(trap_div_unrepresentable, eq, kScratchReg,
                          Operand(zero_reg));

-  TurboAssembler::Div_w(dst, lhs, rhs);
+  MacroAssembler::Div_w(dst, lhs, rhs);
 }

 void LiftoffAssembler::emit_i32_divu(Register dst, Register lhs, Register rhs,
                                      Label* trap_div_by_zero) {
-  TurboAssembler::Branch(trap_div_by_zero, eq, rhs, Operand(zero_reg));
-  TurboAssembler::Div_wu(dst, lhs, rhs);
+  MacroAssembler::Branch(trap_div_by_zero, eq, rhs, Operand(zero_reg));
+  MacroAssembler::Div_wu(dst, lhs, rhs);
 }

 void LiftoffAssembler::emit_i32_rems(Register dst, Register lhs, Register rhs,
                                      Label* trap_div_by_zero) {
-  TurboAssembler::Branch(trap_div_by_zero, eq, rhs, Operand(zero_reg));
-  TurboAssembler::Mod_w(dst, lhs, rhs);
+  MacroAssembler::Branch(trap_div_by_zero, eq, rhs, Operand(zero_reg));
+  MacroAssembler::Mod_w(dst, lhs, rhs);
 }

 void LiftoffAssembler::emit_i32_remu(Register dst, Register lhs, Register rhs,
                                      Label* trap_div_by_zero) {
-  TurboAssembler::Branch(trap_div_by_zero, eq, rhs, Operand(zero_reg));
-  TurboAssembler::Mod_wu(dst, lhs, rhs);
+  MacroAssembler::Branch(trap_div_by_zero, eq, rhs, Operand(zero_reg));
+  MacroAssembler::Mod_wu(dst, lhs, rhs);
 }

 #define I32_BINOP(name, instruction) \
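
Note: emit_i32_divs materializes both wasm trap conditions for signed division without extra branches around the divide itself: divide-by-zero, and kMinInt / -1, whose mathematical result 2^31 does not fit in int32. The condition it encodes is equivalent to (illustrative C++):

    #include <cstdint>
    #include <limits>

    enum class DivTrap { kNone, kDivByZero, kUnrepresentable };

    DivTrap CheckI32DivTraps(int32_t lhs, int32_t rhs) {
      if (rhs == 0) return DivTrap::kDivByZero;
      // INT32_MIN / -1 == 2147483648, one past INT32_MAX.
      if (lhs == std::numeric_limits<int32_t>::min() && rhs == -1) {
        return DivTrap::kUnrepresentable;
      }
      return DivTrap::kNone;
    }

The emitted sequence computes both LoadZeroOnCondition results into scratch registers and adds them, so the unrepresentable trap fires only when both equalities hold at once.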
@@ -1117,15 +1117,15 @@ I32_BINOP_I(xor, Xor)
 #undef I32_BINOP_I

 void LiftoffAssembler::emit_i32_clz(Register dst, Register src) {
-  TurboAssembler::Clz_w(dst, src);
+  MacroAssembler::Clz_w(dst, src);
 }

 void LiftoffAssembler::emit_i32_ctz(Register dst, Register src) {
-  TurboAssembler::Ctz_w(dst, src);
+  MacroAssembler::Ctz_w(dst, src);
 }

 bool LiftoffAssembler::emit_i32_popcnt(Register dst, Register src) {
-  TurboAssembler::Popcnt_w(dst, src);
+  MacroAssembler::Popcnt_w(dst, src);
   return true;
 }
@@ -1150,55 +1150,55 @@ I32_SHIFTOP_I(shr, srl_w, srli_w)

 void LiftoffAssembler::emit_i64_addi(LiftoffRegister dst, LiftoffRegister lhs,
                                      int64_t imm) {
-  TurboAssembler::Add_d(dst.gp(), lhs.gp(), Operand(imm));
+  MacroAssembler::Add_d(dst.gp(), lhs.gp(), Operand(imm));
 }

 void LiftoffAssembler::emit_i64_mul(LiftoffRegister dst, LiftoffRegister lhs,
                                     LiftoffRegister rhs) {
-  TurboAssembler::Mul_d(dst.gp(), lhs.gp(), rhs.gp());
+  MacroAssembler::Mul_d(dst.gp(), lhs.gp(), rhs.gp());
 }

 bool LiftoffAssembler::emit_i64_divs(LiftoffRegister dst, LiftoffRegister lhs,
                                      LiftoffRegister rhs,
                                      Label* trap_div_by_zero,
                                      Label* trap_div_unrepresentable) {
-  TurboAssembler::Branch(trap_div_by_zero, eq, rhs.gp(), Operand(zero_reg));
+  MacroAssembler::Branch(trap_div_by_zero, eq, rhs.gp(), Operand(zero_reg));

   // Check if lhs == MinInt64 and rhs == -1, since this case is unrepresentable.
-  TurboAssembler::li(kScratchReg, 1);
-  TurboAssembler::li(kScratchReg2, 1);
-  TurboAssembler::LoadZeroOnCondition(
+  MacroAssembler::li(kScratchReg, 1);
+  MacroAssembler::li(kScratchReg2, 1);
+  MacroAssembler::LoadZeroOnCondition(
       kScratchReg, lhs.gp(), Operand(std::numeric_limits<int64_t>::min()), eq);
-  TurboAssembler::LoadZeroOnCondition(kScratchReg2, rhs.gp(), Operand(-1), eq);
+  MacroAssembler::LoadZeroOnCondition(kScratchReg2, rhs.gp(), Operand(-1), eq);
   add_d(kScratchReg, kScratchReg, kScratchReg2);
-  TurboAssembler::Branch(trap_div_unrepresentable, eq, kScratchReg,
+  MacroAssembler::Branch(trap_div_unrepresentable, eq, kScratchReg,
                          Operand(zero_reg));

-  TurboAssembler::Div_d(dst.gp(), lhs.gp(), rhs.gp());
+  MacroAssembler::Div_d(dst.gp(), lhs.gp(), rhs.gp());
   return true;
 }

 bool LiftoffAssembler::emit_i64_divu(LiftoffRegister dst, LiftoffRegister lhs,
                                      LiftoffRegister rhs,
                                      Label* trap_div_by_zero) {
-  TurboAssembler::Branch(trap_div_by_zero, eq, rhs.gp(), Operand(zero_reg));
-  TurboAssembler::Div_du(dst.gp(), lhs.gp(), rhs.gp());
+  MacroAssembler::Branch(trap_div_by_zero, eq, rhs.gp(), Operand(zero_reg));
+  MacroAssembler::Div_du(dst.gp(), lhs.gp(), rhs.gp());
   return true;
 }

 bool LiftoffAssembler::emit_i64_rems(LiftoffRegister dst, LiftoffRegister lhs,
                                      LiftoffRegister rhs,
                                      Label* trap_div_by_zero) {
-  TurboAssembler::Branch(trap_div_by_zero, eq, rhs.gp(), Operand(zero_reg));
-  TurboAssembler::Mod_d(dst.gp(), lhs.gp(), rhs.gp());
+  MacroAssembler::Branch(trap_div_by_zero, eq, rhs.gp(), Operand(zero_reg));
+  MacroAssembler::Mod_d(dst.gp(), lhs.gp(), rhs.gp());
   return true;
 }

 bool LiftoffAssembler::emit_i64_remu(LiftoffRegister dst, LiftoffRegister lhs,
                                      LiftoffRegister rhs,
                                      Label* trap_div_by_zero) {
-  TurboAssembler::Branch(trap_div_by_zero, eq, rhs.gp(), Operand(zero_reg));
-  TurboAssembler::Mod_du(dst.gp(), lhs.gp(), rhs.gp());
+  MacroAssembler::Branch(trap_div_by_zero, eq, rhs.gp(), Operand(zero_reg));
+  MacroAssembler::Mod_du(dst.gp(), lhs.gp(), rhs.gp());
   return true;
 }
@@ -1256,32 +1256,32 @@ void LiftoffAssembler::emit_u32_to_uintptr(Register dst, Register src) {
 }

 void LiftoffAssembler::emit_f32_neg(DoubleRegister dst, DoubleRegister src) {
-  TurboAssembler::Neg_s(dst, src);
+  MacroAssembler::Neg_s(dst, src);
 }

 void LiftoffAssembler::emit_f64_neg(DoubleRegister dst, DoubleRegister src) {
-  TurboAssembler::Neg_d(dst, src);
+  MacroAssembler::Neg_d(dst, src);
 }

 void LiftoffAssembler::emit_f32_min(DoubleRegister dst, DoubleRegister lhs,
                                     DoubleRegister rhs) {
   Label ool, done;
-  TurboAssembler::Float32Min(dst, lhs, rhs, &ool);
+  MacroAssembler::Float32Min(dst, lhs, rhs, &ool);
   Branch(&done);

   bind(&ool);
-  TurboAssembler::Float32MinOutOfLine(dst, lhs, rhs);
+  MacroAssembler::Float32MinOutOfLine(dst, lhs, rhs);
   bind(&done);
 }

 void LiftoffAssembler::emit_f32_max(DoubleRegister dst, DoubleRegister lhs,
                                     DoubleRegister rhs) {
   Label ool, done;
-  TurboAssembler::Float32Max(dst, lhs, rhs, &ool);
+  MacroAssembler::Float32Max(dst, lhs, rhs, &ool);
   Branch(&done);

   bind(&ool);
-  TurboAssembler::Float32MaxOutOfLine(dst, lhs, rhs);
+  MacroAssembler::Float32MaxOutOfLine(dst, lhs, rhs);
   bind(&done);
 }
@@ -1293,22 +1293,22 @@ void LiftoffAssembler::emit_f32_copysign(DoubleRegister dst, DoubleRegister lhs,
 void LiftoffAssembler::emit_f64_min(DoubleRegister dst, DoubleRegister lhs,
                                     DoubleRegister rhs) {
   Label ool, done;
-  TurboAssembler::Float64Min(dst, lhs, rhs, &ool);
+  MacroAssembler::Float64Min(dst, lhs, rhs, &ool);
   Branch(&done);

   bind(&ool);
-  TurboAssembler::Float64MinOutOfLine(dst, lhs, rhs);
+  MacroAssembler::Float64MinOutOfLine(dst, lhs, rhs);
   bind(&done);
 }

 void LiftoffAssembler::emit_f64_max(DoubleRegister dst, DoubleRegister lhs,
                                     DoubleRegister rhs) {
   Label ool, done;
-  TurboAssembler::Float64Max(dst, lhs, rhs, &ool);
+  MacroAssembler::Float64Max(dst, lhs, rhs, &ool);
   Branch(&done);

   bind(&ool);
-  TurboAssembler::Float64MaxOutOfLine(dst, lhs, rhs);
+  MacroAssembler::Float64MaxOutOfLine(dst, lhs, rhs);
   bind(&done);
 }
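
Background: the out-of-line paths exist because wasm min/max must propagate NaN and order -0 below +0, which a bare hardware min/max does not guarantee on every target. A reference model of the required semantics (illustrative C++):

    #include <cmath>
    #include <limits>

    // Wasm f64.min semantics: NaN-propagating, and min(-0, +0) == -0.
    double WasmF64Min(double a, double b) {
      if (std::isnan(a) || std::isnan(b)) {
        return std::numeric_limits<double>::quiet_NaN();
      }
      if (a == 0.0 && b == 0.0) return std::signbit(a) ? a : b;
      return a < b ? a : b;
    }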
@@ -1362,7 +1362,7 @@ bool LiftoffAssembler::emit_type_conversion(WasmOpcode opcode,
                                             LiftoffRegister src, Label* trap) {
   switch (opcode) {
     case kExprI32ConvertI64:
-      TurboAssembler::bstrpick_w(dst.gp(), src.gp(), 31, 0);
+      MacroAssembler::bstrpick_w(dst.gp(), src.gp(), 31, 0);
       return true;
     case kExprI32SConvertF32: {
       LiftoffRegister rounded = GetUnusedRegister(kFpReg, LiftoffRegList{src});
@@ -1370,20 +1370,20 @@ bool LiftoffAssembler::emit_type_conversion(WasmOpcode opcode,
           GetUnusedRegister(kFpReg, LiftoffRegList{src, rounded});

       // Real conversion.
-      TurboAssembler::Trunc_s(rounded.fp(), src.fp());
+      MacroAssembler::Trunc_s(rounded.fp(), src.fp());
       ftintrz_w_s(kScratchDoubleReg, rounded.fp());
       movfr2gr_s(dst.gp(), kScratchDoubleReg);
       // Avoid INT32_MAX as an overflow indicator and use INT32_MIN instead,
       // because INT32_MIN allows easier out-of-bounds detection.
-      TurboAssembler::Add_w(kScratchReg, dst.gp(), 1);
-      TurboAssembler::Slt(kScratchReg2, kScratchReg, dst.gp());
-      TurboAssembler::Movn(dst.gp(), kScratchReg, kScratchReg2);
+      MacroAssembler::Add_w(kScratchReg, dst.gp(), 1);
+      MacroAssembler::Slt(kScratchReg2, kScratchReg, dst.gp());
+      MacroAssembler::Movn(dst.gp(), kScratchReg, kScratchReg2);

       // Checking if trap.
       movgr2fr_w(kScratchDoubleReg, dst.gp());
       ffint_s_w(converted_back.fp(), kScratchDoubleReg);
-      TurboAssembler::CompareF32(rounded.fp(), converted_back.fp(), CEQ);
-      TurboAssembler::BranchFalseF(trap);
+      MacroAssembler::CompareF32(rounded.fp(), converted_back.fp(), CEQ);
+      MacroAssembler::BranchFalseF(trap);
       return true;
     }
     case kExprI32UConvertF32: {
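
Note: the trap detection above is a round-trip check: truncate, convert back to float, and trap when the result no longer compares equal to the rounded input (out-of-range values and NaN both fail that compare). In portable C++ the same guard has to be a range check, since an out-of-range cast is undefined behavior rather than a saturating instruction (illustrative):

    #include <cmath>
    #include <cstdint>

    // Trapping float -> int32 truncation, modeled with an explicit range check.
    bool TruncF32ToI32(float f, int32_t* out) {
      float rounded = std::trunc(f);
      // 2^31 is exactly representable as a float; NaN fails both comparisons.
      if (!(rounded >= -2147483648.0f && rounded < 2147483648.0f)) {
        return false;  // the emitted code would branch to {trap} here
      }
      *out = static_cast<int32_t>(rounded);
      return true;
    }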
@@ -1392,18 +1392,18 @@ bool LiftoffAssembler::emit_type_conversion(WasmOpcode opcode,
           GetUnusedRegister(kFpReg, LiftoffRegList{src, rounded});

       // Real conversion.
-      TurboAssembler::Trunc_s(rounded.fp(), src.fp());
-      TurboAssembler::Ftintrz_uw_s(dst.gp(), rounded.fp(), kScratchDoubleReg);
+      MacroAssembler::Trunc_s(rounded.fp(), src.fp());
+      MacroAssembler::Ftintrz_uw_s(dst.gp(), rounded.fp(), kScratchDoubleReg);
       // Avoid UINT32_MAX as an overflow indicator and use 0 instead,
       // because 0 allows easier out-of-bounds detection.
-      TurboAssembler::Add_w(kScratchReg, dst.gp(), 1);
-      TurboAssembler::Movz(dst.gp(), zero_reg, kScratchReg);
+      MacroAssembler::Add_w(kScratchReg, dst.gp(), 1);
+      MacroAssembler::Movz(dst.gp(), zero_reg, kScratchReg);

       // Checking if trap.
-      TurboAssembler::Ffint_d_uw(converted_back.fp(), dst.gp());
+      MacroAssembler::Ffint_d_uw(converted_back.fp(), dst.gp());
       fcvt_s_d(converted_back.fp(), converted_back.fp());
-      TurboAssembler::CompareF32(rounded.fp(), converted_back.fp(), CEQ);
-      TurboAssembler::BranchFalseF(trap);
+      MacroAssembler::CompareF32(rounded.fp(), converted_back.fp(), CEQ);
+      MacroAssembler::BranchFalseF(trap);
       return true;
     }
     case kExprI32SConvertF64: {
@@ -1412,14 +1412,14 @@ bool LiftoffAssembler::emit_type_conversion(WasmOpcode opcode,
           GetUnusedRegister(kFpReg, LiftoffRegList{src, rounded});

       // Real conversion.
-      TurboAssembler::Trunc_d(rounded.fp(), src.fp());
+      MacroAssembler::Trunc_d(rounded.fp(), src.fp());
       ftintrz_w_d(kScratchDoubleReg, rounded.fp());
       movfr2gr_s(dst.gp(), kScratchDoubleReg);

       // Checking if trap.
       ffint_d_w(converted_back.fp(), kScratchDoubleReg);
-      TurboAssembler::CompareF64(rounded.fp(), converted_back.fp(), CEQ);
-      TurboAssembler::BranchFalseF(trap);
+      MacroAssembler::CompareF64(rounded.fp(), converted_back.fp(), CEQ);
+      MacroAssembler::BranchFalseF(trap);
       return true;
     }
     case kExprI32UConvertF64: {
@@ -1428,23 +1428,23 @@ bool LiftoffAssembler::emit_type_conversion(WasmOpcode opcode,
           GetUnusedRegister(kFpReg, LiftoffRegList{src, rounded});

       // Real conversion.
-      TurboAssembler::Trunc_d(rounded.fp(), src.fp());
-      TurboAssembler::Ftintrz_uw_d(dst.gp(), rounded.fp(), kScratchDoubleReg);
+      MacroAssembler::Trunc_d(rounded.fp(), src.fp());
+      MacroAssembler::Ftintrz_uw_d(dst.gp(), rounded.fp(), kScratchDoubleReg);

       // Checking if trap.
-      TurboAssembler::Ffint_d_uw(converted_back.fp(), dst.gp());
-      TurboAssembler::CompareF64(rounded.fp(), converted_back.fp(), CEQ);
-      TurboAssembler::BranchFalseF(trap);
+      MacroAssembler::Ffint_d_uw(converted_back.fp(), dst.gp());
+      MacroAssembler::CompareF64(rounded.fp(), converted_back.fp(), CEQ);
+      MacroAssembler::BranchFalseF(trap);
       return true;
     }
     case kExprI32ReinterpretF32:
-      TurboAssembler::FmoveLow(dst.gp(), src.fp());
+      MacroAssembler::FmoveLow(dst.gp(), src.fp());
       return true;
     case kExprI64SConvertI32:
       slli_w(dst.gp(), src.gp(), 0);
       return true;
     case kExprI64UConvertI32:
-      TurboAssembler::bstrpick_d(dst.gp(), src.gp(), 31, 0);
+      MacroAssembler::bstrpick_d(dst.gp(), src.gp(), 31, 0);
       return true;
     case kExprI64SConvertF32: {
       LiftoffRegister rounded = GetUnusedRegister(kFpReg, LiftoffRegList{src});
@@ -1452,29 +1452,29 @@ bool LiftoffAssembler::emit_type_conversion(WasmOpcode opcode,
           GetUnusedRegister(kFpReg, LiftoffRegList{src, rounded});

       // Real conversion.
-      TurboAssembler::Trunc_s(rounded.fp(), src.fp());
+      MacroAssembler::Trunc_s(rounded.fp(), src.fp());
       ftintrz_l_s(kScratchDoubleReg, rounded.fp());
       movfr2gr_d(dst.gp(), kScratchDoubleReg);
       // Avoid INT64_MAX as an overflow indicator and use INT64_MIN instead,
       // because INT64_MIN allows easier out-of-bounds detection.
-      TurboAssembler::Add_d(kScratchReg, dst.gp(), 1);
-      TurboAssembler::Slt(kScratchReg2, kScratchReg, dst.gp());
-      TurboAssembler::Movn(dst.gp(), kScratchReg, kScratchReg2);
+      MacroAssembler::Add_d(kScratchReg, dst.gp(), 1);
+      MacroAssembler::Slt(kScratchReg2, kScratchReg, dst.gp());
+      MacroAssembler::Movn(dst.gp(), kScratchReg, kScratchReg2);

       // Checking if trap.
       movgr2fr_d(kScratchDoubleReg, dst.gp());
       ffint_s_l(converted_back.fp(), kScratchDoubleReg);
-      TurboAssembler::CompareF32(rounded.fp(), converted_back.fp(), CEQ);
-      TurboAssembler::BranchFalseF(trap);
+      MacroAssembler::CompareF32(rounded.fp(), converted_back.fp(), CEQ);
+      MacroAssembler::BranchFalseF(trap);
       return true;
     }
     case kExprI64UConvertF32: {
       // Real conversion.
-      TurboAssembler::Ftintrz_ul_s(dst.gp(), src.fp(), kScratchDoubleReg,
+      MacroAssembler::Ftintrz_ul_s(dst.gp(), src.fp(), kScratchDoubleReg,
                                    kScratchReg);

       // Checking if trap.
-      TurboAssembler::Branch(trap, eq, kScratchReg, Operand(zero_reg));
+      MacroAssembler::Branch(trap, eq, kScratchReg, Operand(zero_reg));
       return true;
     }
     case kExprI64SConvertF64: {
@@ -1483,29 +1483,29 @@ bool LiftoffAssembler::emit_type_conversion(WasmOpcode opcode,
           GetUnusedRegister(kFpReg, LiftoffRegList{src, rounded});

       // Real conversion.
-      TurboAssembler::Trunc_d(rounded.fp(), src.fp());
+      MacroAssembler::Trunc_d(rounded.fp(), src.fp());
       ftintrz_l_d(kScratchDoubleReg, rounded.fp());
       movfr2gr_d(dst.gp(), kScratchDoubleReg);
       // Avoid INT64_MAX as an overflow indicator and use INT64_MIN instead,
       // because INT64_MIN allows easier out-of-bounds detection.
-      TurboAssembler::Add_d(kScratchReg, dst.gp(), 1);
-      TurboAssembler::Slt(kScratchReg2, kScratchReg, dst.gp());
-      TurboAssembler::Movn(dst.gp(), kScratchReg, kScratchReg2);
+      MacroAssembler::Add_d(kScratchReg, dst.gp(), 1);
+      MacroAssembler::Slt(kScratchReg2, kScratchReg, dst.gp());
+      MacroAssembler::Movn(dst.gp(), kScratchReg, kScratchReg2);

       // Checking if trap.
       movgr2fr_d(kScratchDoubleReg, dst.gp());
       ffint_d_l(converted_back.fp(), kScratchDoubleReg);
-      TurboAssembler::CompareF64(rounded.fp(), converted_back.fp(), CEQ);
-      TurboAssembler::BranchFalseF(trap);
+      MacroAssembler::CompareF64(rounded.fp(), converted_back.fp(), CEQ);
+      MacroAssembler::BranchFalseF(trap);
       return true;
     }
     case kExprI64UConvertF64: {
       // Real conversion.
-      TurboAssembler::Ftintrz_ul_d(dst.gp(), src.fp(), kScratchDoubleReg,
+      MacroAssembler::Ftintrz_ul_d(dst.gp(), src.fp(), kScratchDoubleReg,
                                    kScratchReg);

       // Checking if trap.
-      TurboAssembler::Branch(trap, eq, kScratchReg, Operand(zero_reg));
+      MacroAssembler::Branch(trap, eq, kScratchReg, Operand(zero_reg));
       return true;
     }
     case kExprI64ReinterpretF64:
@@ -1518,13 +1518,13 @@ bool LiftoffAssembler::emit_type_conversion(WasmOpcode opcode,
       return true;
     }
     case kExprF32UConvertI32:
-      TurboAssembler::Ffint_s_uw(dst.fp(), src.gp());
+      MacroAssembler::Ffint_s_uw(dst.fp(), src.gp());
       return true;
     case kExprF32ConvertF64:
       fcvt_s_d(dst.fp(), src.fp());
       return true;
     case kExprF32ReinterpretI32:
-      TurboAssembler::FmoveLow(dst.fp(), src.gp());
+      MacroAssembler::FmoveLow(dst.fp(), src.gp());
       return true;
     case kExprF64SConvertI32: {
       LiftoffRegister scratch = GetUnusedRegister(kFpReg, LiftoffRegList{dst});
@@ -1533,7 +1533,7 @@ bool LiftoffAssembler::emit_type_conversion(WasmOpcode opcode,
       return true;
     }
     case kExprF64UConvertI32:
-      TurboAssembler::Ffint_d_uw(dst.fp(), src.gp());
+      MacroAssembler::Ffint_d_uw(dst.fp(), src.gp());
       return true;
     case kExprF64ConvertF32:
       fcvt_d_s(dst.fp(), src.fp());
@@ -1548,7 +1548,7 @@ bool LiftoffAssembler::emit_type_conversion(WasmOpcode opcode,
     case kExprI32UConvertSatF32: {
       Label isnan_or_lessthan_or_equal_zero;
       mov(dst.gp(), zero_reg);
-      TurboAssembler::Move(kScratchDoubleReg, static_cast<float>(0.0));
+      MacroAssembler::Move(kScratchDoubleReg, static_cast<float>(0.0));
       CompareF32(src.fp(), kScratchDoubleReg, CULE);
       BranchTrueShortF(&isnan_or_lessthan_or_equal_zero);
       Ftintrz_uw_s(dst.gp(), src.fp(), kScratchDoubleReg);
@@ -1562,7 +1562,7 @@ bool LiftoffAssembler::emit_type_conversion(WasmOpcode opcode,
     case kExprI32UConvertSatF64: {
       Label isnan_or_lessthan_or_equal_zero;
       mov(dst.gp(), zero_reg);
-      TurboAssembler::Move(kScratchDoubleReg, static_cast<double>(0.0));
+      MacroAssembler::Move(kScratchDoubleReg, static_cast<double>(0.0));
       CompareF64(src.fp(), kScratchDoubleReg, CULE);
       BranchTrueShortF(&isnan_or_lessthan_or_equal_zero);
       Ftintrz_uw_d(dst.gp(), src.fp(), kScratchDoubleReg);
@@ -1576,7 +1576,7 @@ bool LiftoffAssembler::emit_type_conversion(WasmOpcode opcode,
     case kExprI64UConvertSatF32: {
       Label isnan_or_lessthan_or_equal_zero;
       mov(dst.gp(), zero_reg);
-      TurboAssembler::Move(kScratchDoubleReg, static_cast<float>(0.0));
+      MacroAssembler::Move(kScratchDoubleReg, static_cast<float>(0.0));
       CompareF32(src.fp(), kScratchDoubleReg, CULE);
       BranchTrueShortF(&isnan_or_lessthan_or_equal_zero);
       Ftintrz_ul_s(dst.gp(), src.fp(), kScratchDoubleReg);
@@ -1590,7 +1590,7 @@ bool LiftoffAssembler::emit_type_conversion(WasmOpcode opcode,
     case kExprI64UConvertSatF64: {
       Label isnan_or_lessthan_or_equal_zero;
       mov(dst.gp(), zero_reg);
-      TurboAssembler::Move(kScratchDoubleReg, static_cast<double>(0.0));
+      MacroAssembler::Move(kScratchDoubleReg, static_cast<double>(0.0));
       CompareF64(src.fp(), kScratchDoubleReg, CULE);
       BranchTrueShortF(&isnan_or_lessthan_or_equal_zero);
       Ftintrz_ul_d(dst.gp(), src.fp(), kScratchDoubleReg);
@@ -1626,11 +1626,11 @@ void LiftoffAssembler::emit_i64_signextend_i32(LiftoffRegister dst,
 }

 void LiftoffAssembler::emit_jump(Label* label) {
-  TurboAssembler::Branch(label);
+  MacroAssembler::Branch(label);
 }

 void LiftoffAssembler::emit_jump(Register target) {
-  TurboAssembler::Jump(target);
+  MacroAssembler::Jump(target);
 }

 void LiftoffAssembler::emit_cond_jump(Condition cond, Label* label,
@@ -1639,25 +1639,25 @@ void LiftoffAssembler::emit_cond_jump(Condition cond, Label* label,
                                       const FreezeCacheState& frozen) {
   if (rhs == no_reg) {
     DCHECK(kind == kI32 || kind == kI64);
-    TurboAssembler::Branch(label, cond, lhs, Operand(zero_reg));
+    MacroAssembler::Branch(label, cond, lhs, Operand(zero_reg));
   } else {
     DCHECK((kind == kI32 || kind == kI64) ||
            (is_reference(kind) && (cond == kEqual || cond == kNotEqual)));
-    TurboAssembler::Branch(label, cond, lhs, Operand(rhs));
+    MacroAssembler::Branch(label, cond, lhs, Operand(rhs));
   }
 }

 void LiftoffAssembler::emit_i32_cond_jumpi(Condition cond, Label* label,
                                            Register lhs, int32_t imm,
                                            const FreezeCacheState& frozen) {
-  TurboAssembler::Branch(label, cond, lhs, Operand(imm));
+  MacroAssembler::Branch(label, cond, lhs, Operand(imm));
 }

 void LiftoffAssembler::emit_i32_subi_jump_negative(
     Register value, int subtrahend, Label* result_negative,
     const FreezeCacheState& frozen) {
-  TurboAssembler::Sub_d(value, value, Operand(subtrahend));
-  TurboAssembler::Branch(result_negative, less, value, Operand(zero_reg));
+  MacroAssembler::Sub_d(value, value, Operand(subtrahend));
+  MacroAssembler::Branch(result_negative, less, value, Operand(zero_reg));
 }

 void LiftoffAssembler::emit_i32_eqz(Register dst, Register src) {
@@ -1671,14 +1671,14 @@ void LiftoffAssembler::emit_i32_set_cond(Condition cond, Register dst,
     tmp = GetUnusedRegister(kGpReg, LiftoffRegList{lhs, rhs}).gp();
   }
   // Write 1 as result.
-  TurboAssembler::li(tmp, 1);
+  MacroAssembler::li(tmp, 1);
   // If negative condition is true, write 0 as result.
   Condition neg_cond = NegateCondition(cond);
-  TurboAssembler::LoadZeroOnCondition(tmp, lhs, Operand(rhs), neg_cond);
+  MacroAssembler::LoadZeroOnCondition(tmp, lhs, Operand(rhs), neg_cond);
   // If tmp != dst, result will be moved.
-  TurboAssembler::Move(dst, tmp);
+  MacroAssembler::Move(dst, tmp);
 }

 void LiftoffAssembler::emit_i64_eqz(Register dst, LiftoffRegister src) {
@@ -1693,15 +1693,15 @@ void LiftoffAssembler::emit_i64_set_cond(Condition cond, Register dst,
     tmp = GetUnusedRegister(kGpReg, LiftoffRegList{lhs, rhs}).gp();
   }
   // Write 1 as result.
-  TurboAssembler::li(tmp, 1);
+  MacroAssembler::li(tmp, 1);
   // If negative condition is true, write 0 as result.
   Condition neg_cond = NegateCondition(cond);
-  TurboAssembler::LoadZeroOnCondition(tmp, lhs.gp(), Operand(rhs.gp()),
+  MacroAssembler::LoadZeroOnCondition(tmp, lhs.gp(), Operand(rhs.gp()),
                                       neg_cond);
   // If tmp != dst, result will be moved.
-  TurboAssembler::Move(dst, tmp);
+  MacroAssembler::Move(dst, tmp);
 }

 namespace liftoff {
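
Note: emit_i32_set_cond produces a boolean without a branch: write 1, then let LoadZeroOnCondition overwrite it with 0 when the negated condition holds. The net effect, shown for a less-than compare (illustrative C++; the `if` stands in for the conditional zeroing):

    #include <cstdint>

    int32_t SetCondLessThan(int32_t lhs, int32_t rhs) {
      int32_t result = 1;            // "Write 1 as result."
      if (!(lhs < rhs)) result = 0;  // negated condition zeroes the register
      return result;
    }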
@@ -1740,26 +1740,26 @@ void LiftoffAssembler::emit_f32_set_cond(Condition cond, Register dst,
                                          DoubleRegister lhs,
                                          DoubleRegister rhs) {
   Label not_nan, cont;
-  TurboAssembler::CompareIsNanF32(lhs, rhs);
-  TurboAssembler::BranchFalseF(&not_nan);
+  MacroAssembler::CompareIsNanF32(lhs, rhs);
+  MacroAssembler::BranchFalseF(&not_nan);
   // If one of the operands is NaN, return 1 for f32.ne, else 0.
   if (cond == ne) {
-    TurboAssembler::li(dst, 1);
+    MacroAssembler::li(dst, 1);
   } else {
-    TurboAssembler::Move(dst, zero_reg);
+    MacroAssembler::Move(dst, zero_reg);
   }
-  TurboAssembler::Branch(&cont);
+  MacroAssembler::Branch(&cont);

   bind(&not_nan);

-  TurboAssembler::li(dst, 1);
+  MacroAssembler::li(dst, 1);
   bool predicate;
   FPUCondition fcond = liftoff::ConditionToConditionCmpFPU(cond, &predicate);
-  TurboAssembler::CompareF32(lhs, rhs, fcond);
+  MacroAssembler::CompareF32(lhs, rhs, fcond);
   if (predicate) {
-    TurboAssembler::LoadZeroIfNotFPUCondition(dst);
+    MacroAssembler::LoadZeroIfNotFPUCondition(dst);
   } else {
-    TurboAssembler::LoadZeroIfFPUCondition(dst);
+    MacroAssembler::LoadZeroIfFPUCondition(dst);
   }

   bind(&cont);
@@ -1769,26 +1769,26 @@ void LiftoffAssembler::emit_f64_set_cond(Condition cond, Register dst,
                                          DoubleRegister lhs,
                                          DoubleRegister rhs) {
   Label not_nan, cont;
-  TurboAssembler::CompareIsNanF64(lhs, rhs);
-  TurboAssembler::BranchFalseF(&not_nan);
+  MacroAssembler::CompareIsNanF64(lhs, rhs);
+  MacroAssembler::BranchFalseF(&not_nan);
   // If one of the operands is NaN, return 1 for f64.ne, else 0.
   if (cond == ne) {
-    TurboAssembler::li(dst, 1);
+    MacroAssembler::li(dst, 1);
   } else {
-    TurboAssembler::Move(dst, zero_reg);
+    MacroAssembler::Move(dst, zero_reg);
   }
-  TurboAssembler::Branch(&cont);
+  MacroAssembler::Branch(&cont);

   bind(&not_nan);

-  TurboAssembler::li(dst, 1);
+  MacroAssembler::li(dst, 1);
   bool predicate;
   FPUCondition fcond = liftoff::ConditionToConditionCmpFPU(cond, &predicate);
-  TurboAssembler::CompareF64(lhs, rhs, fcond);
+  MacroAssembler::CompareF64(lhs, rhs, fcond);
   if (predicate) {
-    TurboAssembler::LoadZeroIfNotFPUCondition(dst);
+    MacroAssembler::LoadZeroIfNotFPUCondition(dst);
   } else {
-    TurboAssembler::LoadZeroIfFPUCondition(dst);
+    MacroAssembler::LoadZeroIfFPUCondition(dst);
   }

   bind(&cont);
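
Background: the not_nan split encodes IEEE comparison semantics: when either operand is NaN, every ordered comparison is false, so only ne may produce 1. As a compact reference (illustrative C++):

    #include <cmath>

    int F32Equal(float lhs, float rhs) {
      if (std::isnan(lhs) || std::isnan(rhs)) return 0;  // eq with NaN: false
      return lhs == rhs ? 1 : 0;
    }

    int F32NotEqual(float lhs, float rhs) {
      if (std::isnan(lhs) || std::isnan(rhs)) return 1;  // ne with NaN: true
      return lhs != rhs ? 1 : 0;
    }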
@@ -3001,8 +3001,8 @@ void LiftoffAssembler::emit_f64x2_qfms(LiftoffRegister dst,
 }

 void LiftoffAssembler::StackCheck(Label* ool_code, Register limit_address) {
-  TurboAssembler::Ld_d(limit_address, MemOperand(limit_address, 0));
-  TurboAssembler::Branch(ool_code, ule, sp, Operand(limit_address));
+  MacroAssembler::Ld_d(limit_address, MemOperand(limit_address, 0));
+  MacroAssembler::Branch(ool_code, ule, sp, Operand(limit_address));
 }

 void LiftoffAssembler::CallTrapCallbackForTesting() {
@@ -3036,7 +3036,7 @@ void LiftoffAssembler::PushRegisters(LiftoffRegList regs) {
     unsigned offset = 0;
     while (!fp_regs.is_empty()) {
       LiftoffRegister reg = fp_regs.GetFirstRegSet();
-      TurboAssembler::Fst_d(reg.fp(), MemOperand(sp, offset));
+      MacroAssembler::Fst_d(reg.fp(), MemOperand(sp, offset));
       fp_regs.clear(reg);
       offset += slot_size;
     }
@@ -3049,7 +3049,7 @@ void LiftoffAssembler::PopRegisters(LiftoffRegList regs) {
   unsigned fp_offset = 0;
   while (!fp_regs.is_empty()) {
     LiftoffRegister reg = fp_regs.GetFirstRegSet();
-    TurboAssembler::Fld_d(reg.fp(), MemOperand(sp, fp_offset));
+    MacroAssembler::Fld_d(reg.fp(), MemOperand(sp, fp_offset));
     fp_regs.clear(reg);
     fp_offset += 8;
   }
@@ -3168,7 +3168,7 @@ void LiftoffAssembler::CallRuntimeStub(WasmCode::RuntimeStubId sid) {

 void LiftoffAssembler::AllocateStackSlot(Register addr, uint32_t size) {
   addi_d(sp, sp, -size);
-  TurboAssembler::Move(addr, sp);
+  MacroAssembler::Move(addr, sp);
 }

 void LiftoffAssembler::DeallocateStackSlot(uint32_t size) {

View File

@ -176,19 +176,19 @@ inline void ChangeEndiannessLoad(LiftoffAssembler* assm, LiftoffRegister dst,
assm->emit_type_conversion(kExprI32ReinterpretF32, tmp, dst); assm->emit_type_conversion(kExprI32ReinterpretF32, tmp, dst);
V8_FALLTHROUGH; V8_FALLTHROUGH;
case LoadType::kI64Load32U: case LoadType::kI64Load32U:
assm->TurboAssembler::ByteSwapUnsigned(tmp.gp(), tmp.gp(), 4); assm->MacroAssembler::ByteSwapUnsigned(tmp.gp(), tmp.gp(), 4);
break; break;
case LoadType::kI32Load: case LoadType::kI32Load:
case LoadType::kI64Load32S: case LoadType::kI64Load32S:
assm->TurboAssembler::ByteSwapSigned(tmp.gp(), tmp.gp(), 4); assm->MacroAssembler::ByteSwapSigned(tmp.gp(), tmp.gp(), 4);
break; break;
case LoadType::kI32Load16S: case LoadType::kI32Load16S:
case LoadType::kI64Load16S: case LoadType::kI64Load16S:
assm->TurboAssembler::ByteSwapSigned(tmp.gp(), tmp.gp(), 2); assm->MacroAssembler::ByteSwapSigned(tmp.gp(), tmp.gp(), 2);
break; break;
case LoadType::kI32Load16U: case LoadType::kI32Load16U:
case LoadType::kI64Load16U: case LoadType::kI64Load16U:
assm->TurboAssembler::ByteSwapUnsigned(tmp.gp(), tmp.gp(), 2); assm->MacroAssembler::ByteSwapUnsigned(tmp.gp(), tmp.gp(), 2);
break; break;
case LoadType::kF64Load: case LoadType::kF64Load:
is_float = true; is_float = true;
@ -196,7 +196,7 @@ inline void ChangeEndiannessLoad(LiftoffAssembler* assm, LiftoffRegister dst,
assm->emit_type_conversion(kExprI64ReinterpretF64, tmp, dst); assm->emit_type_conversion(kExprI64ReinterpretF64, tmp, dst);
V8_FALLTHROUGH; V8_FALLTHROUGH;
case LoadType::kI64Load: case LoadType::kI64Load:
assm->TurboAssembler::ByteSwapSigned(tmp.gp(), tmp.gp(), 8); assm->MacroAssembler::ByteSwapSigned(tmp.gp(), tmp.gp(), 8);
break; break;
default: default:
UNREACHABLE(); UNREACHABLE();
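The helpers above differ only in operand width and in whether the swapped value is sign- or zero-extended, which is what distinguishes the signed and unsigned load types. A stand-alone sketch of the 4-byte case, using the GCC/Clang __builtin_bswap32 builtin in place of the MIPS instruction sequence (illustrative, not the V8 implementation):

#include <cstdint>

// ByteSwapSigned(..., 4): swap the low four bytes, then sign-extend.
int64_t ByteSwapSigned4(int64_t v) {
  uint32_t swapped = __builtin_bswap32(static_cast<uint32_t>(v));
  return static_cast<int32_t>(swapped);  // sign-extends to 64 bits
}

// ByteSwapUnsigned(..., 4): swap the low four bytes, then zero-extend.
uint64_t ByteSwapUnsigned4(uint64_t v) {
  return __builtin_bswap32(static_cast<uint32_t>(v));  // zero-extends
}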
@ -231,10 +231,10 @@ inline void ChangeEndiannessStore(LiftoffAssembler* assm, LiftoffRegister src,
assm->emit_type_conversion(kExprI32ReinterpretF32, tmp, src); assm->emit_type_conversion(kExprI32ReinterpretF32, tmp, src);
V8_FALLTHROUGH; V8_FALLTHROUGH;
case StoreType::kI32Store: case StoreType::kI32Store:
assm->TurboAssembler::ByteSwapSigned(tmp.gp(), tmp.gp(), 4); assm->MacroAssembler::ByteSwapSigned(tmp.gp(), tmp.gp(), 4);
break; break;
case StoreType::kI32Store16: case StoreType::kI32Store16:
assm->TurboAssembler::ByteSwapSigned(tmp.gp(), tmp.gp(), 2); assm->MacroAssembler::ByteSwapSigned(tmp.gp(), tmp.gp(), 2);
break; break;
case StoreType::kF64Store: case StoreType::kF64Store:
is_float = true; is_float = true;
@ -242,13 +242,13 @@ inline void ChangeEndiannessStore(LiftoffAssembler* assm, LiftoffRegister src,
assm->emit_type_conversion(kExprI64ReinterpretF64, tmp, src); assm->emit_type_conversion(kExprI64ReinterpretF64, tmp, src);
V8_FALLTHROUGH; V8_FALLTHROUGH;
case StoreType::kI64Store: case StoreType::kI64Store:
assm->TurboAssembler::ByteSwapSigned(tmp.gp(), tmp.gp(), 8); assm->MacroAssembler::ByteSwapSigned(tmp.gp(), tmp.gp(), 8);
break; break;
case StoreType::kI64Store32: case StoreType::kI64Store32:
assm->TurboAssembler::ByteSwapSigned(tmp.gp(), tmp.gp(), 4); assm->MacroAssembler::ByteSwapSigned(tmp.gp(), tmp.gp(), 4);
break; break;
case StoreType::kI64Store16: case StoreType::kI64Store16:
assm->TurboAssembler::ByteSwapSigned(tmp.gp(), tmp.gp(), 2); assm->MacroAssembler::ByteSwapSigned(tmp.gp(), tmp.gp(), 2);
break; break;
default: default:
UNREACHABLE(); UNREACHABLE();
@ -340,7 +340,7 @@ void LiftoffAssembler::PatchPrepareStackFrame(
// We can't run out of space, just pass anything big enough to not cause the // We can't run out of space, just pass anything big enough to not cause the
// assembler to try to grow the buffer. // assembler to try to grow the buffer.
constexpr int kAvailableSpace = 256; constexpr int kAvailableSpace = 256;
TurboAssembler patching_assembler( MacroAssembler patching_assembler(
nullptr, AssemblerOptions{}, CodeObjectRequired::kNo, nullptr, AssemblerOptions{}, CodeObjectRequired::kNo,
ExternalAssemblerBuffer(buffer_start_ + offset, kAvailableSpace)); ExternalAssemblerBuffer(buffer_start_ + offset, kAvailableSpace));
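The patching assembler targets a fixed, caller-owned slice of the already emitted code, so it never allocates or grows a buffer; kAvailableSpace merely has to exceed the size of the patch. A generic, self-contained analogue of that pattern (a sketch of the idea behind ExternalAssemblerBuffer, not V8's class):

#include <cstddef>
#include <cstdint>
#include <cstring>

// Emitter that writes into a caller-owned region instead of growing its own
// buffer, mirroring how patching_assembler overwrites code in place.
class ExternalBufferEmitter {
 public:
  ExternalBufferEmitter(uint8_t* start, std::size_t size)
      : pos_(start), end_(start + size) {}
  void Emit32(uint32_t insn) {
    if (pos_ + 4 > end_) return;  // by contract, we never run out of space
    std::memcpy(pos_, &insn, sizeof insn);
    pos_ += 4;
  }
 private:
  uint8_t* pos_;
  uint8_t* end_;
};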
@ -429,16 +429,16 @@ void LiftoffAssembler::LoadConstant(LiftoffRegister reg, WasmValue value,
RelocInfo::Mode rmode) { RelocInfo::Mode rmode) {
switch (value.type().kind()) { switch (value.type().kind()) {
case kI32: case kI32:
TurboAssembler::li(reg.gp(), Operand(value.to_i32(), rmode)); MacroAssembler::li(reg.gp(), Operand(value.to_i32(), rmode));
break; break;
case kI64: case kI64:
TurboAssembler::li(reg.gp(), Operand(value.to_i64(), rmode)); MacroAssembler::li(reg.gp(), Operand(value.to_i64(), rmode));
break; break;
case kF32: case kF32:
TurboAssembler::Move(reg.fp(), value.to_f32_boxed().get_bits()); MacroAssembler::Move(reg.fp(), value.to_f32_boxed().get_bits());
break; break;
case kF64: case kF64:
TurboAssembler::Move(reg.fp(), value.to_f64_boxed().get_bits()); MacroAssembler::Move(reg.fp(), value.to_f64_boxed().get_bits());
break; break;
default: default:
UNREACHABLE(); UNREACHABLE();
@ -547,30 +547,30 @@ void LiftoffAssembler::Load(LiftoffRegister dst, Register src_addr,
break; break;
case LoadType::kI32Load16U: case LoadType::kI32Load16U:
case LoadType::kI64Load16U: case LoadType::kI64Load16U:
TurboAssembler::Ulhu(dst.gp(), src_op); MacroAssembler::Ulhu(dst.gp(), src_op);
break; break;
case LoadType::kI32Load16S: case LoadType::kI32Load16S:
case LoadType::kI64Load16S: case LoadType::kI64Load16S:
TurboAssembler::Ulh(dst.gp(), src_op); MacroAssembler::Ulh(dst.gp(), src_op);
break; break;
case LoadType::kI64Load32U: case LoadType::kI64Load32U:
TurboAssembler::Ulwu(dst.gp(), src_op); MacroAssembler::Ulwu(dst.gp(), src_op);
break; break;
case LoadType::kI32Load: case LoadType::kI32Load:
case LoadType::kI64Load32S: case LoadType::kI64Load32S:
TurboAssembler::Ulw(dst.gp(), src_op); MacroAssembler::Ulw(dst.gp(), src_op);
break; break;
case LoadType::kI64Load: case LoadType::kI64Load:
TurboAssembler::Uld(dst.gp(), src_op); MacroAssembler::Uld(dst.gp(), src_op);
break; break;
case LoadType::kF32Load: case LoadType::kF32Load:
TurboAssembler::Ulwc1(dst.fp(), src_op, t8); MacroAssembler::Ulwc1(dst.fp(), src_op, t8);
break; break;
case LoadType::kF64Load: case LoadType::kF64Load:
TurboAssembler::Uldc1(dst.fp(), src_op, t8); MacroAssembler::Uldc1(dst.fp(), src_op, t8);
break; break;
case LoadType::kS128Load: case LoadType::kS128Load:
TurboAssembler::ld_b(dst.fp().toW(), src_op); MacroAssembler::ld_b(dst.fp().toW(), src_op);
break; break;
default: default:
UNREACHABLE(); UNREACHABLE();
@ -613,23 +613,23 @@ void LiftoffAssembler::Store(Register dst_addr, Register offset_reg,
break; break;
case StoreType::kI32Store16: case StoreType::kI32Store16:
case StoreType::kI64Store16: case StoreType::kI64Store16:
TurboAssembler::Ush(src.gp(), dst_op, t8); MacroAssembler::Ush(src.gp(), dst_op, t8);
break; break;
case StoreType::kI32Store: case StoreType::kI32Store:
case StoreType::kI64Store32: case StoreType::kI64Store32:
TurboAssembler::Usw(src.gp(), dst_op); MacroAssembler::Usw(src.gp(), dst_op);
break; break;
case StoreType::kI64Store: case StoreType::kI64Store:
TurboAssembler::Usd(src.gp(), dst_op); MacroAssembler::Usd(src.gp(), dst_op);
break; break;
case StoreType::kF32Store: case StoreType::kF32Store:
TurboAssembler::Uswc1(src.fp(), dst_op, t8); MacroAssembler::Uswc1(src.fp(), dst_op, t8);
break; break;
case StoreType::kF64Store: case StoreType::kF64Store:
TurboAssembler::Usdc1(src.fp(), dst_op, t8); MacroAssembler::Usdc1(src.fp(), dst_op, t8);
break; break;
case StoreType::kS128Store: case StoreType::kS128Store:
TurboAssembler::st_b(src.fp().toW(), dst_op); MacroAssembler::st_b(src.fp().toW(), dst_op);
break; break;
default: default:
UNREACHABLE(); UNREACHABLE();
@ -987,16 +987,16 @@ void LiftoffAssembler::MoveStackValue(uint32_t dst_offset, uint32_t src_offset,
void LiftoffAssembler::Move(Register dst, Register src, ValueKind kind) { void LiftoffAssembler::Move(Register dst, Register src, ValueKind kind) {
DCHECK_NE(dst, src); DCHECK_NE(dst, src);
// TODO(ksreten): Handle different sizes here. // TODO(ksreten): Handle different sizes here.
TurboAssembler::Move(dst, src); MacroAssembler::Move(dst, src);
} }
void LiftoffAssembler::Move(DoubleRegister dst, DoubleRegister src, void LiftoffAssembler::Move(DoubleRegister dst, DoubleRegister src,
ValueKind kind) { ValueKind kind) {
DCHECK_NE(dst, src); DCHECK_NE(dst, src);
if (kind != kS128) { if (kind != kS128) {
TurboAssembler::Move(dst, src); MacroAssembler::Move(dst, src);
} else { } else {
TurboAssembler::move_v(dst.toW(), src.toW()); MacroAssembler::move_v(dst.toW(), src.toW());
} }
} }
@ -1017,10 +1017,10 @@ void LiftoffAssembler::Spill(int offset, LiftoffRegister reg, ValueKind kind) {
Swc1(reg.fp(), dst); Swc1(reg.fp(), dst);
break; break;
case kF64: case kF64:
TurboAssembler::Sdc1(reg.fp(), dst); MacroAssembler::Sdc1(reg.fp(), dst);
break; break;
case kS128: case kS128:
TurboAssembler::st_b(reg.fp().toW(), dst); MacroAssembler::st_b(reg.fp().toW(), dst);
break; break;
default: default:
UNREACHABLE(); UNREACHABLE();
@ -1032,14 +1032,14 @@ void LiftoffAssembler::Spill(int offset, WasmValue value) {
MemOperand dst = liftoff::GetStackSlot(offset); MemOperand dst = liftoff::GetStackSlot(offset);
switch (value.type().kind()) { switch (value.type().kind()) {
case kI32: { case kI32: {
TurboAssembler::li(kScratchReg, Operand(value.to_i32())); MacroAssembler::li(kScratchReg, Operand(value.to_i32()));
Sw(kScratchReg, dst); Sw(kScratchReg, dst);
break; break;
} }
case kI64: case kI64:
case kRef: case kRef:
case kRefNull: { case kRefNull: {
TurboAssembler::li(kScratchReg, value.to_i64()); MacroAssembler::li(kScratchReg, value.to_i64());
Sd(kScratchReg, dst); Sd(kScratchReg, dst);
break; break;
} }
@ -1065,10 +1065,10 @@ void LiftoffAssembler::Fill(LiftoffRegister reg, int offset, ValueKind kind) {
Lwc1(reg.fp(), src); Lwc1(reg.fp(), src);
break; break;
case kF64: case kF64:
TurboAssembler::Ldc1(reg.fp(), src); MacroAssembler::Ldc1(reg.fp(), src);
break; break;
case kS128: case kS128:
TurboAssembler::ld_b(reg.fp().toW(), src); MacroAssembler::ld_b(reg.fp().toW(), src);
break; break;
default: default:
UNREACHABLE(); UNREACHABLE();
@ -1117,16 +1117,16 @@ void LiftoffAssembler::LoadSpillAddress(Register dst, int offset,
} }
void LiftoffAssembler::emit_i64_clz(LiftoffRegister dst, LiftoffRegister src) { void LiftoffAssembler::emit_i64_clz(LiftoffRegister dst, LiftoffRegister src) {
TurboAssembler::Dclz(dst.gp(), src.gp()); MacroAssembler::Dclz(dst.gp(), src.gp());
} }
void LiftoffAssembler::emit_i64_ctz(LiftoffRegister dst, LiftoffRegister src) { void LiftoffAssembler::emit_i64_ctz(LiftoffRegister dst, LiftoffRegister src) {
TurboAssembler::Dctz(dst.gp(), src.gp()); MacroAssembler::Dctz(dst.gp(), src.gp());
} }
bool LiftoffAssembler::emit_i64_popcnt(LiftoffRegister dst, bool LiftoffAssembler::emit_i64_popcnt(LiftoffRegister dst,
LiftoffRegister src) { LiftoffRegister src) {
TurboAssembler::Dpopcnt(dst.gp(), src.gp()); MacroAssembler::Dpopcnt(dst.gp(), src.gp());
return true; return true;
} }
@ -1140,42 +1140,42 @@ void LiftoffAssembler::IncrementSmi(LiftoffRegister dst, int offset) {
} }
void LiftoffAssembler::emit_i32_mul(Register dst, Register lhs, Register rhs) { void LiftoffAssembler::emit_i32_mul(Register dst, Register lhs, Register rhs) {
TurboAssembler::Mul(dst, lhs, rhs); MacroAssembler::Mul(dst, lhs, rhs);
} }
void LiftoffAssembler::emit_i32_divs(Register dst, Register lhs, Register rhs, void LiftoffAssembler::emit_i32_divs(Register dst, Register lhs, Register rhs,
Label* trap_div_by_zero, Label* trap_div_by_zero,
Label* trap_div_unrepresentable) { Label* trap_div_unrepresentable) {
TurboAssembler::Branch(trap_div_by_zero, eq, rhs, Operand(zero_reg)); MacroAssembler::Branch(trap_div_by_zero, eq, rhs, Operand(zero_reg));
// Check if lhs == kMinInt and rhs == -1, since this case is unrepresentable. // Check if lhs == kMinInt and rhs == -1, since this case is unrepresentable.
TurboAssembler::li(kScratchReg, 1); MacroAssembler::li(kScratchReg, 1);
TurboAssembler::li(kScratchReg2, 1); MacroAssembler::li(kScratchReg2, 1);
TurboAssembler::LoadZeroOnCondition(kScratchReg, lhs, Operand(kMinInt), eq); MacroAssembler::LoadZeroOnCondition(kScratchReg, lhs, Operand(kMinInt), eq);
TurboAssembler::LoadZeroOnCondition(kScratchReg2, rhs, Operand(-1), eq); MacroAssembler::LoadZeroOnCondition(kScratchReg2, rhs, Operand(-1), eq);
daddu(kScratchReg, kScratchReg, kScratchReg2); daddu(kScratchReg, kScratchReg, kScratchReg2);
TurboAssembler::Branch(trap_div_unrepresentable, eq, kScratchReg, MacroAssembler::Branch(trap_div_unrepresentable, eq, kScratchReg,
Operand(zero_reg)); Operand(zero_reg));
TurboAssembler::Div(dst, lhs, rhs); MacroAssembler::Div(dst, lhs, rhs);
} }
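emit_i32_divs has to trap on the two inputs the divide instruction does not flag: a zero divisor, and kMinInt / -1, whose true quotient 2^31 does not fit in 32 bits. The scratch-register dance above computes exactly that second predicate. A worked C++ equivalent of the checks (illustrative; the enum stands in for the two trap labels):

#include <cstdint>

enum class DivTrap { kNone, kDivByZero, kUnrepresentable };

// Mirrors the emitted checks: rhs == 0 traps, INT32_MIN / -1 traps,
// everything else divides normally.
DivTrap CheckedDivS32(int32_t lhs, int32_t rhs, int32_t* out) {
  if (rhs == 0) return DivTrap::kDivByZero;  // Branch(trap_div_by_zero, eq, ...)
  if (lhs == INT32_MIN && rhs == -1) return DivTrap::kUnrepresentable;
  *out = lhs / rhs;                          // MacroAssembler::Div
  return DivTrap::kNone;
}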
void LiftoffAssembler::emit_i32_divu(Register dst, Register lhs, Register rhs, void LiftoffAssembler::emit_i32_divu(Register dst, Register lhs, Register rhs,
Label* trap_div_by_zero) { Label* trap_div_by_zero) {
TurboAssembler::Branch(trap_div_by_zero, eq, rhs, Operand(zero_reg)); MacroAssembler::Branch(trap_div_by_zero, eq, rhs, Operand(zero_reg));
TurboAssembler::Divu(dst, lhs, rhs); MacroAssembler::Divu(dst, lhs, rhs);
} }
void LiftoffAssembler::emit_i32_rems(Register dst, Register lhs, Register rhs, void LiftoffAssembler::emit_i32_rems(Register dst, Register lhs, Register rhs,
Label* trap_div_by_zero) { Label* trap_div_by_zero) {
TurboAssembler::Branch(trap_div_by_zero, eq, rhs, Operand(zero_reg)); MacroAssembler::Branch(trap_div_by_zero, eq, rhs, Operand(zero_reg));
TurboAssembler::Mod(dst, lhs, rhs); MacroAssembler::Mod(dst, lhs, rhs);
} }
void LiftoffAssembler::emit_i32_remu(Register dst, Register lhs, Register rhs, void LiftoffAssembler::emit_i32_remu(Register dst, Register lhs, Register rhs,
Label* trap_div_by_zero) { Label* trap_div_by_zero) {
TurboAssembler::Branch(trap_div_by_zero, eq, rhs, Operand(zero_reg)); MacroAssembler::Branch(trap_div_by_zero, eq, rhs, Operand(zero_reg));
TurboAssembler::Modu(dst, lhs, rhs); MacroAssembler::Modu(dst, lhs, rhs);
} }
#define I32_BINOP(name, instruction) \ #define I32_BINOP(name, instruction) \
@ -1211,15 +1211,15 @@ I32_BINOP_I(xor, Xor)
#undef I32_BINOP_I #undef I32_BINOP_I
void LiftoffAssembler::emit_i32_clz(Register dst, Register src) { void LiftoffAssembler::emit_i32_clz(Register dst, Register src) {
TurboAssembler::Clz(dst, src); MacroAssembler::Clz(dst, src);
} }
void LiftoffAssembler::emit_i32_ctz(Register dst, Register src) { void LiftoffAssembler::emit_i32_ctz(Register dst, Register src) {
TurboAssembler::Ctz(dst, src); MacroAssembler::Ctz(dst, src);
} }
bool LiftoffAssembler::emit_i32_popcnt(Register dst, Register src) { bool LiftoffAssembler::emit_i32_popcnt(Register dst, Register src) {
TurboAssembler::Popcnt(dst, src); MacroAssembler::Popcnt(dst, src);
return true; return true;
} }
@ -1244,55 +1244,55 @@ I32_SHIFTOP_I(shr, srl)
void LiftoffAssembler::emit_i64_addi(LiftoffRegister dst, LiftoffRegister lhs, void LiftoffAssembler::emit_i64_addi(LiftoffRegister dst, LiftoffRegister lhs,
int64_t imm) { int64_t imm) {
TurboAssembler::Daddu(dst.gp(), lhs.gp(), Operand(imm)); MacroAssembler::Daddu(dst.gp(), lhs.gp(), Operand(imm));
} }
void LiftoffAssembler::emit_i64_mul(LiftoffRegister dst, LiftoffRegister lhs, void LiftoffAssembler::emit_i64_mul(LiftoffRegister dst, LiftoffRegister lhs,
LiftoffRegister rhs) { LiftoffRegister rhs) {
TurboAssembler::Dmul(dst.gp(), lhs.gp(), rhs.gp()); MacroAssembler::Dmul(dst.gp(), lhs.gp(), rhs.gp());
} }
bool LiftoffAssembler::emit_i64_divs(LiftoffRegister dst, LiftoffRegister lhs, bool LiftoffAssembler::emit_i64_divs(LiftoffRegister dst, LiftoffRegister lhs,
LiftoffRegister rhs, LiftoffRegister rhs,
Label* trap_div_by_zero, Label* trap_div_by_zero,
Label* trap_div_unrepresentable) { Label* trap_div_unrepresentable) {
TurboAssembler::Branch(trap_div_by_zero, eq, rhs.gp(), Operand(zero_reg)); MacroAssembler::Branch(trap_div_by_zero, eq, rhs.gp(), Operand(zero_reg));
// Check if lhs == MinInt64 and rhs == -1, since this case is unrepresentable. // Check if lhs == MinInt64 and rhs == -1, since this case is unrepresentable.
TurboAssembler::li(kScratchReg, 1); MacroAssembler::li(kScratchReg, 1);
TurboAssembler::li(kScratchReg2, 1); MacroAssembler::li(kScratchReg2, 1);
TurboAssembler::LoadZeroOnCondition( MacroAssembler::LoadZeroOnCondition(
kScratchReg, lhs.gp(), Operand(std::numeric_limits<int64_t>::min()), eq); kScratchReg, lhs.gp(), Operand(std::numeric_limits<int64_t>::min()), eq);
TurboAssembler::LoadZeroOnCondition(kScratchReg2, rhs.gp(), Operand(-1), eq); MacroAssembler::LoadZeroOnCondition(kScratchReg2, rhs.gp(), Operand(-1), eq);
daddu(kScratchReg, kScratchReg, kScratchReg2); daddu(kScratchReg, kScratchReg, kScratchReg2);
TurboAssembler::Branch(trap_div_unrepresentable, eq, kScratchReg, MacroAssembler::Branch(trap_div_unrepresentable, eq, kScratchReg,
Operand(zero_reg)); Operand(zero_reg));
TurboAssembler::Ddiv(dst.gp(), lhs.gp(), rhs.gp()); MacroAssembler::Ddiv(dst.gp(), lhs.gp(), rhs.gp());
return true; return true;
} }
bool LiftoffAssembler::emit_i64_divu(LiftoffRegister dst, LiftoffRegister lhs, bool LiftoffAssembler::emit_i64_divu(LiftoffRegister dst, LiftoffRegister lhs,
LiftoffRegister rhs, LiftoffRegister rhs,
Label* trap_div_by_zero) { Label* trap_div_by_zero) {
TurboAssembler::Branch(trap_div_by_zero, eq, rhs.gp(), Operand(zero_reg)); MacroAssembler::Branch(trap_div_by_zero, eq, rhs.gp(), Operand(zero_reg));
TurboAssembler::Ddivu(dst.gp(), lhs.gp(), rhs.gp()); MacroAssembler::Ddivu(dst.gp(), lhs.gp(), rhs.gp());
return true; return true;
} }
bool LiftoffAssembler::emit_i64_rems(LiftoffRegister dst, LiftoffRegister lhs, bool LiftoffAssembler::emit_i64_rems(LiftoffRegister dst, LiftoffRegister lhs,
LiftoffRegister rhs, LiftoffRegister rhs,
Label* trap_div_by_zero) { Label* trap_div_by_zero) {
TurboAssembler::Branch(trap_div_by_zero, eq, rhs.gp(), Operand(zero_reg)); MacroAssembler::Branch(trap_div_by_zero, eq, rhs.gp(), Operand(zero_reg));
TurboAssembler::Dmod(dst.gp(), lhs.gp(), rhs.gp()); MacroAssembler::Dmod(dst.gp(), lhs.gp(), rhs.gp());
return true; return true;
} }
bool LiftoffAssembler::emit_i64_remu(LiftoffRegister dst, LiftoffRegister lhs, bool LiftoffAssembler::emit_i64_remu(LiftoffRegister dst, LiftoffRegister lhs,
LiftoffRegister rhs, LiftoffRegister rhs,
Label* trap_div_by_zero) { Label* trap_div_by_zero) {
TurboAssembler::Branch(trap_div_by_zero, eq, rhs.gp(), Operand(zero_reg)); MacroAssembler::Branch(trap_div_by_zero, eq, rhs.gp(), Operand(zero_reg));
TurboAssembler::Dmodu(dst.gp(), lhs.gp(), rhs.gp()); MacroAssembler::Dmodu(dst.gp(), lhs.gp(), rhs.gp());
return true; return true;
} }
@ -1354,32 +1354,32 @@ void LiftoffAssembler::emit_u32_to_uintptr(Register dst, Register src) {
} }
void LiftoffAssembler::emit_f32_neg(DoubleRegister dst, DoubleRegister src) { void LiftoffAssembler::emit_f32_neg(DoubleRegister dst, DoubleRegister src) {
TurboAssembler::Neg_s(dst, src); MacroAssembler::Neg_s(dst, src);
} }
void LiftoffAssembler::emit_f64_neg(DoubleRegister dst, DoubleRegister src) { void LiftoffAssembler::emit_f64_neg(DoubleRegister dst, DoubleRegister src) {
TurboAssembler::Neg_d(dst, src); MacroAssembler::Neg_d(dst, src);
} }
void LiftoffAssembler::emit_f32_min(DoubleRegister dst, DoubleRegister lhs, void LiftoffAssembler::emit_f32_min(DoubleRegister dst, DoubleRegister lhs,
DoubleRegister rhs) { DoubleRegister rhs) {
Label ool, done; Label ool, done;
TurboAssembler::Float32Min(dst, lhs, rhs, &ool); MacroAssembler::Float32Min(dst, lhs, rhs, &ool);
Branch(&done); Branch(&done);
bind(&ool); bind(&ool);
TurboAssembler::Float32MinOutOfLine(dst, lhs, rhs); MacroAssembler::Float32MinOutOfLine(dst, lhs, rhs);
bind(&done); bind(&done);
} }
void LiftoffAssembler::emit_f32_max(DoubleRegister dst, DoubleRegister lhs, void LiftoffAssembler::emit_f32_max(DoubleRegister dst, DoubleRegister lhs,
DoubleRegister rhs) { DoubleRegister rhs) {
Label ool, done; Label ool, done;
TurboAssembler::Float32Max(dst, lhs, rhs, &ool); MacroAssembler::Float32Max(dst, lhs, rhs, &ool);
Branch(&done); Branch(&done);
bind(&ool); bind(&ool);
TurboAssembler::Float32MaxOutOfLine(dst, lhs, rhs); MacroAssembler::Float32MaxOutOfLine(dst, lhs, rhs);
bind(&done); bind(&done);
} }
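Float32Min/Float32Max settle the common case inline and branch to the out-of-line helper only for the awkward inputs, NaN and zeros of opposite sign, which is why each emitter binds an ool label. The semantics the pair implements, per the wasm spec, as a hedged sketch:

#include <cmath>

// Wasm f32.min semantics (sketch): NaN propagates, and -0 orders below +0.
// These are the cases the out-of-line path exists to get right.
float WasmF32Min(float a, float b) {
  if (std::isnan(a) || std::isnan(b)) return NAN;
  if (a == 0.0f && b == 0.0f) return std::signbit(a) ? a : b;
  return a < b ? a : b;
}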
@ -1410,22 +1410,22 @@ void LiftoffAssembler::emit_f32_copysign(DoubleRegister dst, DoubleRegister lhs,
void LiftoffAssembler::emit_f64_min(DoubleRegister dst, DoubleRegister lhs, void LiftoffAssembler::emit_f64_min(DoubleRegister dst, DoubleRegister lhs,
DoubleRegister rhs) { DoubleRegister rhs) {
Label ool, done; Label ool, done;
TurboAssembler::Float64Min(dst, lhs, rhs, &ool); MacroAssembler::Float64Min(dst, lhs, rhs, &ool);
Branch(&done); Branch(&done);
bind(&ool); bind(&ool);
TurboAssembler::Float64MinOutOfLine(dst, lhs, rhs); MacroAssembler::Float64MinOutOfLine(dst, lhs, rhs);
bind(&done); bind(&done);
} }
void LiftoffAssembler::emit_f64_max(DoubleRegister dst, DoubleRegister lhs, void LiftoffAssembler::emit_f64_max(DoubleRegister dst, DoubleRegister lhs,
DoubleRegister rhs) { DoubleRegister rhs) {
Label ool, done; Label ool, done;
TurboAssembler::Float64Max(dst, lhs, rhs, &ool); MacroAssembler::Float64Max(dst, lhs, rhs, &ool);
Branch(&done); Branch(&done);
bind(&ool); bind(&ool);
TurboAssembler::Float64MaxOutOfLine(dst, lhs, rhs); MacroAssembler::Float64MaxOutOfLine(dst, lhs, rhs);
bind(&done); bind(&done);
} }
@ -1498,7 +1498,7 @@ bool LiftoffAssembler::emit_type_conversion(WasmOpcode opcode,
LiftoffRegister src, Label* trap) { LiftoffRegister src, Label* trap) {
switch (opcode) { switch (opcode) {
case kExprI32ConvertI64: case kExprI32ConvertI64:
TurboAssembler::Ext(dst.gp(), src.gp(), 0, 32); MacroAssembler::Ext(dst.gp(), src.gp(), 0, 32);
return true; return true;
case kExprI32SConvertF32: { case kExprI32SConvertF32: {
LiftoffRegister rounded = GetUnusedRegister(kFpReg, LiftoffRegList{src}); LiftoffRegister rounded = GetUnusedRegister(kFpReg, LiftoffRegList{src});
@ -1506,20 +1506,20 @@ bool LiftoffAssembler::emit_type_conversion(WasmOpcode opcode,
GetUnusedRegister(kFpReg, LiftoffRegList{src, rounded}); GetUnusedRegister(kFpReg, LiftoffRegList{src, rounded});
// Real conversion. // Real conversion.
TurboAssembler::Trunc_s_s(rounded.fp(), src.fp()); MacroAssembler::Trunc_s_s(rounded.fp(), src.fp());
trunc_w_s(kScratchDoubleReg, rounded.fp()); trunc_w_s(kScratchDoubleReg, rounded.fp());
mfc1(dst.gp(), kScratchDoubleReg); mfc1(dst.gp(), kScratchDoubleReg);
// Avoid INT32_MAX as an overflow indicator and use INT32_MIN instead, // Avoid INT32_MAX as an overflow indicator and use INT32_MIN instead,
// because INT32_MIN allows easier out-of-bounds detection. // because INT32_MIN allows easier out-of-bounds detection.
TurboAssembler::Addu(kScratchReg, dst.gp(), 1); MacroAssembler::Addu(kScratchReg, dst.gp(), 1);
TurboAssembler::Slt(kScratchReg2, kScratchReg, dst.gp()); MacroAssembler::Slt(kScratchReg2, kScratchReg, dst.gp());
TurboAssembler::Movn(dst.gp(), kScratchReg, kScratchReg2); MacroAssembler::Movn(dst.gp(), kScratchReg, kScratchReg2);
// Checking if trap. // Checking if trap.
mtc1(dst.gp(), kScratchDoubleReg); mtc1(dst.gp(), kScratchDoubleReg);
cvt_s_w(converted_back.fp(), kScratchDoubleReg); cvt_s_w(converted_back.fp(), kScratchDoubleReg);
TurboAssembler::CompareF32(EQ, rounded.fp(), converted_back.fp()); MacroAssembler::CompareF32(EQ, rounded.fp(), converted_back.fp());
TurboAssembler::BranchFalseF(trap); MacroAssembler::BranchFalseF(trap);
return true; return true;
} }
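Every trapping conversion case follows the same shape: truncate, move the integer back into an FPU register, convert it back, and trap unless the round trip compares equal to the truncated input; one comparison catches both NaN and out-of-range values. A C++ rendering of the pattern (a sketch; the explicit range test stands in for the hardware truncation's overflow behavior):

#include <cmath>
#include <cstdint>

// Sketch of kExprI32SConvertF32 above; returns false where the code traps.
bool TruncF32ToI32(float src, int32_t* out) {
  float rounded = std::trunc(src);                         // Trunc_s_s
  constexpr float kMin = -2147483648.0f;                   // INT32_MIN, exact
  constexpr float kMax = 2147483648.0f;                    // INT32_MAX + 1
  if (!(rounded >= kMin && rounded < kMax)) return false;  // NaN or overflow
  int32_t result = static_cast<int32_t>(rounded);          // trunc_w_s
  float converted_back = static_cast<float>(result);       // cvt_s_w
  if (converted_back != rounded) return false;  // CompareF32(EQ) + BranchFalseF
  *out = result;
  return true;
}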
case kExprI32UConvertF32: { case kExprI32UConvertF32: {
@ -1528,18 +1528,18 @@ bool LiftoffAssembler::emit_type_conversion(WasmOpcode opcode,
GetUnusedRegister(kFpReg, LiftoffRegList{src, rounded}); GetUnusedRegister(kFpReg, LiftoffRegList{src, rounded});
// Real conversion. // Real conversion.
TurboAssembler::Trunc_s_s(rounded.fp(), src.fp()); MacroAssembler::Trunc_s_s(rounded.fp(), src.fp());
TurboAssembler::Trunc_uw_s(dst.gp(), rounded.fp(), kScratchDoubleReg); MacroAssembler::Trunc_uw_s(dst.gp(), rounded.fp(), kScratchDoubleReg);
// Avoid UINT32_MAX as an overflow indicator and use 0 instead, // Avoid UINT32_MAX as an overflow indicator and use 0 instead,
// because 0 allows easier out-of-bounds detection. // because 0 allows easier out-of-bounds detection.
TurboAssembler::Addu(kScratchReg, dst.gp(), 1); MacroAssembler::Addu(kScratchReg, dst.gp(), 1);
TurboAssembler::Movz(dst.gp(), zero_reg, kScratchReg); MacroAssembler::Movz(dst.gp(), zero_reg, kScratchReg);
// Checking if trap. // Checking if trap.
TurboAssembler::Cvt_d_uw(converted_back.fp(), dst.gp()); MacroAssembler::Cvt_d_uw(converted_back.fp(), dst.gp());
cvt_s_d(converted_back.fp(), converted_back.fp()); cvt_s_d(converted_back.fp(), converted_back.fp());
TurboAssembler::CompareF32(EQ, rounded.fp(), converted_back.fp()); MacroAssembler::CompareF32(EQ, rounded.fp(), converted_back.fp());
TurboAssembler::BranchFalseF(trap); MacroAssembler::BranchFalseF(trap);
return true; return true;
} }
case kExprI32SConvertF64: { case kExprI32SConvertF64: {
@ -1548,14 +1548,14 @@ bool LiftoffAssembler::emit_type_conversion(WasmOpcode opcode,
GetUnusedRegister(kFpReg, LiftoffRegList{src, rounded}); GetUnusedRegister(kFpReg, LiftoffRegList{src, rounded});
// Real conversion. // Real conversion.
TurboAssembler::Trunc_d_d(rounded.fp(), src.fp()); MacroAssembler::Trunc_d_d(rounded.fp(), src.fp());
trunc_w_d(kScratchDoubleReg, rounded.fp()); trunc_w_d(kScratchDoubleReg, rounded.fp());
mfc1(dst.gp(), kScratchDoubleReg); mfc1(dst.gp(), kScratchDoubleReg);
// Checking if trap. // Checking if trap.
cvt_d_w(converted_back.fp(), kScratchDoubleReg); cvt_d_w(converted_back.fp(), kScratchDoubleReg);
TurboAssembler::CompareF64(EQ, rounded.fp(), converted_back.fp()); MacroAssembler::CompareF64(EQ, rounded.fp(), converted_back.fp());
TurboAssembler::BranchFalseF(trap); MacroAssembler::BranchFalseF(trap);
return true; return true;
} }
case kExprI32UConvertF64: { case kExprI32UConvertF64: {
@ -1564,23 +1564,23 @@ bool LiftoffAssembler::emit_type_conversion(WasmOpcode opcode,
GetUnusedRegister(kFpReg, LiftoffRegList{src, rounded}); GetUnusedRegister(kFpReg, LiftoffRegList{src, rounded});
// Real conversion. // Real conversion.
TurboAssembler::Trunc_d_d(rounded.fp(), src.fp()); MacroAssembler::Trunc_d_d(rounded.fp(), src.fp());
TurboAssembler::Trunc_uw_d(dst.gp(), rounded.fp(), kScratchDoubleReg); MacroAssembler::Trunc_uw_d(dst.gp(), rounded.fp(), kScratchDoubleReg);
// Checking if trap. // Checking if trap.
TurboAssembler::Cvt_d_uw(converted_back.fp(), dst.gp()); MacroAssembler::Cvt_d_uw(converted_back.fp(), dst.gp());
TurboAssembler::CompareF64(EQ, rounded.fp(), converted_back.fp()); MacroAssembler::CompareF64(EQ, rounded.fp(), converted_back.fp());
TurboAssembler::BranchFalseF(trap); MacroAssembler::BranchFalseF(trap);
return true; return true;
} }
case kExprI32ReinterpretF32: case kExprI32ReinterpretF32:
TurboAssembler::FmoveLow(dst.gp(), src.fp()); MacroAssembler::FmoveLow(dst.gp(), src.fp());
return true; return true;
case kExprI64SConvertI32: case kExprI64SConvertI32:
sll(dst.gp(), src.gp(), 0); sll(dst.gp(), src.gp(), 0);
return true; return true;
case kExprI64UConvertI32: case kExprI64UConvertI32:
TurboAssembler::Dext(dst.gp(), src.gp(), 0, 32); MacroAssembler::Dext(dst.gp(), src.gp(), 0, 32);
return true; return true;
case kExprI64SConvertF32: { case kExprI64SConvertF32: {
LiftoffRegister rounded = GetUnusedRegister(kFpReg, LiftoffRegList{src}); LiftoffRegister rounded = GetUnusedRegister(kFpReg, LiftoffRegList{src});
@ -1588,29 +1588,29 @@ bool LiftoffAssembler::emit_type_conversion(WasmOpcode opcode,
GetUnusedRegister(kFpReg, LiftoffRegList{src, rounded}); GetUnusedRegister(kFpReg, LiftoffRegList{src, rounded});
// Real conversion. // Real conversion.
TurboAssembler::Trunc_s_s(rounded.fp(), src.fp()); MacroAssembler::Trunc_s_s(rounded.fp(), src.fp());
trunc_l_s(kScratchDoubleReg, rounded.fp()); trunc_l_s(kScratchDoubleReg, rounded.fp());
dmfc1(dst.gp(), kScratchDoubleReg); dmfc1(dst.gp(), kScratchDoubleReg);
// Avoid INT64_MAX as an overflow indicator and use INT64_MIN instead, // Avoid INT64_MAX as an overflow indicator and use INT64_MIN instead,
// because INT64_MIN allows easier out-of-bounds detection. // because INT64_MIN allows easier out-of-bounds detection.
TurboAssembler::Daddu(kScratchReg, dst.gp(), 1); MacroAssembler::Daddu(kScratchReg, dst.gp(), 1);
TurboAssembler::Slt(kScratchReg2, kScratchReg, dst.gp()); MacroAssembler::Slt(kScratchReg2, kScratchReg, dst.gp());
TurboAssembler::Movn(dst.gp(), kScratchReg, kScratchReg2); MacroAssembler::Movn(dst.gp(), kScratchReg, kScratchReg2);
// Checking if trap. // Checking if trap.
dmtc1(dst.gp(), kScratchDoubleReg); dmtc1(dst.gp(), kScratchDoubleReg);
cvt_s_l(converted_back.fp(), kScratchDoubleReg); cvt_s_l(converted_back.fp(), kScratchDoubleReg);
TurboAssembler::CompareF32(EQ, rounded.fp(), converted_back.fp()); MacroAssembler::CompareF32(EQ, rounded.fp(), converted_back.fp());
TurboAssembler::BranchFalseF(trap); MacroAssembler::BranchFalseF(trap);
return true; return true;
} }
case kExprI64UConvertF32: { case kExprI64UConvertF32: {
// Real conversion. // Real conversion.
TurboAssembler::Trunc_ul_s(dst.gp(), src.fp(), kScratchDoubleReg, MacroAssembler::Trunc_ul_s(dst.gp(), src.fp(), kScratchDoubleReg,
kScratchReg); kScratchReg);
// Checking if trap. // Checking if trap.
TurboAssembler::Branch(trap, eq, kScratchReg, Operand(zero_reg)); MacroAssembler::Branch(trap, eq, kScratchReg, Operand(zero_reg));
return true; return true;
} }
case kExprI64SConvertF64: { case kExprI64SConvertF64: {
@ -1619,29 +1619,29 @@ bool LiftoffAssembler::emit_type_conversion(WasmOpcode opcode,
GetUnusedRegister(kFpReg, LiftoffRegList{src, rounded}); GetUnusedRegister(kFpReg, LiftoffRegList{src, rounded});
// Real conversion. // Real conversion.
TurboAssembler::Trunc_d_d(rounded.fp(), src.fp()); MacroAssembler::Trunc_d_d(rounded.fp(), src.fp());
trunc_l_d(kScratchDoubleReg, rounded.fp()); trunc_l_d(kScratchDoubleReg, rounded.fp());
dmfc1(dst.gp(), kScratchDoubleReg); dmfc1(dst.gp(), kScratchDoubleReg);
// Avoid INT64_MAX as an overflow indicator and use INT64_MIN instead, // Avoid INT64_MAX as an overflow indicator and use INT64_MIN instead,
// because INT64_MIN allows easier out-of-bounds detection. // because INT64_MIN allows easier out-of-bounds detection.
TurboAssembler::Daddu(kScratchReg, dst.gp(), 1); MacroAssembler::Daddu(kScratchReg, dst.gp(), 1);
TurboAssembler::Slt(kScratchReg2, kScratchReg, dst.gp()); MacroAssembler::Slt(kScratchReg2, kScratchReg, dst.gp());
TurboAssembler::Movn(dst.gp(), kScratchReg, kScratchReg2); MacroAssembler::Movn(dst.gp(), kScratchReg, kScratchReg2);
// Checking if trap. // Checking if trap.
dmtc1(dst.gp(), kScratchDoubleReg); dmtc1(dst.gp(), kScratchDoubleReg);
cvt_d_l(converted_back.fp(), kScratchDoubleReg); cvt_d_l(converted_back.fp(), kScratchDoubleReg);
TurboAssembler::CompareF64(EQ, rounded.fp(), converted_back.fp()); MacroAssembler::CompareF64(EQ, rounded.fp(), converted_back.fp());
TurboAssembler::BranchFalseF(trap); MacroAssembler::BranchFalseF(trap);
return true; return true;
} }
case kExprI64UConvertF64: { case kExprI64UConvertF64: {
// Real conversion. // Real conversion.
TurboAssembler::Trunc_ul_d(dst.gp(), src.fp(), kScratchDoubleReg, MacroAssembler::Trunc_ul_d(dst.gp(), src.fp(), kScratchDoubleReg,
kScratchReg); kScratchReg);
// Checking if trap. // Checking if trap.
TurboAssembler::Branch(trap, eq, kScratchReg, Operand(zero_reg)); MacroAssembler::Branch(trap, eq, kScratchReg, Operand(zero_reg));
return true; return true;
} }
case kExprI64ReinterpretF64: case kExprI64ReinterpretF64:
@ -1654,13 +1654,13 @@ bool LiftoffAssembler::emit_type_conversion(WasmOpcode opcode,
return true; return true;
} }
case kExprF32UConvertI32: case kExprF32UConvertI32:
TurboAssembler::Cvt_s_uw(dst.fp(), src.gp()); MacroAssembler::Cvt_s_uw(dst.fp(), src.gp());
return true; return true;
case kExprF32ConvertF64: case kExprF32ConvertF64:
cvt_s_d(dst.fp(), src.fp()); cvt_s_d(dst.fp(), src.fp());
return true; return true;
case kExprF32ReinterpretI32: case kExprF32ReinterpretI32:
TurboAssembler::FmoveLow(dst.fp(), src.gp()); MacroAssembler::FmoveLow(dst.fp(), src.gp());
return true; return true;
case kExprF64SConvertI32: { case kExprF64SConvertI32: {
LiftoffRegister scratch = GetUnusedRegister(kFpReg, LiftoffRegList{dst}); LiftoffRegister scratch = GetUnusedRegister(kFpReg, LiftoffRegList{dst});
@ -1669,7 +1669,7 @@ bool LiftoffAssembler::emit_type_conversion(WasmOpcode opcode,
return true; return true;
} }
case kExprF64UConvertI32: case kExprF64UConvertI32:
TurboAssembler::Cvt_d_uw(dst.fp(), src.gp()); MacroAssembler::Cvt_d_uw(dst.fp(), src.gp());
return true; return true;
case kExprF64ConvertF32: case kExprF64ConvertF32:
cvt_d_s(dst.fp(), src.fp()); cvt_d_s(dst.fp(), src.fp());
@ -1688,7 +1688,7 @@ bool LiftoffAssembler::emit_type_conversion(WasmOpcode opcode,
CompareIsNanF32(src.fp(), src.fp()); CompareIsNanF32(src.fp(), src.fp());
BranchTrueShortF(&done); BranchTrueShortF(&done);
li(dst.gp(), static_cast<int32_t>(std::numeric_limits<int32_t>::min())); li(dst.gp(), static_cast<int32_t>(std::numeric_limits<int32_t>::min()));
TurboAssembler::Move( MacroAssembler::Move(
kScratchDoubleReg, kScratchDoubleReg,
static_cast<float>(std::numeric_limits<int32_t>::min())); static_cast<float>(std::numeric_limits<int32_t>::min()));
CompareF32(OLT, src.fp(), kScratchDoubleReg); CompareF32(OLT, src.fp(), kScratchDoubleReg);
@ -1702,7 +1702,7 @@ bool LiftoffAssembler::emit_type_conversion(WasmOpcode opcode,
case kExprI32UConvertSatF32: { case kExprI32UConvertSatF32: {
Label isnan_or_lessthan_or_equal_zero; Label isnan_or_lessthan_or_equal_zero;
mov(dst.gp(), zero_reg); mov(dst.gp(), zero_reg);
TurboAssembler::Move(kScratchDoubleReg, static_cast<float>(0.0)); MacroAssembler::Move(kScratchDoubleReg, static_cast<float>(0.0));
CompareF32(ULE, src.fp(), kScratchDoubleReg); CompareF32(ULE, src.fp(), kScratchDoubleReg);
BranchTrueShortF(&isnan_or_lessthan_or_equal_zero); BranchTrueShortF(&isnan_or_lessthan_or_equal_zero);
Trunc_uw_s(dst.gp(), src.fp(), kScratchDoubleReg); Trunc_uw_s(dst.gp(), src.fp(), kScratchDoubleReg);
@ -1719,7 +1719,7 @@ bool LiftoffAssembler::emit_type_conversion(WasmOpcode opcode,
CompareIsNanF64(src.fp(), src.fp()); CompareIsNanF64(src.fp(), src.fp());
BranchTrueShortF(&done); BranchTrueShortF(&done);
li(dst.gp(), static_cast<int32_t>(std::numeric_limits<int32_t>::min())); li(dst.gp(), static_cast<int32_t>(std::numeric_limits<int32_t>::min()));
TurboAssembler::Move( MacroAssembler::Move(
kScratchDoubleReg, kScratchDoubleReg,
static_cast<double>(std::numeric_limits<int32_t>::min())); static_cast<double>(std::numeric_limits<int32_t>::min()));
CompareF64(OLT, src.fp(), kScratchDoubleReg); CompareF64(OLT, src.fp(), kScratchDoubleReg);
@ -1733,7 +1733,7 @@ bool LiftoffAssembler::emit_type_conversion(WasmOpcode opcode,
case kExprI32UConvertSatF64: { case kExprI32UConvertSatF64: {
Label isnan_or_lessthan_or_equal_zero; Label isnan_or_lessthan_or_equal_zero;
mov(dst.gp(), zero_reg); mov(dst.gp(), zero_reg);
TurboAssembler::Move(kScratchDoubleReg, static_cast<double>(0.0)); MacroAssembler::Move(kScratchDoubleReg, static_cast<double>(0.0));
CompareF64(ULE, src.fp(), kScratchDoubleReg); CompareF64(ULE, src.fp(), kScratchDoubleReg);
BranchTrueShortF(&isnan_or_lessthan_or_equal_zero); BranchTrueShortF(&isnan_or_lessthan_or_equal_zero);
Trunc_uw_d(dst.gp(), src.fp(), kScratchDoubleReg); Trunc_uw_d(dst.gp(), src.fp(), kScratchDoubleReg);
@ -1750,7 +1750,7 @@ bool LiftoffAssembler::emit_type_conversion(WasmOpcode opcode,
CompareIsNanF32(src.fp(), src.fp()); CompareIsNanF32(src.fp(), src.fp());
BranchTrueShortF(&done); BranchTrueShortF(&done);
li(dst.gp(), static_cast<int64_t>(std::numeric_limits<int64_t>::min())); li(dst.gp(), static_cast<int64_t>(std::numeric_limits<int64_t>::min()));
TurboAssembler::Move( MacroAssembler::Move(
kScratchDoubleReg, kScratchDoubleReg,
static_cast<float>(std::numeric_limits<int64_t>::min())); static_cast<float>(std::numeric_limits<int64_t>::min()));
CompareF32(OLT, src.fp(), kScratchDoubleReg); CompareF32(OLT, src.fp(), kScratchDoubleReg);
@ -1764,7 +1764,7 @@ bool LiftoffAssembler::emit_type_conversion(WasmOpcode opcode,
case kExprI64UConvertSatF32: { case kExprI64UConvertSatF32: {
Label isnan_or_lessthan_or_equal_zero; Label isnan_or_lessthan_or_equal_zero;
mov(dst.gp(), zero_reg); mov(dst.gp(), zero_reg);
TurboAssembler::Move(kScratchDoubleReg, static_cast<float>(0.0)); MacroAssembler::Move(kScratchDoubleReg, static_cast<float>(0.0));
CompareF32(ULE, src.fp(), kScratchDoubleReg); CompareF32(ULE, src.fp(), kScratchDoubleReg);
BranchTrueShortF(&isnan_or_lessthan_or_equal_zero); BranchTrueShortF(&isnan_or_lessthan_or_equal_zero);
Trunc_ul_s(dst.gp(), src.fp(), kScratchDoubleReg, no_reg); Trunc_ul_s(dst.gp(), src.fp(), kScratchDoubleReg, no_reg);
@ -1781,7 +1781,7 @@ bool LiftoffAssembler::emit_type_conversion(WasmOpcode opcode,
CompareIsNanF64(src.fp(), src.fp()); CompareIsNanF64(src.fp(), src.fp());
BranchTrueShortF(&done); BranchTrueShortF(&done);
li(dst.gp(), static_cast<int64_t>(std::numeric_limits<int64_t>::min())); li(dst.gp(), static_cast<int64_t>(std::numeric_limits<int64_t>::min()));
TurboAssembler::Move( MacroAssembler::Move(
kScratchDoubleReg, kScratchDoubleReg,
static_cast<double>(std::numeric_limits<int64_t>::min())); static_cast<double>(std::numeric_limits<int64_t>::min()));
CompareF64(OLT, src.fp(), kScratchDoubleReg); CompareF64(OLT, src.fp(), kScratchDoubleReg);
@ -1795,7 +1795,7 @@ bool LiftoffAssembler::emit_type_conversion(WasmOpcode opcode,
case kExprI64UConvertSatF64: { case kExprI64UConvertSatF64: {
Label isnan_or_lessthan_or_equal_zero; Label isnan_or_lessthan_or_equal_zero;
mov(dst.gp(), zero_reg); mov(dst.gp(), zero_reg);
TurboAssembler::Move(kScratchDoubleReg, static_cast<double>(0.0)); MacroAssembler::Move(kScratchDoubleReg, static_cast<double>(0.0));
CompareF64(ULE, src.fp(), kScratchDoubleReg); CompareF64(ULE, src.fp(), kScratchDoubleReg);
BranchTrueShortF(&isnan_or_lessthan_or_equal_zero); BranchTrueShortF(&isnan_or_lessthan_or_equal_zero);
Trunc_ul_d(dst.gp(), src.fp(), kScratchDoubleReg, no_reg); Trunc_ul_d(dst.gp(), src.fp(), kScratchDoubleReg, no_reg);
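The saturating variants never trap: NaN and anything at or below zero collapse to 0 up front (the mov of zero_reg plus the ULE compare), and the truncation clamps at the top of the range. Equivalent semantics in a short C++ sketch:

#include <cmath>
#include <cstdint>

// Sketch of i64.trunc_sat_f64_u as emitted above: NaN and non-positive
// inputs give 0; values of 2^64 or more saturate to UINT64_MAX.
uint64_t TruncSatF64ToU64(double src) {
  if (std::isnan(src) || src <= 0.0) return 0;           // CompareF64(ULE) path
  if (src >= 18446744073709551616.0) return UINT64_MAX;  // 2^64
  return static_cast<uint64_t>(src);                     // Trunc_ul_d
}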
@ -1831,11 +1831,11 @@ void LiftoffAssembler::emit_i64_signextend_i32(LiftoffRegister dst,
} }
void LiftoffAssembler::emit_jump(Label* label) { void LiftoffAssembler::emit_jump(Label* label) {
TurboAssembler::Branch(label); MacroAssembler::Branch(label);
} }
void LiftoffAssembler::emit_jump(Register target) { void LiftoffAssembler::emit_jump(Register target) {
TurboAssembler::Jump(target); MacroAssembler::Jump(target);
} }
void LiftoffAssembler::emit_cond_jump(Condition cond, Label* label, void LiftoffAssembler::emit_cond_jump(Condition cond, Label* label,
@ -1844,25 +1844,25 @@ void LiftoffAssembler::emit_cond_jump(Condition cond, Label* label,
const FreezeCacheState& frozen) { const FreezeCacheState& frozen) {
if (rhs == no_reg) { if (rhs == no_reg) {
DCHECK(kind == kI32 || kind == kI64); DCHECK(kind == kI32 || kind == kI64);
TurboAssembler::Branch(label, cond, lhs, Operand(zero_reg)); MacroAssembler::Branch(label, cond, lhs, Operand(zero_reg));
} else { } else {
DCHECK((kind == kI32 || kind == kI64) || DCHECK((kind == kI32 || kind == kI64) ||
(is_reference(kind) && (cond == kEqual || cond == kNotEqual))); (is_reference(kind) && (cond == kEqual || cond == kNotEqual)));
TurboAssembler::Branch(label, cond, lhs, Operand(rhs)); MacroAssembler::Branch(label, cond, lhs, Operand(rhs));
} }
} }
void LiftoffAssembler::emit_i32_cond_jumpi(Condition cond, Label* label, void LiftoffAssembler::emit_i32_cond_jumpi(Condition cond, Label* label,
Register lhs, int32_t imm, Register lhs, int32_t imm,
const FreezeCacheState& frozen) { const FreezeCacheState& frozen) {
TurboAssembler::Branch(label, cond, lhs, Operand(imm)); MacroAssembler::Branch(label, cond, lhs, Operand(imm));
} }
void LiftoffAssembler::emit_i32_subi_jump_negative( void LiftoffAssembler::emit_i32_subi_jump_negative(
Register value, int subtrahend, Label* result_negative, Register value, int subtrahend, Label* result_negative,
const FreezeCacheState& frozen) { const FreezeCacheState& frozen) {
TurboAssembler::Dsubu(value, value, Operand(subtrahend)); MacroAssembler::Dsubu(value, value, Operand(subtrahend));
TurboAssembler::Branch(result_negative, less, value, Operand(zero_reg)); MacroAssembler::Branch(result_negative, less, value, Operand(zero_reg));
} }
void LiftoffAssembler::emit_i32_eqz(Register dst, Register src) { void LiftoffAssembler::emit_i32_eqz(Register dst, Register src) {
@ -1876,14 +1876,14 @@ void LiftoffAssembler::emit_i32_set_cond(Condition cond, Register dst,
tmp = GetUnusedRegister(kGpReg, LiftoffRegList{lhs, rhs}).gp(); tmp = GetUnusedRegister(kGpReg, LiftoffRegList{lhs, rhs}).gp();
} }
// Write 1 as result. // Write 1 as result.
TurboAssembler::li(tmp, 1); MacroAssembler::li(tmp, 1);
// If negative condition is true, write 0 as result. // If negative condition is true, write 0 as result.
Condition neg_cond = NegateCondition(cond); Condition neg_cond = NegateCondition(cond);
TurboAssembler::LoadZeroOnCondition(tmp, lhs, Operand(rhs), neg_cond); MacroAssembler::LoadZeroOnCondition(tmp, lhs, Operand(rhs), neg_cond);
// If tmp != dst, result will be moved. // If tmp != dst, result will be moved.
TurboAssembler::Move(dst, tmp); MacroAssembler::Move(dst, tmp);
} }
void LiftoffAssembler::emit_i64_eqz(Register dst, LiftoffRegister src) { void LiftoffAssembler::emit_i64_eqz(Register dst, LiftoffRegister src) {
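emit_i32_set_cond materializes the boolean without a branch: load 1, conditionally zero it when the negated condition holds, then move, so dst ends up 1 exactly when lhs cond rhs. Written out in C++ for one concrete condition (illustrative):

#include <cstdint>

// Set-condition sketch for cond == less-than; the emitted code uses a
// conditional zeroing (LoadZeroOnCondition) where this uses an if.
int32_t SetCondLess(int32_t lhs, int32_t rhs) {
  int32_t tmp = 1;            // MacroAssembler::li(tmp, 1)
  if (!(lhs < rhs)) tmp = 0;  // zero on the negated condition
  return tmp;                 // MacroAssembler::Move(dst, tmp)
}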
@ -1898,15 +1898,15 @@ void LiftoffAssembler::emit_i64_set_cond(Condition cond, Register dst,
tmp = GetUnusedRegister(kGpReg, LiftoffRegList{lhs, rhs}).gp(); tmp = GetUnusedRegister(kGpReg, LiftoffRegList{lhs, rhs}).gp();
} }
// Write 1 as result. // Write 1 as result.
TurboAssembler::li(tmp, 1); MacroAssembler::li(tmp, 1);
// If negative condition is true, write 0 as result. // If negative condition is true, write 0 as result.
Condition neg_cond = NegateCondition(cond); Condition neg_cond = NegateCondition(cond);
TurboAssembler::LoadZeroOnCondition(tmp, lhs.gp(), Operand(rhs.gp()), MacroAssembler::LoadZeroOnCondition(tmp, lhs.gp(), Operand(rhs.gp()),
neg_cond); neg_cond);
// If tmp != dst, result will be moved. // If tmp != dst, result will be moved.
TurboAssembler::Move(dst, tmp); MacroAssembler::Move(dst, tmp);
} }
namespace liftoff { namespace liftoff {
@ -1965,26 +1965,26 @@ void LiftoffAssembler::emit_f32_set_cond(Condition cond, Register dst,
DoubleRegister lhs, DoubleRegister lhs,
DoubleRegister rhs) { DoubleRegister rhs) {
Label not_nan, cont; Label not_nan, cont;
TurboAssembler::CompareIsNanF32(lhs, rhs); MacroAssembler::CompareIsNanF32(lhs, rhs);
TurboAssembler::BranchFalseF(&not_nan); MacroAssembler::BranchFalseF(&not_nan);
// If one of the operands is NaN, return 1 for f32.ne, else 0. // If one of the operands is NaN, return 1 for f32.ne, else 0.
if (cond == ne) { if (cond == ne) {
TurboAssembler::li(dst, 1); MacroAssembler::li(dst, 1);
} else { } else {
TurboAssembler::Move(dst, zero_reg); MacroAssembler::Move(dst, zero_reg);
} }
TurboAssembler::Branch(&cont); MacroAssembler::Branch(&cont);
bind(&not_nan); bind(&not_nan);
TurboAssembler::li(dst, 1); MacroAssembler::li(dst, 1);
bool predicate; bool predicate;
FPUCondition fcond = liftoff::ConditionToConditionCmpFPU(cond, &predicate); FPUCondition fcond = liftoff::ConditionToConditionCmpFPU(cond, &predicate);
TurboAssembler::CompareF32(fcond, lhs, rhs); MacroAssembler::CompareF32(fcond, lhs, rhs);
if (predicate) { if (predicate) {
TurboAssembler::LoadZeroIfNotFPUCondition(dst); MacroAssembler::LoadZeroIfNotFPUCondition(dst);
} else { } else {
TurboAssembler::LoadZeroIfFPUCondition(dst); MacroAssembler::LoadZeroIfFPUCondition(dst);
} }
bind(&cont); bind(&cont);
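The NaN prologue encodes wasm's comparison rules: a comparison with a NaN operand is false, so f32.ne (being the negation of eq) must produce 1 while every other predicate produces 0, and only then does the ordinary FPU compare run. As a compact sketch (the Cond enum is illustrative):

#include <cmath>

enum class Cond { kEq, kNe, kLt, kLe, kGt, kGe };

// Wasm float comparison sketch: NaN makes every predicate false except ne.
bool WasmF32Compare(Cond cond, float lhs, float rhs) {
  if (std::isnan(lhs) || std::isnan(rhs)) return cond == Cond::kNe;
  switch (cond) {
    case Cond::kEq: return lhs == rhs;
    case Cond::kNe: return lhs != rhs;
    case Cond::kLt: return lhs < rhs;
    case Cond::kLe: return lhs <= rhs;
    case Cond::kGt: return lhs > rhs;
    case Cond::kGe: return lhs >= rhs;
  }
  return false;  // unreachable
}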
@ -1994,26 +1994,26 @@ void LiftoffAssembler::emit_f64_set_cond(Condition cond, Register dst,
DoubleRegister lhs, DoubleRegister lhs,
DoubleRegister rhs) { DoubleRegister rhs) {
Label not_nan, cont; Label not_nan, cont;
TurboAssembler::CompareIsNanF64(lhs, rhs); MacroAssembler::CompareIsNanF64(lhs, rhs);
TurboAssembler::BranchFalseF(&not_nan); MacroAssembler::BranchFalseF(&not_nan);
// If one of the operands is NaN, return 1 for f64.ne, else 0. // If one of the operands is NaN, return 1 for f64.ne, else 0.
if (cond == ne) { if (cond == ne) {
TurboAssembler::li(dst, 1); MacroAssembler::li(dst, 1);
} else { } else {
TurboAssembler::Move(dst, zero_reg); MacroAssembler::Move(dst, zero_reg);
} }
TurboAssembler::Branch(&cont); MacroAssembler::Branch(&cont);
bind(&not_nan); bind(&not_nan);
TurboAssembler::li(dst, 1); MacroAssembler::li(dst, 1);
bool predicate; bool predicate;
FPUCondition fcond = liftoff::ConditionToConditionCmpFPU(cond, &predicate); FPUCondition fcond = liftoff::ConditionToConditionCmpFPU(cond, &predicate);
TurboAssembler::CompareF64(fcond, lhs, rhs); MacroAssembler::CompareF64(fcond, lhs, rhs);
if (predicate) { if (predicate) {
TurboAssembler::LoadZeroIfNotFPUCondition(dst); MacroAssembler::LoadZeroIfNotFPUCondition(dst);
} else { } else {
TurboAssembler::LoadZeroIfFPUCondition(dst); MacroAssembler::LoadZeroIfFPUCondition(dst);
} }
bind(&cont); bind(&cont);
@ -2111,7 +2111,7 @@ void LiftoffAssembler::LoadLane(LiftoffRegister dst, LiftoffRegister src,
MemOperand src_op = liftoff::GetMemOp(this, addr, offset_reg, offset_imm); MemOperand src_op = liftoff::GetMemOp(this, addr, offset_reg, offset_imm);
*protected_load_pc = pc_offset(); *protected_load_pc = pc_offset();
LoadStoreLaneParams load_params(type.mem_type().representation(), laneidx); LoadStoreLaneParams load_params(type.mem_type().representation(), laneidx);
TurboAssembler::LoadLane(load_params.sz, dst.fp().toW(), laneidx, src_op); MacroAssembler::LoadLane(load_params.sz, dst.fp().toW(), laneidx, src_op);
} }
void LiftoffAssembler::StoreLane(Register dst, Register offset, void LiftoffAssembler::StoreLane(Register dst, Register offset,
@ -2121,7 +2121,7 @@ void LiftoffAssembler::StoreLane(Register dst, Register offset,
MemOperand dst_op = liftoff::GetMemOp(this, dst, offset, offset_imm); MemOperand dst_op = liftoff::GetMemOp(this, dst, offset, offset_imm);
if (protected_store_pc) *protected_store_pc = pc_offset(); if (protected_store_pc) *protected_store_pc = pc_offset();
LoadStoreLaneParams store_params(type.mem_rep(), lane); LoadStoreLaneParams store_params(type.mem_rep(), lane);
TurboAssembler::StoreLane(store_params.sz, src.fp().toW(), lane, dst_op); MacroAssembler::StoreLane(store_params.sz, src.fp().toW(), lane, dst_op);
} }
void LiftoffAssembler::emit_i8x16_shuffle(LiftoffRegister dst, void LiftoffAssembler::emit_i8x16_shuffle(LiftoffRegister dst,
@ -2228,25 +2228,25 @@ void LiftoffAssembler::emit_i64x2_splat(LiftoffRegister dst,
void LiftoffAssembler::emit_f32x4_splat(LiftoffRegister dst, void LiftoffAssembler::emit_f32x4_splat(LiftoffRegister dst,
LiftoffRegister src) { LiftoffRegister src) {
TurboAssembler::FmoveLow(kScratchReg, src.fp()); MacroAssembler::FmoveLow(kScratchReg, src.fp());
fill_w(dst.fp().toW(), kScratchReg); fill_w(dst.fp().toW(), kScratchReg);
} }
void LiftoffAssembler::emit_f64x2_splat(LiftoffRegister dst, void LiftoffAssembler::emit_f64x2_splat(LiftoffRegister dst,
LiftoffRegister src) { LiftoffRegister src) {
TurboAssembler::Move(kScratchReg, src.fp()); MacroAssembler::Move(kScratchReg, src.fp());
fill_d(dst.fp().toW(), kScratchReg); fill_d(dst.fp().toW(), kScratchReg);
} }
#define SIMD_BINOP(name1, name2, type) \ #define SIMD_BINOP(name1, name2, type) \
void LiftoffAssembler::emit_##name1##_extmul_low_##name2( \ void LiftoffAssembler::emit_##name1##_extmul_low_##name2( \
LiftoffRegister dst, LiftoffRegister src1, LiftoffRegister src2) { \ LiftoffRegister dst, LiftoffRegister src1, LiftoffRegister src2) { \
TurboAssembler::ExtMulLow(type, dst.fp().toW(), src1.fp().toW(), \ MacroAssembler::ExtMulLow(type, dst.fp().toW(), src1.fp().toW(), \
src2.fp().toW()); \ src2.fp().toW()); \
} \ } \
void LiftoffAssembler::emit_##name1##_extmul_high_##name2( \ void LiftoffAssembler::emit_##name1##_extmul_high_##name2( \
LiftoffRegister dst, LiftoffRegister src1, LiftoffRegister src2) { \ LiftoffRegister dst, LiftoffRegister src1, LiftoffRegister src2) { \
TurboAssembler::ExtMulHigh(type, dst.fp().toW(), src1.fp().toW(), \ MacroAssembler::ExtMulHigh(type, dst.fp().toW(), src1.fp().toW(), \
src2.fp().toW()); \ src2.fp().toW()); \
} }
@ -2264,7 +2264,7 @@ SIMD_BINOP(i64x2, i32x4_u, MSAU32)
#define SIMD_BINOP(name1, name2, type) \ #define SIMD_BINOP(name1, name2, type) \
void LiftoffAssembler::emit_##name1##_extadd_pairwise_##name2( \ void LiftoffAssembler::emit_##name1##_extadd_pairwise_##name2( \
LiftoffRegister dst, LiftoffRegister src) { \ LiftoffRegister dst, LiftoffRegister src) { \
TurboAssembler::ExtAddPairwise(type, dst.fp().toW(), src.fp().toW()); \ MacroAssembler::ExtAddPairwise(type, dst.fp().toW(), src.fp().toW()); \
} }
SIMD_BINOP(i16x8, i8x16_s, MSAS8) SIMD_BINOP(i16x8, i8x16_s, MSAS8)
@ -3455,14 +3455,14 @@ void LiftoffAssembler::emit_f32x4_extract_lane(LiftoffRegister dst,
LiftoffRegister lhs, LiftoffRegister lhs,
uint8_t imm_lane_idx) { uint8_t imm_lane_idx) {
copy_u_w(kScratchReg, lhs.fp().toW(), imm_lane_idx); copy_u_w(kScratchReg, lhs.fp().toW(), imm_lane_idx);
TurboAssembler::FmoveLow(dst.fp(), kScratchReg); MacroAssembler::FmoveLow(dst.fp(), kScratchReg);
} }
void LiftoffAssembler::emit_f64x2_extract_lane(LiftoffRegister dst, void LiftoffAssembler::emit_f64x2_extract_lane(LiftoffRegister dst,
LiftoffRegister lhs, LiftoffRegister lhs,
uint8_t imm_lane_idx) { uint8_t imm_lane_idx) {
copy_s_d(kScratchReg, lhs.fp().toW(), imm_lane_idx); copy_s_d(kScratchReg, lhs.fp().toW(), imm_lane_idx);
TurboAssembler::Move(dst.fp(), kScratchReg); MacroAssembler::Move(dst.fp(), kScratchReg);
} }
void LiftoffAssembler::emit_i8x16_replace_lane(LiftoffRegister dst, void LiftoffAssembler::emit_i8x16_replace_lane(LiftoffRegister dst,
@ -3509,7 +3509,7 @@ void LiftoffAssembler::emit_f32x4_replace_lane(LiftoffRegister dst,
LiftoffRegister src1, LiftoffRegister src1,
LiftoffRegister src2, LiftoffRegister src2,
uint8_t imm_lane_idx) { uint8_t imm_lane_idx) {
TurboAssembler::FmoveLow(kScratchReg, src2.fp()); MacroAssembler::FmoveLow(kScratchReg, src2.fp());
if (dst != src1) { if (dst != src1) {
move_v(dst.fp().toW(), src1.fp().toW()); move_v(dst.fp().toW(), src1.fp().toW());
} }
@ -3520,7 +3520,7 @@ void LiftoffAssembler::emit_f64x2_replace_lane(LiftoffRegister dst,
LiftoffRegister src1, LiftoffRegister src1,
LiftoffRegister src2, LiftoffRegister src2,
uint8_t imm_lane_idx) { uint8_t imm_lane_idx) {
TurboAssembler::Move(kScratchReg, src2.fp()); MacroAssembler::Move(kScratchReg, src2.fp());
if (dst != src1) { if (dst != src1) {
move_v(dst.fp().toW(), src1.fp().toW()); move_v(dst.fp().toW(), src1.fp().toW());
} }
@ -3556,8 +3556,8 @@ void LiftoffAssembler::emit_f64x2_qfms(LiftoffRegister dst,
} }
void LiftoffAssembler::StackCheck(Label* ool_code, Register limit_address) { void LiftoffAssembler::StackCheck(Label* ool_code, Register limit_address) {
TurboAssembler::Uld(limit_address, MemOperand(limit_address)); MacroAssembler::Uld(limit_address, MemOperand(limit_address));
TurboAssembler::Branch(ool_code, ule, sp, Operand(limit_address)); MacroAssembler::Branch(ool_code, ule, sp, Operand(limit_address));
} }
void LiftoffAssembler::CallTrapCallbackForTesting() { void LiftoffAssembler::CallTrapCallbackForTesting() {
@ -3592,9 +3592,9 @@ void LiftoffAssembler::PushRegisters(LiftoffRegList regs) {
while (!fp_regs.is_empty()) { while (!fp_regs.is_empty()) {
LiftoffRegister reg = fp_regs.GetFirstRegSet(); LiftoffRegister reg = fp_regs.GetFirstRegSet();
if (IsEnabled(MIPS_SIMD)) { if (IsEnabled(MIPS_SIMD)) {
TurboAssembler::st_d(reg.fp().toW(), MemOperand(sp, offset)); MacroAssembler::st_d(reg.fp().toW(), MemOperand(sp, offset));
} else { } else {
TurboAssembler::Sdc1(reg.fp(), MemOperand(sp, offset)); MacroAssembler::Sdc1(reg.fp(), MemOperand(sp, offset));
} }
fp_regs.clear(reg); fp_regs.clear(reg);
offset += slot_size; offset += slot_size;
@ -3609,9 +3609,9 @@ void LiftoffAssembler::PopRegisters(LiftoffRegList regs) {
while (!fp_regs.is_empty()) { while (!fp_regs.is_empty()) {
LiftoffRegister reg = fp_regs.GetFirstRegSet(); LiftoffRegister reg = fp_regs.GetFirstRegSet();
if (IsEnabled(MIPS_SIMD)) { if (IsEnabled(MIPS_SIMD)) {
TurboAssembler::ld_d(reg.fp().toW(), MemOperand(sp, fp_offset)); MacroAssembler::ld_d(reg.fp().toW(), MemOperand(sp, fp_offset));
} else { } else {
TurboAssembler::Ldc1(reg.fp(), MemOperand(sp, fp_offset)); MacroAssembler::Ldc1(reg.fp(), MemOperand(sp, fp_offset));
} }
fp_regs.clear(reg); fp_regs.clear(reg);
fp_offset += (IsEnabled(MIPS_SIMD) ? 16 : 8); fp_offset += (IsEnabled(MIPS_SIMD) ? 16 : 8);
@ -3648,7 +3648,7 @@ void LiftoffAssembler::RecordSpillsInSafepoint(
void LiftoffAssembler::DropStackSlotsAndRet(uint32_t num_stack_slots) { void LiftoffAssembler::DropStackSlotsAndRet(uint32_t num_stack_slots) {
DCHECK_LT(num_stack_slots, DCHECK_LT(num_stack_slots,
(1 << 16) / kSystemPointerSize); // 16 bit immediate (1 << 16) / kSystemPointerSize); // 16 bit immediate
TurboAssembler::DropAndRet(static_cast<int>(num_stack_slots)); MacroAssembler::DropAndRet(static_cast<int>(num_stack_slots));
} }
void LiftoffAssembler::CallC(const ValueKindSig* sig, void LiftoffAssembler::CallC(const ValueKindSig* sig,
@ -3730,7 +3730,7 @@ void LiftoffAssembler::CallRuntimeStub(WasmCode::RuntimeStubId sid) {
void LiftoffAssembler::AllocateStackSlot(Register addr, uint32_t size) { void LiftoffAssembler::AllocateStackSlot(Register addr, uint32_t size) {
Daddu(sp, sp, -size); Daddu(sp, sp, -size);
TurboAssembler::Move(addr, sp); MacroAssembler::Move(addr, sp);
} }
void LiftoffAssembler::DeallocateStackSlot(uint32_t size) { void LiftoffAssembler::DeallocateStackSlot(uint32_t size) {


@ -197,7 +197,7 @@ void LiftoffAssembler::PatchPrepareStackFrame(
bind(&continuation); bind(&continuation);
// Now allocate the stack space. Note that this might do more than just // Now allocate the stack space. Note that this might do more than just
// decrementing the SP; consult {TurboAssembler::AllocateStackSpace}. // decrementing the SP; consult {MacroAssembler::AllocateStackSpace}.
SubS64(sp, sp, Operand(frame_size), r0); SubS64(sp, sp, Operand(frame_size), r0);
// Jump back to the start of the function, from {pc_offset()} to // Jump back to the start of the function, from {pc_offset()} to
@ -692,7 +692,7 @@ void LiftoffAssembler::AtomicExchange(Register dst_addr, Register offset_reg,
switch (type.value()) { switch (type.value()) {
case StoreType::kI32Store8: case StoreType::kI32Store8:
case StoreType::kI64Store8: { case StoreType::kI64Store8: {
TurboAssembler::AtomicExchange<uint8_t>(dst, value.gp(), result.gp()); MacroAssembler::AtomicExchange<uint8_t>(dst, value.gp(), result.gp());
break; break;
} }
case StoreType::kI32Store16: case StoreType::kI32Store16:
@ -702,10 +702,10 @@ void LiftoffAssembler::AtomicExchange(Register dst_addr, Register offset_reg,
push(scratch); push(scratch);
ByteReverseU16(r0, value.gp(), scratch); ByteReverseU16(r0, value.gp(), scratch);
pop(scratch); pop(scratch);
TurboAssembler::AtomicExchange<uint16_t>(dst, r0, result.gp()); MacroAssembler::AtomicExchange<uint16_t>(dst, r0, result.gp());
ByteReverseU16(result.gp(), result.gp(), ip); ByteReverseU16(result.gp(), result.gp(), ip);
} else { } else {
TurboAssembler::AtomicExchange<uint16_t>(dst, value.gp(), result.gp()); MacroAssembler::AtomicExchange<uint16_t>(dst, value.gp(), result.gp());
} }
break; break;
} }
@ -716,20 +716,20 @@ void LiftoffAssembler::AtomicExchange(Register dst_addr, Register offset_reg,
push(scratch); push(scratch);
ByteReverseU32(r0, value.gp(), scratch); ByteReverseU32(r0, value.gp(), scratch);
pop(scratch); pop(scratch);
TurboAssembler::AtomicExchange<uint32_t>(dst, r0, result.gp()); MacroAssembler::AtomicExchange<uint32_t>(dst, r0, result.gp());
ByteReverseU32(result.gp(), result.gp(), ip); ByteReverseU32(result.gp(), result.gp(), ip);
} else { } else {
TurboAssembler::AtomicExchange<uint32_t>(dst, value.gp(), result.gp()); MacroAssembler::AtomicExchange<uint32_t>(dst, value.gp(), result.gp());
} }
break; break;
} }
case StoreType::kI64Store: { case StoreType::kI64Store: {
if (is_be) { if (is_be) {
ByteReverseU64(r0, value.gp()); ByteReverseU64(r0, value.gp());
TurboAssembler::AtomicExchange<uint64_t>(dst, r0, result.gp()); MacroAssembler::AtomicExchange<uint64_t>(dst, r0, result.gp());
ByteReverseU64(result.gp(), result.gp()); ByteReverseU64(result.gp(), result.gp());
} else { } else {
TurboAssembler::AtomicExchange<uint64_t>(dst, value.gp(), result.gp()); MacroAssembler::AtomicExchange<uint64_t>(dst, value.gp(), result.gp());
} }
break; break;
} }
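On big-endian PPC the value is byte-reversed before the exchange and the loaded result reversed after it, so the in-memory image keeps wasm's little-endian layout. The wrapping pattern, sketched with a standard atomic standing in for the emitted sequence (__builtin_bswap32 is a GCC/Clang builtin):

#include <atomic>
#include <cstdint>

// Sketch of the is_be path above: reverse in, exchange, reverse the old
// value that comes back out.
uint32_t AtomicExchangeLE(std::atomic<uint32_t>* slot, uint32_t value,
                          bool is_big_endian) {
  if (is_big_endian) value = __builtin_bswap32(value);  // ByteReverseU32 (in)
  uint32_t prev = slot->exchange(value);                // AtomicExchange<uint32_t>
  if (is_big_endian) prev = __builtin_bswap32(prev);    // ByteReverseU32 (out)
  return prev;
}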
@ -760,7 +760,7 @@ void LiftoffAssembler::AtomicCompareExchange(
switch (type.value()) { switch (type.value()) {
case StoreType::kI32Store8: case StoreType::kI32Store8:
case StoreType::kI64Store8: { case StoreType::kI64Store8: {
TurboAssembler::AtomicCompareExchange<uint8_t>( MacroAssembler::AtomicCompareExchange<uint8_t>(
dst, expected.gp(), new_value.gp(), result.gp(), r0); dst, expected.gp(), new_value.gp(), result.gp(), r0);
break; break;
} }
@ -774,12 +774,12 @@ void LiftoffAssembler::AtomicCompareExchange(
ByteReverseU16(new_value.gp(), new_value.gp(), scratch); ByteReverseU16(new_value.gp(), new_value.gp(), scratch);
ByteReverseU16(expected.gp(), expected.gp(), scratch); ByteReverseU16(expected.gp(), expected.gp(), scratch);
pop(scratch); pop(scratch);
TurboAssembler::AtomicCompareExchange<uint16_t>( MacroAssembler::AtomicCompareExchange<uint16_t>(
dst, expected.gp(), new_value.gp(), result.gp(), r0); dst, expected.gp(), new_value.gp(), result.gp(), r0);
ByteReverseU16(result.gp(), result.gp(), r0); ByteReverseU16(result.gp(), result.gp(), r0);
Pop(new_value.gp(), expected.gp()); Pop(new_value.gp(), expected.gp());
} else { } else {
TurboAssembler::AtomicCompareExchange<uint16_t>( MacroAssembler::AtomicCompareExchange<uint16_t>(
dst, expected.gp(), new_value.gp(), result.gp(), r0); dst, expected.gp(), new_value.gp(), result.gp(), r0);
} }
break; break;
@ -794,12 +794,12 @@ void LiftoffAssembler::AtomicCompareExchange(
ByteReverseU32(new_value.gp(), new_value.gp(), scratch); ByteReverseU32(new_value.gp(), new_value.gp(), scratch);
ByteReverseU32(expected.gp(), expected.gp(), scratch); ByteReverseU32(expected.gp(), expected.gp(), scratch);
pop(scratch); pop(scratch);
TurboAssembler::AtomicCompareExchange<uint32_t>( MacroAssembler::AtomicCompareExchange<uint32_t>(
dst, expected.gp(), new_value.gp(), result.gp(), r0); dst, expected.gp(), new_value.gp(), result.gp(), r0);
ByteReverseU32(result.gp(), result.gp(), r0); ByteReverseU32(result.gp(), result.gp(), r0);
Pop(new_value.gp(), expected.gp()); Pop(new_value.gp(), expected.gp());
} else { } else {
TurboAssembler::AtomicCompareExchange<uint32_t>( MacroAssembler::AtomicCompareExchange<uint32_t>(
dst, expected.gp(), new_value.gp(), result.gp(), r0); dst, expected.gp(), new_value.gp(), result.gp(), r0);
} }
break; break;
@ -809,12 +809,12 @@ void LiftoffAssembler::AtomicCompareExchange(
Push(new_value.gp(), expected.gp()); Push(new_value.gp(), expected.gp());
ByteReverseU64(new_value.gp(), new_value.gp()); ByteReverseU64(new_value.gp(), new_value.gp());
ByteReverseU64(expected.gp(), expected.gp()); ByteReverseU64(expected.gp(), expected.gp());
TurboAssembler::AtomicCompareExchange<uint64_t>( MacroAssembler::AtomicCompareExchange<uint64_t>(
dst, expected.gp(), new_value.gp(), result.gp(), r0); dst, expected.gp(), new_value.gp(), result.gp(), r0);
ByteReverseU64(result.gp(), result.gp()); ByteReverseU64(result.gp(), result.gp());
Pop(new_value.gp(), expected.gp()); Pop(new_value.gp(), expected.gp());
} else { } else {
TurboAssembler::AtomicCompareExchange<uint64_t>( MacroAssembler::AtomicCompareExchange<uint64_t>(
dst, expected.gp(), new_value.gp(), result.gp(), r0); dst, expected.gp(), new_value.gp(), result.gp(), r0);
} }
break; break;
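Compare-exchange needs that reversal on both inputs, which is why the hunks above push expected and new_value, reverse them in place, and pop the originals afterwards: the comparison must happen in memory byte order, and the observed result is reversed back before it reaches the caller. Reusing ByteReverseU32 from the previous sketch:

#include <atomic>
#include <cstdint>

uint32_t ByteReverseU32(uint32_t v);  // As defined in the sketch above.

uint32_t AtomicCompareExchangeLE(std::atomic<uint32_t>& slot,
                                 uint32_t expected, uint32_t new_value) {
  uint32_t e = ByteReverseU32(expected);
  slot.compare_exchange_strong(e, ByteReverseU32(new_value));
  return ByteReverseU32(e);  // Observed value, back in operand byte order.
}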

View File

@ -73,7 +73,7 @@ void LiftoffAssembler::PatchPrepareStackFrame(
// We can't run out of space, just pass anything big enough to not cause the // We can't run out of space, just pass anything big enough to not cause the
// assembler to try to grow the buffer. // assembler to try to grow the buffer.
constexpr int kAvailableSpace = 256; constexpr int kAvailableSpace = 256;
TurboAssembler patching_assembler( MacroAssembler patching_assembler(
nullptr, AssemblerOptions{}, CodeObjectRequired::kNo, nullptr, AssemblerOptions{}, CodeObjectRequired::kNo,
ExternalAssemblerBuffer(buffer_start_ + offset, kAvailableSpace)); ExternalAssemblerBuffer(buffer_start_ + offset, kAvailableSpace));
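The patching trick here is to construct a second assembler directly over code that was already emitted: ExternalAssemblerBuffer points it at buffer_start_ + offset, and kAvailableSpace only has to be generous enough that the assembler never attempts to grow (and thus reallocate) a buffer it does not own. A minimal model of an emitter with that contract (names are illustrative, not the V8 API):

#include <cassert>
#include <cstddef>
#include <cstdint>

// Writes into caller-owned memory and is not allowed to grow it.
class PatchEmitter {
 public:
  PatchEmitter(uint8_t* start, size_t capacity)
      : pc_(start), end_(start + capacity) {}
  void EmitByte(uint8_t b) {
    assert(pc_ < end_);  // No growth path: the capacity must always suffice.
    *pc_++ = b;
  }

 private:
  uint8_t* pc_;
  uint8_t* end_;
};

void PatchAt(uint8_t* buffer_start, size_t offset) {
  PatchEmitter patcher(buffer_start + offset, /*capacity=*/256);
  patcher.EmitByte(0x90);  // Overwrite previously emitted code in place.
}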
@ -206,21 +206,21 @@ void LiftoffAssembler::SpillInstance(Register instance) {
void LiftoffAssembler::ResetOSRTarget() {} void LiftoffAssembler::ResetOSRTarget() {}
void LiftoffAssembler::emit_f32_neg(DoubleRegister dst, DoubleRegister src) { void LiftoffAssembler::emit_f32_neg(DoubleRegister dst, DoubleRegister src) {
TurboAssembler::Neg_s(dst, src); MacroAssembler::Neg_s(dst, src);
} }
void LiftoffAssembler::emit_f64_neg(DoubleRegister dst, DoubleRegister src) { void LiftoffAssembler::emit_f64_neg(DoubleRegister dst, DoubleRegister src) {
TurboAssembler::Neg_d(dst, src); MacroAssembler::Neg_d(dst, src);
} }
void LiftoffAssembler::emit_f32_min(DoubleRegister dst, DoubleRegister lhs, void LiftoffAssembler::emit_f32_min(DoubleRegister dst, DoubleRegister lhs,
DoubleRegister rhs) { DoubleRegister rhs) {
TurboAssembler::Float32Min(dst, lhs, rhs); MacroAssembler::Float32Min(dst, lhs, rhs);
} }
void LiftoffAssembler::emit_f32_max(DoubleRegister dst, DoubleRegister lhs, void LiftoffAssembler::emit_f32_max(DoubleRegister dst, DoubleRegister lhs,
DoubleRegister rhs) { DoubleRegister rhs) {
TurboAssembler::Float32Max(dst, lhs, rhs); MacroAssembler::Float32Max(dst, lhs, rhs);
} }
void LiftoffAssembler::emit_f32_copysign(DoubleRegister dst, DoubleRegister lhs, void LiftoffAssembler::emit_f32_copysign(DoubleRegister dst, DoubleRegister lhs,
@ -230,12 +230,12 @@ void LiftoffAssembler::emit_f32_copysign(DoubleRegister dst, DoubleRegister lhs,
void LiftoffAssembler::emit_f64_min(DoubleRegister dst, DoubleRegister lhs, void LiftoffAssembler::emit_f64_min(DoubleRegister dst, DoubleRegister lhs,
DoubleRegister rhs) { DoubleRegister rhs) {
TurboAssembler::Float64Min(dst, lhs, rhs); MacroAssembler::Float64Min(dst, lhs, rhs);
} }
void LiftoffAssembler::emit_f64_max(DoubleRegister dst, DoubleRegister lhs, void LiftoffAssembler::emit_f64_max(DoubleRegister dst, DoubleRegister lhs,
DoubleRegister rhs) { DoubleRegister rhs) {
TurboAssembler::Float64Max(dst, lhs, rhs); MacroAssembler::Float64Max(dst, lhs, rhs);
} }
void LiftoffAssembler::emit_f64_copysign(DoubleRegister dst, DoubleRegister lhs, void LiftoffAssembler::emit_f64_copysign(DoubleRegister dst, DoubleRegister lhs,
@ -302,14 +302,14 @@ void LiftoffAssembler::emit_f32_set_cond(Condition cond, Register dst,
DoubleRegister lhs, DoubleRegister lhs,
DoubleRegister rhs) { DoubleRegister rhs) {
FPUCondition fcond = ConditionToConditionCmpFPU(cond); FPUCondition fcond = ConditionToConditionCmpFPU(cond);
TurboAssembler::CompareF32(dst, fcond, lhs, rhs); MacroAssembler::CompareF32(dst, fcond, lhs, rhs);
} }
void LiftoffAssembler::emit_f64_set_cond(Condition cond, Register dst, void LiftoffAssembler::emit_f64_set_cond(Condition cond, Register dst,
DoubleRegister lhs, DoubleRegister lhs,
DoubleRegister rhs) { DoubleRegister rhs) {
FPUCondition fcond = ConditionToConditionCmpFPU(cond); FPUCondition fcond = ConditionToConditionCmpFPU(cond);
TurboAssembler::CompareF64(dst, fcond, lhs, rhs); MacroAssembler::CompareF64(dst, fcond, lhs, rhs);
} }
bool LiftoffAssembler::emit_select(LiftoffRegister dst, Register condition, bool LiftoffAssembler::emit_select(LiftoffRegister dst, Register condition,
@ -2070,8 +2070,8 @@ void LiftoffAssembler::emit_f64x2_qfms(LiftoffRegister dst,
} }
void LiftoffAssembler::StackCheck(Label* ool_code, Register limit_address) { void LiftoffAssembler::StackCheck(Label* ool_code, Register limit_address) {
TurboAssembler::LoadWord(limit_address, MemOperand(limit_address)); MacroAssembler::LoadWord(limit_address, MemOperand(limit_address));
TurboAssembler::Branch(ool_code, ule, sp, Operand(limit_address)); MacroAssembler::Branch(ool_code, ule, sp, Operand(limit_address));
} }
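StackCheck reloads the limit through limit_address on every check rather than caching it, since the runtime can rewrite the stored limit (for example, to request an interrupt) and the next check must observe that. The check itself is an unsigned compare of SP against the loaded limit; roughly:

#include <atomic>
#include <cstdint>

// Simplified model: the limit cell is shared with the runtime, which may
// overwrite it while wasm code is running.
bool NeedsStackGuard(const std::atomic<uintptr_t>& limit_cell, uintptr_t sp) {
  return sp <= limit_cell.load(std::memory_order_relaxed);  // "ule" above.
}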
void LiftoffAssembler::CallTrapCallbackForTesting() { void LiftoffAssembler::CallTrapCallbackForTesting() {
@ -2104,7 +2104,7 @@ void LiftoffAssembler::PushRegisters(LiftoffRegList regs) {
int32_t offset = 0; int32_t offset = 0;
while (!fp_regs.is_empty()) { while (!fp_regs.is_empty()) {
LiftoffRegister reg = fp_regs.GetFirstRegSet(); LiftoffRegister reg = fp_regs.GetFirstRegSet();
TurboAssembler::StoreDouble(reg.fp(), MemOperand(sp, offset)); MacroAssembler::StoreDouble(reg.fp(), MemOperand(sp, offset));
fp_regs.clear(reg); fp_regs.clear(reg);
offset += sizeof(double); offset += sizeof(double);
} }
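The spill loop above treats the register list as a bit set: take the first register in the set, store it at the current offset, clear its bit, and advance by the slot size. The same loop shape over a plain mask (__builtin_ctz is a GCC/Clang builtin; the spill itself is stubbed out):

#include <cstdint>
#include <cstdio>

// Illustrative stand-in for storing one double register to the stack.
static void SpillDouble(int reg_code, int offset) {
  std::printf("store d%d -> [sp+%d]\n", reg_code, offset);
}

void PushFpRegisters(uint32_t fp_mask) {
  int offset = 0;
  while (fp_mask != 0) {
    int reg_code = __builtin_ctz(fp_mask);  // First (lowest) register set.
    SpillDouble(reg_code, offset);
    fp_mask &= fp_mask - 1;                 // Clear that register's bit.
    offset += sizeof(double);
  }
}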
@ -2117,7 +2117,7 @@ void LiftoffAssembler::PopRegisters(LiftoffRegList regs) {
int32_t fp_offset = 0; int32_t fp_offset = 0;
while (!fp_regs.is_empty()) { while (!fp_regs.is_empty()) {
LiftoffRegister reg = fp_regs.GetFirstRegSet(); LiftoffRegister reg = fp_regs.GetFirstRegSet();
TurboAssembler::LoadDouble(reg.fp(), MemOperand(sp, fp_offset)); MacroAssembler::LoadDouble(reg.fp(), MemOperand(sp, fp_offset));
fp_regs.clear(reg); fp_regs.clear(reg);
fp_offset += sizeof(double); fp_offset += sizeof(double);
} }
@ -2151,7 +2151,7 @@ void LiftoffAssembler::RecordSpillsInSafepoint(
} }
void LiftoffAssembler::DropStackSlotsAndRet(uint32_t num_stack_slots) { void LiftoffAssembler::DropStackSlotsAndRet(uint32_t num_stack_slots) {
TurboAssembler::DropAndRet(static_cast<int>(num_stack_slots)); MacroAssembler::DropAndRet(static_cast<int>(num_stack_slots));
} }
void LiftoffAssembler::CallNativeWasmCode(Address addr) { void LiftoffAssembler::CallNativeWasmCode(Address addr) {
@ -2190,7 +2190,7 @@ void LiftoffAssembler::CallRuntimeStub(WasmCode::RuntimeStubId sid) {
void LiftoffAssembler::AllocateStackSlot(Register addr, uint32_t size) { void LiftoffAssembler::AllocateStackSlot(Register addr, uint32_t size) {
AddWord(sp, sp, Operand(-size)); AddWord(sp, sp, Operand(-size));
TurboAssembler::Move(addr, sp); MacroAssembler::Move(addr, sp);
} }
void LiftoffAssembler::DeallocateStackSlot(uint32_t size) { void LiftoffAssembler::DeallocateStackSlot(uint32_t size) {

View File

@ -178,22 +178,22 @@ void LiftoffAssembler::LoadConstant(LiftoffRegister reg, WasmValue value,
RelocInfo::Mode rmode) { RelocInfo::Mode rmode) {
switch (value.type().kind()) { switch (value.type().kind()) {
case kI32: case kI32:
TurboAssembler::li(reg.gp(), Operand(value.to_i32(), rmode)); MacroAssembler::li(reg.gp(), Operand(value.to_i32(), rmode));
break; break;
case kI64: { case kI64: {
DCHECK(RelocInfo::IsNoInfo(rmode)); DCHECK(RelocInfo::IsNoInfo(rmode));
int32_t low_word = value.to_i64(); int32_t low_word = value.to_i64();
int32_t high_word = value.to_i64() >> 32; int32_t high_word = value.to_i64() >> 32;
TurboAssembler::li(reg.low_gp(), Operand(low_word)); MacroAssembler::li(reg.low_gp(), Operand(low_word));
TurboAssembler::li(reg.high_gp(), Operand(high_word)); MacroAssembler::li(reg.high_gp(), Operand(high_word));
break; break;
} }
case kF32: case kF32:
TurboAssembler::LoadFPRImmediate(reg.fp(), MacroAssembler::LoadFPRImmediate(reg.fp(),
value.to_f32_boxed().get_bits()); value.to_f32_boxed().get_bits());
break; break;
case kF64: case kF64:
TurboAssembler::LoadFPRImmediate(reg.fp(), MacroAssembler::LoadFPRImmediate(reg.fp(),
value.to_f64_boxed().get_bits()); value.to_f64_boxed().get_bits());
break; break;
default: default:
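The kI64 case above materializes a 64-bit constant into a register pair by splitting it at bit 32: the truncation keeps the low word, and the shift keeps the high word. The same arithmetic in isolation:

#include <cstdint>

// 0x1122334455667788 splits into low 0x55667788 and high 0x11223344.
void SplitI64(int64_t value, int32_t* low_word, int32_t* high_word) {
  *low_word = static_cast<int32_t>(value);         // Low 32 bits.
  *high_word = static_cast<int32_t>(value >> 32);  // High 32 bits.
}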
@ -262,39 +262,39 @@ void LiftoffAssembler::Load(LiftoffRegister dst, Register src_addr,
break; break;
case LoadType::kI64Load8U: case LoadType::kI64Load8U:
Lbu(dst.low_gp(), src_op); Lbu(dst.low_gp(), src_op);
TurboAssembler::mv(dst.high_gp(), zero_reg); MacroAssembler::mv(dst.high_gp(), zero_reg);
break; break;
case LoadType::kI32Load8S: case LoadType::kI32Load8S:
Lb(dst.gp(), src_op); Lb(dst.gp(), src_op);
break; break;
case LoadType::kI64Load8S: case LoadType::kI64Load8S:
Lb(dst.low_gp(), src_op); Lb(dst.low_gp(), src_op);
TurboAssembler::srai(dst.high_gp(), dst.low_gp(), 31); MacroAssembler::srai(dst.high_gp(), dst.low_gp(), 31);
break; break;
case LoadType::kI32Load16U: case LoadType::kI32Load16U:
TurboAssembler::Lhu(dst.gp(), src_op); MacroAssembler::Lhu(dst.gp(), src_op);
break; break;
case LoadType::kI64Load16U: case LoadType::kI64Load16U:
TurboAssembler::Lhu(dst.low_gp(), src_op); MacroAssembler::Lhu(dst.low_gp(), src_op);
TurboAssembler::mv(dst.high_gp(), zero_reg); MacroAssembler::mv(dst.high_gp(), zero_reg);
break; break;
case LoadType::kI32Load16S: case LoadType::kI32Load16S:
TurboAssembler::Lh(dst.gp(), src_op); MacroAssembler::Lh(dst.gp(), src_op);
break; break;
case LoadType::kI64Load16S: case LoadType::kI64Load16S:
TurboAssembler::Lh(dst.low_gp(), src_op); MacroAssembler::Lh(dst.low_gp(), src_op);
TurboAssembler::srai(dst.high_gp(), dst.low_gp(), 31); MacroAssembler::srai(dst.high_gp(), dst.low_gp(), 31);
break; break;
case LoadType::kI64Load32U: case LoadType::kI64Load32U:
TurboAssembler::Lw(dst.low_gp(), src_op); MacroAssembler::Lw(dst.low_gp(), src_op);
TurboAssembler::mv(dst.high_gp(), zero_reg); MacroAssembler::mv(dst.high_gp(), zero_reg);
break; break;
case LoadType::kI64Load32S: case LoadType::kI64Load32S:
TurboAssembler::Lw(dst.low_gp(), src_op); MacroAssembler::Lw(dst.low_gp(), src_op);
TurboAssembler::srai(dst.high_gp(), dst.low_gp(), 31); MacroAssembler::srai(dst.high_gp(), dst.low_gp(), 31);
break; break;
case LoadType::kI32Load: case LoadType::kI32Load:
TurboAssembler::Lw(dst.gp(), src_op); MacroAssembler::Lw(dst.gp(), src_op);
break; break;
case LoadType::kI64Load: { case LoadType::kI64Load: {
Lw(dst.low_gp(), src_op); Lw(dst.low_gp(), src_op);
@ -303,16 +303,16 @@ void LiftoffAssembler::Load(LiftoffRegister dst, Register src_addr,
Lw(dst.high_gp(), src_op); Lw(dst.high_gp(), src_op);
} break; } break;
case LoadType::kF32Load: case LoadType::kF32Load:
TurboAssembler::LoadFloat(dst.fp(), src_op); MacroAssembler::LoadFloat(dst.fp(), src_op);
break; break;
case LoadType::kF64Load: case LoadType::kF64Load:
TurboAssembler::LoadDouble(dst.fp(), src_op); MacroAssembler::LoadDouble(dst.fp(), src_op);
break; break;
case LoadType::kS128Load: { case LoadType::kS128Load: {
VU.set(kScratchReg, E8, m1); VU.set(kScratchReg, E8, m1);
Register src_reg = src_op.offset() == 0 ? src_op.rm() : kScratchReg; Register src_reg = src_op.offset() == 0 ? src_op.rm() : kScratchReg;
if (src_op.offset() != 0) { if (src_op.offset() != 0) {
TurboAssembler::AddWord(src_reg, src_op.rm(), src_op.offset()); MacroAssembler::AddWord(src_reg, src_op.rm(), src_op.offset());
} }
vl(dst.fp().toV(), src_reg, 0, E8); vl(dst.fp().toV(), src_reg, 0, E8);
break; break;
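The kS128Load case differs from the scalar loads because the vector load vl takes a bare address register with no immediate offset, so a nonzero offset has to be folded into the scratch register first; when the offset is zero the base register is used directly. The shape of that address selection:

#include <cstddef>
#include <cstdint>

// Reuse the base "register" when there is no offset; otherwise materialize
// base + offset into scratch and load through that.
uintptr_t AddressForVectorLoad(uintptr_t base, ptrdiff_t offset,
                               uintptr_t* scratch) {
  if (offset == 0) return base;
  *scratch = base + static_cast<uintptr_t>(offset);
  return *scratch;
}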
@ -362,29 +362,29 @@ void LiftoffAssembler::Store(Register dst_addr, Register offset_reg,
Sb(src.low_gp(), dst_op); Sb(src.low_gp(), dst_op);
break; break;
case StoreType::kI32Store16: case StoreType::kI32Store16:
TurboAssembler::Sh(src.gp(), dst_op); MacroAssembler::Sh(src.gp(), dst_op);
break; break;
case StoreType::kI64Store16: case StoreType::kI64Store16:
TurboAssembler::Sh(src.low_gp(), dst_op); MacroAssembler::Sh(src.low_gp(), dst_op);
break; break;
case StoreType::kI32Store: case StoreType::kI32Store:
TurboAssembler::Sw(src.gp(), dst_op); MacroAssembler::Sw(src.gp(), dst_op);
break; break;
case StoreType::kI64Store32: case StoreType::kI64Store32:
TurboAssembler::Sw(src.low_gp(), dst_op); MacroAssembler::Sw(src.low_gp(), dst_op);
break; break;
case StoreType::kI64Store: { case StoreType::kI64Store: {
TurboAssembler::Sw(src.low_gp(), dst_op); MacroAssembler::Sw(src.low_gp(), dst_op);
dst_op = liftoff::GetMemOp(this, dst_addr, offset_reg, dst_op = liftoff::GetMemOp(this, dst_addr, offset_reg,
offset_imm + kSystemPointerSize, scratch); offset_imm + kSystemPointerSize, scratch);
TurboAssembler::Sw(src.high_gp(), dst_op); MacroAssembler::Sw(src.high_gp(), dst_op);
break; break;
} }
case StoreType::kF32Store: case StoreType::kF32Store:
TurboAssembler::StoreFloat(src.fp(), dst_op); MacroAssembler::StoreFloat(src.fp(), dst_op);
break; break;
case StoreType::kF64Store: case StoreType::kF64Store:
TurboAssembler::StoreDouble(src.fp(), dst_op); MacroAssembler::StoreDouble(src.fp(), dst_op);
break; break;
case StoreType::kS128Store: { case StoreType::kS128Store: {
VU.set(kScratchReg, E8, m1); VU.set(kScratchReg, E8, m1);
@ -926,14 +926,14 @@ void LiftoffAssembler::MoveStackValue(uint32_t dst_offset, uint32_t src_offset,
StoreFloat(kScratchDoubleReg, dst); StoreFloat(kScratchDoubleReg, dst);
break; break;
case kF64: case kF64:
TurboAssembler::LoadDouble(kScratchDoubleReg, src); MacroAssembler::LoadDouble(kScratchDoubleReg, src);
TurboAssembler::StoreDouble(kScratchDoubleReg, dst); MacroAssembler::StoreDouble(kScratchDoubleReg, dst);
break; break;
case kS128: { case kS128: {
VU.set(kScratchReg, E8, m1); VU.set(kScratchReg, E8, m1);
Register src_reg = src.offset() == 0 ? src.rm() : kScratchReg; Register src_reg = src.offset() == 0 ? src.rm() : kScratchReg;
if (src.offset() != 0) { if (src.offset() != 0) {
TurboAssembler::AddWord(src_reg, src.rm(), src.offset()); MacroAssembler::AddWord(src_reg, src.rm(), src.offset());
} }
vl(kSimd128ScratchReg, src_reg, 0, E8); vl(kSimd128ScratchReg, src_reg, 0, E8);
Register dst_reg = dst.offset() == 0 ? dst.rm() : kScratchReg; Register dst_reg = dst.offset() == 0 ? dst.rm() : kScratchReg;
@ -951,16 +951,16 @@ void LiftoffAssembler::MoveStackValue(uint32_t dst_offset, uint32_t src_offset,
void LiftoffAssembler::Move(Register dst, Register src, ValueKind kind) { void LiftoffAssembler::Move(Register dst, Register src, ValueKind kind) {
DCHECK_NE(dst, src); DCHECK_NE(dst, src);
// TODO(ksreten): Handle different sizes here. // TODO(ksreten): Handle different sizes here.
TurboAssembler::Move(dst, src); MacroAssembler::Move(dst, src);
} }
void LiftoffAssembler::Move(DoubleRegister dst, DoubleRegister src, void LiftoffAssembler::Move(DoubleRegister dst, DoubleRegister src,
ValueKind kind) { ValueKind kind) {
DCHECK_NE(dst, src); DCHECK_NE(dst, src);
if (kind != kS128) { if (kind != kS128) {
TurboAssembler::Move(dst, src); MacroAssembler::Move(dst, src);
} else { } else {
TurboAssembler::vmv_vv(dst.toV(), dst.toV()); MacroAssembler::vmv_vv(dst.toV(), dst.toV());
} }
} }
@ -982,7 +982,7 @@ void LiftoffAssembler::Spill(int offset, LiftoffRegister reg, ValueKind kind) {
StoreFloat(reg.fp(), dst); StoreFloat(reg.fp(), dst);
break; break;
case kF64: case kF64:
TurboAssembler::StoreDouble(reg.fp(), dst); MacroAssembler::StoreDouble(reg.fp(), dst);
break; break;
case kS128: { case kS128: {
VU.set(kScratchReg, E8, m1); VU.set(kScratchReg, E8, m1);
@ -1006,7 +1006,7 @@ void LiftoffAssembler::Spill(int offset, WasmValue value) {
case kRef: case kRef:
case kRefNull: { case kRefNull: {
LiftoffRegister tmp = GetUnusedRegister(kGpReg, {}); LiftoffRegister tmp = GetUnusedRegister(kGpReg, {});
TurboAssembler::li(tmp.gp(), Operand(value.to_i32())); MacroAssembler::li(tmp.gp(), Operand(value.to_i32()));
Sw(tmp.gp(), dst); Sw(tmp.gp(), dst);
break; break;
} }
@ -1015,8 +1015,8 @@ void LiftoffAssembler::Spill(int offset, WasmValue value) {
int32_t low_word = value.to_i64(); int32_t low_word = value.to_i64();
int32_t high_word = value.to_i64() >> 32; int32_t high_word = value.to_i64() >> 32;
TurboAssembler::li(tmp.low_gp(), Operand(low_word)); MacroAssembler::li(tmp.low_gp(), Operand(low_word));
TurboAssembler::li(tmp.high_gp(), Operand(high_word)); MacroAssembler::li(tmp.high_gp(), Operand(high_word));
Sw(tmp.low_gp(), liftoff::GetHalfStackSlot(offset, kLowWord)); Sw(tmp.low_gp(), liftoff::GetHalfStackSlot(offset, kLowWord));
Sw(tmp.high_gp(), liftoff::GetHalfStackSlot(offset, kHighWord)); Sw(tmp.high_gp(), liftoff::GetHalfStackSlot(offset, kHighWord));
@ -1046,13 +1046,13 @@ void LiftoffAssembler::Fill(LiftoffRegister reg, int offset, ValueKind kind) {
LoadFloat(reg.fp(), src); LoadFloat(reg.fp(), src);
break; break;
case kF64: case kF64:
TurboAssembler::LoadDouble(reg.fp(), src); MacroAssembler::LoadDouble(reg.fp(), src);
break; break;
case kS128: { case kS128: {
VU.set(kScratchReg, E8, m1); VU.set(kScratchReg, E8, m1);
Register src_reg = src.offset() == 0 ? src.rm() : kScratchReg; Register src_reg = src.offset() == 0 ? src.rm() : kScratchReg;
if (src.offset() != 0) { if (src.offset() != 0) {
TurboAssembler::AddWord(src_reg, src.rm(), src.offset()); MacroAssembler::AddWord(src_reg, src.rm(), src.offset());
} }
vl(reg.fp().toV(), src_reg, 0, E8); vl(reg.fp().toV(), src_reg, 0, E8);
break; break;
@ -1140,8 +1140,8 @@ bool LiftoffAssembler::emit_i64_popcnt(LiftoffRegister dst,
// Produce partial popcnts in the two dst registers. // Produce partial popcnts in the two dst registers.
Register src1 = src.high_gp() == dst.low_gp() ? src.high_gp() : src.low_gp(); Register src1 = src.high_gp() == dst.low_gp() ? src.high_gp() : src.low_gp();
Register src2 = src.high_gp() == dst.low_gp() ? src.low_gp() : src.high_gp(); Register src2 = src.high_gp() == dst.low_gp() ? src.low_gp() : src.high_gp();
TurboAssembler::Popcnt32(dst.low_gp(), src1, kScratchReg); MacroAssembler::Popcnt32(dst.low_gp(), src1, kScratchReg);
TurboAssembler::Popcnt32(dst.high_gp(), src2, kScratchReg); MacroAssembler::Popcnt32(dst.high_gp(), src2, kScratchReg);
// Now add the two into the lower dst reg and clear the higher dst reg. // Now add the two into the lower dst reg and clear the higher dst reg.
AddWord(dst.low_gp(), dst.low_gp(), dst.high_gp()); AddWord(dst.low_gp(), dst.low_gp(), dst.high_gp());
mv(dst.high_gp(), zero_reg); mv(dst.high_gp(), zero_reg);
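A 64-bit population count on a 32-bit target is the sum of two 32-bit counts; the src1/src2 selection above only exists so that writing the first partial result cannot clobber a source half that is still needed. Numerically (__builtin_popcount is a GCC/Clang builtin):

#include <cstdint>

// popcnt64(x) == popcnt32(low half) + popcnt32(high half).
int Popcnt64Via32(uint64_t x) {
  int low = __builtin_popcount(static_cast<uint32_t>(x));
  int high = __builtin_popcount(static_cast<uint32_t>(x >> 32));
  return low + high;  // The high result register is then zeroed, as above.
}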
@ -1149,40 +1149,40 @@ bool LiftoffAssembler::emit_i64_popcnt(LiftoffRegister dst,
} }
void LiftoffAssembler::emit_i32_mul(Register dst, Register lhs, Register rhs) { void LiftoffAssembler::emit_i32_mul(Register dst, Register lhs, Register rhs) {
TurboAssembler::Mul(dst, lhs, rhs); MacroAssembler::Mul(dst, lhs, rhs);
} }
void LiftoffAssembler::emit_i32_divs(Register dst, Register lhs, Register rhs, void LiftoffAssembler::emit_i32_divs(Register dst, Register lhs, Register rhs,
Label* trap_div_by_zero, Label* trap_div_by_zero,
Label* trap_div_unrepresentable) { Label* trap_div_unrepresentable) {
TurboAssembler::Branch(trap_div_by_zero, eq, rhs, Operand(zero_reg)); MacroAssembler::Branch(trap_div_by_zero, eq, rhs, Operand(zero_reg));
// Check if lhs == kMinInt and rhs == -1, since this case is unrepresentable. // Check if lhs == kMinInt and rhs == -1, since this case is unrepresentable.
TurboAssembler::CompareI(kScratchReg, lhs, Operand(kMinInt), ne); MacroAssembler::CompareI(kScratchReg, lhs, Operand(kMinInt), ne);
TurboAssembler::CompareI(kScratchReg2, rhs, Operand(-1), ne); MacroAssembler::CompareI(kScratchReg2, rhs, Operand(-1), ne);
add(kScratchReg, kScratchReg, kScratchReg2); add(kScratchReg, kScratchReg, kScratchReg2);
TurboAssembler::Branch(trap_div_unrepresentable, eq, kScratchReg, MacroAssembler::Branch(trap_div_unrepresentable, eq, kScratchReg,
Operand(zero_reg)); Operand(zero_reg));
TurboAssembler::Div(dst, lhs, rhs); MacroAssembler::Div(dst, lhs, rhs);
} }
void LiftoffAssembler::emit_i32_divu(Register dst, Register lhs, Register rhs, void LiftoffAssembler::emit_i32_divu(Register dst, Register lhs, Register rhs,
Label* trap_div_by_zero) { Label* trap_div_by_zero) {
TurboAssembler::Branch(trap_div_by_zero, eq, rhs, Operand(zero_reg)); MacroAssembler::Branch(trap_div_by_zero, eq, rhs, Operand(zero_reg));
TurboAssembler::Divu(dst, lhs, rhs); MacroAssembler::Divu(dst, lhs, rhs);
} }
void LiftoffAssembler::emit_i32_rems(Register dst, Register lhs, Register rhs, void LiftoffAssembler::emit_i32_rems(Register dst, Register lhs, Register rhs,
Label* trap_div_by_zero) { Label* trap_div_by_zero) {
TurboAssembler::Branch(trap_div_by_zero, eq, rhs, Operand(zero_reg)); MacroAssembler::Branch(trap_div_by_zero, eq, rhs, Operand(zero_reg));
TurboAssembler::Mod(dst, lhs, rhs); MacroAssembler::Mod(dst, lhs, rhs);
} }
void LiftoffAssembler::emit_i32_remu(Register dst, Register lhs, Register rhs, void LiftoffAssembler::emit_i32_remu(Register dst, Register lhs, Register rhs,
Label* trap_div_by_zero) { Label* trap_div_by_zero) {
TurboAssembler::Branch(trap_div_by_zero, eq, rhs, Operand(zero_reg)); MacroAssembler::Branch(trap_div_by_zero, eq, rhs, Operand(zero_reg));
TurboAssembler::Modu(dst, lhs, rhs); MacroAssembler::Modu(dst, lhs, rhs);
} }
#define I32_BINOP(name, instruction) \ #define I32_BINOP(name, instruction) \
@ -1218,15 +1218,15 @@ I32_BINOP_I(xor, Xor)
#undef I32_BINOP_I #undef I32_BINOP_I
void LiftoffAssembler::emit_i32_clz(Register dst, Register src) { void LiftoffAssembler::emit_i32_clz(Register dst, Register src) {
TurboAssembler::Clz32(dst, src); MacroAssembler::Clz32(dst, src);
} }
void LiftoffAssembler::emit_i32_ctz(Register dst, Register src) { void LiftoffAssembler::emit_i32_ctz(Register dst, Register src) {
TurboAssembler::Ctz32(dst, src); MacroAssembler::Ctz32(dst, src);
} }
bool LiftoffAssembler::emit_i32_popcnt(Register dst, Register src) { bool LiftoffAssembler::emit_i32_popcnt(Register dst, Register src) {
TurboAssembler::Popcnt32(dst, src, kScratchReg); MacroAssembler::Popcnt32(dst, src, kScratchReg);
return true; return true;
} }
@ -1254,7 +1254,7 @@ I32_SHIFTOP_I(shr, srli)
void LiftoffAssembler::emit_i64_mul(LiftoffRegister dst, LiftoffRegister lhs, void LiftoffAssembler::emit_i64_mul(LiftoffRegister dst, LiftoffRegister lhs,
LiftoffRegister rhs) { LiftoffRegister rhs) {
TurboAssembler::MulPair(dst.low_gp(), dst.high_gp(), lhs.low_gp(), MacroAssembler::MulPair(dst.low_gp(), dst.high_gp(), lhs.low_gp(),
lhs.high_gp(), rhs.low_gp(), rhs.high_gp(), lhs.high_gp(), rhs.low_gp(), rhs.high_gp(),
kScratchReg, kScratchReg2); kScratchReg, kScratchReg2);
} }
@ -1294,7 +1294,7 @@ inline bool IsRegInRegPair(LiftoffRegister pair, Register reg) {
inline void Emit64BitShiftOperation( inline void Emit64BitShiftOperation(
LiftoffAssembler* assm, LiftoffRegister dst, LiftoffRegister src, LiftoffAssembler* assm, LiftoffRegister dst, LiftoffRegister src,
Register amount, Register amount,
void (TurboAssembler::*emit_shift)(Register, Register, Register, Register, void (MacroAssembler::*emit_shift)(Register, Register, Register, Register,
Register, Register, Register)) { Register, Register, Register)) {
LiftoffRegList pinned{dst, src, amount}; LiftoffRegList pinned{dst, src, amount};
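Emit64BitShiftOperation is parameterized over a pointer to a member function so one piece of scaffolding (pinning registers, capping the shift amount, routing through a temporary pair) serves shl, sar, and shr alike; callers pass &MacroAssembler::ShlPair and friends, and the helper invokes them as (assm->*emit_shift)(...). The C++ mechanism in isolation:

#include <cstdio>

struct Emitter {
  void ShlPair(int lo, int hi) { std::printf("shl %d,%d\n", lo, hi); }
  void ShrPair(int lo, int hi) { std::printf("shr %d,%d\n", lo, hi); }
};

// Shared scaffolding, parameterized over which member function to emit.
void EmitShift(Emitter* e, void (Emitter::*emit)(int, int), int lo, int hi) {
  (e->*emit)(lo, hi);  // Same syntax as (assm->*emit_shift)(...) above.
}

int main() {
  Emitter e;
  EmitShift(&e, &Emitter::ShlPair, 1, 2);
  EmitShift(&e, &Emitter::ShrPair, 3, 4);
}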
@ -1313,8 +1313,8 @@ inline void Emit64BitShiftOperation(
kScratchReg2); kScratchReg2);
// Place result in destination register. // Place result in destination register.
assm->TurboAssembler::Move(dst.high_gp(), tmp.high_gp()); assm->MacroAssembler::Move(dst.high_gp(), tmp.high_gp());
assm->TurboAssembler::Move(dst.low_gp(), tmp.low_gp()); assm->MacroAssembler::Move(dst.low_gp(), tmp.low_gp());
} else { } else {
(assm->*emit_shift)(dst.low_gp(), dst.high_gp(), src.low_gp(), (assm->*emit_shift)(dst.low_gp(), dst.high_gp(), src.low_gp(),
src.high_gp(), amount_capped, kScratchReg, src.high_gp(), amount_capped, kScratchReg,
@ -1325,7 +1325,7 @@ inline void Emit64BitShiftOperation(
void LiftoffAssembler::emit_i64_add(LiftoffRegister dst, LiftoffRegister lhs, void LiftoffAssembler::emit_i64_add(LiftoffRegister dst, LiftoffRegister lhs,
LiftoffRegister rhs) { LiftoffRegister rhs) {
TurboAssembler::AddPair(dst.low_gp(), dst.high_gp(), lhs.low_gp(), MacroAssembler::AddPair(dst.low_gp(), dst.high_gp(), lhs.low_gp(),
lhs.high_gp(), rhs.low_gp(), rhs.high_gp(), lhs.high_gp(), rhs.low_gp(), rhs.high_gp(),
kScratchReg, kScratchReg2); kScratchReg, kScratchReg2);
} }
@ -1339,16 +1339,16 @@ void LiftoffAssembler::emit_i64_addi(LiftoffRegister dst, LiftoffRegister lhs,
// TODO(riscv32): is there some optimization we can make without // TODO(riscv32): is there some optimization we can make without
// materializing? // materializing?
TurboAssembler::li(imm_reg.low_gp(), imm_low_word); MacroAssembler::li(imm_reg.low_gp(), imm_low_word);
TurboAssembler::li(imm_reg.high_gp(), imm_high_word); MacroAssembler::li(imm_reg.high_gp(), imm_high_word);
TurboAssembler::AddPair(dst.low_gp(), dst.high_gp(), lhs.low_gp(), MacroAssembler::AddPair(dst.low_gp(), dst.high_gp(), lhs.low_gp(),
lhs.high_gp(), imm_reg.low_gp(), imm_reg.high_gp(), lhs.high_gp(), imm_reg.low_gp(), imm_reg.high_gp(),
kScratchReg, kScratchReg2); kScratchReg, kScratchReg2);
} }
void LiftoffAssembler::emit_i64_sub(LiftoffRegister dst, LiftoffRegister lhs, void LiftoffAssembler::emit_i64_sub(LiftoffRegister dst, LiftoffRegister lhs,
LiftoffRegister rhs) { LiftoffRegister rhs) {
TurboAssembler::SubPair(dst.low_gp(), dst.high_gp(), lhs.low_gp(), MacroAssembler::SubPair(dst.low_gp(), dst.high_gp(), lhs.low_gp(),
lhs.high_gp(), rhs.low_gp(), rhs.high_gp(), lhs.high_gp(), rhs.low_gp(), rhs.high_gp(),
kScratchReg, kScratchReg2); kScratchReg, kScratchReg2);
} }
@ -1357,7 +1357,7 @@ void LiftoffAssembler::emit_i64_shl(LiftoffRegister dst, LiftoffRegister src,
Register amount) { Register amount) {
ASM_CODE_COMMENT(this); ASM_CODE_COMMENT(this);
liftoff::Emit64BitShiftOperation(this, dst, src, amount, liftoff::Emit64BitShiftOperation(this, dst, src, amount,
&TurboAssembler::ShlPair); &MacroAssembler::ShlPair);
} }
void LiftoffAssembler::emit_i64_shli(LiftoffRegister dst, LiftoffRegister src, void LiftoffAssembler::emit_i64_shli(LiftoffRegister dst, LiftoffRegister src,
@ -1374,14 +1374,14 @@ void LiftoffAssembler::emit_i64_shli(LiftoffRegister dst, LiftoffRegister src,
DCHECK_NE(dst.low_gp(), kScratchReg); DCHECK_NE(dst.low_gp(), kScratchReg);
DCHECK_NE(dst.high_gp(), kScratchReg); DCHECK_NE(dst.high_gp(), kScratchReg);
TurboAssembler::ShlPair(dst.low_gp(), dst.high_gp(), src_low, src_high, MacroAssembler::ShlPair(dst.low_gp(), dst.high_gp(), src_low, src_high,
amount, kScratchReg, kScratchReg2); amount, kScratchReg, kScratchReg2);
} }
void LiftoffAssembler::emit_i64_sar(LiftoffRegister dst, LiftoffRegister src, void LiftoffAssembler::emit_i64_sar(LiftoffRegister dst, LiftoffRegister src,
Register amount) { Register amount) {
liftoff::Emit64BitShiftOperation(this, dst, src, amount, liftoff::Emit64BitShiftOperation(this, dst, src, amount,
&TurboAssembler::SarPair); &MacroAssembler::SarPair);
} }
void LiftoffAssembler::emit_i64_sari(LiftoffRegister dst, LiftoffRegister src, void LiftoffAssembler::emit_i64_sari(LiftoffRegister dst, LiftoffRegister src,
@ -1397,14 +1397,14 @@ void LiftoffAssembler::emit_i64_sari(LiftoffRegister dst, LiftoffRegister src,
DCHECK_NE(dst.low_gp(), kScratchReg); DCHECK_NE(dst.low_gp(), kScratchReg);
DCHECK_NE(dst.high_gp(), kScratchReg); DCHECK_NE(dst.high_gp(), kScratchReg);
TurboAssembler::SarPair(dst.low_gp(), dst.high_gp(), src_low, src_high, MacroAssembler::SarPair(dst.low_gp(), dst.high_gp(), src_low, src_high,
amount, kScratchReg, kScratchReg2); amount, kScratchReg, kScratchReg2);
} }
void LiftoffAssembler::emit_i64_shr(LiftoffRegister dst, LiftoffRegister src, void LiftoffAssembler::emit_i64_shr(LiftoffRegister dst, LiftoffRegister src,
Register amount) { Register amount) {
liftoff::Emit64BitShiftOperation(this, dst, src, amount, liftoff::Emit64BitShiftOperation(this, dst, src, amount,
&TurboAssembler::ShrPair); &MacroAssembler::ShrPair);
} }
void LiftoffAssembler::emit_i64_shri(LiftoffRegister dst, LiftoffRegister src, void LiftoffAssembler::emit_i64_shri(LiftoffRegister dst, LiftoffRegister src,
@ -1420,7 +1420,7 @@ void LiftoffAssembler::emit_i64_shri(LiftoffRegister dst, LiftoffRegister src,
DCHECK_NE(dst.low_gp(), kScratchReg); DCHECK_NE(dst.low_gp(), kScratchReg);
DCHECK_NE(dst.high_gp(), kScratchReg); DCHECK_NE(dst.high_gp(), kScratchReg);
TurboAssembler::ShrPair(dst.low_gp(), dst.high_gp(), src_low, src_high, MacroAssembler::ShrPair(dst.low_gp(), dst.high_gp(), src_low, src_high,
amount, kScratchReg, kScratchReg2); amount, kScratchReg, kScratchReg2);
} }
@ -1441,7 +1441,7 @@ bool LiftoffAssembler::emit_type_conversion(WasmOpcode opcode,
LiftoffRegister src, Label* trap) { LiftoffRegister src, Label* trap) {
switch (opcode) { switch (opcode) {
case kExprI32ConvertI64: case kExprI32ConvertI64:
TurboAssembler::Move(dst.gp(), src.low_gp()); MacroAssembler::Move(dst.gp(), src.low_gp());
return true; return true;
case kExprI32SConvertF32: case kExprI32SConvertF32:
case kExprI32UConvertF32: case kExprI32UConvertF32:
@ -1481,22 +1481,22 @@ bool LiftoffAssembler::emit_type_conversion(WasmOpcode opcode,
// Check whether to trap. // Check whether to trap.
if (trap != nullptr) { if (trap != nullptr) {
TurboAssembler::Branch(trap, eq, kScratchReg, Operand(zero_reg)); MacroAssembler::Branch(trap, eq, kScratchReg, Operand(zero_reg));
} }
return true; return true;
} }
case kExprI32ReinterpretF32: case kExprI32ReinterpretF32:
TurboAssembler::ExtractLowWordFromF64(dst.gp(), src.fp()); MacroAssembler::ExtractLowWordFromF64(dst.gp(), src.fp());
return true; return true;
case kExprI64SConvertI32: case kExprI64SConvertI32:
TurboAssembler::Move(dst.low_gp(), src.gp()); MacroAssembler::Move(dst.low_gp(), src.gp());
TurboAssembler::Move(dst.high_gp(), src.gp()); MacroAssembler::Move(dst.high_gp(), src.gp());
srai(dst.high_gp(), dst.high_gp(), 31); srai(dst.high_gp(), dst.high_gp(), 31);
return true; return true;
case kExprI64UConvertI32: case kExprI64UConvertI32:
TurboAssembler::Move(dst.low_gp(), src.gp()); MacroAssembler::Move(dst.low_gp(), src.gp());
TurboAssembler::Move(dst.high_gp(), zero_reg); MacroAssembler::Move(dst.high_gp(), zero_reg);
return true; return true;
case kExprI64ReinterpretF64: case kExprI64ReinterpretF64:
SubWord(sp, sp, kDoubleSize); SubWord(sp, sp, kDoubleSize);
@ -1506,21 +1506,21 @@ bool LiftoffAssembler::emit_type_conversion(WasmOpcode opcode,
AddWord(sp, sp, kDoubleSize); AddWord(sp, sp, kDoubleSize);
return true; return true;
case kExprF32SConvertI32: { case kExprF32SConvertI32: {
TurboAssembler::Cvt_s_w(dst.fp(), src.gp()); MacroAssembler::Cvt_s_w(dst.fp(), src.gp());
return true; return true;
} }
case kExprF32UConvertI32: case kExprF32UConvertI32:
TurboAssembler::Cvt_s_uw(dst.fp(), src.gp()); MacroAssembler::Cvt_s_uw(dst.fp(), src.gp());
return true; return true;
case kExprF32ReinterpretI32: case kExprF32ReinterpretI32:
fmv_w_x(dst.fp(), src.gp()); fmv_w_x(dst.fp(), src.gp());
return true; return true;
case kExprF64SConvertI32: { case kExprF64SConvertI32: {
TurboAssembler::Cvt_d_w(dst.fp(), src.gp()); MacroAssembler::Cvt_d_w(dst.fp(), src.gp());
return true; return true;
} }
case kExprF64UConvertI32: case kExprF64UConvertI32:
TurboAssembler::Cvt_d_uw(dst.fp(), src.gp()); MacroAssembler::Cvt_d_uw(dst.fp(), src.gp());
return true; return true;
case kExprF64ConvertF32: case kExprF64ConvertF32:
fcvt_d_s(dst.fp(), src.fp()); fcvt_d_s(dst.fp(), src.fp());
@ -1591,11 +1591,11 @@ void LiftoffAssembler::emit_i64_signextend_i32(LiftoffRegister dst,
} }
void LiftoffAssembler::emit_jump(Label* label) { void LiftoffAssembler::emit_jump(Label* label) {
TurboAssembler::Branch(label); MacroAssembler::Branch(label);
} }
void LiftoffAssembler::emit_jump(Register target) { void LiftoffAssembler::emit_jump(Register target) {
TurboAssembler::Jump(target); MacroAssembler::Jump(target);
} }
void LiftoffAssembler::emit_cond_jump(Condition cond, Label* label, void LiftoffAssembler::emit_cond_jump(Condition cond, Label* label,
@ -1604,34 +1604,34 @@ void LiftoffAssembler::emit_cond_jump(Condition cond, Label* label,
const FreezeCacheState& frozen) { const FreezeCacheState& frozen) {
if (rhs == no_reg) { if (rhs == no_reg) {
DCHECK(kind == kI32); DCHECK(kind == kI32);
TurboAssembler::Branch(label, cond, lhs, Operand(zero_reg)); MacroAssembler::Branch(label, cond, lhs, Operand(zero_reg));
} else { } else {
DCHECK((kind == kI32) || DCHECK((kind == kI32) ||
(is_reference(kind) && (cond == kEqual || cond == kNotEqual))); (is_reference(kind) && (cond == kEqual || cond == kNotEqual)));
TurboAssembler::Branch(label, cond, lhs, Operand(rhs)); MacroAssembler::Branch(label, cond, lhs, Operand(rhs));
} }
} }
void LiftoffAssembler::emit_i32_cond_jumpi(Condition cond, Label* label, void LiftoffAssembler::emit_i32_cond_jumpi(Condition cond, Label* label,
Register lhs, int32_t imm, Register lhs, int32_t imm,
const FreezeCacheState& frozen) { const FreezeCacheState& frozen) {
TurboAssembler::Branch(label, cond, lhs, Operand(imm)); MacroAssembler::Branch(label, cond, lhs, Operand(imm));
} }
void LiftoffAssembler::emit_i32_subi_jump_negative( void LiftoffAssembler::emit_i32_subi_jump_negative(
Register value, int subtrahend, Label* result_negative, Register value, int subtrahend, Label* result_negative,
const FreezeCacheState& frozen) { const FreezeCacheState& frozen) {
SubWord(value, value, Operand(subtrahend)); SubWord(value, value, Operand(subtrahend));
TurboAssembler::Branch(result_negative, lt, value, Operand(zero_reg)); MacroAssembler::Branch(result_negative, lt, value, Operand(zero_reg));
} }
void LiftoffAssembler::emit_i32_eqz(Register dst, Register src) { void LiftoffAssembler::emit_i32_eqz(Register dst, Register src) {
TurboAssembler::Sltu(dst, src, 1); MacroAssembler::Sltu(dst, src, 1);
} }
void LiftoffAssembler::emit_i32_set_cond(Condition cond, Register dst, void LiftoffAssembler::emit_i32_set_cond(Condition cond, Register dst,
Register lhs, Register rhs) { Register lhs, Register rhs) {
TurboAssembler::CompareI(dst, lhs, Operand(rhs), cond); MacroAssembler::CompareI(dst, lhs, Operand(rhs), cond);
} }
void LiftoffAssembler::emit_i64_eqz(Register dst, LiftoffRegister src) { void LiftoffAssembler::emit_i64_eqz(Register dst, LiftoffRegister src) {
@ -1675,7 +1675,7 @@ void LiftoffAssembler::emit_i64_set_cond(Condition cond, Register dst,
} }
// Write 1 initially in tmp register. // Write 1 initially in tmp register.
TurboAssembler::li(tmp, 1); MacroAssembler::li(tmp, 1);
// If high words are equal, then compare low words, else compare high. // If high words are equal, then compare low words, else compare high.
Branch(&low, eq, lhs.high_gp(), Operand(rhs.high_gp())); Branch(&low, eq, lhs.high_gp(), Operand(rhs.high_gp()));
@ -1701,7 +1701,7 @@ void LiftoffAssembler::emit_i64_set_cond(Condition cond, Register dst,
} }
bind(&cont); bind(&cont);
// Move result to dst register if needed. // Move result to dst register if needed.
TurboAssembler::Move(dst, tmp); MacroAssembler::Move(dst, tmp);
} }
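emit_i64_set_cond composes a 64-bit comparison out of 32-bit halves: if the high words differ they decide the result, otherwise the low words do, and the low words are always compared unsigned regardless of the condition's signedness. As plain logic:

#include <cstdint>

// Signed 64-bit "less than" over a register pair: signed compare on the
// high halves, unsigned compare on the low halves when the highs are equal.
bool I64LessThan(int32_t lhs_hi, uint32_t lhs_lo, int32_t rhs_hi,
                 uint32_t rhs_lo) {
  if (lhs_hi != rhs_hi) return lhs_hi < rhs_hi;
  return lhs_lo < rhs_lo;
}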
void LiftoffAssembler::IncrementSmi(LiftoffRegister dst, int offset) { void LiftoffAssembler::IncrementSmi(LiftoffRegister dst, int offset) {

View File

@ -153,17 +153,17 @@ void LiftoffAssembler::LoadConstant(LiftoffRegister reg, WasmValue value,
RelocInfo::Mode rmode) { RelocInfo::Mode rmode) {
switch (value.type().kind()) { switch (value.type().kind()) {
case kI32: case kI32:
TurboAssembler::li(reg.gp(), Operand(value.to_i32(), rmode)); MacroAssembler::li(reg.gp(), Operand(value.to_i32(), rmode));
break; break;
case kI64: case kI64:
TurboAssembler::li(reg.gp(), Operand(value.to_i64(), rmode)); MacroAssembler::li(reg.gp(), Operand(value.to_i64(), rmode));
break; break;
case kF32: case kF32:
TurboAssembler::LoadFPRImmediate(reg.fp(), MacroAssembler::LoadFPRImmediate(reg.fp(),
value.to_f32_boxed().get_bits()); value.to_f32_boxed().get_bits());
break; break;
case kF64: case kF64:
TurboAssembler::LoadFPRImmediate(reg.fp(), MacroAssembler::LoadFPRImmediate(reg.fp(),
value.to_f64_boxed().get_bits()); value.to_f64_boxed().get_bits());
break; break;
default: default:
@ -237,33 +237,33 @@ void LiftoffAssembler::Load(LiftoffRegister dst, Register src_addr,
break; break;
case LoadType::kI32Load16U: case LoadType::kI32Load16U:
case LoadType::kI64Load16U: case LoadType::kI64Load16U:
TurboAssembler::Lhu(dst.gp(), src_op); MacroAssembler::Lhu(dst.gp(), src_op);
break; break;
case LoadType::kI32Load16S: case LoadType::kI32Load16S:
case LoadType::kI64Load16S: case LoadType::kI64Load16S:
TurboAssembler::Lh(dst.gp(), src_op); MacroAssembler::Lh(dst.gp(), src_op);
break; break;
case LoadType::kI64Load32U: case LoadType::kI64Load32U:
TurboAssembler::Lwu(dst.gp(), src_op); MacroAssembler::Lwu(dst.gp(), src_op);
break; break;
case LoadType::kI32Load: case LoadType::kI32Load:
case LoadType::kI64Load32S: case LoadType::kI64Load32S:
TurboAssembler::Lw(dst.gp(), src_op); MacroAssembler::Lw(dst.gp(), src_op);
break; break;
case LoadType::kI64Load: case LoadType::kI64Load:
TurboAssembler::Ld(dst.gp(), src_op); MacroAssembler::Ld(dst.gp(), src_op);
break; break;
case LoadType::kF32Load: case LoadType::kF32Load:
TurboAssembler::LoadFloat(dst.fp(), src_op); MacroAssembler::LoadFloat(dst.fp(), src_op);
break; break;
case LoadType::kF64Load: case LoadType::kF64Load:
TurboAssembler::LoadDouble(dst.fp(), src_op); MacroAssembler::LoadDouble(dst.fp(), src_op);
break; break;
case LoadType::kS128Load: { case LoadType::kS128Load: {
VU.set(kScratchReg, E8, m1); VU.set(kScratchReg, E8, m1);
Register src_reg = src_op.offset() == 0 ? src_op.rm() : kScratchReg; Register src_reg = src_op.offset() == 0 ? src_op.rm() : kScratchReg;
if (src_op.offset() != 0) { if (src_op.offset() != 0) {
TurboAssembler::AddWord(src_reg, src_op.rm(), src_op.offset()); MacroAssembler::AddWord(src_reg, src_op.rm(), src_op.offset());
} }
vl(dst.fp().toV(), src_reg, 0, E8); vl(dst.fp().toV(), src_reg, 0, E8);
break; break;
@ -310,20 +310,20 @@ void LiftoffAssembler::Store(Register dst_addr, Register offset_reg,
break; break;
case StoreType::kI32Store16: case StoreType::kI32Store16:
case StoreType::kI64Store16: case StoreType::kI64Store16:
TurboAssembler::Sh(src.gp(), dst_op); MacroAssembler::Sh(src.gp(), dst_op);
break; break;
case StoreType::kI32Store: case StoreType::kI32Store:
case StoreType::kI64Store32: case StoreType::kI64Store32:
TurboAssembler::Sw(src.gp(), dst_op); MacroAssembler::Sw(src.gp(), dst_op);
break; break;
case StoreType::kI64Store: case StoreType::kI64Store:
TurboAssembler::Sd(src.gp(), dst_op); MacroAssembler::Sd(src.gp(), dst_op);
break; break;
case StoreType::kF32Store: case StoreType::kF32Store:
TurboAssembler::StoreFloat(src.fp(), dst_op); MacroAssembler::StoreFloat(src.fp(), dst_op);
break; break;
case StoreType::kF64Store: case StoreType::kF64Store:
TurboAssembler::StoreDouble(src.fp(), dst_op); MacroAssembler::StoreDouble(src.fp(), dst_op);
break; break;
case StoreType::kS128Store: { case StoreType::kS128Store: {
VU.set(kScratchReg, E8, m1); VU.set(kScratchReg, E8, m1);
@ -692,14 +692,14 @@ void LiftoffAssembler::MoveStackValue(uint32_t dst_offset, uint32_t src_offset,
StoreFloat(kScratchDoubleReg, dst); StoreFloat(kScratchDoubleReg, dst);
break; break;
case kF64: case kF64:
TurboAssembler::LoadDouble(kScratchDoubleReg, src); MacroAssembler::LoadDouble(kScratchDoubleReg, src);
TurboAssembler::StoreDouble(kScratchDoubleReg, dst); MacroAssembler::StoreDouble(kScratchDoubleReg, dst);
break; break;
case kS128: { case kS128: {
VU.set(kScratchReg, E8, m1); VU.set(kScratchReg, E8, m1);
Register src_reg = src.offset() == 0 ? src.rm() : kScratchReg; Register src_reg = src.offset() == 0 ? src.rm() : kScratchReg;
if (src.offset() != 0) { if (src.offset() != 0) {
TurboAssembler::Add64(src_reg, src.rm(), src.offset()); MacroAssembler::Add64(src_reg, src.rm(), src.offset());
} }
vl(kSimd128ScratchReg, src_reg, 0, E8); vl(kSimd128ScratchReg, src_reg, 0, E8);
Register dst_reg = dst.offset() == 0 ? dst.rm() : kScratchReg; Register dst_reg = dst.offset() == 0 ? dst.rm() : kScratchReg;
@ -720,16 +720,16 @@ void LiftoffAssembler::MoveStackValue(uint32_t dst_offset, uint32_t src_offset,
void LiftoffAssembler::Move(Register dst, Register src, ValueKind kind) { void LiftoffAssembler::Move(Register dst, Register src, ValueKind kind) {
DCHECK_NE(dst, src); DCHECK_NE(dst, src);
// TODO(ksreten): Handle different sizes here. // TODO(ksreten): Handle different sizes here.
TurboAssembler::Move(dst, src); MacroAssembler::Move(dst, src);
} }
void LiftoffAssembler::Move(DoubleRegister dst, DoubleRegister src, void LiftoffAssembler::Move(DoubleRegister dst, DoubleRegister src,
ValueKind kind) { ValueKind kind) {
DCHECK_NE(dst, src); DCHECK_NE(dst, src);
if (kind != kS128) { if (kind != kS128) {
TurboAssembler::Move(dst, src); MacroAssembler::Move(dst, src);
} else { } else {
TurboAssembler::vmv_vv(dst.toV(), src.toV()); MacroAssembler::vmv_vv(dst.toV(), src.toV());
} }
} }
@ -750,7 +750,7 @@ void LiftoffAssembler::Spill(int offset, LiftoffRegister reg, ValueKind kind) {
StoreFloat(reg.fp(), dst); StoreFloat(reg.fp(), dst);
break; break;
case kF64: case kF64:
TurboAssembler::StoreDouble(reg.fp(), dst); MacroAssembler::StoreDouble(reg.fp(), dst);
break; break;
case kS128: { case kS128: {
VU.set(kScratchReg, E8, m1); VU.set(kScratchReg, E8, m1);
@ -773,7 +773,7 @@ void LiftoffAssembler::Spill(int offset, WasmValue value) {
case kI32: { case kI32: {
UseScratchRegisterScope temps(this); UseScratchRegisterScope temps(this);
Register tmp = temps.Acquire(); Register tmp = temps.Acquire();
TurboAssembler::li(tmp, Operand(value.to_i32())); MacroAssembler::li(tmp, Operand(value.to_i32()));
Sw(tmp, dst); Sw(tmp, dst);
break; break;
} }
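Unlike the riscv32 code above, which grabs a temporary via GetUnusedRegister, these spill paths acquire theirs through UseScratchRegisterScope: Acquire() hands out a currently free scratch register and the scope's destructor returns everything it handed out, so nested code cannot leak or double-book scratch registers. A minimal model of that RAII pattern (illustrative, not the V8 class):

#include <cassert>
#include <cstdint>

// Bits in *available are free scratch registers.
class ScratchScope {
 public:
  explicit ScratchScope(uint32_t* available)
      : available_(available), acquired_(0) {}
  ~ScratchScope() { *available_ |= acquired_; }  // Release on scope exit.

  int Acquire() {
    assert(*available_ != 0);
    int code = __builtin_ctz(*available_);  // GCC/Clang builtin.
    uint32_t bit = 1u << code;
    *available_ &= ~bit;
    acquired_ |= bit;
    return code;
  }

 private:
  uint32_t* available_;
  uint32_t acquired_;
};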
@ -782,7 +782,7 @@ void LiftoffAssembler::Spill(int offset, WasmValue value) {
case kRefNull: { case kRefNull: {
UseScratchRegisterScope temps(this); UseScratchRegisterScope temps(this);
Register tmp = temps.Acquire(); Register tmp = temps.Acquire();
TurboAssembler::li(tmp, value.to_i64()); MacroAssembler::li(tmp, value.to_i64());
Sd(tmp, dst); Sd(tmp, dst);
break; break;
} }
@ -808,13 +808,13 @@ void LiftoffAssembler::Fill(LiftoffRegister reg, int offset, ValueKind kind) {
LoadFloat(reg.fp(), src); LoadFloat(reg.fp(), src);
break; break;
case kF64: case kF64:
TurboAssembler::LoadDouble(reg.fp(), src); MacroAssembler::LoadDouble(reg.fp(), src);
break; break;
case kS128: { case kS128: {
VU.set(kScratchReg, E8, m1); VU.set(kScratchReg, E8, m1);
Register src_reg = src.offset() == 0 ? src.rm() : kScratchReg; Register src_reg = src.offset() == 0 ? src.rm() : kScratchReg;
if (src.offset() != 0) { if (src.offset() != 0) {
TurboAssembler::Add64(src_reg, src.rm(), src.offset()); MacroAssembler::Add64(src_reg, src.rm(), src.offset());
} }
vl(reg.fp().toV(), src_reg, 0, E8); vl(reg.fp().toV(), src_reg, 0, E8);
break; break;
@ -861,54 +861,54 @@ void LiftoffAssembler::FillStackSlotsWithZero(int start, int size) {
} }
void LiftoffAssembler::emit_i64_clz(LiftoffRegister dst, LiftoffRegister src) { void LiftoffAssembler::emit_i64_clz(LiftoffRegister dst, LiftoffRegister src) {
TurboAssembler::Clz64(dst.gp(), src.gp()); MacroAssembler::Clz64(dst.gp(), src.gp());
} }
void LiftoffAssembler::emit_i64_ctz(LiftoffRegister dst, LiftoffRegister src) { void LiftoffAssembler::emit_i64_ctz(LiftoffRegister dst, LiftoffRegister src) {
TurboAssembler::Ctz64(dst.gp(), src.gp()); MacroAssembler::Ctz64(dst.gp(), src.gp());
} }
bool LiftoffAssembler::emit_i64_popcnt(LiftoffRegister dst, bool LiftoffAssembler::emit_i64_popcnt(LiftoffRegister dst,
LiftoffRegister src) { LiftoffRegister src) {
TurboAssembler::Popcnt64(dst.gp(), src.gp(), kScratchReg); MacroAssembler::Popcnt64(dst.gp(), src.gp(), kScratchReg);
return true; return true;
} }
void LiftoffAssembler::emit_i32_mul(Register dst, Register lhs, Register rhs) { void LiftoffAssembler::emit_i32_mul(Register dst, Register lhs, Register rhs) {
TurboAssembler::Mul32(dst, lhs, rhs); MacroAssembler::Mul32(dst, lhs, rhs);
} }
void LiftoffAssembler::emit_i32_divs(Register dst, Register lhs, Register rhs, void LiftoffAssembler::emit_i32_divs(Register dst, Register lhs, Register rhs,
Label* trap_div_by_zero, Label* trap_div_by_zero,
Label* trap_div_unrepresentable) { Label* trap_div_unrepresentable) {
TurboAssembler::Branch(trap_div_by_zero, eq, rhs, Operand(zero_reg)); MacroAssembler::Branch(trap_div_by_zero, eq, rhs, Operand(zero_reg));
// Check if lhs == kMinInt and rhs == -1, since this case is unrepresentable. // Check if lhs == kMinInt and rhs == -1, since this case is unrepresentable.
TurboAssembler::CompareI(kScratchReg, lhs, Operand(kMinInt), ne); MacroAssembler::CompareI(kScratchReg, lhs, Operand(kMinInt), ne);
TurboAssembler::CompareI(kScratchReg2, rhs, Operand(-1), ne); MacroAssembler::CompareI(kScratchReg2, rhs, Operand(-1), ne);
add(kScratchReg, kScratchReg, kScratchReg2); add(kScratchReg, kScratchReg, kScratchReg2);
TurboAssembler::Branch(trap_div_unrepresentable, eq, kScratchReg, MacroAssembler::Branch(trap_div_unrepresentable, eq, kScratchReg,
Operand(zero_reg)); Operand(zero_reg));
TurboAssembler::Div32(dst, lhs, rhs); MacroAssembler::Div32(dst, lhs, rhs);
} }
void LiftoffAssembler::emit_i32_divu(Register dst, Register lhs, Register rhs, void LiftoffAssembler::emit_i32_divu(Register dst, Register lhs, Register rhs,
Label* trap_div_by_zero) { Label* trap_div_by_zero) {
TurboAssembler::Branch(trap_div_by_zero, eq, rhs, Operand(zero_reg)); MacroAssembler::Branch(trap_div_by_zero, eq, rhs, Operand(zero_reg));
TurboAssembler::Divu32(dst, lhs, rhs); MacroAssembler::Divu32(dst, lhs, rhs);
} }
void LiftoffAssembler::emit_i32_rems(Register dst, Register lhs, Register rhs, void LiftoffAssembler::emit_i32_rems(Register dst, Register lhs, Register rhs,
Label* trap_div_by_zero) { Label* trap_div_by_zero) {
TurboAssembler::Branch(trap_div_by_zero, eq, rhs, Operand(zero_reg)); MacroAssembler::Branch(trap_div_by_zero, eq, rhs, Operand(zero_reg));
TurboAssembler::Mod32(dst, lhs, rhs); MacroAssembler::Mod32(dst, lhs, rhs);
} }
void LiftoffAssembler::emit_i32_remu(Register dst, Register lhs, Register rhs, void LiftoffAssembler::emit_i32_remu(Register dst, Register lhs, Register rhs,
Label* trap_div_by_zero) { Label* trap_div_by_zero) {
TurboAssembler::Branch(trap_div_by_zero, eq, rhs, Operand(zero_reg)); MacroAssembler::Branch(trap_div_by_zero, eq, rhs, Operand(zero_reg));
TurboAssembler::Modu32(dst, lhs, rhs); MacroAssembler::Modu32(dst, lhs, rhs);
} }
#define I32_BINOP(name, instruction) \ #define I32_BINOP(name, instruction) \
@ -944,15 +944,15 @@ I32_BINOP_I(xor, Xor)
#undef I32_BINOP_I #undef I32_BINOP_I
void LiftoffAssembler::emit_i32_clz(Register dst, Register src) { void LiftoffAssembler::emit_i32_clz(Register dst, Register src) {
TurboAssembler::Clz32(dst, src); MacroAssembler::Clz32(dst, src);
} }
void LiftoffAssembler::emit_i32_ctz(Register dst, Register src) { void LiftoffAssembler::emit_i32_ctz(Register dst, Register src) {
TurboAssembler::Ctz32(dst, src); MacroAssembler::Ctz32(dst, src);
} }
bool LiftoffAssembler::emit_i32_popcnt(Register dst, Register src) { bool LiftoffAssembler::emit_i32_popcnt(Register dst, Register src) {
TurboAssembler::Popcnt32(dst, src, kScratchReg); MacroAssembler::Popcnt32(dst, src, kScratchReg);
return true; return true;
} }
@ -980,48 +980,48 @@ I32_SHIFTOP_I(shr, srliw)
void LiftoffAssembler::emit_i64_mul(LiftoffRegister dst, LiftoffRegister lhs, void LiftoffAssembler::emit_i64_mul(LiftoffRegister dst, LiftoffRegister lhs,
LiftoffRegister rhs) { LiftoffRegister rhs) {
TurboAssembler::Mul64(dst.gp(), lhs.gp(), rhs.gp()); MacroAssembler::Mul64(dst.gp(), lhs.gp(), rhs.gp());
} }
bool LiftoffAssembler::emit_i64_divs(LiftoffRegister dst, LiftoffRegister lhs, bool LiftoffAssembler::emit_i64_divs(LiftoffRegister dst, LiftoffRegister lhs,
LiftoffRegister rhs, LiftoffRegister rhs,
Label* trap_div_by_zero, Label* trap_div_by_zero,
Label* trap_div_unrepresentable) { Label* trap_div_unrepresentable) {
TurboAssembler::Branch(trap_div_by_zero, eq, rhs.gp(), Operand(zero_reg)); MacroAssembler::Branch(trap_div_by_zero, eq, rhs.gp(), Operand(zero_reg));
// Check if lhs == MinInt64 and rhs == -1, since this case is unrepresentable. // Check if lhs == MinInt64 and rhs == -1, since this case is unrepresentable.
TurboAssembler::CompareI(kScratchReg, lhs.gp(), MacroAssembler::CompareI(kScratchReg, lhs.gp(),
Operand(std::numeric_limits<int64_t>::min()), ne); Operand(std::numeric_limits<int64_t>::min()), ne);
TurboAssembler::CompareI(kScratchReg2, rhs.gp(), Operand(-1), ne); MacroAssembler::CompareI(kScratchReg2, rhs.gp(), Operand(-1), ne);
add(kScratchReg, kScratchReg, kScratchReg2); add(kScratchReg, kScratchReg, kScratchReg2);
TurboAssembler::Branch(trap_div_unrepresentable, eq, kScratchReg, MacroAssembler::Branch(trap_div_unrepresentable, eq, kScratchReg,
Operand(zero_reg)); Operand(zero_reg));
TurboAssembler::Div64(dst.gp(), lhs.gp(), rhs.gp()); MacroAssembler::Div64(dst.gp(), lhs.gp(), rhs.gp());
return true; return true;
} }
bool LiftoffAssembler::emit_i64_divu(LiftoffRegister dst, LiftoffRegister lhs, bool LiftoffAssembler::emit_i64_divu(LiftoffRegister dst, LiftoffRegister lhs,
LiftoffRegister rhs, LiftoffRegister rhs,
Label* trap_div_by_zero) { Label* trap_div_by_zero) {
TurboAssembler::Branch(trap_div_by_zero, eq, rhs.gp(), Operand(zero_reg)); MacroAssembler::Branch(trap_div_by_zero, eq, rhs.gp(), Operand(zero_reg));
TurboAssembler::Divu64(dst.gp(), lhs.gp(), rhs.gp()); MacroAssembler::Divu64(dst.gp(), lhs.gp(), rhs.gp());
return true; return true;
} }
bool LiftoffAssembler::emit_i64_rems(LiftoffRegister dst, LiftoffRegister lhs, bool LiftoffAssembler::emit_i64_rems(LiftoffRegister dst, LiftoffRegister lhs,
LiftoffRegister rhs, LiftoffRegister rhs,
Label* trap_div_by_zero) { Label* trap_div_by_zero) {
TurboAssembler::Branch(trap_div_by_zero, eq, rhs.gp(), Operand(zero_reg)); MacroAssembler::Branch(trap_div_by_zero, eq, rhs.gp(), Operand(zero_reg));
TurboAssembler::Mod64(dst.gp(), lhs.gp(), rhs.gp()); MacroAssembler::Mod64(dst.gp(), lhs.gp(), rhs.gp());
return true; return true;
} }
bool LiftoffAssembler::emit_i64_remu(LiftoffRegister dst, LiftoffRegister lhs, bool LiftoffAssembler::emit_i64_remu(LiftoffRegister dst, LiftoffRegister lhs,
LiftoffRegister rhs, LiftoffRegister rhs,
Label* trap_div_by_zero) { Label* trap_div_by_zero) {
TurboAssembler::Branch(trap_div_by_zero, eq, rhs.gp(), Operand(zero_reg)); MacroAssembler::Branch(trap_div_by_zero, eq, rhs.gp(), Operand(zero_reg));
TurboAssembler::Modu64(dst.gp(), lhs.gp(), rhs.gp()); MacroAssembler::Modu64(dst.gp(), lhs.gp(), rhs.gp());
return true; return true;
} }
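The divs helpers above detect the single unrepresentable signed case (the minimum value divided by -1, whose true quotient overflows) without a branch per comparison: each CompareI with ne writes 0 or 1 into a scratch register, the two flags are added, and the sum is zero exactly when lhs is the minimum and rhs is -1. The same predicate in plain C++:

#include <cstdint>
#include <limits>

// Trap iff lhs == INT64_MIN && rhs == -1.
bool DivTrapsUnrepresentable(int64_t lhs, int64_t rhs) {
  int not_min = lhs != std::numeric_limits<int64_t>::min();  // 0 or 1.
  int not_neg1 = rhs != -1;                                  // 0 or 1.
  return not_min + not_neg1 == 0;  // Zero only when both flags are zero.
}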
@ -1098,7 +1098,7 @@ void LiftoffAssembler::emit_i64_shri(LiftoffRegister dst, LiftoffRegister src,
void LiftoffAssembler::emit_i64_addi(LiftoffRegister dst, LiftoffRegister lhs, void LiftoffAssembler::emit_i64_addi(LiftoffRegister dst, LiftoffRegister lhs,
int64_t imm) { int64_t imm) {
TurboAssembler::Add64(dst.gp(), lhs.gp(), Operand(imm)); MacroAssembler::Add64(dst.gp(), lhs.gp(), Operand(imm));
} }
void LiftoffAssembler::emit_u32_to_uintptr(Register dst, Register src) { void LiftoffAssembler::emit_u32_to_uintptr(Register dst, Register src) {
ZeroExtendWord(dst, src); ZeroExtendWord(dst, src);
@ -1125,7 +1125,7 @@ bool LiftoffAssembler::emit_type_conversion(WasmOpcode opcode,
// According to the WebAssembly spec, if an I64 value does not fit the // According to the WebAssembly spec, if an I64 value does not fit the
// range of I32, the value is undefined. Therefore, we use sign extension // range of I32, the value is undefined. Therefore, we use sign extension
// to implement I64-to-I32 truncation. // to implement I64-to-I32 truncation.
TurboAssembler::SignExtendWord(dst.gp(), src.gp()); MacroAssembler::SignExtendWord(dst.gp(), src.gp());
return true; return true;
case kExprI32SConvertF32: case kExprI32SConvertF32:
case kExprI32UConvertF32: case kExprI32UConvertF32:
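Per the comment above, an out-of-range i64-to-i32 truncation may produce any value, so sign-extending the low word is a valid implementation: for values that do fit in i32, it reproduces them exactly. Concretely:

#include <cstdint>

// SignExtendWord keeps bits 0..31 and copies bit 31 upward: 41 stays 41,
// while 0xFFFFFFFF (out of i32 range as an i64 input) becomes -1.
int64_t TruncateI64ToI32(int64_t v) {
  return static_cast<int32_t>(v);  // A 32-bit sign extension, as above.
}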
@ -1172,39 +1172,39 @@ bool LiftoffAssembler::emit_type_conversion(WasmOpcode opcode,
// Check whether to trap. // Check whether to trap.
if (trap != nullptr) { if (trap != nullptr) {
TurboAssembler::Branch(trap, eq, kScratchReg, Operand(zero_reg)); MacroAssembler::Branch(trap, eq, kScratchReg, Operand(zero_reg));
} }
return true; return true;
} }
case kExprI32ReinterpretF32: case kExprI32ReinterpretF32:
TurboAssembler::ExtractLowWordFromF64(dst.gp(), src.fp()); MacroAssembler::ExtractLowWordFromF64(dst.gp(), src.fp());
return true; return true;
case kExprI64SConvertI32: case kExprI64SConvertI32:
TurboAssembler::SignExtendWord(dst.gp(), src.gp()); MacroAssembler::SignExtendWord(dst.gp(), src.gp());
return true; return true;
case kExprI64UConvertI32: case kExprI64UConvertI32:
TurboAssembler::ZeroExtendWord(dst.gp(), src.gp()); MacroAssembler::ZeroExtendWord(dst.gp(), src.gp());
return true; return true;
case kExprI64ReinterpretF64: case kExprI64ReinterpretF64:
fmv_x_d(dst.gp(), src.fp()); fmv_x_d(dst.gp(), src.fp());
return true; return true;
case kExprF32SConvertI32: { case kExprF32SConvertI32: {
TurboAssembler::Cvt_s_w(dst.fp(), src.gp()); MacroAssembler::Cvt_s_w(dst.fp(), src.gp());
return true; return true;
} }
case kExprF32UConvertI32: case kExprF32UConvertI32:
TurboAssembler::Cvt_s_uw(dst.fp(), src.gp()); MacroAssembler::Cvt_s_uw(dst.fp(), src.gp());
return true; return true;
case kExprF32ReinterpretI32: case kExprF32ReinterpretI32:
fmv_w_x(dst.fp(), src.gp()); fmv_w_x(dst.fp(), src.gp());
return true; return true;
case kExprF64SConvertI32: { case kExprF64SConvertI32: {
TurboAssembler::Cvt_d_w(dst.fp(), src.gp()); MacroAssembler::Cvt_d_w(dst.fp(), src.gp());
return true; return true;
} }
case kExprF64UConvertI32: case kExprF64UConvertI32:
TurboAssembler::Cvt_d_uw(dst.fp(), src.gp()); MacroAssembler::Cvt_d_uw(dst.fp(), src.gp());
return true; return true;
case kExprF64ConvertF32: case kExprF64ConvertF32:
fcvt_d_s(dst.fp(), src.fp()); fcvt_d_s(dst.fp(), src.fp());
@ -1286,11 +1286,11 @@ void LiftoffAssembler::emit_i64_signextend_i32(LiftoffRegister dst,
} }
void LiftoffAssembler::emit_jump(Label* label) { void LiftoffAssembler::emit_jump(Label* label) {
TurboAssembler::Branch(label); MacroAssembler::Branch(label);
} }
void LiftoffAssembler::emit_jump(Register target) { void LiftoffAssembler::emit_jump(Register target) {
TurboAssembler::Jump(target); MacroAssembler::Jump(target);
} }
void LiftoffAssembler::emit_cond_jump(Condition cond, Label* label, void LiftoffAssembler::emit_cond_jump(Condition cond, Label* label,
@ -1299,44 +1299,44 @@ void LiftoffAssembler::emit_cond_jump(Condition cond, Label* label,
const FreezeCacheState& frozen) { const FreezeCacheState& frozen) {
if (rhs == no_reg) { if (rhs == no_reg) {
DCHECK(kind == kI32 || kind == kI64); DCHECK(kind == kI32 || kind == kI64);
TurboAssembler::Branch(label, cond, lhs, Operand(zero_reg)); MacroAssembler::Branch(label, cond, lhs, Operand(zero_reg));
} else { } else {
DCHECK((kind == kI32 || kind == kI64) || DCHECK((kind == kI32 || kind == kI64) ||
(is_reference(kind) && (cond == kEqual || cond == kNotEqual))); (is_reference(kind) && (cond == kEqual || cond == kNotEqual)));
TurboAssembler::Branch(label, cond, lhs, Operand(rhs)); MacroAssembler::Branch(label, cond, lhs, Operand(rhs));
} }
} }
void LiftoffAssembler::emit_i32_cond_jumpi(Condition cond, Label* label, void LiftoffAssembler::emit_i32_cond_jumpi(Condition cond, Label* label,
Register lhs, int32_t imm, Register lhs, int32_t imm,
const FreezeCacheState& frozen) { const FreezeCacheState& frozen) {
TurboAssembler::Branch(label, cond, lhs, Operand(imm)); MacroAssembler::Branch(label, cond, lhs, Operand(imm));
} }
void LiftoffAssembler::emit_i32_subi_jump_negative( void LiftoffAssembler::emit_i32_subi_jump_negative(
Register value, int subtrahend, Label* result_negative, Register value, int subtrahend, Label* result_negative,
const FreezeCacheState& frozen) { const FreezeCacheState& frozen) {
Sub64(value, value, Operand(subtrahend)); Sub64(value, value, Operand(subtrahend));
TurboAssembler::Branch(result_negative, lt, value, Operand(zero_reg)); MacroAssembler::Branch(result_negative, lt, value, Operand(zero_reg));
} }
void LiftoffAssembler::emit_i32_eqz(Register dst, Register src) { void LiftoffAssembler::emit_i32_eqz(Register dst, Register src) {
TurboAssembler::Sltu(dst, src, 1); MacroAssembler::Sltu(dst, src, 1);
} }
void LiftoffAssembler::emit_i32_set_cond(Condition cond, Register dst, void LiftoffAssembler::emit_i32_set_cond(Condition cond, Register dst,
Register lhs, Register rhs) { Register lhs, Register rhs) {
TurboAssembler::CompareI(dst, lhs, Operand(rhs), cond); MacroAssembler::CompareI(dst, lhs, Operand(rhs), cond);
} }
void LiftoffAssembler::emit_i64_eqz(Register dst, LiftoffRegister src) { void LiftoffAssembler::emit_i64_eqz(Register dst, LiftoffRegister src) {
TurboAssembler::Sltu(dst, src.gp(), 1); MacroAssembler::Sltu(dst, src.gp(), 1);
} }
void LiftoffAssembler::emit_i64_set_cond(Condition cond, Register dst, void LiftoffAssembler::emit_i64_set_cond(Condition cond, Register dst,
LiftoffRegister lhs, LiftoffRegister lhs,
LiftoffRegister rhs) { LiftoffRegister rhs) {
TurboAssembler::CompareI(dst, lhs.gp(), Operand(rhs.gp()), cond); MacroAssembler::CompareI(dst, lhs.gp(), Operand(rhs.gp()), cond);
} }
void LiftoffAssembler::IncrementSmi(LiftoffRegister dst, int offset) { void LiftoffAssembler::IncrementSmi(LiftoffRegister dst, int offset) {
@@ -182,7 +182,7 @@ void LiftoffAssembler::PatchPrepareStackFrame(
   bind(&continuation);

   // Now allocate the stack space. Note that this might do more than just
-  // decrementing the SP; consult {TurboAssembler::AllocateStackSpace}.
+  // decrementing the SP; consult {MacroAssembler::AllocateStackSpace}.
   lay(sp, MemOperand(sp, -frame_size));

   // Jump back to the start of the function, from {pc_offset()} to
@@ -2966,7 +2966,7 @@ void LiftoffAssembler::CallTrapCallbackForTesting() {

 void LiftoffAssembler::AssertUnreachable(AbortReason reason) {
   // Asserts unreachable within the wasm code.
-  TurboAssembler::AssertUnreachable(reason);
+  MacroAssembler::AssertUnreachable(reason);
 }

 void LiftoffAssembler::PushRegisters(LiftoffRegList regs) {
@@ -3120,7 +3120,7 @@ void LiftoffAssembler::CallRuntimeStub(WasmCode::RuntimeStubId sid) {

 void LiftoffAssembler::AllocateStackSlot(Register addr, uint32_t size) {
   lay(sp, MemOperand(sp, -size));
-  TurboAssembler::Move(addr, sp);
+  MacroAssembler::Move(addr, sp);
 }

 void LiftoffAssembler::DeallocateStackSlot(uint32_t size) {
@@ -66,7 +66,7 @@ inline Operand GetMemOp(LiftoffAssembler* assm, Register addr,
   }
   // Offset immediate does not fit in 31 bits.
   Register scratch = kScratchRegister;
-  assm->TurboAssembler::Move(scratch, offset_imm);
+  assm->MacroAssembler::Move(scratch, offset_imm);
   if (offset_reg != no_reg) assm->addq(scratch, offset_reg);
   return Operand(addr, scratch, scale_factor, 0);
 }
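The hunk above is the fallback path of Liftoff's x64 GetMemOp: a constant offset that does not fit the operand's displacement field is first materialized in the scratch register. A minimal sketch of the fits-in-displacement predicate, under the assumption (visible in the comment above) that offsets are kept non-negative and the x86-64 displacement is a signed 32-bit field:

#include <cstdint>
#include <cstdio>

// An x86-64 memory operand carries a signed 32-bit displacement, so a
// non-negative offset is directly encodable only while it fits in 31 bits.
constexpr bool FitsInDisplacement(uint64_t offset) {
  return offset < (uint64_t{1} << 31);
}

int main() {
  printf("%d\n", FitsInDisplacement(0x101001));           // 1: encode inline
  printf("%d\n", FitsInDisplacement(uint64_t{1} << 31));  // 0: scratch path
}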
@@ -270,7 +270,7 @@ void LiftoffAssembler::PatchPrepareStackFrame(
   bind(&continuation);

   // Now allocate the stack space. Note that this might do more than just
-  // decrementing the SP; consult {TurboAssembler::AllocateStackSpace}.
+  // decrementing the SP; consult {MacroAssembler::AllocateStackSpace}.
   AllocateStackSpace(frame_size);

   // Jump back to the start of the function, from {pc_offset()} to
@@ -309,16 +309,16 @@ void LiftoffAssembler::LoadConstant(LiftoffRegister reg, WasmValue value,
       break;
     case kI64:
       if (RelocInfo::IsNoInfo(rmode)) {
-        TurboAssembler::Move(reg.gp(), value.to_i64());
+        MacroAssembler::Move(reg.gp(), value.to_i64());
       } else {
         movq(reg.gp(), Immediate64(value.to_i64(), rmode));
       }
       break;
     case kF32:
-      TurboAssembler::Move(reg.fp(), value.to_f32_boxed().get_bits());
+      MacroAssembler::Move(reg.fp(), value.to_f32_boxed().get_bits());
       break;
     case kF64:
-      TurboAssembler::Move(reg.fp(), value.to_f64_boxed().get_bits());
+      MacroAssembler::Move(reg.fp(), value.to_f64_boxed().get_bits());
       break;
     default:
       UNREACHABLE();
@@ -1339,7 +1339,7 @@ void LiftoffAssembler::emit_i64_add(LiftoffRegister dst, LiftoffRegister lhs,
 void LiftoffAssembler::emit_i64_addi(LiftoffRegister dst, LiftoffRegister lhs,
                                      int64_t imm) {
   if (!is_int32(imm)) {
-    TurboAssembler::Move(kScratchRegister, imm);
+    MacroAssembler::Move(kScratchRegister, imm);
     if (lhs.gp() == dst.gp()) {
       addq(dst.gp(), kScratchRegister);
     } else {
@@ -1640,10 +1640,10 @@ void LiftoffAssembler::emit_f32_copysign(DoubleRegister dst, DoubleRegister lhs,
 void LiftoffAssembler::emit_f32_abs(DoubleRegister dst, DoubleRegister src) {
   static constexpr uint32_t kSignBit = uint32_t{1} << 31;
   if (dst == src) {
-    TurboAssembler::Move(kScratchDoubleReg, kSignBit - 1);
+    MacroAssembler::Move(kScratchDoubleReg, kSignBit - 1);
     Andps(dst, kScratchDoubleReg);
   } else {
-    TurboAssembler::Move(dst, kSignBit - 1);
+    MacroAssembler::Move(dst, kSignBit - 1);
     Andps(dst, src);
   }
 }
@@ -1651,10 +1651,10 @@ void LiftoffAssembler::emit_f32_abs(DoubleRegister dst, DoubleRegister src) {
 void LiftoffAssembler::emit_f32_neg(DoubleRegister dst, DoubleRegister src) {
   static constexpr uint32_t kSignBit = uint32_t{1} << 31;
   if (dst == src) {
-    TurboAssembler::Move(kScratchDoubleReg, kSignBit);
+    MacroAssembler::Move(kScratchDoubleReg, kSignBit);
     Xorps(dst, kScratchDoubleReg);
   } else {
-    TurboAssembler::Move(dst, kSignBit);
+    MacroAssembler::Move(dst, kSignBit);
     Xorps(dst, src);
   }
 }
@@ -1773,10 +1773,10 @@ void LiftoffAssembler::emit_f64_max(DoubleRegister dst, DoubleRegister lhs,
 void LiftoffAssembler::emit_f64_abs(DoubleRegister dst, DoubleRegister src) {
   static constexpr uint64_t kSignBit = uint64_t{1} << 63;
   if (dst == src) {
-    TurboAssembler::Move(kScratchDoubleReg, kSignBit - 1);
+    MacroAssembler::Move(kScratchDoubleReg, kSignBit - 1);
     Andpd(dst, kScratchDoubleReg);
   } else {
-    TurboAssembler::Move(dst, kSignBit - 1);
+    MacroAssembler::Move(dst, kSignBit - 1);
     Andpd(dst, src);
   }
 }
@@ -1784,10 +1784,10 @@ void LiftoffAssembler::emit_f64_abs(DoubleRegister dst, DoubleRegister src) {
 void LiftoffAssembler::emit_f64_neg(DoubleRegister dst, DoubleRegister src) {
   static constexpr uint64_t kSignBit = uint64_t{1} << 63;
   if (dst == src) {
-    TurboAssembler::Move(kScratchDoubleReg, kSignBit);
+    MacroAssembler::Move(kScratchDoubleReg, kSignBit);
     Xorpd(dst, kScratchDoubleReg);
   } else {
-    TurboAssembler::Move(dst, kSignBit);
+    MacroAssembler::Move(dst, kSignBit);
     Xorpd(dst, src);
   }
 }
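The abs/neg hunks above all rely on the same IEEE-754 sign-bit trick: Andps/Andpd with kSignBit - 1 (i.e. all bits except the sign) clears the sign, and Xorps/Xorpd with kSignBit flips it. A scalar C++ model of what those SSE instructions compute, using std::bit_cast (C++20):

#include <bit>
#include <cstdint>
#include <cstdio>

// abs: AND with ~sign-bit mask clears the sign; neg: XOR with the sign bit
// flips it. Same bit patterns the Andps/Xorps emissions above operate on.
float F32Abs(float x) {
  constexpr uint32_t kSignBit = uint32_t{1} << 31;
  return std::bit_cast<float>(std::bit_cast<uint32_t>(x) & (kSignBit - 1));
}
float F32Neg(float x) {
  constexpr uint32_t kSignBit = uint32_t{1} << 31;
  return std::bit_cast<float>(std::bit_cast<uint32_t>(x) ^ kSignBit);
}

int main() {
  printf("%f %f\n", F32Abs(-1.5f), F32Neg(1.5f));  // 1.500000 -1.500000
}

Unlike arithmetic negation, the XOR form also handles NaNs and signed zero uniformly, which is why the emitter prefers it.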
@@ -2234,7 +2234,8 @@ void LiftoffAssembler::emit_i64_set_cond(Condition cond, Register dst,
 }

 namespace liftoff {
-template <void (SharedTurboAssembler::*cmp_op)(DoubleRegister, DoubleRegister)>
+template <void (SharedMacroAssemblerBase::*cmp_op)(DoubleRegister,
+                                                   DoubleRegister)>
 void EmitFloatSetCond(LiftoffAssembler* assm, Condition cond, Register dst,
                       DoubleRegister lhs, DoubleRegister rhs) {
   Label cont;
@@ -2261,14 +2262,14 @@ void EmitFloatSetCond(LiftoffAssembler* assm, Condition cond, Register dst,
 void LiftoffAssembler::emit_f32_set_cond(Condition cond, Register dst,
                                          DoubleRegister lhs,
                                          DoubleRegister rhs) {
-  liftoff::EmitFloatSetCond<&TurboAssembler::Ucomiss>(this, cond, dst, lhs,
+  liftoff::EmitFloatSetCond<&MacroAssembler::Ucomiss>(this, cond, dst, lhs,
                                                       rhs);
 }

 void LiftoffAssembler::emit_f64_set_cond(Condition cond, Register dst,
                                          DoubleRegister lhs,
                                          DoubleRegister rhs) {
-  liftoff::EmitFloatSetCond<&TurboAssembler::Ucomisd>(this, cond, dst, lhs,
+  liftoff::EmitFloatSetCond<&MacroAssembler::Ucomisd>(this, cond, dst, lhs,
                                                       rhs);
 }
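EmitFloatSetCond is parameterized over a pointer-to-member function of the shared assembler base, so one helper serves both the f32 (Ucomiss) and f64 (Ucomisd) comparisons. A minimal, self-contained model of the idiom; the names here are illustrative stand-ins, not the V8 API:

#include <cstdio>

struct Assembler {
  void Ucomiss(double a, double b) { printf("f32 cmp %f %f\n", a, b); }
  void Ucomisd(double a, double b) { printf("f64 cmp %f %f\n", a, b); }
};

// The comparison instruction is chosen at compile time via the template
// parameter; dispatch through the member pointer has no runtime branching.
template <void (Assembler::*cmp_op)(double, double)>
void EmitFloatSetCond(Assembler* assm, double lhs, double rhs) {
  (assm->*cmp_op)(lhs, rhs);
}

int main() {
  Assembler a;
  EmitFloatSetCond<&Assembler::Ucomiss>(&a, 1.0, 2.0);
  EmitFloatSetCond<&Assembler::Ucomisd>(&a, 1.0, 2.0);
}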
@@ -2394,7 +2395,7 @@ inline void EmitAnyTrue(LiftoffAssembler* assm, LiftoffRegister dst,
   assm->setcc(not_equal, dst.gp());
 }

-template <void (SharedTurboAssembler::*pcmp)(XMMRegister, XMMRegister)>
+template <void (SharedMacroAssemblerBase::*pcmp)(XMMRegister, XMMRegister)>
 inline void EmitAllTrue(LiftoffAssembler* assm, LiftoffRegister dst,
                         LiftoffRegister src,
                         base::Optional<CpuFeature> feature = base::nullopt) {
@@ -2501,7 +2502,7 @@ void LiftoffAssembler::emit_i8x16_shuffle(LiftoffRegister dst,
     uint32_t imms[4];
     // Shuffles that use just 1 operand are called swizzles, rhs can be ignored.
     wasm::SimdShuffle::Pack16Lanes(imms, shuffle);
-    TurboAssembler::Move(kScratchDoubleReg, make_uint64(imms[3], imms[2]),
+    MacroAssembler::Move(kScratchDoubleReg, make_uint64(imms[3], imms[2]),
                          make_uint64(imms[1], imms[0]));
     Pshufb(dst.fp(), lhs.fp(), kScratchDoubleReg);
     return;
@@ -2514,7 +2515,7 @@ void LiftoffAssembler::emit_i8x16_shuffle(LiftoffRegister dst,
     mask1[j] <<= 8;
     mask1[j] |= lane < kSimd128Size ? lane : 0x80;
   }
-  TurboAssembler::Move(liftoff::kScratchDoubleReg2, mask1[1], mask1[0]);
+  MacroAssembler::Move(liftoff::kScratchDoubleReg2, mask1[1], mask1[0]);
   Pshufb(kScratchDoubleReg, lhs.fp(), liftoff::kScratchDoubleReg2);

   uint64_t mask2[2] = {};
@@ -2524,7 +2525,7 @@ void LiftoffAssembler::emit_i8x16_shuffle(LiftoffRegister dst,
     mask2[j] <<= 8;
     mask2[j] |= lane >= kSimd128Size ? (lane & 0x0F) : 0x80;
   }
-  TurboAssembler::Move(liftoff::kScratchDoubleReg2, mask2[1], mask2[0]);
+  MacroAssembler::Move(liftoff::kScratchDoubleReg2, mask2[1], mask2[0]);
   Pshufb(dst.fp(), rhs.fp(), liftoff::kScratchDoubleReg2);

   Por(dst.fp(), kScratchDoubleReg);
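The general two-input i8x16 shuffle above builds two PSHUFB control masks, one per input: lanes that belong to the other input are marked 0x80, which PSHUFB turns into zero, and the two half-results are then OR-merged. A scalar emulation of that scheme, as a sketch of the lane semantics rather than the emitted SIMD code:

#include <cstdint>

// PSHUFB byte-select semantics: a control byte with the top bit set yields 0,
// otherwise its low 4 bits index the source vector.
void Pshufb(uint8_t dst[16], const uint8_t src[16], const uint8_t ctl[16]) {
  for (int i = 0; i < 16; i++) {
    dst[i] = (ctl[i] & 0x80) ? 0 : src[ctl[i] & 0x0F];
  }
}

// Two-input shuffle: mask1 selects lanes < 16 from lhs, mask2 selects lanes
// >= 16 from rhs, and the OR merges the two zero-padded halves.
void Shuffle(uint8_t dst[16], const uint8_t lhs[16], const uint8_t rhs[16],
             const uint8_t shuffle[16]) {
  uint8_t m1[16], m2[16], a[16], b[16];
  for (int i = 0; i < 16; i++) {
    m1[i] = shuffle[i] < 16 ? shuffle[i] : 0x80;
    m2[i] = shuffle[i] >= 16 ? (shuffle[i] & 0x0F) : 0x80;
  }
  Pshufb(a, lhs, m1);
  Pshufb(b, rhs, m2);
  for (int i = 0; i < 16; i++) dst[i] = a[i] | b[i];
}

int main() {
  uint8_t lhs[16], rhs[16], dst[16], ctl[16];
  for (int i = 0; i < 16; i++) {
    lhs[i] = i;
    rhs[i] = 16 + i;
    ctl[i] = 31 - i;  // all lanes >= 16: every byte comes from rhs, reversed
  }
  Shuffle(dst, lhs, rhs, ctl);  // dst = {31, 30, ..., 16}
}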
@@ -2901,7 +2902,7 @@ void LiftoffAssembler::emit_s128_const(LiftoffRegister dst,
                                        const uint8_t imms[16]) {
   uint64_t vals[2];
   memcpy(vals, imms, sizeof(vals));
-  TurboAssembler::Move(dst.fp(), vals[1], vals[0]);
+  MacroAssembler::Move(dst.fp(), vals[1], vals[0]);
 }

 void LiftoffAssembler::emit_s128_not(LiftoffRegister dst, LiftoffRegister src) {
@@ -2959,7 +2960,7 @@ void LiftoffAssembler::emit_v128_anytrue(LiftoffRegister dst,

 void LiftoffAssembler::emit_i8x16_alltrue(LiftoffRegister dst,
                                           LiftoffRegister src) {
-  liftoff::EmitAllTrue<&TurboAssembler::Pcmpeqb>(this, dst, src);
+  liftoff::EmitAllTrue<&MacroAssembler::Pcmpeqb>(this, dst, src);
 }

 void LiftoffAssembler::emit_i8x16_bitmask(LiftoffRegister dst,
@@ -3084,7 +3085,7 @@ void LiftoffAssembler::emit_i16x8_neg(LiftoffRegister dst,

 void LiftoffAssembler::emit_i16x8_alltrue(LiftoffRegister dst,
                                           LiftoffRegister src) {
-  liftoff::EmitAllTrue<&TurboAssembler::Pcmpeqw>(this, dst, src);
+  liftoff::EmitAllTrue<&MacroAssembler::Pcmpeqw>(this, dst, src);
 }

 void LiftoffAssembler::emit_i16x8_bitmask(LiftoffRegister dst,
@@ -3294,7 +3295,7 @@ void LiftoffAssembler::emit_i32x4_neg(LiftoffRegister dst,

 void LiftoffAssembler::emit_i32x4_alltrue(LiftoffRegister dst,
                                           LiftoffRegister src) {
-  liftoff::EmitAllTrue<&TurboAssembler::Pcmpeqd>(this, dst, src);
+  liftoff::EmitAllTrue<&MacroAssembler::Pcmpeqd>(this, dst, src);
 }

 void LiftoffAssembler::emit_i32x4_bitmask(LiftoffRegister dst,
@@ -3462,7 +3463,7 @@ void LiftoffAssembler::emit_i64x2_neg(LiftoffRegister dst,

 void LiftoffAssembler::emit_i64x2_alltrue(LiftoffRegister dst,
                                           LiftoffRegister src) {
-  liftoff::EmitAllTrue<&TurboAssembler::Pcmpeqq>(this, dst, src, SSE4_1);
+  liftoff::EmitAllTrue<&MacroAssembler::Pcmpeqq>(this, dst, src, SSE4_1);
 }

 void LiftoffAssembler::emit_i64x2_shl(LiftoffRegister dst, LiftoffRegister lhs,
@@ -4161,7 +4162,7 @@ void LiftoffAssembler::CallTrapCallbackForTesting() {
 }

 void LiftoffAssembler::AssertUnreachable(AbortReason reason) {
-  TurboAssembler::AssertUnreachable(reason);
+  MacroAssembler::AssertUnreachable(reason);
 }

 void LiftoffAssembler::PushRegisters(LiftoffRegList regs) {
@@ -203,7 +203,7 @@ bool JumpTableAssembler::EmitJumpSlot(Address target) {
   ptrdiff_t jump_distance = reinterpret_cast<byte*>(target) - jump_pc;
   DCHECK_EQ(0, jump_distance % kInstrSize);
   int64_t instr_offset = jump_distance / kInstrSize;
-  if (!TurboAssembler::IsNearCallOffset(instr_offset)) {
+  if (!MacroAssembler::IsNearCallOffset(instr_offset)) {
     return false;
   }
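EmitJumpSlot only patches in a direct branch when the target is within near-call range; otherwise it reports failure and a far-jump slot is used. Assuming the arm64 B/BL encoding (a signed 26-bit immediate counted in 4-byte instructions, so roughly +/-128 MB of reach), the range predicate amounts to:

#include <cstdint>

// Sketch of the check, under the arm64 B/BL assumption above; the real
// predicate is MacroAssembler::IsNearCallOffset.
constexpr bool IsNearCallOffset(int64_t instr_offset) {
  return instr_offset >= -(int64_t{1} << 25) &&
         instr_offset < (int64_t{1} << 25);
}

// 128 MB / 4 bytes per instruction == 2^25 instruction offsets.
static_assert(IsNearCallOffset((int64_t{128} * 1024 * 1024 / 4) - 1));
static_assert(!IsNearCallOffset(int64_t{128} * 1024 * 1024 / 4));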
@@ -57,7 +57,7 @@ namespace wasm {
 // execute the old code afterwards, which is no problem, since that code remains
 // available until it is garbage collected. Garbage collection itself is a
 // synchronization barrier though.
-class V8_EXPORT_PRIVATE JumpTableAssembler : public TurboAssembler {
+class V8_EXPORT_PRIVATE JumpTableAssembler : public MacroAssembler {
  public:
   // Translate an offset into the continuous jump table to a jump table index.
   static uint32_t SlotOffsetToIndex(uint32_t slot_offset) {
@@ -175,7 +175,7 @@ class V8_EXPORT_PRIVATE JumpTableAssembler : public TurboAssembler {
  private:
   // Instantiate a {JumpTableAssembler} for patching.
   explicit JumpTableAssembler(Address slot_addr, int size = 256)
-      : TurboAssembler(nullptr, JumpTableAssemblerOptions(),
+      : MacroAssembler(nullptr, JumpTableAssemblerOptions(),
                        CodeObjectRequired::kNo,
                        ExternalAssemblerBuffer(
                            reinterpret_cast<uint8_t*>(slot_addr), size)) {}
@@ -1147,7 +1147,7 @@ class CodeGeneratorTester {
         Builtin::kNoBuiltinId, kMaxUnoptimizedFrameHeight,
         kMaxPushedArgumentCount);

-    generator_->tasm()->CodeEntry();
+    generator_->masm()->CodeEntry();

     // Force a frame to be created.
     generator_->frame_access_state()->MarkHasFrame(true);
@@ -1239,10 +1239,10 @@ class CodeGeneratorTester {
   void CheckAssembleMove(InstructionOperand* source,
                          InstructionOperand* destination) {
-    int start = generator_->tasm()->pc_offset();
+    int start = generator_->masm()->pc_offset();
     generator_->AssembleMove(MaybeTranslateSlot(source),
                              MaybeTranslateSlot(destination));
-    CHECK(generator_->tasm()->pc_offset() > start);
+    CHECK(generator_->masm()->pc_offset() > start);
   }

   void CheckAssembleMoves(ParallelMove* moves) {
@@ -1255,15 +1255,15 @@ class CodeGeneratorTester {
   void CheckAssembleSwap(InstructionOperand* source,
                          InstructionOperand* destination) {
-    int start = generator_->tasm()->pc_offset();
+    int start = generator_->masm()->pc_offset();
     generator_->AssembleSwap(MaybeTranslateSlot(source),
                              MaybeTranslateSlot(destination));
-    CHECK(generator_->tasm()->pc_offset() > start);
+    CHECK(generator_->masm()->pc_offset() > start);
   }

   Handle<Code> Finalize() {
     generator_->FinishCode();
-    generator_->safepoints()->Emit(generator_->tasm(),
+    generator_->safepoints()->Emit(generator_->masm(),
                                    frame_.GetTotalFrameSlotCount());
     generator_->MaybeEmitOutOfLineConstantPool();
@@ -12408,7 +12408,7 @@ static void PushPopSimpleHelper(int reg_count, int reg_size,
     case PushPopByFour:
       // Push high-numbered registers first (to the highest addresses).
       for (i = reg_count; i >= 4; i -= 4) {
-        __ Push<TurboAssembler::kDontStoreLR>(r[i - 1], r[i - 2], r[i - 3],
+        __ Push<MacroAssembler::kDontStoreLR>(r[i - 1], r[i - 2], r[i - 3],
                                               r[i - 4]);
       }
       // Finish off the leftovers.
@@ -12433,7 +12433,7 @@ static void PushPopSimpleHelper(int reg_count, int reg_size,
     case PushPopByFour:
       // Pop low-numbered registers first (from the lowest addresses).
       for (i = 0; i <= (reg_count-4); i += 4) {
-        __ Pop<TurboAssembler::kDontLoadLR>(r[i], r[i + 1], r[i + 2],
+        __ Pop<MacroAssembler::kDontLoadLR>(r[i], r[i + 1], r[i + 2],
                                             r[i + 3]);
       }
       // Finish off the leftovers.
@@ -12975,7 +12975,7 @@ TEST(copy_double_words_downwards_even) {
   __ SlotAddress(x5, 12);
   __ SlotAddress(x6, 11);
   __ Mov(x7, 12);
-  __ CopyDoubleWords(x5, x6, x7, TurboAssembler::kSrcLessThanDst);
+  __ CopyDoubleWords(x5, x6, x7, MacroAssembler::kSrcLessThanDst);

   __ Pop(xzr, x4, x5, x6);
   __ Pop(x7, x8, x9, x10);
@@ -13029,7 +13029,7 @@ TEST(copy_double_words_downwards_odd) {
   __ SlotAddress(x5, 13);
   __ SlotAddress(x6, 12);
   __ Mov(x7, 13);
-  __ CopyDoubleWords(x5, x6, x7, TurboAssembler::kSrcLessThanDst);
+  __ CopyDoubleWords(x5, x6, x7, MacroAssembler::kSrcLessThanDst);

   __ Pop(xzr, x4);
   __ Pop(x5, x6, x7, x8);
@@ -13085,13 +13085,13 @@ TEST(copy_noop) {
   __ SlotAddress(x5, 3);
   __ SlotAddress(x6, 2);
   __ Mov(x7, 0);
-  __ CopyDoubleWords(x5, x6, x7, TurboAssembler::kSrcLessThanDst);
+  __ CopyDoubleWords(x5, x6, x7, MacroAssembler::kSrcLessThanDst);

   // dst < src, count == 0
   __ SlotAddress(x5, 2);
   __ SlotAddress(x6, 3);
   __ Mov(x7, 0);
-  __ CopyDoubleWords(x5, x6, x7, TurboAssembler::kDstLessThanSrc);
+  __ CopyDoubleWords(x5, x6, x7, MacroAssembler::kDstLessThanSrc);

   __ Pop(x1, x2, x3, x4);
   __ Pop(x5, x6, x7, x8);
@@ -6195,11 +6195,11 @@ TEST(Trampoline_with_massive_unbound_labels) {
   MacroAssembler assm(isolate, v8::internal::CodeObjectRequired::kYes);
   const int kNumSlots =
-      TurboAssembler::kMaxBranchOffset / TurboAssembler::kTrampolineSlotsSize;
+      MacroAssembler::kMaxBranchOffset / MacroAssembler::kTrampolineSlotsSize;
   Label labels[kNumSlots];

   {
-    TurboAssembler::BlockTrampolinePoolScope block_trampoline_pool(&assm);
+    MacroAssembler::BlockTrampolinePoolScope block_trampoline_pool(&assm);
     for (int i = 0; i < kNumSlots; i++) {
       __ Branch(&labels[i]);
     }
@@ -6218,12 +6218,12 @@ TEST(Call_with_trampoline) {

   int next_buffer_check_ = v8_flags.force_long_branches
                                ? kMaxInt
-                               : TurboAssembler::kMaxBranchOffset -
-                                     TurboAssembler::kTrampolineSlotsSize * 16;
+                               : MacroAssembler::kMaxBranchOffset -
+                                     MacroAssembler::kTrampolineSlotsSize * 16;

   Label done;
   __ Branch(&done);
-  next_buffer_check_ -= TurboAssembler::kTrampolineSlotsSize;
+  next_buffer_check_ -= MacroAssembler::kTrampolineSlotsSize;

   int num_nops = (next_buffer_check_ - __ pc_offset()) / kInstrSize - 1;
   for (int i = 0; i < num_nops; i++) {
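The kNumSlots expression above sizes the test to the worst case: with conditional branches that reach at most kMaxBranchOffset bytes and trampoline slots of kTrampolineSlotsSize bytes each, at most kMaxBranchOffset / kTrampolineSlotsSize unbound forward branches can share one trampoline pool before some branch would fall out of range. Illustrative arithmetic only; the concrete constants below are hypothetical, the real ones are per-architecture MacroAssembler members:

#include <cstdio>

int main() {
  const int kMaxBranchOffset = 1 << 17;  // hypothetical: +/-128 KB reach
  const int kTrampolineSlotsSize = 8;    // hypothetical: two instructions
  printf("slots per pool: %d\n", kMaxBranchOffset / kTrampolineSlotsSize);
}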
@@ -635,7 +635,7 @@ v8_source_set("unittests_sources") {
   if (v8_current_cpu == "arm") {
     sources += [
       "assembler/disasm-arm-unittest.cc",
-      "assembler/turbo-assembler-arm-unittest.cc",
+      "assembler/macro-assembler-arm-unittest.cc",
     ]
     if (v8_enable_turbofan) {
       sources += [ "compiler/arm/instruction-selector-arm-unittest.cc" ]
@@ -644,7 +644,6 @@ v8_source_set("unittests_sources") {
     sources += [
       "assembler/disasm-arm64-unittest.cc",
       "assembler/macro-assembler-arm64-unittest.cc",
-      "assembler/turbo-assembler-arm64-unittest.cc",
       "codegen/pointer-auth-arm64-unittest.cc",
     ]
     if (v8_enable_turbofan) {
@@ -656,7 +655,7 @@ v8_source_set("unittests_sources") {
   } else if (v8_current_cpu == "x86") {
     sources += [
       "assembler/disasm-ia32-unittest.cc",
-      "assembler/turbo-assembler-ia32-unittest.cc",
+      "assembler/macro-assembler-ia32-unittest.cc",
     ]
     if (v8_enable_turbofan) {
       sources += [ "compiler/ia32/instruction-selector-ia32-unittest.cc" ]
@@ -664,7 +663,7 @@ v8_source_set("unittests_sources") {
   } else if (v8_current_cpu == "mips64" || v8_current_cpu == "mips64el") {
     sources += [
       "assembler/disasm-mips64-unittest.cc",
-      "assembler/turbo-assembler-mips64-unittest.cc",
+      "assembler/macro-assembler-mips64-unittest.cc",
     ]
     if (v8_enable_turbofan) {
       sources += [ "compiler/mips64/instruction-selector-mips64-unittest.cc" ]
@@ -672,7 +671,7 @@ v8_source_set("unittests_sources") {
   } else if (v8_current_cpu == "riscv64") {
     sources += [
       "assembler/disasm-riscv-unittest.cc",
-      "assembler/turbo-assembler-riscv-unittest.cc",
+      "assembler/macro-assembler-riscv-unittest.cc",
     ]
     if (v8_enable_turbofan) {
       sources += [ "compiler/riscv64/instruction-selector-riscv64-unittest.cc" ]
@@ -680,7 +679,7 @@ v8_source_set("unittests_sources") {
   } else if (v8_current_cpu == "riscv32") {
     sources += [
       "assembler/disasm-riscv-unittest.cc",
-      "assembler/turbo-assembler-riscv-unittest.cc",
+      "assembler/macro-assembler-riscv-unittest.cc",
     ]
     if (v8_enable_turbofan) {
       sources += [ "compiler/riscv32/instruction-selector-riscv32-unittest.cc" ]
@@ -690,7 +689,6 @@ v8_source_set("unittests_sources") {
       "assembler/assembler-x64-unittest.cc",
       "assembler/disasm-x64-unittest.cc",
       "assembler/macro-assembler-x64-unittest.cc",
-      "assembler/turbo-assembler-x64-unittest.cc",
     ]
     if (v8_enable_turbofan) {
       sources += [ "compiler/x64/instruction-selector-x64-unittest.cc" ]
@@ -701,7 +699,7 @@ v8_source_set("unittests_sources") {
   } else if (v8_current_cpu == "ppc" || v8_current_cpu == "ppc64") {
     sources += [
       "assembler/disasm-ppc-unittest.cc",
-      "assembler/turbo-assembler-ppc-unittest.cc",
+      "assembler/macro-assembler-ppc-unittest.cc",
     ]
     if (v8_enable_turbofan) {
       sources += [ "compiler/ppc/instruction-selector-ppc-unittest.cc" ]
@@ -709,7 +707,7 @@ v8_source_set("unittests_sources") {
   } else if (v8_current_cpu == "s390" || v8_current_cpu == "s390x") {
     sources += [
       "assembler/disasm-s390-unittest.cc",
-      "assembler/turbo-assembler-s390-unittest.cc",
+      "assembler/macro-assembler-s390-unittest.cc",
     ]
     if (v8_enable_turbofan) {
       sources += [ "compiler/s390/instruction-selector-s390-unittest.cc" ]
@@ -717,7 +715,7 @@ v8_source_set("unittests_sources") {
   } else if (v8_current_cpu == "loong64") {
     sources += [
       "assembler/disasm-loong64-unittest.cc",
-      "assembler/turbo-assembler-loong64-unittest.cc",
+      "assembler/macro-assembler-loong64-unittest.cc",
    ]
    if (v8_enable_turbofan) {
      sources += [ "compiler/loong64/instruction-selector-loong64-unittest.cc" ]
@@ -13,7 +13,7 @@
 namespace v8 {
 namespace internal {

-#define __ tasm.
+#define __ masm.

 // If we are running on android and the output is not redirected (i.e. ends up
 // in the android log) then we cannot find the error message in the output. This
@@ -28,11 +28,11 @@ namespace internal {
 // a buffer and executing them. These tests do not initialize the
 // V8 library, create a context, or use any V8 objects.

-class TurboAssemblerTest : public TestWithIsolate {};
+class MacroAssemblerTest : public TestWithIsolate {};

-TEST_F(TurboAssemblerTest, TestHardAbort) {
+TEST_F(MacroAssemblerTest, TestHardAbort) {
   auto buffer = AllocateAssemblerBuffer();
-  TurboAssembler tasm(isolate(), AssemblerOptions{}, CodeObjectRequired::kNo,
+  MacroAssembler masm(isolate(), AssemblerOptions{}, CodeObjectRequired::kNo,
                       buffer->CreateView());
   __ set_root_array_available(false);
   __ set_abort_hard(true);
@@ -40,7 +40,7 @@ TEST_F(TurboAssemblerTest, TestHardAbort) {
   __ Abort(AbortReason::kNoReason);

   CodeDesc desc;
-  tasm.GetCode(isolate(), &desc);
+  masm.GetCode(isolate(), &desc);
   buffer->MakeExecutable();
   // We need an isolate here to execute in the simulator.
   auto f = GeneratedCode<void>::FromBuffer(isolate(), buffer->start());
@@ -48,9 +48,9 @@ TEST_F(TurboAssemblerTest, TestHardAbort) {
   ASSERT_DEATH_IF_SUPPORTED({ f.Call(); }, ERROR_MESSAGE("abort: no reason"));
 }

-TEST_F(TurboAssemblerTest, TestCheck) {
+TEST_F(MacroAssemblerTest, TestCheck) {
   auto buffer = AllocateAssemblerBuffer();
-  TurboAssembler tasm(isolate(), AssemblerOptions{}, CodeObjectRequired::kNo,
+  MacroAssembler masm(isolate(), AssemblerOptions{}, CodeObjectRequired::kNo,
                       buffer->CreateView());
   __ set_root_array_available(false);
   __ set_abort_hard(true);
@@ -62,7 +62,7 @@ TEST_F(TurboAssemblerTest, TestCheck) {
   __ Ret();

   CodeDesc desc;
-  tasm.GetCode(isolate(), &desc);
+  masm.GetCode(isolate(), &desc);
   buffer->MakeExecutable();
   // We need an isolate here to execute in the simulator.
   auto f = GeneratedCode<void, int>::FromBuffer(isolate(), buffer->start());
@@ -102,17 +102,17 @@ const MoveObjectAndSlotTestCase kMoveObjectAndSlotTestCases[] = {
 const int kOffsets[] = {0, 42, kMaxRegularHeapObjectSize, 0x101001};

 template <typename T>
-class TurboAssemblerTestWithParam : public TurboAssemblerTest,
+class MacroAssemblerTestWithParam : public MacroAssemblerTest,
                                     public ::testing::WithParamInterface<T> {};

-using TurboAssemblerTestMoveObjectAndSlot =
-    TurboAssemblerTestWithParam<MoveObjectAndSlotTestCase>;
+using MacroAssemblerTestMoveObjectAndSlot =
+    MacroAssemblerTestWithParam<MoveObjectAndSlotTestCase>;

-TEST_P(TurboAssemblerTestMoveObjectAndSlot, MoveObjectAndSlot) {
+TEST_P(MacroAssemblerTestMoveObjectAndSlot, MoveObjectAndSlot) {
   const MoveObjectAndSlotTestCase test_case = GetParam();
   TRACED_FOREACH(int32_t, offset, kOffsets) {
     auto buffer = AllocateAssemblerBuffer();
-    TurboAssembler tasm(nullptr, AssemblerOptions{}, CodeObjectRequired::kNo,
+    MacroAssembler masm(nullptr, AssemblerOptions{}, CodeObjectRequired::kNo,
                         buffer->CreateView());
     __ Push(r0);
     __ Move(test_case.object, r1);
@@ -143,7 +143,7 @@ TEST_P(TurboAssemblerTestMoveObjectAndSlot, MoveObjectAndSlot) {
     __ RecordComment("--");

     // The `result` pointer was saved on the stack.
-    UseScratchRegisterScope temps(&tasm);
+    UseScratchRegisterScope temps(&masm);
     Register scratch = temps.Acquire();
     __ Pop(scratch);
     __ str(dst_object, MemOperand(scratch));
@@ -152,7 +152,7 @@ TEST_P(TurboAssemblerTestMoveObjectAndSlot, MoveObjectAndSlot) {
     __ Ret();

     CodeDesc desc;
-    tasm.GetCode(nullptr, &desc);
+    masm.GetCode(nullptr, &desc);
     if (v8_flags.print_code) {
       Handle<Code> code =
           Factory::CodeBuilder(isolate(), desc, CodeKind::FOR_TESTING).Build();
@@ -179,8 +179,8 @@ TEST_P(TurboAssemblerTestMoveObjectAndSlot, MoveObjectAndSlot) {
   }
 }

-INSTANTIATE_TEST_SUITE_P(TurboAssemblerTest,
-                         TurboAssemblerTestMoveObjectAndSlot,
+INSTANTIATE_TEST_SUITE_P(MacroAssemblerTest,
+                         MacroAssemblerTestMoveObjectAndSlot,
                          ::testing::ValuesIn(kMoveObjectAndSlotTestCases));

 #undef __
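The `tasm.` to `masm.` change above is the rename of the `__` shorthand that V8 tests and code generators use so that emission sequences read like an assembly listing. A tiny standalone illustration of the idiom, with a made-up assembler type:

#include <cstdio>

// Each `__ Op(...)` line expands to a member call on the local assembler.
struct FakeAssembler {
  void Mov(int reg, int imm) { printf("mov r%d, #%d\n", reg, imm); }
  void Ret() { printf("ret\n"); }
};

#define __ masm.
int main() {
  FakeAssembler masm;
  __ Mov(0, 42);  // expands to masm.Mov(0, 42);
  __ Ret();       // expands to masm.Ret();
}
#undef __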
@@ -1,129 +1,254 @@
-// Copyright 2019 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-//     * Redistributions of source code must retain the above copyright
-//       notice, this list of conditions and the following disclaimer.
-//     * Redistributions in binary form must reproduce the above
-//       copyright notice, this list of conditions and the following
-//       disclaimer in the documentation and/or other materials provided
-//       with the distribution.
-//     * Neither the name of Google Inc. nor the names of its
-//       contributors may be used to endorse or promote products derived
-//       from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#include <stdlib.h>
-
-#include "src/codegen/arm64/assembler-arm64-inl.h"
-#include "src/codegen/macro-assembler-inl.h"
-#include "src/deoptimizer/deoptimizer.h"
-#include "src/heap/factory.h"
-#include "src/objects/objects-inl.h"
-#include "src/utils/ostreams.h"
-#include "test/common/assembler-tester.h"
-#include "test/unittests/test-utils.h"
-
-namespace v8 {
-namespace internal {
-namespace test_macro_assembler_arm64 {
-
-using MacroAssemblerArm64Test = TestWithIsolate;
-
-using F0 = int();
-
-#define __ masm.
-
-TEST_F(MacroAssemblerArm64Test, EmbeddedObj) {
-#ifdef V8_COMPRESS_POINTERS
-  Isolate* isolate = i_isolate();
-  HandleScope handles(isolate);
-
-  auto buffer = AllocateAssemblerBuffer();
-  MacroAssembler masm(isolate, v8::internal::CodeObjectRequired::kYes,
-                      buffer->CreateView());
-
-  AssemblerBufferWriteScope rw_scope(*buffer);
-
-  Handle<HeapObject> old_array = isolate->factory()->NewFixedArray(2000);
-  Handle<HeapObject> my_array = isolate->factory()->NewFixedArray(1000);
-  __ Mov(w4, Immediate(my_array, RelocInfo::COMPRESSED_EMBEDDED_OBJECT));
-  __ Mov(x5, old_array);
-  __ ret(x5);
-
-  CodeDesc desc;
-  masm.GetCode(isolate, &desc);
-  Handle<Code> code =
-      Factory::CodeBuilder(isolate, desc, CodeKind::FOR_TESTING).Build();
-#ifdef DEBUG
-  StdoutStream os;
-  code->Print(os);
-#endif
-
-  // Collect garbage to ensure reloc info can be walked by the heap.
-  CollectAllGarbage();
-  CollectAllGarbage();
-  CollectAllGarbage();
-
-  PtrComprCageBase cage_base(isolate);
-
-  // Test the user-facing reloc interface.
-  const int mode_mask = RelocInfo::EmbeddedObjectModeMask();
-  for (RelocIterator it(*code, mode_mask); !it.done(); it.next()) {
-    RelocInfo::Mode mode = it.rinfo()->rmode();
-    if (RelocInfo::IsCompressedEmbeddedObject(mode)) {
-      CHECK_EQ(*my_array, it.rinfo()->target_object(cage_base));
-    } else {
-      CHECK(RelocInfo::IsFullEmbeddedObject(mode));
-      CHECK_EQ(*old_array, it.rinfo()->target_object(cage_base));
-    }
-  }
-#endif  // V8_COMPRESS_POINTERS
-}
-
-TEST_F(MacroAssemblerArm64Test, DeoptExitSizeIsFixed) {
-  Isolate* isolate = i_isolate();
-  HandleScope handles(isolate);
-  auto buffer = AllocateAssemblerBuffer();
-  MacroAssembler masm(isolate, v8::internal::CodeObjectRequired::kYes,
-                      buffer->CreateView());
-
-  AssemblerBufferWriteScope rw_scope(*buffer);
-
-  static_assert(static_cast<int>(kFirstDeoptimizeKind) == 0);
-  for (int i = 0; i < kDeoptimizeKindCount; i++) {
-    DeoptimizeKind kind = static_cast<DeoptimizeKind>(i);
-    Label before_exit;
-    Builtin target = Deoptimizer::GetDeoptimizationEntry(kind);
-    // Mirroring logic in code-generator.cc.
-    if (kind == DeoptimizeKind::kLazy) {
-      // CFI emits an extra instruction here.
-      masm.BindExceptionHandler(&before_exit);
-    } else {
-      masm.bind(&before_exit);
-    }
-    masm.CallForDeoptimization(target, 42, &before_exit, kind, &before_exit,
-                               &before_exit);
-    CHECK_EQ(masm.SizeOfCodeGeneratedSince(&before_exit),
-             kind == DeoptimizeKind::kLazy ? Deoptimizer::kLazyDeoptExitSize
-                                           : Deoptimizer::kEagerDeoptExitSize);
-  }
-}
-
-}  // namespace test_macro_assembler_arm64
-}  // namespace internal
-}  // namespace v8
+// Copyright 2018 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/codegen/arm64/macro-assembler-arm64-inl.h"
+#include "src/codegen/macro-assembler.h"
+#include "src/execution/simulator.h"
+#include "src/utils/ostreams.h"
+#include "test/common/assembler-tester.h"
+#include "test/unittests/test-utils.h"
+#include "testing/gtest-support.h"
+
+namespace v8 {
+namespace internal {
+
+#define __ masm.
+
+// If we are running on android and the output is not redirected (i.e. ends up
+// in the android log) then we cannot find the error message in the output. This
+// macro just returns the empty string in that case.
+#if defined(ANDROID) && !defined(V8_ANDROID_LOG_STDOUT)
+#define ERROR_MESSAGE(msg) ""
+#else
+#define ERROR_MESSAGE(msg) msg
+#endif
+
+// Test the x64 assembler by compiling some simple functions into
+// a buffer and executing them. These tests do not initialize the
+// V8 library, create a context, or use any V8 objects.
+
+class MacroAssemblerTest : public TestWithIsolate {};
+
+TEST_F(MacroAssemblerTest, TestHardAbort) {
+  auto buffer = AllocateAssemblerBuffer();
+  MacroAssembler masm(isolate(), AssemblerOptions{}, CodeObjectRequired::kNo,
+                      buffer->CreateView());
+  __ set_root_array_available(false);
+  __ set_abort_hard(true);
+
+  {
+    AssemblerBufferWriteScope rw_scope(*buffer);
+
+    __ CodeEntry();
+
+    __ Abort(AbortReason::kNoReason);
+
+    CodeDesc desc;
+    masm.GetCode(isolate(), &desc);
+  }
+  // We need an isolate here to execute in the simulator.
+  auto f = GeneratedCode<void>::FromBuffer(isolate(), buffer->start());
+
+  ASSERT_DEATH_IF_SUPPORTED({ f.Call(); }, ERROR_MESSAGE("abort: no reason"));
+}
+
+TEST_F(MacroAssemblerTest, TestCheck) {
+  auto buffer = AllocateAssemblerBuffer();
+  MacroAssembler masm(isolate(), AssemblerOptions{}, CodeObjectRequired::kNo,
+                      buffer->CreateView());
+  __ set_root_array_available(false);
+  __ set_abort_hard(true);
+
+  {
+    AssemblerBufferWriteScope rw_scope(*buffer);
+
+    __ CodeEntry();
+
+    // Fail if the first parameter is 17.
+    __ Mov(w1, Immediate(17));
+    __ Cmp(w0, w1);  // 1st parameter is in {w0}.
+    __ Check(Condition::ne, AbortReason::kNoReason);
+    __ Ret();
+
+    CodeDesc desc;
+    masm.GetCode(isolate(), &desc);
+  }
+  // We need an isolate here to execute in the simulator.
+  auto f = GeneratedCode<void, int>::FromBuffer(isolate(), buffer->start());
+
+  f.Call(0);
+  f.Call(18);
+  ASSERT_DEATH_IF_SUPPORTED({ f.Call(17); }, ERROR_MESSAGE("abort: no reason"));
+}
+
+TEST_F(MacroAssemblerTest, CompareAndBranch) {
+  const int kTestCases[] = {-42, 0, 42};
+  static_assert(Condition::eq == 0);
+  static_assert(Condition::le == 13);
+  TRACED_FORRANGE(int, cc, 0, 13) {  // All conds except al and nv
+    Condition cond = static_cast<Condition>(cc);
+    TRACED_FOREACH(int, imm, kTestCases) {
+      auto buffer = AllocateAssemblerBuffer();
+      MacroAssembler masm(isolate(), AssemblerOptions{},
+                          CodeObjectRequired::kNo, buffer->CreateView());
+      __ set_root_array_available(false);
+      __ set_abort_hard(true);
+
+      {
+        AssemblerBufferWriteScope rw_scope(*buffer);
+
+        __ CodeEntry();
+
+        Label start, lab;
+        __ Bind(&start);
+        __ CompareAndBranch(x0, Immediate(imm), cond, &lab);
+        if (imm == 0 && ((cond == eq) || (cond == ne) || (cond == hi) ||
+                         (cond == ls))) {  // One instruction generated
+          ASSERT_EQ(kInstrSize, __ SizeOfCodeGeneratedSince(&start));
+        } else {  // Two instructions generated
+          ASSERT_EQ(static_cast<uint8_t>(2 * kInstrSize),
+                    __ SizeOfCodeGeneratedSince(&start));
+        }
+        __ Cmp(x0, Immediate(imm));
+        __ Check(NegateCondition(cond),
+                 AbortReason::kNoReason);  // cond must not hold
+        __ Ret();
+        __ Bind(&lab);  // Branch leads here
+        __ Cmp(x0, Immediate(imm));
+        __ Check(cond, AbortReason::kNoReason);  // cond must hold
+        __ Ret();
+
+        CodeDesc desc;
+        masm.GetCode(isolate(), &desc);
+      }
+      // We need an isolate here to execute in the simulator.
+      auto f = GeneratedCode<void, int>::FromBuffer(isolate(), buffer->start());
+
+      TRACED_FOREACH(int, n, kTestCases) { f.Call(n); }
+    }
+  }
+}
+
+struct MoveObjectAndSlotTestCase {
+  const char* comment;
+  Register dst_object;
+  Register dst_slot;
+  Register object;
+  Register offset_register = no_reg;
+};
+
+const MoveObjectAndSlotTestCase kMoveObjectAndSlotTestCases[] = {
+    {"no overlap", x0, x1, x2},
+    {"no overlap", x0, x1, x2, x3},
+    {"object == dst_object", x2, x1, x2},
+    {"object == dst_object", x2, x1, x2, x3},
+    {"object == dst_slot", x1, x2, x2},
+    {"object == dst_slot", x1, x2, x2, x3},
+    {"offset == dst_object", x0, x1, x2, x0},
+    {"offset == dst_object && object == dst_slot", x0, x1, x1, x0},
+    {"offset == dst_slot", x0, x1, x2, x1},
+    {"offset == dst_slot && object == dst_object", x0, x1, x0, x1}};
+
+// Make sure we include offsets that cannot be encoded in an add instruction.
+const int kOffsets[] = {0, 42, kMaxRegularHeapObjectSize, 0x101001};
+
+template <typename T>
+class MacroAssemblerTestWithParam : public MacroAssemblerTest,
+                                    public ::testing::WithParamInterface<T> {};
+
+using MacroAssemblerTestMoveObjectAndSlot =
+    MacroAssemblerTestWithParam<MoveObjectAndSlotTestCase>;
+
+TEST_P(MacroAssemblerTestMoveObjectAndSlot, MoveObjectAndSlot) {
+  const MoveObjectAndSlotTestCase test_case = GetParam();
+  TRACED_FOREACH(int32_t, offset, kOffsets) {
+    auto buffer = AllocateAssemblerBuffer();
+    MacroAssembler masm(nullptr, AssemblerOptions{}, CodeObjectRequired::kNo,
+                        buffer->CreateView());
+
+    {
+      AssemblerBufferWriteScope rw_buffer_scope(*buffer);
+
+      __ CodeEntry();
+      __ Push(x0, padreg);
+      __ Mov(test_case.object, x1);
+
+      Register src_object = test_case.object;
+      Register dst_object = test_case.dst_object;
+      Register dst_slot = test_case.dst_slot;
+
+      Operand offset_operand(0);
+      if (test_case.offset_register == no_reg) {
+        offset_operand = Operand(offset);
+      } else {
+        __ Mov(test_case.offset_register, Operand(offset));
+        offset_operand = Operand(test_case.offset_register);
+      }
+
+      std::stringstream comment;
+      comment << "-- " << test_case.comment << ": MoveObjectAndSlot("
+              << dst_object << ", " << dst_slot << ", " << src_object << ", ";
+      if (test_case.offset_register == no_reg) {
+        comment << "#" << offset;
+      } else {
+        comment << test_case.offset_register;
+      }
+      comment << ") --";
+      __ RecordComment(comment.str().c_str());
+      __ MoveObjectAndSlot(dst_object, dst_slot, src_object, offset_operand);
+      __ RecordComment("--");
+
+      // The `result` pointer was saved on the stack.
+      UseScratchRegisterScope temps(&masm);
+      Register scratch = temps.AcquireX();
+      __ Pop(padreg, scratch);
+      __ Str(dst_object, MemOperand(scratch));
+      __ Str(dst_slot, MemOperand(scratch, kSystemPointerSize));
+
+      __ Ret();
+
+      CodeDesc desc;
+      masm.GetCode(nullptr, &desc);
+      if (v8_flags.print_code) {
+        Handle<Code> code =
+            Factory::CodeBuilder(isolate(), desc, CodeKind::FOR_TESTING)
+                .Build();
+        StdoutStream os;
+        code->Print(os);
+      }
+    }
+
+    // We need an isolate here to execute in the simulator.
+    auto f = GeneratedCode<void, byte**, byte*>::FromBuffer(isolate(),
+                                                            buffer->start());
+
+    byte* object = new byte[offset];
+    byte* result[] = {nullptr, nullptr};
+
+    f.Call(result, object);
+
+    // The first element must be the address of the object, and the second the
+    // slot addressed by `offset`.
+    EXPECT_EQ(result[0], &object[0]);
+    EXPECT_EQ(result[1], &object[offset]);
+
+    delete[] object;
+  }
+}
+
+INSTANTIATE_TEST_SUITE_P(MacroAssemblerTest,
+                         MacroAssemblerTestMoveObjectAndSlot,
+                         ::testing::ValuesIn(kMoveObjectAndSlotTestCases));
+
+#undef __
+#undef ERROR_MESSAGE
+
+}  // namespace internal
+}  // namespace v8
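The CompareAndBranch test above asserts a single emitted instruction exactly when the immediate is 0 and the condition is eq, ne, hi, or ls. The likely reason (a sketch of the reasoning, not the V8 emitter itself): unsigned ">" 0 is just "!= 0" and unsigned "<=" 0 is "== 0", so all four conditions reduce to a zero test that arm64's CBZ/CBNZ instructions encode directly, with no separate CMP:

#include <cassert>
#include <cstdint>

enum Cond { kEq, kNe, kHi, kLs };  // the four single-instruction cases

bool BranchTakenAgainstZero(Cond cond, uint64_t x) {
  switch (cond) {
    case kEq:
    case kLs:
      return x == 0;  // -> CBZ
    case kNe:
    case kHi:
      return x != 0;  // -> CBNZ
  }
  return false;
}

int main() {
  assert(BranchTakenAgainstZero(kHi, 7) == BranchTakenAgainstZero(kNe, 7));
  assert(BranchTakenAgainstZero(kLs, 0) && !BranchTakenAgainstZero(kLs, 3));
}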
@@ -11,17 +11,17 @@
 namespace v8 {
 namespace internal {

-#define __ tasm.
+#define __ masm.

 // Test the x64 assembler by compiling some simple functions into
 // a buffer and executing them. These tests do not initialize the
 // V8 library, create a context, or use any V8 objects.

-class TurboAssemblerTest : public TestWithIsolate {};
+class MacroAssemblerTest : public TestWithIsolate {};

-TEST_F(TurboAssemblerTest, TestHardAbort) {
+TEST_F(MacroAssemblerTest, TestHardAbort) {
   auto buffer = AllocateAssemblerBuffer();
-  TurboAssembler tasm(isolate(), AssemblerOptions{}, CodeObjectRequired::kNo,
+  MacroAssembler masm(isolate(), AssemblerOptions{}, CodeObjectRequired::kNo,
                       buffer->CreateView());
   __ set_root_array_available(false);
   __ set_abort_hard(true);
@@ -29,16 +29,16 @@ TEST_F(TurboAssemblerTest, TestHardAbort) {
   __ Abort(AbortReason::kNoReason);

   CodeDesc desc;
-  tasm.GetCode(isolate(), &desc);
+  masm.GetCode(isolate(), &desc);
   buffer->MakeExecutable();
   auto f = GeneratedCode<void>::FromBuffer(isolate(), buffer->start());

   ASSERT_DEATH_IF_SUPPORTED({ f.Call(); }, "abort: no reason");
 }

-TEST_F(TurboAssemblerTest, TestCheck) {
+TEST_F(MacroAssemblerTest, TestCheck) {
   auto buffer = AllocateAssemblerBuffer();
-  TurboAssembler tasm(isolate(), AssemblerOptions{}, CodeObjectRequired::kNo,
+  MacroAssembler masm(isolate(), AssemblerOptions{}, CodeObjectRequired::kNo,
                       buffer->CreateView());
   __ set_root_array_available(false);
   __ set_abort_hard(true);
@@ -50,7 +50,7 @@ TEST_F(TurboAssemblerTest, TestCheck) {
   __ ret(0);

   CodeDesc desc;
-  tasm.GetCode(isolate(), &desc);
+  masm.GetCode(isolate(), &desc);
   buffer->MakeExecutable();
   auto f = GeneratedCode<void, int>::FromBuffer(isolate(), buffer->start());
Some files were not shown because too many files have changed in this diff.