s390x: rename Add/Sub operations

Change-Id: I232585076ecf6a824cdbe2e989eadaf96adcc1d8
Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/2587241
Reviewed-by: Milad Fa <mfarazma@redhat.com>
Commit-Queue: Junliang Yan <junyan@redhat.com>
Cr-Commit-Position: refs/heads/master@{#71737}
Junliang Yan authored 2020-12-11 20:08:39 -05:00; committed by Commit Bot
commit b5675aa0e5, parent b902dd979f
7 changed files with 272 additions and 288 deletions
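The patch is a mechanical rename of TurboAssembler's add/subtract helpers: the old 32-bit vs. pointer-size naming becomes explicit width-and-signedness naming. The complete mapping, as collected from the hunks below:

- Add32 → AddS32 and AddP → AddS64 (signed add)
- Sub32 → SubS32 and SubP → SubS64 (signed subtract)
- AddLogical32 / AddLogical → AddU32 and AddLogicalP → AddU64 (unsigned add)
- SubLogical32 / SubLogical → SubU32 and SubLogicalP → SubU64 (unsigned subtract)

In several of the 64-bit hunks the V8_TARGET_ARCH_S390X #else fallbacks disappear as well, since the S64/U64 forms are now unconditionally 64-bit.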


@ -120,8 +120,8 @@ void Generate_JSBuiltinsConstructStubHelper(MacroAssembler* masm) {
STATIC_ASSERT(kSmiTagSize == 1 && kSmiTag == 0);
__ SmiToPtrArrayOffset(scratch, scratch);
__ AddP(sp, sp, scratch);
__ AddP(sp, sp, Operand(kSystemPointerSize));
__ AddS64(sp, sp, scratch);
__ AddS64(sp, sp, Operand(kSystemPointerSize));
__ Ret();
__ bind(&stack_overflow);
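This stack unwind and the one in the next hunk follow the same formula: SmiToPtrArrayOffset turns the Smi argument count into a byte offset, and sp then advances by argc * kSystemPointerSize plus one extra slot for the receiver, i.e. sp += (argc + 1) * kSystemPointerSize.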
@ -277,8 +277,8 @@ void Builtins::Generate_JSConstructStubGeneric(MacroAssembler* masm) {
STATIC_ASSERT(kSmiTagSize == 1 && kSmiTag == 0);
__ SmiToPtrArrayOffset(r3, r3);
__ AddP(sp, sp, r3);
__ AddP(sp, sp, Operand(kSystemPointerSize));
__ AddS64(sp, sp, r3);
__ AddS64(sp, sp, Operand(kSystemPointerSize));
__ Ret();
__ bind(&check_receiver);
@ -394,7 +394,7 @@ void Builtins::Generate_ResumeGeneratorTrampoline(MacroAssembler* masm) {
__ mov(r8, r5);
__ bind(&loop);
__ SubP(r8, r8, Operand(1));
__ SubS64(r8, r8, Operand(1));
__ blt(&done_loop);
__ ShiftLeftU64(r1, r8, Operand(kTaggedSizeLog2));
__ la(scratch, MemOperand(r4, r1));
@ -746,7 +746,7 @@ static void Generate_JSEntryTrampolineHelper(MacroAssembler* masm,
// Check if we have enough stack space to push all arguments.
Label enough_stack_space, stack_overflow;
__ AddP(r7, r2, Operand(1));
__ AddS64(r7, r2, Operand(1));
__ StackOverflowCheck(r7, r1, &stack_overflow);
__ b(&enough_stack_space);
__ bind(&stack_overflow);
@ -780,7 +780,7 @@ static void Generate_JSEntryTrampolineHelper(MacroAssembler* masm,
__ LoadP(r0, MemOperand(r8)); // dereference handle
__ Push(r0);
__ lay(r9, MemOperand(r9, -kSystemPointerSize)); // r9++;
__ SubP(r7, r7, Operand(1));
__ SubS64(r7, r7, Operand(1));
__ bgt(&argLoop);
__ bind(&argExit);
@ -866,7 +866,8 @@ static void LeaveInterpreterFrame(MacroAssembler* masm, Register scratch1,
MemOperand(fp, StandardFrameConstants::kArgCOffset));
__ ShiftLeftU64(actual_params_size, actual_params_size,
Operand(kSystemPointerSizeLog2));
__ AddP(actual_params_size, actual_params_size, Operand(kSystemPointerSize));
__ AddS64(actual_params_size, actual_params_size,
Operand(kSystemPointerSize));
// If actual is bigger than formal, then we should use it to free up the stack
// arguments.
@ -880,7 +881,7 @@ static void LeaveInterpreterFrame(MacroAssembler* masm, Register scratch1,
// Leave the frame (also dropping the register file).
__ LeaveFrame(StackFrame::INTERPRETED);
__ AddP(sp, sp, params_size);
__ AddS64(sp, sp, params_size);
}
// Tail-call |function_id| if |actual_marker| == |expected_marker|
@ -1008,18 +1009,18 @@ static void AdvanceBytecodeOffsetOrReturn(MacroAssembler* masm,
__ bne(&extra_wide);
// Load the next bytecode and update table to the wide scaled table.
__ AddP(bytecode_offset, bytecode_offset, Operand(1));
__ AddS64(bytecode_offset, bytecode_offset, Operand(1));
__ LoadU8(bytecode, MemOperand(bytecode_array, bytecode_offset));
__ AddP(bytecode_size_table, bytecode_size_table,
Operand(kIntSize * interpreter::Bytecodes::kBytecodeCount));
__ AddS64(bytecode_size_table, bytecode_size_table,
Operand(kIntSize * interpreter::Bytecodes::kBytecodeCount));
__ b(&process_bytecode);
__ bind(&extra_wide);
// Load the next bytecode and update table to the extra wide scaled table.
__ AddP(bytecode_offset, bytecode_offset, Operand(1));
__ AddS64(bytecode_offset, bytecode_offset, Operand(1));
__ LoadU8(bytecode, MemOperand(bytecode_array, bytecode_offset));
__ AddP(bytecode_size_table, bytecode_size_table,
Operand(2 * kIntSize * interpreter::Bytecodes::kBytecodeCount));
__ AddS64(bytecode_size_table, bytecode_size_table,
Operand(2 * kIntSize * interpreter::Bytecodes::kBytecodeCount));
// Load the size of the current bytecode.
__ bind(&process_bytecode);
@ -1047,7 +1048,7 @@ static void AdvanceBytecodeOffsetOrReturn(MacroAssembler* masm,
// Otherwise, load the size of the current bytecode and advance the offset.
__ ShiftLeftU64(scratch3, bytecode, Operand(2));
__ LoadU32(scratch3, MemOperand(bytecode_size_table, scratch3));
__ AddP(bytecode_offset, bytecode_offset, scratch3);
__ AddS64(bytecode_offset, bytecode_offset, scratch3);
__ bind(&end);
}
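The constants added to bytecode_size_table reflect its layout: one int32 entry per bytecode, with the whole block repeated for each operand scale (single, wide, extra-wide), so switching tables is a constant pointer offset. A hypothetical C++ sketch of that selection, not V8's actual code:

#include <cstdint>
// Blocks of bytecode_count int32 sizes, one block per operand scale.
const int32_t* select_size_block(const int32_t* table, int scale_index,
                                 int bytecode_count) {
  return table + scale_index * bytecode_count;  // scale_index: 0, 1, or 2
}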
@ -1124,7 +1125,7 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
// Increment invocation count for the function.
__ LoadS32(r1, FieldMemOperand(feedback_vector,
FeedbackVector::kInvocationCountOffset));
__ AddP(r1, r1, Operand(1));
__ AddS64(r1, r1, Operand(1));
__ StoreU32(r1, FieldMemOperand(feedback_vector,
FeedbackVector::kInvocationCountOffset));
@ -1163,7 +1164,7 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
BytecodeArray::kFrameSizeOffset));
// Do a stack check to ensure we don't go over the limit.
__ SubP(r8, sp, r4);
__ SubS64(r8, sp, r4);
__ CmpLogicalP(r8,
__ StackLimitAsMemOperand(StackLimitKind::kRealStackLimit));
__ blt(&stack_overflow);
@ -1178,7 +1179,7 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
__ mov(r1, r4);
__ bind(&loop);
__ push(kInterpreterAccumulatorRegister);
__ SubP(r1, Operand(1));
__ SubS64(r1, Operand(1));
__ bne(&loop);
__ bind(&no_args);
}
@ -1306,9 +1307,9 @@ static void Generate_InterpreterPushArgs(MacroAssembler* masm,
Register num_args,
Register start_address,
Register scratch) {
__ SubP(scratch, num_args, Operand(1));
__ SubS64(scratch, num_args, Operand(1));
__ ShiftLeftU64(scratch, scratch, Operand(kSystemPointerSizeLog2));
__ SubP(start_address, start_address, scratch);
__ SubS64(start_address, start_address, scratch);
// Push the arguments.
__ PushArray(start_address, num_args, r1, scratch,
TurboAssembler::PushArrayOrder::kReverse);
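In other words, start_address is rewound from the last argument to the first, start -= (num_args - 1) * kSystemPointerSize, and PushArray then walks the range, pushing in reverse order.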
@ -1329,11 +1330,11 @@ void Builtins::Generate_InterpreterPushArgsThenCallImpl(
Label stack_overflow;
if (mode == InterpreterPushArgsMode::kWithFinalSpread) {
// The spread argument should not be pushed.
__ SubP(r2, r2, Operand(1));
__ SubS64(r2, r2, Operand(1));
}
// Calculate number of arguments (AddP one for receiver).
__ AddP(r5, r2, Operand(1));
// Calculate number of arguments (AddS64 one for receiver).
__ AddS64(r5, r2, Operand(1));
__ StackOverflowCheck(r5, ip, &stack_overflow);
if (receiver_mode == ConvertReceiverMode::kNullOrUndefined) {
@ -1383,12 +1384,12 @@ void Builtins::Generate_InterpreterPushArgsThenConstructImpl(
// -- r6 : address of the first argument
// -----------------------------------
Label stack_overflow;
__ AddP(r7, r2, Operand(1));
__ AddS64(r7, r2, Operand(1));
__ StackOverflowCheck(r7, ip, &stack_overflow);
if (mode == InterpreterPushArgsMode::kWithFinalSpread) {
// The spread argument should not be pushed.
__ SubP(r2, r2, Operand(1));
__ SubS64(r2, r2, Operand(1));
}
// Push the arguments. r4 and r5 will be modified.
@ -1457,7 +1458,7 @@ static void Generate_InterpreterEnterBytecode(MacroAssembler* masm) {
__ LoadTaggedPointerField(
r4, FieldMemOperand(r4, InterpreterData::kInterpreterTrampolineOffset));
__ AddP(r4, r4, Operand(Code::kHeaderSize - kHeapObjectTag));
__ AddS64(r4, r4, Operand(Code::kHeaderSize - kHeapObjectTag));
__ b(&trampoline_loaded);
__ bind(&builtin_trampoline);
@ -1467,7 +1468,7 @@ static void Generate_InterpreterEnterBytecode(MacroAssembler* masm) {
__ LoadP(r4, MemOperand(r4));
__ bind(&trampoline_loaded);
__ AddP(r14, r4, Operand(interpreter_entry_return_pc_offset.value()));
__ AddS64(r14, r4, Operand(interpreter_entry_return_pc_offset.value()));
// Initialize the dispatch table register.
__ Move(
@ -1595,12 +1596,12 @@ void Generate_ContinueToBuiltinHelper(MacroAssembler* masm,
// Overwrite the hole inserted by the deoptimizer with the return value from
// the LAZY deopt point. r0 contains the arguments count, the return value
// from LAZY is always the last argument.
__ AddP(r2, r2,
Operand(BuiltinContinuationFrameConstants::kFixedSlotCount));
__ AddS64(r2, r2,
Operand(BuiltinContinuationFrameConstants::kFixedSlotCount));
__ ShiftLeftU64(r1, r2, Operand(kSystemPointerSizeLog2));
__ StoreU64(scratch, MemOperand(sp, r1));
// Recover arguments count.
__ SubP(r2, r2,
__ SubS64(r2, r2,
Operand(BuiltinContinuationFrameConstants::kFixedSlotCount));
}
__ LoadP(
@ -1611,8 +1612,8 @@ void Generate_ContinueToBuiltinHelper(MacroAssembler* masm,
UseScratchRegisterScope temps(masm);
Register builtin = temps.Acquire();
__ Pop(builtin);
__ AddP(sp, sp,
Operand(BuiltinContinuationFrameConstants::kFixedFrameSizeFromFp));
__ AddS64(sp, sp,
Operand(BuiltinContinuationFrameConstants::kFixedFrameSizeFromFp));
__ Pop(r0);
__ mov(r14, r0);
__ LoadEntryFromBuiltinIndex(builtin);
@ -1680,8 +1681,8 @@ void Builtins::Generate_InterpreterOnStackReplacement(MacroAssembler* masm) {
// Compute the target address = code_obj + header_size + osr_offset
// <entry_addr> = <code_obj> + #header_size + <osr_offset>
__ AddP(r2, r3);
__ AddP(r0, r2, Operand(Code::kHeaderSize - kHeapObjectTag));
__ AddS64(r2, r3);
__ AddS64(r0, r2, Operand(Code::kHeaderSize - kHeapObjectTag));
__ mov(r14, r0);
// And "return" to the OSR entry point of the function.
@ -1759,12 +1760,12 @@ void Builtins::Generate_FunctionPrototypeCall(MacroAssembler* masm) {
__ cghi(r2, Operand::Zero());
__ b(ne, &done);
__ PushRoot(RootIndex::kUndefinedValue);
__ AddP(r2, r2, Operand(1));
__ AddS64(r2, r2, Operand(1));
__ bind(&done);
}
// 3. Adjust the actual number of arguments.
__ SubP(r2, r2, Operand(1));
__ SubS64(r2, r2, Operand(1));
// 4. Call the callable.
__ Jump(masm->isolate()->builtins()->Call(), RelocInfo::CODE_TARGET);
@ -1959,7 +1960,7 @@ void Builtins::Generate_CallOrConstructVarargs(MacroAssembler* masm,
Register num = ip, src = r8, dest = r7;
__ mov(src, sp);
__ ShiftLeftU64(r1, r6, Operand(kSystemPointerSizeLog2));
__ SubP(sp, sp, r1);
__ SubS64(sp, sp, r1);
// Update stack pointer.
__ mov(dest, sp);
__ ltgr(num, r2);
@ -1969,7 +1970,7 @@ void Builtins::Generate_CallOrConstructVarargs(MacroAssembler* masm,
__ lay(src, MemOperand(src, kSystemPointerSize));
__ StoreU64(r0, MemOperand(dest));
__ lay(dest, MemOperand(dest, kSystemPointerSize));
__ SubP(num, num, Operand(1));
__ SubS64(num, num, Operand(1));
__ bind(&check);
__ b(ge, &copy);
}
@ -1979,8 +1980,8 @@ void Builtins::Generate_CallOrConstructVarargs(MacroAssembler* masm,
Label loop, no_args, skip;
__ CmpP(r6, Operand::Zero());
__ beq(&no_args);
__ AddP(r4, r4,
Operand(FixedArray::kHeaderSize - kHeapObjectTag - kTaggedSize));
__ AddS64(r4, r4,
Operand(FixedArray::kHeaderSize - kHeapObjectTag - kTaggedSize));
__ mov(r1, r6);
__ bind(&loop);
__ LoadAnyTaggedField(scratch, MemOperand(r4, kTaggedSize), r0);
@ -1993,7 +1994,7 @@ void Builtins::Generate_CallOrConstructVarargs(MacroAssembler* masm,
__ lay(r7, MemOperand(r7, kSystemPointerSize));
__ BranchOnCount(r1, &loop);
__ bind(&no_args);
__ AddP(r2, r2, r6);
__ AddS64(r2, r2, r6);
}
// Tail-call to the actual Call or Construct builtin.
@ -2068,7 +2069,7 @@ void Builtins::Generate_CallOrConstructForwardVarargs(MacroAssembler* masm,
#endif
Label stack_done, stack_overflow;
__ SubP(r7, r7, r4);
__ SubS64(r7, r7, r4);
__ ble(&stack_done);
{
// ----------- S t a t e -------------
@ -2087,11 +2088,11 @@ void Builtins::Generate_CallOrConstructForwardVarargs(MacroAssembler* masm,
// Forward the arguments from the caller frame.
__ mov(r5, r5);
// Point to the first argument to copy (skipping the receiver).
__ AddP(r6, r6,
Operand(CommonFrameConstants::kFixedFrameSizeAboveFp +
kSystemPointerSize));
__ AddS64(r6, r6,
Operand(CommonFrameConstants::kFixedFrameSizeAboveFp +
kSystemPointerSize));
__ ShiftLeftU64(scratch, r4, Operand(kSystemPointerSizeLog2));
__ AddP(r6, r6, scratch);
__ AddS64(r6, r6, scratch);
// Move the arguments already in the stack,
// including the receiver and the return address.
@ -2102,7 +2103,7 @@ void Builtins::Generate_CallOrConstructForwardVarargs(MacroAssembler* masm,
__ mov(src, sp);
// Update stack pointer.
__ ShiftLeftU64(scratch, r7, Operand(kSystemPointerSizeLog2));
__ SubP(sp, sp, scratch);
__ SubS64(sp, sp, scratch);
__ mov(dest, sp);
__ ltgr(num, r2);
__ b(&check);
@ -2111,7 +2112,7 @@ void Builtins::Generate_CallOrConstructForwardVarargs(MacroAssembler* masm,
__ lay(src, MemOperand(src, kSystemPointerSize));
__ StoreU64(r0, MemOperand(dest));
__ lay(dest, MemOperand(dest, kSystemPointerSize));
__ SubP(num, num, Operand(1));
__ SubS64(num, num, Operand(1));
__ bind(&check);
__ b(ge, &copy);
}
@ -2121,10 +2122,10 @@ void Builtins::Generate_CallOrConstructForwardVarargs(MacroAssembler* masm,
// friendly.
{
Label loop;
__ AddP(r2, r2, r7);
__ AddS64(r2, r2, r7);
__ bind(&loop);
{
__ SubP(r7, r7, Operand(1));
__ SubS64(r7, r7, Operand(1));
__ ShiftLeftU64(r1, r7, Operand(kSystemPointerSizeLog2));
__ LoadP(scratch, MemOperand(r6, r1));
__ StoreU64(scratch, MemOperand(r4, r1));
@ -2275,7 +2276,7 @@ void Generate_PushBoundArguments(MacroAssembler* masm) {
{
Label done;
__ ShiftLeftU64(r9, r6, Operand(kSystemPointerSizeLog2));
__ SubP(r1, sp, r9);
__ SubS64(r1, sp, r9);
// Check the stack for overflow. We are not trying to catch interruptions
// (i.e. debug break and preemption) here, so check the "real stack
// limit".
@ -2297,15 +2298,15 @@ void Generate_PushBoundArguments(MacroAssembler* masm) {
// Push [[BoundArguments]].
{
Label loop, done;
__ AddP(r2, r2, r6); // Adjust effective number of arguments.
__ AddP(r4, r4, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
__ AddS64(r2, r2, r6); // Adjust effective number of arguments.
__ AddS64(r4, r4, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
__ bind(&loop);
__ SubP(r1, r6, Operand(1));
__ SubS64(r1, r6, Operand(1));
__ ShiftLeftU64(r1, r1, Operand(kTaggedSizeLog2));
__ LoadAnyTaggedField(scratch, MemOperand(r4, r1), r0);
__ Push(scratch);
__ SubP(r6, r6, Operand(1));
__ SubS64(r6, r6, Operand(1));
__ bgt(&loop);
__ bind(&done);
}
@ -2533,11 +2534,11 @@ void Builtins::Generate_ArgumentsAdaptorTrampoline(MacroAssembler* masm) {
// r4: expected number of arguments
// r5: new target (passed through to callee)
__ ShiftLeftU64(r2, r4, Operand(kSystemPointerSizeLog2));
__ AddP(r2, fp);
__ AddS64(r2, fp);
// adjust for return address and receiver
__ AddP(r2, r2, Operand(2 * kSystemPointerSize));
__ AddS64(r2, r2, Operand(2 * kSystemPointerSize));
__ ShiftLeftU64(r6, r4, Operand(kSystemPointerSizeLog2));
__ SubP(r6, r2, r6);
__ SubS64(r6, r2, r6);
// Copy the arguments (including the receiver) to the new stack frame.
// r2: copy start address
@ -2570,11 +2571,11 @@ void Builtins::Generate_ArgumentsAdaptorTrampoline(MacroAssembler* masm) {
// r3: new target (passed through to callee)
__ LoadRoot(r7, RootIndex::kUndefinedValue);
__ SmiUntag(r1, r2);
__ SubP(r8, r4, r1);
__ SubS64(r8, r4, r1);
__ ShiftLeftU64(r1, r8, Operand(kSystemPointerSizeLog2));
__ SubP(r6, fp, r1);
__ SubS64(r6, fp, r1);
// Adjust for frame.
__ SubP(r6, r6,
__ SubS64(r6, r6,
Operand(ArgumentsAdaptorFrameConstants::kFixedFrameSizeFromFp +
kSystemPointerSize));
@ -2914,7 +2915,7 @@ void Builtins::Generate_DoubleToI(MacroAssembler* masm) {
// Load scratch with exponent - 1. This is faster than loading
// with exponent because Bias + 1 = 1024 which is a *S390* immediate value.
STATIC_ASSERT(HeapNumber::kExponentBias + 1 == 1024);
__ SubP(scratch, Operand(HeapNumber::kExponentBias + 1));
__ SubS64(scratch, Operand(HeapNumber::kExponentBias + 1));
// If exponent is greater than or equal to 84, the 32 less significant
// bits are 0s (2^84 = 1, 52 significant bits, 32 uncoded bits),
// the result is 0.
@ -2928,7 +2929,7 @@ void Builtins::Generate_DoubleToI(MacroAssembler* masm) {
// Scratch contains exponent - 1.
// Load scratch with 52 - exponent (load with 51 - (exponent - 1)).
__ mov(r0, Operand(51));
__ SubP(scratch, r0, scratch);
__ SubS64(scratch, r0, scratch);
__ CmpP(scratch, Operand::Zero());
__ ble(&only_low, Label::kNear);
// 21 <= exponent <= 51, shift scratch_low and scratch_high
@ -2938,7 +2939,7 @@ void Builtins::Generate_DoubleToI(MacroAssembler* masm) {
// We needs: exponent - 20.
// So we use: 32 - scratch = 32 - 52 + exponent = exponent - 20.
__ mov(r0, Operand(32));
__ SubP(scratch, r0, scratch);
__ SubS64(scratch, r0, scratch);
__ ExtractBitMask(result_reg, scratch_high, HeapNumber::kMantissaMask);
// Set the implicit 1 before the mantissa part in scratch_high.
STATIC_ASSERT(HeapNumber::kMantissaBitsInTopWord >= 16);
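To make the two rewrites above concrete, take exponent = 40: scratch starts at exponent - 1 = 39; 51 - 39 = 12 = 52 - exponent; and then 32 - 12 = 20 = exponent - 20, exactly the shift count the comments derive.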
@ -2973,7 +2974,7 @@ void Builtins::Generate_DoubleToI(MacroAssembler* masm) {
#endif
__ XorP(result_reg, r0);
__ ShiftRightU32(r0, scratch_high, Operand(31));
__ AddP(result_reg, r0);
__ AddS64(result_reg, r0);
__ bind(&done);
__ Pop(scratch_high, scratch_low);
@ -3051,7 +3052,7 @@ static void CallApiFunctionAndReturn(MacroAssembler* masm,
__ LoadP(r6, MemOperand(r9, kNextOffset));
__ LoadP(r7, MemOperand(r9, kLimitOffset));
__ LoadU32(r8, MemOperand(r9, kLevelOffset));
__ AddP(r8, Operand(1));
__ AddS64(r8, Operand(1));
__ StoreU32(r8, MemOperand(r9, kLevelOffset));
__ StoreReturnAddressAndCall(scratch);
@ -3072,7 +3073,7 @@ static void CallApiFunctionAndReturn(MacroAssembler* masm,
__ CmpP(r3, r8);
__ Check(eq, AbortReason::kUnexpectedLevelAfterReturnFromApiCall);
}
__ SubP(r8, Operand(1));
__ SubS64(r8, Operand(1));
__ StoreU32(r8, MemOperand(r9, kLevelOffset));
__ CmpP(r7, MemOperand(r9, kLimitOffset));
__ bne(&delete_allocated_handles, Label::kNear);
@ -3200,8 +3201,8 @@ void Builtins::Generate_CallApiCallback(MacroAssembler* masm) {
// FunctionCallbackInfo::values_ (points at the first varargs argument passed
// on the stack).
__ AddP(scratch, scratch,
Operand((FCA::kArgsLength + 1) * kSystemPointerSize));
__ AddS64(scratch, scratch,
Operand((FCA::kArgsLength + 1) * kSystemPointerSize));
__ StoreU64(scratch, MemOperand(sp, (kStackFrameExtraParamSlot + 2) *
kSystemPointerSize));
@ -3214,7 +3215,7 @@ void Builtins::Generate_CallApiCallback(MacroAssembler* masm) {
__ mov(scratch,
Operand((FCA::kArgsLength + 1 /* receiver */) * kSystemPointerSize));
__ ShiftLeftU64(r1, argc, Operand(kSystemPointerSizeLog2));
__ AddP(scratch, r1);
__ AddS64(scratch, r1);
__ StoreU64(scratch, MemOperand(sp, (kStackFrameExtraParamSlot + 4) *
kSystemPointerSize));
@ -3282,7 +3283,7 @@ void Builtins::Generate_CallApiGetter(MacroAssembler* masm) {
// Load address of v8::PropertyAccessorInfo::args_ array and name handle.
__ mov(r2, sp); // r2 = Handle<Name>
__ AddP(r3, r2, Operand(1 * kSystemPointerSize)); // r3 = v8::PCI::args_
__ AddS64(r3, r2, Operand(1 * kSystemPointerSize)); // r3 = v8::PCI::args_
// If ABI passes Handles (pointer-sized struct) in a register:
//
@ -3311,13 +3312,13 @@ void Builtins::Generate_CallApiGetter(MacroAssembler* masm) {
if (!ABI_PASSES_HANDLES_IN_REGS) {
// pass 1st arg by reference
__ StoreU64(r2, MemOperand(sp, arg0Slot * kSystemPointerSize));
__ AddP(r2, sp, Operand(arg0Slot * kSystemPointerSize));
__ AddS64(r2, sp, Operand(arg0Slot * kSystemPointerSize));
}
// Create v8::PropertyCallbackInfo object on the stack and initialize
// it's args_ field.
__ StoreU64(r3, MemOperand(sp, accessorInfoSlot * kSystemPointerSize));
__ AddP(r3, sp, Operand(accessorInfoSlot * kSystemPointerSize));
__ AddS64(r3, sp, Operand(accessorInfoSlot * kSystemPointerSize));
// r3 = v8::PropertyCallbackInfo&
ExternalReference thunk_ref =
@ -3387,7 +3388,7 @@ void Generate_DeoptimizationEntry(MacroAssembler* masm,
// register r6.
__ mov(r5, r14);
__ la(r6, MemOperand(sp, kSavedRegistersAreaSize));
__ SubP(r6, fp, r6);
__ SubS64(r6, fp, r6);
// Allocate a new deoptimizer object.
// Pass six arguments in r2 to r7.
@ -3461,7 +3462,7 @@ void Generate_DeoptimizationEntry(MacroAssembler* masm,
// Compute a pointer to the unwinding limit in register r4; that is
// the first stack slot not part of the input frame.
__ LoadP(r4, MemOperand(r3, FrameDescription::frame_size_offset()));
__ AddP(r4, sp);
__ AddS64(r4, sp);
// Unwind the stack down to - but not including - the unwinding
// limit and copy the contents of the activation frame to the input
@ -3498,7 +3499,7 @@ void Generate_DeoptimizationEntry(MacroAssembler* masm,
__ LoadU32(r3, MemOperand(r2, Deoptimizer::output_count_offset()));
__ LoadP(r6, MemOperand(r2, Deoptimizer::output_offset())); // r6 is output_.
__ ShiftLeftU64(r3, r3, Operand(kSystemPointerSizeLog2));
__ AddP(r3, r6, r3);
__ AddS64(r3, r6, r3);
__ b(&outer_loop_header, Label::kNear);
__ bind(&outer_push_loop);
@ -3508,8 +3509,8 @@ void Generate_DeoptimizationEntry(MacroAssembler* masm,
__ b(&inner_loop_header, Label::kNear);
__ bind(&inner_push_loop);
__ SubP(r5, Operand(sizeof(intptr_t)));
__ AddP(r8, r4, r5);
__ SubS64(r5, Operand(sizeof(intptr_t)));
__ AddS64(r8, r4, r5);
__ LoadP(r8, MemOperand(r8, FrameDescription::frame_content_offset()));
__ push(r8);
@ -3517,7 +3518,7 @@ void Generate_DeoptimizationEntry(MacroAssembler* masm,
__ CmpP(r5, Operand::Zero());
__ bne(&inner_push_loop); // test for gt?
__ AddP(r6, r6, Operand(kSystemPointerSize));
__ AddS64(r6, r6, Operand(kSystemPointerSize));
__ bind(&outer_loop_header);
__ CmpP(r6, r3);
__ blt(&outer_push_loop);


@ -431,14 +431,14 @@ void TurboAssembler::Drop(int count) {
} else if (is_int20(total)) {
lay(sp, MemOperand(sp, total));
} else {
AddP(sp, Operand(total));
AddS64(sp, Operand(total));
}
}
}
void TurboAssembler::Drop(Register count, Register scratch) {
ShiftLeftU64(scratch, count, Operand(kSystemPointerSizeLog2));
AddP(sp, sp, scratch);
AddS64(sp, sp, scratch);
}
void TurboAssembler::Call(Label* target) { b(r14, target); }
@ -576,7 +576,7 @@ void TurboAssembler::MultiPush(RegList regs, Register location) {
int16_t num_to_push = base::bits::CountPopulation(regs);
int16_t stack_offset = num_to_push * kSystemPointerSize;
SubP(location, location, Operand(stack_offset));
SubS64(location, location, Operand(stack_offset));
for (int16_t i = Register::kNumRegisters - 1; i >= 0; i--) {
if ((regs & (1 << i)) != 0) {
stack_offset -= kSystemPointerSize;
@ -594,14 +594,14 @@ void TurboAssembler::MultiPop(RegList regs, Register location) {
stack_offset += kSystemPointerSize;
}
}
AddP(location, location, Operand(stack_offset));
AddS64(location, location, Operand(stack_offset));
}
void TurboAssembler::MultiPushDoubles(RegList dregs, Register location) {
int16_t num_to_push = base::bits::CountPopulation(dregs);
int16_t stack_offset = num_to_push * kDoubleSize;
SubP(location, location, Operand(stack_offset));
SubS64(location, location, Operand(stack_offset));
for (int16_t i = DoubleRegister::kNumRegisters - 1; i >= 0; i--) {
if ((dregs & (1 << i)) != 0) {
DoubleRegister dreg = DoubleRegister::from_code(i);
@ -621,7 +621,7 @@ void TurboAssembler::MultiPopDoubles(RegList dregs, Register location) {
stack_offset += kDoubleSize;
}
}
AddP(location, location, Operand(stack_offset));
AddS64(location, location, Operand(stack_offset));
}
void TurboAssembler::LoadRoot(Register destination, RootIndex index,
@ -1266,8 +1266,8 @@ int TurboAssembler::LeaveFrame(StackFrame::Type type, int stack_adjustment) {
lay(r1, MemOperand(fp, StandardFrameConstants::kCallerSPOffset +
stack_adjustment));
} else {
AddP(r1, fp,
Operand(StandardFrameConstants::kCallerSPOffset + stack_adjustment));
AddS64(r1, fp,
Operand(StandardFrameConstants::kCallerSPOffset + stack_adjustment));
}
LoadP(fp, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
mov(sp, r1);
@ -1426,19 +1426,19 @@ void TurboAssembler::PrepareForTailCall(Register callee_args_count,
DCHECK(!AreAliased(callee_args_count, caller_args_count, scratch0, scratch1));
// Calculate the end of destination area where we will put the arguments
// after we drop current frame. We AddP kSystemPointerSize to count the
// after we drop current frame. We AddS64 kSystemPointerSize to count the
// receiver argument which is not included into formal parameters count.
Register dst_reg = scratch0;
ShiftLeftU64(dst_reg, caller_args_count, Operand(kSystemPointerSizeLog2));
AddP(dst_reg, fp, dst_reg);
AddP(dst_reg, dst_reg,
Operand(StandardFrameConstants::kCallerSPOffset + kSystemPointerSize));
AddS64(dst_reg, fp, dst_reg);
AddS64(dst_reg, dst_reg,
Operand(StandardFrameConstants::kCallerSPOffset + kSystemPointerSize));
Register src_reg = caller_args_count;
// Calculate the end of source area. +kSystemPointerSize is for the receiver.
ShiftLeftU64(src_reg, callee_args_count, Operand(kSystemPointerSizeLog2));
AddP(src_reg, sp, src_reg);
AddP(src_reg, src_reg, Operand(kSystemPointerSize));
AddS64(src_reg, sp, src_reg);
AddS64(src_reg, src_reg, Operand(kSystemPointerSize));
if (FLAG_debug_code) {
CmpLogicalP(src_reg, dst_reg);
@ -1456,7 +1456,7 @@ void TurboAssembler::PrepareForTailCall(Register callee_args_count,
// so they must be pre-decremented in the loop.
Register tmp_reg = scratch1;
Label loop;
AddP(tmp_reg, callee_args_count, Operand(1)); // +1 for receiver
AddS64(tmp_reg, callee_args_count, Operand(1)); // +1 for receiver
mov(r1, tmp_reg);
bind(&loop);
LoadP(tmp_reg, MemOperand(src_reg, -kSystemPointerSize));
@ -1492,7 +1492,7 @@ void MacroAssembler::StackOverflowCheck(Register num_args, Register scratch,
LoadP(scratch, StackLimitAsMemOperand(StackLimitKind::kRealStackLimit));
// Make scratch the space we have left. The stack might already be overflowed
// here which will cause scratch to become negative.
SubP(scratch, sp, scratch);
SubS64(scratch, sp, scratch);
// Check if the arguments will overflow the stack.
ShiftLeftU64(r0, num_args, Operand(kSystemPointerSizeLog2));
CmpP(scratch, r0);
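A minimal C++ model of this check, assuming 64-bit pointers (illustrative, not the V8 API):

#include <cstdint>
// remaining may be negative when the stack has already overflowed;
// the signed compare then fails safely, as the comment above notes.
bool args_fit(intptr_t sp, intptr_t real_stack_limit, intptr_t num_args) {
  intptr_t remaining = sp - real_stack_limit;  // SubS64(scratch, sp, scratch)
  return remaining > (num_args << 3);          // kSystemPointerSizeLog2 == 3
}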
@ -1519,7 +1519,7 @@ void MacroAssembler::InvokePrologue(Register expected_parameter_count,
// If overapplication or if the actual argument count is equal to the
// formal parameter count, no need to push extra undefined values.
SubP(expected_parameter_count, expected_parameter_count,
SubS64(expected_parameter_count, expected_parameter_count,
actual_parameter_count);
ble(&regular_invoke);
@ -1536,7 +1536,7 @@ void MacroAssembler::InvokePrologue(Register expected_parameter_count,
// Update stack pointer.
ShiftLeftU64(scratch, expected_parameter_count,
Operand(kSystemPointerSizeLog2));
SubP(sp, sp, scratch);
SubS64(sp, sp, scratch);
mov(dest, sp);
ltgr(num, actual_parameter_count);
b(&check);
@ -1545,7 +1545,7 @@ void MacroAssembler::InvokePrologue(Register expected_parameter_count,
lay(src, MemOperand(src, kSystemPointerSize));
StoreU64(r0, MemOperand(dest));
lay(dest, MemOperand(dest, kSystemPointerSize));
SubP(num, num, Operand(1));
SubS64(num, num, Operand(1));
bind(&check);
b(ge, &copy);
}
@ -1557,7 +1557,7 @@ void MacroAssembler::InvokePrologue(Register expected_parameter_count,
bind(&loop);
StoreU64(scratch, MemOperand(ip));
lay(ip, MemOperand(ip, kSystemPointerSize));
SubP(expected_parameter_count, expected_parameter_count, Operand(1));
SubS64(expected_parameter_count, expected_parameter_count, Operand(1));
bgt(&loop);
}
b(&regular_invoke);
@ -1889,7 +1889,7 @@ void MacroAssembler::IncrementCounter(StatsCounter* counter, int value,
Move(scratch2, ExternalReference::Create(counter));
// @TODO(john.yan): can be optimized by asi()
LoadS32(scratch1, MemOperand(scratch2));
AddP(scratch1, Operand(value));
AddS64(scratch1, Operand(value));
StoreU32(scratch1, MemOperand(scratch2));
}
}
@ -1901,7 +1901,7 @@ void MacroAssembler::DecrementCounter(StatsCounter* counter, int value,
Move(scratch2, ExternalReference::Create(counter));
// @TODO(john.yan): can be optimized by asi()
LoadS32(scratch1, MemOperand(scratch2));
AddP(scratch1, Operand(-value));
AddS64(scratch1, Operand(-value));
StoreU32(scratch1, MemOperand(scratch2));
}
}
@ -2671,7 +2671,7 @@ void TurboAssembler::Sqrt(DoubleRegister result, const MemOperand& input) {
//----------------------------------------------------------------------------
// Add 32-bit (Register dst = Register dst + Immediate opnd)
void TurboAssembler::Add32(Register dst, const Operand& opnd) {
void TurboAssembler::AddS32(Register dst, const Operand& opnd) {
if (is_int16(opnd.immediate()))
ahi(dst, opnd);
else
@ -2679,19 +2679,15 @@ void TurboAssembler::Add32(Register dst, const Operand& opnd) {
}
// Add Pointer Size (Register dst = Register dst + Immediate opnd)
void TurboAssembler::AddP(Register dst, const Operand& opnd) {
#if V8_TARGET_ARCH_S390X
void TurboAssembler::AddS64(Register dst, const Operand& opnd) {
if (is_int16(opnd.immediate()))
aghi(dst, opnd);
else
agfi(dst, opnd);
#else
Add32(dst, opnd);
#endif
}
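With the 31-bit fallback gone, the immediate form above is a pure opcode-width choice. A standalone sketch of that decision (names invented for illustration):

#include <cstdint>
// AGHI takes a 16-bit signed immediate; wider immediates use AGFI (32-bit).
const char* add_s64_imm_opcode(int64_t imm) {
  bool fits_int16 = imm >= INT16_MIN && imm <= INT16_MAX;
  return fits_int16 ? "aghi" : "agfi";
}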
// Add 32-bit (Register dst = Register src + Immediate opnd)
void TurboAssembler::Add32(Register dst, Register src, const Operand& opnd) {
void TurboAssembler::AddS32(Register dst, Register src, const Operand& opnd) {
if (dst != src) {
if (CpuFeatures::IsSupported(DISTINCT_OPS) && is_int16(opnd.immediate())) {
ahik(dst, src, opnd);
@ -2699,11 +2695,11 @@ void TurboAssembler::Add32(Register dst, Register src, const Operand& opnd) {
}
lr(dst, src);
}
Add32(dst, opnd);
AddS32(dst, opnd);
}
// Add Pointer Size (Register dst = Register src + Immediate opnd)
void TurboAssembler::AddP(Register dst, Register src, const Operand& opnd) {
void TurboAssembler::AddS64(Register dst, Register src, const Operand& opnd) {
if (dst != src) {
if (CpuFeatures::IsSupported(DISTINCT_OPS) && is_int16(opnd.immediate())) {
aghik(dst, src, opnd);
@ -2711,17 +2707,17 @@ void TurboAssembler::AddP(Register dst, Register src, const Operand& opnd) {
}
mov(dst, src);
}
AddP(dst, opnd);
AddS64(dst, opnd);
}
// Add 32-bit (Register dst = Register dst + Register src)
void TurboAssembler::Add32(Register dst, Register src) { ar(dst, src); }
void TurboAssembler::AddS32(Register dst, Register src) { ar(dst, src); }
// Add Pointer Size (Register dst = Register dst + Register src)
void TurboAssembler::AddP(Register dst, Register src) { agr(dst, src); }
void TurboAssembler::AddS64(Register dst, Register src) { agr(dst, src); }
// Add 32-bit (Register dst = Register src1 + Register src2)
void TurboAssembler::Add32(Register dst, Register src1, Register src2) {
void TurboAssembler::AddS32(Register dst, Register src1, Register src2) {
if (dst != src1 && dst != src2) {
// We prefer to generate AR/AGR, over the non clobbering ARK/AGRK
// as AR is a smaller instruction
@ -2738,7 +2734,7 @@ void TurboAssembler::Add32(Register dst, Register src1, Register src2) {
}
// Add Pointer Size (Register dst = Register src1 + Register src2)
void TurboAssembler::AddP(Register dst, Register src1, Register src2) {
void TurboAssembler::AddS64(Register dst, Register src1, Register src2) {
if (dst != src1 && dst != src2) {
// We prefer to generate AR/AGR, over the non clobbering ARK/AGRK
// as AR is a smaller instruction
@ -2755,7 +2751,7 @@ void TurboAssembler::AddP(Register dst, Register src1, Register src2) {
}
// Add 32-bit (Register-Memory)
void TurboAssembler::Add32(Register dst, const MemOperand& opnd) {
void TurboAssembler::AddS32(Register dst, const MemOperand& opnd) {
DCHECK(is_int20(opnd.offset()));
if (is_uint12(opnd.offset()))
a(dst, opnd);
@ -2764,17 +2760,13 @@ void TurboAssembler::Add32(Register dst, const MemOperand& opnd) {
}
// Add Pointer Size (Register-Memory)
void TurboAssembler::AddP(Register dst, const MemOperand& opnd) {
#if V8_TARGET_ARCH_S390X
void TurboAssembler::AddS64(Register dst, const MemOperand& opnd) {
DCHECK(is_int20(opnd.offset()));
ag(dst, opnd);
#else
Add32(dst, opnd);
#endif
}
// Add 32-bit (Memory - Immediate)
void TurboAssembler::Add32(const MemOperand& opnd, const Operand& imm) {
void TurboAssembler::AddS32(const MemOperand& opnd, const Operand& imm) {
DCHECK(is_int8(imm.immediate()));
DCHECK(is_int20(opnd.offset()));
DCHECK(CpuFeatures::IsSupported(GENERAL_INSTR_EXT));
@ -2782,15 +2774,11 @@ void TurboAssembler::Add32(const MemOperand& opnd, const Operand& imm) {
}
// Add Pointer-sized (Memory - Immediate)
void TurboAssembler::AddP(const MemOperand& opnd, const Operand& imm) {
void TurboAssembler::AddS64(const MemOperand& opnd, const Operand& imm) {
DCHECK(is_int8(imm.immediate()));
DCHECK(is_int20(opnd.offset()));
DCHECK(CpuFeatures::IsSupported(GENERAL_INSTR_EXT));
#if V8_TARGET_ARCH_S390X
agsi(opnd, imm);
#else
asi(opnd, imm);
#endif
}
//----------------------------------------------------------------------------
@ -2798,7 +2786,7 @@ void TurboAssembler::AddP(const MemOperand& opnd, const Operand& imm) {
//----------------------------------------------------------------------------
// Add Logical 32-bit (Register dst = Register src1 + Register src2)
void TurboAssembler::AddLogical32(Register dst, Register src1, Register src2) {
void TurboAssembler::AddU32(Register dst, Register src1, Register src2) {
if (dst != src2 && dst != src1) {
lr(dst, src1);
alr(dst, src2);
@ -2814,21 +2802,17 @@ void TurboAssembler::AddLogical32(Register dst, Register src1, Register src2) {
}
// Add Logical 32-bit (Register dst = Register dst + Immediate opnd)
void TurboAssembler::AddLogical(Register dst, const Operand& imm) {
void TurboAssembler::AddU32(Register dst, const Operand& imm) {
alfi(dst, imm);
}
// Add Logical Pointer Size (Register dst = Register dst + Immediate opnd)
void TurboAssembler::AddLogicalP(Register dst, const Operand& imm) {
#ifdef V8_TARGET_ARCH_S390X
void TurboAssembler::AddU64(Register dst, const Operand& imm) {
algfi(dst, imm);
#else
AddLogical(dst, imm);
#endif
}
// Add Logical 32-bit (Register-Memory)
void TurboAssembler::AddLogical(Register dst, const MemOperand& opnd) {
void TurboAssembler::AddU32(Register dst, const MemOperand& opnd) {
DCHECK(is_int20(opnd.offset()));
if (is_uint12(opnd.offset()))
al_z(dst, opnd);
@ -2837,13 +2821,9 @@ void TurboAssembler::AddLogical(Register dst, const MemOperand& opnd) {
}
// Add Logical Pointer Size (Register-Memory)
void TurboAssembler::AddLogicalP(Register dst, const MemOperand& opnd) {
#if V8_TARGET_ARCH_S390X
void TurboAssembler::AddU64(Register dst, const MemOperand& opnd) {
DCHECK(is_int20(opnd.offset()));
alg(dst, opnd);
#else
AddLogical(dst, opnd);
#endif
}
//----------------------------------------------------------------------------
@ -2851,7 +2831,7 @@ void TurboAssembler::AddLogicalP(Register dst, const MemOperand& opnd) {
//----------------------------------------------------------------------------
// Subtract Logical 32-bit (Register dst = Register src1 - Register src2)
void TurboAssembler::SubLogical32(Register dst, Register src1, Register src2) {
void TurboAssembler::SubU32(Register dst, Register src1, Register src2) {
if (dst != src2 && dst != src1) {
lr(dst, src1);
slr(dst, src2);
@ -2863,38 +2843,38 @@ void TurboAssembler::SubLogical32(Register dst, Register src1, Register src2) {
// dst == src2
DCHECK(dst == src2);
lr(r0, dst);
SubLogical32(dst, src1, r0);
SubU32(dst, src1, r0);
}
}
// Subtract 32-bit (Register dst = Register dst - Immediate opnd)
void TurboAssembler::Sub32(Register dst, const Operand& imm) {
Add32(dst, Operand(-(imm.immediate())));
void TurboAssembler::SubS32(Register dst, const Operand& imm) {
AddS32(dst, Operand(-(imm.immediate())));
}
// Subtract Pointer Size (Register dst = Register dst - Immediate opnd)
void TurboAssembler::SubP(Register dst, const Operand& imm) {
AddP(dst, Operand(-(imm.immediate())));
void TurboAssembler::SubS64(Register dst, const Operand& imm) {
AddS64(dst, Operand(-(imm.immediate())));
}
// Subtract 32-bit (Register dst = Register src - Immediate opnd)
void TurboAssembler::Sub32(Register dst, Register src, const Operand& imm) {
Add32(dst, src, Operand(-(imm.immediate())));
void TurboAssembler::SubS32(Register dst, Register src, const Operand& imm) {
AddS32(dst, src, Operand(-(imm.immediate())));
}
// Subtract Pointer Sized (Register dst = Register src - Immediate opnd)
void TurboAssembler::SubP(Register dst, Register src, const Operand& imm) {
AddP(dst, src, Operand(-(imm.immediate())));
void TurboAssembler::SubS64(Register dst, Register src, const Operand& imm) {
AddS64(dst, src, Operand(-(imm.immediate())));
}
// Subtract 32-bit (Register dst = Register dst - Register src)
void TurboAssembler::Sub32(Register dst, Register src) { sr(dst, src); }
void TurboAssembler::SubS32(Register dst, Register src) { sr(dst, src); }
// Subtract Pointer Size (Register dst = Register dst - Register src)
void TurboAssembler::SubP(Register dst, Register src) { sgr(dst, src); }
void TurboAssembler::SubS64(Register dst, Register src) { sgr(dst, src); }
// Subtract 32-bit (Register = Register - Register)
void TurboAssembler::Sub32(Register dst, Register src1, Register src2) {
void TurboAssembler::SubS32(Register dst, Register src1, Register src2) {
// Use non-clobbering version if possible
if (CpuFeatures::IsSupported(DISTINCT_OPS)) {
srk(dst, src1, src2);
@ -2914,7 +2894,7 @@ void TurboAssembler::Sub32(Register dst, Register src1, Register src2) {
}
// Subtract Pointer Sized (Register = Register - Register)
void TurboAssembler::SubP(Register dst, Register src1, Register src2) {
void TurboAssembler::SubS64(Register dst, Register src1, Register src2) {
// Use non-clobbering version if possible
if (CpuFeatures::IsSupported(DISTINCT_OPS)) {
sgrk(dst, src1, src2);
@ -2926,15 +2906,15 @@ void TurboAssembler::SubP(Register dst, Register src1, Register src2) {
Label done;
lcgr(dst, dst); // dst = -dst
b(overflow, &done);
AddP(dst, src1); // dst = dst + src
AddS64(dst, src1); // dst = dst + src
bind(&done);
} else {
SubP(dst, src2);
SubS64(dst, src2);
}
}
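In the dst == src2 fallback above, src1 - src2 is rewritten as (-src2) + src1; the branch on overflow skips the add when negating INT64_MIN, the one value whose negation overflows. A quick model of the common path:

#include <cstdint>
// dst aliases src2: negate in place (lcgr), then add src1 (AddS64).
int64_t sub_via_negate_add(int64_t src1, int64_t dst) {
  dst = -dst;         // undefined for INT64_MIN; the masm branches out there
  return dst + src1;  // == src1 - original dst, e.g. (10, 3) -> 7
}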
// Subtract 32-bit (Register-Memory)
void TurboAssembler::Sub32(Register dst, const MemOperand& opnd) {
void TurboAssembler::SubS32(Register dst, const MemOperand& opnd) {
DCHECK(is_int20(opnd.offset()));
if (is_uint12(opnd.offset()))
s(dst, opnd);
@ -2943,11 +2923,11 @@ void TurboAssembler::Sub32(Register dst, const MemOperand& opnd) {
}
// Subtract Pointer Sized (Register - Memory)
void TurboAssembler::SubP(Register dst, const MemOperand& opnd) {
void TurboAssembler::SubS64(Register dst, const MemOperand& opnd) {
#if V8_TARGET_ARCH_S390X
sg(dst, opnd);
#else
Sub32(dst, opnd);
SubS32(dst, opnd);
#endif
}
@ -2979,7 +2959,7 @@ void TurboAssembler::LoadAndSub64(Register dst, Register src,
//----------------------------------------------------------------------------
// Subtract Logical 32-bit (Register - Memory)
void TurboAssembler::SubLogical(Register dst, const MemOperand& opnd) {
void TurboAssembler::SubU32(Register dst, const MemOperand& opnd) {
DCHECK(is_int20(opnd.offset()));
if (is_uint12(opnd.offset()))
sl(dst, opnd);
@ -2988,12 +2968,12 @@ void TurboAssembler::SubLogical(Register dst, const MemOperand& opnd) {
}
// Subtract Logical Pointer Sized (Register - Memory)
void TurboAssembler::SubLogicalP(Register dst, const MemOperand& opnd) {
void TurboAssembler::SubU64(Register dst, const MemOperand& opnd) {
DCHECK(is_int20(opnd.offset()));
#if V8_TARGET_ARCH_S390X
slgf(dst, opnd);
#else
SubLogical(dst, opnd);
SubU32(dst, opnd);
#endif
}
@ -3492,7 +3472,7 @@ void TurboAssembler::BranchOnCount(Register r1, Label* l) {
brct(r1, Operand(offset));
#endif
} else {
AddP(r1, Operand(-1));
AddS64(r1, Operand(-1));
Branch(ne, Operand(offset));
}
}
@ -4225,11 +4205,11 @@ void TurboAssembler::Popcnt64(Register dst, Register src) {
popcnt(dst, src);
ShiftRightU64(r0, dst, Operand(32));
AddP(dst, r0);
AddS64(dst, r0);
ShiftRightU64(r0, dst, Operand(16));
AddP(dst, r0);
AddS64(dst, r0);
ShiftRightU64(r0, dst, Operand(8));
AddP(dst, r0);
AddS64(dst, r0);
LoadU8(dst, dst);
}
#endif
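The fold above exists because the z/Architecture POPCNT instruction, as used here, produces a population count per byte rather than per register; the three shift-and-add steps sum all eight byte counts into the low byte. In plain C++, under that per-byte assumption:

#include <cstdint>
// d holds eight per-byte counts (each 0..8), so the adds never carry
// between bytes; the total (<= 64) ends up in the low byte.
uint64_t fold_popcnt_bytes(uint64_t d) {
  d += d >> 32;
  d += d >> 16;
  d += d >> 8;
  return d & 0xff;  // LoadU8(dst, dst) keeps only the low byte
}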
@ -4431,7 +4411,8 @@ void TurboAssembler::LoadCodeObjectEntry(Register destination,
// Not an off-heap trampoline, the entry point is at
// Code::raw_instruction_start().
AddP(destination, code_object, Operand(Code::kHeaderSize - kHeapObjectTag));
AddS64(destination, code_object,
Operand(Code::kHeaderSize - kHeapObjectTag));
b(&out);
// An off-heap trampoline, the entry point is loaded from the builtin entry
@ -4439,13 +4420,14 @@ void TurboAssembler::LoadCodeObjectEntry(Register destination,
bind(&if_code_is_off_heap);
LoadS32(scratch, FieldMemOperand(code_object, Code::kBuiltinIndexOffset));
ShiftLeftU64(destination, scratch, Operand(kSystemPointerSizeLog2));
AddP(destination, destination, kRootRegister);
AddS64(destination, destination, kRootRegister);
LoadP(destination,
MemOperand(destination, IsolateData::builtin_entry_table_offset()));
bind(&out);
} else {
AddP(destination, code_object, Operand(Code::kHeaderSize - kHeapObjectTag));
AddS64(destination, code_object,
Operand(Code::kHeaderSize - kHeapObjectTag));
}
}


@ -175,59 +175,59 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
// Arithmetic Operations
// Add (Register - Immediate)
void Add32(Register dst, const Operand& imm);
void AddP(Register dst, const Operand& imm);
void Add32(Register dst, Register src, const Operand& imm);
void AddP(Register dst, Register src, const Operand& imm);
void AddS32(Register dst, const Operand& imm);
void AddS64(Register dst, const Operand& imm);
void AddS32(Register dst, Register src, const Operand& imm);
void AddS64(Register dst, Register src, const Operand& imm);
// Add (Register - Register)
void Add32(Register dst, Register src);
void AddP(Register dst, Register src);
void Add32(Register dst, Register src1, Register src2);
void AddP(Register dst, Register src1, Register src2);
void AddS32(Register dst, Register src);
void AddS64(Register dst, Register src);
void AddS32(Register dst, Register src1, Register src2);
void AddS64(Register dst, Register src1, Register src2);
// Add (Register - Mem)
void Add32(Register dst, const MemOperand& opnd);
void AddP(Register dst, const MemOperand& opnd);
void AddS32(Register dst, const MemOperand& opnd);
void AddS64(Register dst, const MemOperand& opnd);
// Add (Mem - Immediate)
void Add32(const MemOperand& opnd, const Operand& imm);
void AddP(const MemOperand& opnd, const Operand& imm);
void AddS32(const MemOperand& opnd, const Operand& imm);
void AddS64(const MemOperand& opnd, const Operand& imm);
// Add Logical (Register - Register)
void AddLogical32(Register dst, Register src1, Register src2);
void AddU32(Register dst, Register src1, Register src2);
// Add Logical (Register - Immediate)
void AddLogical(Register dst, const Operand& imm);
void AddLogicalP(Register dst, const Operand& imm);
void AddU32(Register dst, const Operand& imm);
void AddU64(Register dst, const Operand& imm);
// Add Logical (Register - Mem)
void AddLogical(Register dst, const MemOperand& opnd);
void AddLogicalP(Register dst, const MemOperand& opnd);
void AddU32(Register dst, const MemOperand& opnd);
void AddU64(Register dst, const MemOperand& opnd);
// Subtract (Register - Immediate)
void Sub32(Register dst, const Operand& imm);
void SubP(Register dst, const Operand& imm);
void Sub32(Register dst, Register src, const Operand& imm);
void SubP(Register dst, Register src, const Operand& imm);
void SubS32(Register dst, const Operand& imm);
void SubS64(Register dst, const Operand& imm);
void SubS32(Register dst, Register src, const Operand& imm);
void SubS64(Register dst, Register src, const Operand& imm);
// Subtract (Register - Register)
void Sub32(Register dst, Register src);
void SubP(Register dst, Register src);
void Sub32(Register dst, Register src1, Register src2);
void SubP(Register dst, Register src1, Register src2);
void SubS32(Register dst, Register src);
void SubS64(Register dst, Register src);
void SubS32(Register dst, Register src1, Register src2);
void SubS64(Register dst, Register src1, Register src2);
// Subtract (Register - Mem)
void Sub32(Register dst, const MemOperand& opnd);
void SubP(Register dst, const MemOperand& opnd);
void SubS32(Register dst, const MemOperand& opnd);
void SubS64(Register dst, const MemOperand& opnd);
void LoadAndSub32(Register dst, Register src, const MemOperand& opnd);
void LoadAndSub64(Register dst, Register src, const MemOperand& opnd);
// Subtract Logical (Register - Mem)
void SubLogical(Register dst, const MemOperand& opnd);
void SubLogicalP(Register dst, const MemOperand& opnd);
void SubU32(Register dst, const MemOperand& opnd);
void SubU64(Register dst, const MemOperand& opnd);
// Subtract Logical 32-bit
void SubLogical32(Register dst, Register src1, Register src2);
void SubU32(Register dst, Register src1, Register src2);
// Multiply
void MulP(Register dst, const Operand& opnd);


@ -210,10 +210,10 @@ class OutOfLineRecordWrite final : public OutOfLineCode {
MemoryChunk::kPointersToHereAreInterestingMask, eq,
exit());
if (offset_ == no_reg) {
__ AddP(scratch1_, object_, Operand(offset_immediate_));
__ AddS64(scratch1_, object_, Operand(offset_immediate_));
} else {
DCHECK_EQ(0, offset_immediate_);
__ AddP(scratch1_, object_, offset_);
__ AddS64(scratch1_, object_, offset_);
}
RememberedSetAction const remembered_set_action =
mode_ > RecordWriteMode::kValueIsMap ? EMIT_REMEMBERED_SET
@ -290,7 +290,7 @@ Condition FlagsConditionToCondition(FlagsCondition condition, ArchOpcode op) {
case kSignedGreaterThan:
return gt;
case kOverflow:
// Overflow checked for AddP/SubP only.
// Overflow checked for AddS64/SubS64 only.
switch (op) {
case kS390_Add32:
case kS390_Add64:
@ -1064,13 +1064,13 @@ void AdjustStackPointerForTailCall(
if (pending_pushes != nullptr) {
FlushPendingPushRegisters(tasm, state, pending_pushes);
}
tasm->AddP(sp, sp, Operand(-stack_slot_delta * kSystemPointerSize));
tasm->AddS64(sp, sp, Operand(-stack_slot_delta * kSystemPointerSize));
state->IncreaseSPDelta(stack_slot_delta);
} else if (allow_shrinkage && stack_slot_delta < 0) {
if (pending_pushes != nullptr) {
FlushPendingPushRegisters(tasm, state, pending_pushes);
}
tasm->AddP(sp, sp, Operand(-stack_slot_delta * kSystemPointerSize));
tasm->AddS64(sp, sp, Operand(-stack_slot_delta * kSystemPointerSize));
state->IncreaseSPDelta(stack_slot_delta);
}
}
@ -1436,7 +1436,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
if (ShouldApplyOffsetToStackCheck(instr, &offset)) {
lhs_register = i.TempRegister(0);
__ SubP(lhs_register, sp, Operand(offset));
__ SubS64(lhs_register, sp, Operand(offset));
}
constexpr size_t kValueIndex = 0;
@ -1486,8 +1486,8 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
case kArchStackSlot: {
FrameOffset offset =
frame_access_state()->GetFrameOffset(i.InputInt32(0));
__ AddP(i.OutputRegister(), offset.from_stack_pointer() ? sp : fp,
Operand(offset.offset()));
__ AddS64(i.OutputRegister(), offset.from_stack_pointer() ? sp : fp,
Operand(offset.offset()));
break;
}
case kArchWordPoisonOnSpeculation:
@ -1676,15 +1676,15 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
case kS390_Add32: {
// zero-ext
if (CpuFeatures::IsSupported(DISTINCT_OPS)) {
ASSEMBLE_BIN32_OP(RRRInstr(ark), RM32Instr(Add32), RRIInstr(Add32));
ASSEMBLE_BIN32_OP(RRRInstr(ark), RM32Instr(AddS32), RRIInstr(AddS32));
} else {
ASSEMBLE_BIN32_OP(RRInstr(ar), RM32Instr(Add32), RIInstr(Add32));
ASSEMBLE_BIN32_OP(RRInstr(ar), RM32Instr(AddS32), RIInstr(AddS32));
}
break;
}
case kS390_Add64:
if (CpuFeatures::IsSupported(DISTINCT_OPS)) {
ASSEMBLE_BIN_OP(RRRInstr(agrk), RM64Instr(ag), RRIInstr(AddP));
ASSEMBLE_BIN_OP(RRRInstr(agrk), RM64Instr(ag), RRIInstr(AddS64));
} else {
ASSEMBLE_BIN_OP(RRInstr(agr), RM64Instr(ag), RIInstr(agfi));
}
@ -1698,16 +1698,16 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
case kS390_Sub32:
// zero-ext
if (CpuFeatures::IsSupported(DISTINCT_OPS)) {
ASSEMBLE_BIN32_OP(RRRInstr(srk), RM32Instr(Sub32), RRIInstr(Sub32));
ASSEMBLE_BIN32_OP(RRRInstr(srk), RM32Instr(SubS32), RRIInstr(SubS32));
} else {
ASSEMBLE_BIN32_OP(RRInstr(sr), RM32Instr(Sub32), RIInstr(Sub32));
ASSEMBLE_BIN32_OP(RRInstr(sr), RM32Instr(SubS32), RIInstr(SubS32));
}
break;
case kS390_Sub64:
if (CpuFeatures::IsSupported(DISTINCT_OPS)) {
ASSEMBLE_BIN_OP(RRRInstr(sgrk), RM64Instr(sg), RRIInstr(SubP));
ASSEMBLE_BIN_OP(RRRInstr(sgrk), RM64Instr(sg), RRIInstr(SubS64));
} else {
ASSEMBLE_BIN_OP(RRInstr(sgr), RM64Instr(sg), RIInstr(SubP));
ASSEMBLE_BIN_OP(RRInstr(sgr), RM64Instr(sg), RIInstr(SubS64));
}
break;
case kS390_SubFloat:
@ -1924,7 +1924,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
case kS390_Cntlz32: {
__ llgfr(i.OutputRegister(), i.InputRegister(0));
__ flogr(r0, i.OutputRegister());
__ Add32(i.OutputRegister(), r0, Operand(-32));
__ AddS32(i.OutputRegister(), r0, Operand(-32));
// No need to zero-ext b/c llgfr is done already
break;
}
@ -2608,8 +2608,8 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
true); \
}); \
break;
ATOMIC_BINOP_CASE(Add, Add32)
ATOMIC_BINOP_CASE(Sub, Sub32)
ATOMIC_BINOP_CASE(Add, AddS32)
ATOMIC_BINOP_CASE(Sub, SubS32)
ATOMIC_BINOP_CASE(And, And)
ATOMIC_BINOP_CASE(Or, Or)
ATOMIC_BINOP_CASE(Xor, Xor)
@ -4417,7 +4417,8 @@ void CodeGenerator::AssembleConstructFrame() {
FieldMemOperand(kWasmInstanceRegister,
WasmInstanceObject::kRealStackLimitAddressOffset));
__ LoadP(scratch, MemOperand(scratch));
__ AddP(scratch, scratch, Operand(required_slots * kSystemPointerSize));
__ AddS64(scratch, scratch,
Operand(required_slots * kSystemPointerSize));
__ CmpLogicalP(sp, scratch);
__ bge(&done);
}
@ -4536,7 +4537,7 @@ void CodeGenerator::AssembleReturn(InstructionOperand* additional_pop_count) {
if (drop_jsargs) {
// We must pop all arguments from the stack (including the receiver). This
// number of arguments is given by max(1 + argc_reg, parameter_count).
__ AddP(argc_reg, argc_reg, Operand(1)); // Also pop the receiver.
__ AddS64(argc_reg, argc_reg, Operand(1)); // Also pop the receiver.
if (parameter_count > 1) {
Label skip;
__ CmpP(argc_reg, Operand(parameter_count));


@ -728,8 +728,8 @@ static void VisitGeneralStore(
InstructionOperand inputs[3];
size_t input_count = 0;
inputs[input_count++] = g.UseUniqueRegister(base);
// OutOfLineRecordWrite uses the offset in an 'AddP' instruction as well as
// for the store itself, so we must check compatibility with both.
// OutOfLineRecordWrite uses the offset in an 'AddS64' instruction as well
// as for the store itself, so we must check compatibility with both.
if (g.CanBeImmediate(offset, OperandMode::kInt20Imm)) {
inputs[input_count++] = g.UseImmediate(offset);
addressing_mode = kMode_MRI;


@ -146,7 +146,7 @@ int RegExpMacroAssemblerS390::stack_limit_slack() {
void RegExpMacroAssemblerS390::AdvanceCurrentPosition(int by) {
if (by != 0) {
__ AddP(current_input_offset(), Operand(by * char_size()));
__ AddS64(current_input_offset(), Operand(by * char_size()));
}
}
@ -170,7 +170,7 @@ void RegExpMacroAssemblerS390::Backtrack() {
if (has_backtrack_limit()) {
Label next;
__ LoadP(r2, MemOperand(frame_pointer(), kBacktrackCount), r0);
__ AddP(r2, r2, Operand(1));
__ AddS64(r2, r2, Operand(1));
__ StoreU64(r2, MemOperand(frame_pointer(), kBacktrackCount), r0);
__ CmpLogicalP(r2, Operand(backtrack_limit()));
__ bne(&next);
@ -187,7 +187,7 @@ void RegExpMacroAssemblerS390::Backtrack() {
}
// Pop Code offset from backtrack stack, add Code and jump to location.
Pop(r2);
__ AddP(r2, code_pointer());
__ AddS64(r2, code_pointer());
__ b(r2);
}
@ -205,8 +205,8 @@ void RegExpMacroAssemblerS390::CheckCharacterGT(uc16 limit, Label* on_greater) {
void RegExpMacroAssemblerS390::CheckAtStart(int cp_offset, Label* on_at_start) {
__ LoadP(r3, MemOperand(frame_pointer(), kStringStartMinusOne));
__ AddP(r2, current_input_offset(),
Operand(-char_size() + cp_offset * char_size()));
__ AddS64(r2, current_input_offset(),
Operand(-char_size() + cp_offset * char_size()));
__ CmpP(r2, r3);
BranchOrBacktrack(eq, on_at_start);
}
@ -214,8 +214,8 @@ void RegExpMacroAssemblerS390::CheckAtStart(int cp_offset, Label* on_at_start) {
void RegExpMacroAssemblerS390::CheckNotAtStart(int cp_offset,
Label* on_not_at_start) {
__ LoadP(r3, MemOperand(frame_pointer(), kStringStartMinusOne));
__ AddP(r2, current_input_offset(),
Operand(-char_size() + cp_offset * char_size()));
__ AddS64(r2, current_input_offset(),
Operand(-char_size() + cp_offset * char_size()));
__ CmpP(r2, r3);
BranchOrBacktrack(ne, on_not_at_start);
}
@ -229,7 +229,7 @@ void RegExpMacroAssemblerS390::CheckGreedyLoop(Label* on_equal) {
Label backtrack_non_equal;
__ CmpP(current_input_offset(), MemOperand(backtrack_stackpointer(), 0));
__ bne(&backtrack_non_equal);
__ AddP(backtrack_stackpointer(), Operand(kSystemPointerSize));
__ AddS64(backtrack_stackpointer(), Operand(kSystemPointerSize));
BranchOrBacktrack(al, on_equal);
__ bind(&backtrack_non_equal);
@ -241,7 +241,7 @@ void RegExpMacroAssemblerS390::CheckNotBackReferenceIgnoreCase(
__ LoadP(r2, register_location(start_reg)); // Index of start of
// capture
__ LoadP(r3, register_location(start_reg + 1)); // Index of end
__ SubP(r3, r3, r2);
__ SubS64(r3, r3, r2);
// At this point, the capture registers are either both set or both cleared.
// If the capture length is zero, then the capture is either empty or cleared.
@ -251,11 +251,11 @@ void RegExpMacroAssemblerS390::CheckNotBackReferenceIgnoreCase(
// Check that there are enough characters left in the input.
if (read_backward) {
__ LoadP(r5, MemOperand(frame_pointer(), kStringStartMinusOne));
__ AddP(r5, r5, r3);
__ AddS64(r5, r5, r3);
__ CmpP(current_input_offset(), r5);
BranchOrBacktrack(le, on_no_match);
} else {
__ AddP(r0, r3, current_input_offset());
__ AddS64(r0, r3, current_input_offset());
BranchOrBacktrack(gt, on_no_match);
}
@ -266,10 +266,10 @@ void RegExpMacroAssemblerS390::CheckNotBackReferenceIgnoreCase(
// r2 - offset of start of capture
// r3 - length of capture
__ AddP(r2, end_of_input_address());
__ AddP(r4, current_input_offset(), end_of_input_address());
__ AddS64(r2, end_of_input_address());
__ AddS64(r4, current_input_offset(), end_of_input_address());
if (read_backward) {
__ SubP(r4, r4, r3); // Offset by length when matching backwards.
__ SubS64(r4, r4, r3); // Offset by length when matching backwards.
}
__ mov(r1, Operand::Zero());
@ -290,11 +290,11 @@ void RegExpMacroAssemblerS390::CheckNotBackReferenceIgnoreCase(
__ Or(r6, Operand(0x20)); // Also convert input character.
__ CmpP(r6, r5);
__ bne(&fail);
__ SubP(r5, Operand('a'));
__ SubS64(r5, Operand('a'));
__ CmpLogicalP(r5, Operand('z' - 'a')); // Is r5 a lowercase letter?
__ ble(&loop_check); // In range 'a'-'z'.
// Latin-1: Check for values in range [224,254] but not 247.
__ SubP(r5, Operand(224 - 'a'));
__ SubS64(r5, Operand(224 - 'a'));
__ CmpLogicalP(r5, Operand(254 - 224));
__ bgt(&fail); // Weren't Latin-1 letters.
__ CmpLogicalP(r5, Operand(247 - 224)); // Check for 247.
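The loop body above is the classic OR-0x20 case fold, which is only valid when the folded value is actually a letter. A C++ model of the whole acceptance test (hypothetical helper, not part of the patch):

#include <cstdint>
bool latin1_equal_ignore_case(uint32_t c1, uint32_t c2) {
  if (c1 == c2) return true;
  uint32_t f = c1 | 0x20;                   // fold both to "lowercase"
  if (f != (c2 | 0x20)) return false;
  uint32_t d = f - 'a';
  if (d <= 'z' - 'a') return true;          // ASCII letter
  d -= 224 - 'a';                           // re-base to Latin-1 letters
  return d <= 254 - 224 && d != 247 - 224;  // 224..254 except 247 (division sign)
}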
@ -311,15 +311,15 @@ void RegExpMacroAssemblerS390::CheckNotBackReferenceIgnoreCase(
__ bind(&success);
// Compute new value of character position after the matched part.
__ SubP(current_input_offset(), r4, end_of_input_address());
__ SubS64(current_input_offset(), r4, end_of_input_address());
if (read_backward) {
__ LoadP(r2, register_location(start_reg)); // Index of start of capture
__ LoadP(r3,
register_location(start_reg + 1)); // Index of end of capture
__ AddP(current_input_offset(), current_input_offset(), r2);
__ SubP(current_input_offset(), current_input_offset(), r3);
__ AddS64(current_input_offset(), current_input_offset(), r2);
__ SubS64(current_input_offset(), current_input_offset(), r3);
}
__ AddP(current_input_offset(), r1);
__ AddS64(current_input_offset(), r1);
} else {
DCHECK(mode_ == UC16);
int argument_count = 4;
@ -336,15 +336,15 @@ void RegExpMacroAssemblerS390::CheckNotBackReferenceIgnoreCase(
// r5: Isolate* isolate.
// Address of start of capture.
__ AddP(r2, end_of_input_address());
__ AddS64(r2, end_of_input_address());
// Length of capture.
__ mov(r4, r3);
// Save length in callee-save register for use on return.
__ mov(r6, r3);
// Address of current input position.
__ AddP(r3, current_input_offset(), end_of_input_address());
__ AddS64(r3, current_input_offset(), end_of_input_address());
if (read_backward) {
__ SubP(r3, r3, r6);
__ SubS64(r3, r3, r6);
}
// Isolate.
__ mov(r5, Operand(ExternalReference::isolate_address(isolate())));
@ -365,9 +365,9 @@ void RegExpMacroAssemblerS390::CheckNotBackReferenceIgnoreCase(
// On success, advance position by length of capture.
if (read_backward) {
__ SubP(current_input_offset(), current_input_offset(), r6);
__ SubS64(current_input_offset(), current_input_offset(), r6);
} else {
__ AddP(current_input_offset(), current_input_offset(), r6);
__ AddS64(current_input_offset(), current_input_offset(), r6);
}
}
@ -382,7 +382,7 @@ void RegExpMacroAssemblerS390::CheckNotBackReference(int start_reg,
// Find length of back-referenced capture.
__ LoadP(r2, register_location(start_reg));
__ LoadP(r3, register_location(start_reg + 1));
__ SubP(r3, r3, r2); // Length to check.
__ SubS64(r3, r3, r2); // Length to check.
// At this point, the capture registers are either both set or both cleared.
// If the capture length is zero, then the capture is either empty or cleared.
@ -392,11 +392,11 @@ void RegExpMacroAssemblerS390::CheckNotBackReference(int start_reg,
// Check that there are enough characters left in the input.
if (read_backward) {
__ LoadP(r5, MemOperand(frame_pointer(), kStringStartMinusOne));
__ AddP(r5, r5, r3);
__ AddS64(r5, r5, r3);
__ CmpP(current_input_offset(), r5);
BranchOrBacktrack(le, on_no_match);
} else {
__ AddP(r0, r3, current_input_offset());
__ AddS64(r0, r3, current_input_offset());
BranchOrBacktrack(gt, on_no_match, cr0);
}
@@ -405,7 +405,7 @@ void RegExpMacroAssemblerS390::CheckNotBackReference(int start_reg,
__ la(r2, MemOperand(r2, end_of_input_address()));
__ la(r4, MemOperand(current_input_offset(), end_of_input_address()));
if (read_backward) {
__ SubP(r4, r4, r3); // Offset by length when matching backwards.
__ SubS64(r4, r4, r3); // Offset by length when matching backwards.
}
__ mov(r1, Operand::Zero());
@@ -426,14 +426,14 @@ void RegExpMacroAssemblerS390::CheckNotBackReference(int start_reg,
__ blt(&loop);
// Move current character position to position after match.
__ SubP(current_input_offset(), r4, end_of_input_address());
__ SubS64(current_input_offset(), r4, end_of_input_address());
if (read_backward) {
__ LoadP(r2, register_location(start_reg)); // Index of start of capture
__ LoadP(r3, register_location(start_reg + 1)); // Index of end of capture
__ AddP(current_input_offset(), current_input_offset(), r2);
__ SubP(current_input_offset(), current_input_offset(), r3);
__ AddS64(current_input_offset(), current_input_offset(), r2);
__ SubS64(current_input_offset(), current_input_offset(), r3);
}
__ AddP(current_input_offset(), r1);
__ AddS64(current_input_offset(), r1);
__ bind(&fallthrough);
}
@@ -515,7 +515,7 @@ bool RegExpMacroAssemblerS390::CheckSpecialCharacterClass(uc16 type,
__ CmpP(current_character(), Operand(' '));
__ beq(&success);
// Check range 0x09..0x0D
__ SubP(r2, current_character(), Operand('\t'));
__ SubS64(r2, current_character(), Operand('\t'));
__ CmpLogicalP(r2, Operand('\r' - '\t'));
__ ble(&success);
// \u00a0 (NBSP).
@@ -530,13 +530,13 @@ bool RegExpMacroAssemblerS390::CheckSpecialCharacterClass(uc16 type,
return false;
case 'd':
// Match ASCII digits ('0'..'9')
__ SubP(r2, current_character(), Operand('0'));
__ SubS64(r2, current_character(), Operand('0'));
__ CmpLogicalP(r2, Operand('9' - '0'));
BranchOrBacktrack(gt, on_no_match);
return true;
case 'D':
// Match non-digits: anything but ASCII '0'..'9'
__ SubP(r2, current_character(), Operand('0'));
__ SubS64(r2, current_character(), Operand('0'));
__ CmpLogicalP(r2, Operand('9' - '0'));
BranchOrBacktrack(le, on_no_match);
return true;
@@ -544,14 +544,14 @@ bool RegExpMacroAssemblerS390::CheckSpecialCharacterClass(uc16 type,
// Match non-newlines (not 0x0A('\n'), 0x0D('\r'), 0x2028 and 0x2029)
__ XorP(r2, current_character(), Operand(0x01));
// See if current character is '\n'^1 or '\r'^1, i.e., 0x0B or 0x0C
__ SubP(r2, Operand(0x0B));
__ SubS64(r2, Operand(0x0B));
__ CmpLogicalP(r2, Operand(0x0C - 0x0B));
BranchOrBacktrack(le, on_no_match);
if (mode_ == UC16) {
// Compare original value to 0x2028 and 0x2029, using the already
// computed (current_char ^ 0x01 - 0x0B). I.e., check for
// 0x201D (0x2028 - 0x0B) or 0x201E.
__ SubP(r2, Operand(0x2028 - 0x0B));
__ SubS64(r2, Operand(0x2028 - 0x0B));
__ CmpLogicalP(r2, Operand(1));
BranchOrBacktrack(le, on_no_match);
}
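// Worked through: '\n' (0x0A) ^ 0x01 = 0x0B and '\r' (0x0D) ^ 0x01 = 0x0C,
// so after subtracting 0x0B the ASCII terminators map to 0 and 1. XOR with
// 0x01 also swaps 0x2028 and 0x2029, so subtracting a further
// (0x2028 - 0x0B) maps U+2028/U+2029 to 1 and 0; one unsigned compare
// against 1 covers each pair.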
@@ -561,7 +561,7 @@ bool RegExpMacroAssemblerS390::CheckSpecialCharacterClass(uc16 type,
// Match newlines (0x0A('\n'), 0x0D('\r'), 0x2028 and 0x2029)
__ XorP(r2, current_character(), Operand(0x01));
// See if current character is '\n'^1 or '\r'^1, i.e., 0x0B or 0x0C
__ SubP(r2, Operand(0x0B));
__ SubS64(r2, Operand(0x0B));
__ CmpLogicalP(r2, Operand(0x0C - 0x0B));
if (mode_ == LATIN1) {
BranchOrBacktrack(gt, on_no_match);
@@ -571,7 +571,7 @@ bool RegExpMacroAssemblerS390::CheckSpecialCharacterClass(uc16 type,
// Compare original value to 0x2028 and 0x2029, using the already
// computed (current_char ^ 0x01 - 0x0B). I.e., check for
// 0x201D (0x2028 - 0x0B) or 0x201E.
__ SubP(r2, Operand(0x2028 - 0x0B));
__ SubS64(r2, Operand(0x2028 - 0x0B));
__ CmpLogicalP(r2, Operand(1));
BranchOrBacktrack(gt, on_no_match);
__ bind(&done);
@@ -692,7 +692,7 @@ Handle<HeapObject> RegExpMacroAssemblerS390::GetCode(Handle<String> source) {
ExternalReference::address_of_jslimit(isolate());
__ mov(r2, Operand(stack_limit));
__ LoadP(r2, MemOperand(r2));
__ SubP(r2, sp, r2);
__ SubS64(r2, sp, r2);
// Handle it if the stack pointer is already below the stack limit.
__ ble(&stack_limit_hit);
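// r2 now holds the headroom (sp minus the limit); zero or negative means
// the stack pointer is already at or below the stack limit.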
// Check if there is room for the variable number of registers above
@@ -719,17 +719,17 @@ Handle<HeapObject> RegExpMacroAssemblerS390::GetCode(Handle<String> source) {
// Load input start.
__ LoadP(r4, MemOperand(frame_pointer(), kInputStart));
// Find negative length (offset of start relative to end).
__ SubP(current_input_offset(), r4, end_of_input_address());
__ SubS64(current_input_offset(), r4, end_of_input_address());
__ LoadP(r3, MemOperand(frame_pointer(), kStartIndex));
// Set r1 to address of char before start of the input string
// (effectively string position -1).
__ mov(r1, r4);
__ SubP(r1, current_input_offset(), Operand(char_size()));
__ SubS64(r1, current_input_offset(), Operand(char_size()));
if (mode_ == UC16) {
__ ShiftLeftU64(r0, r3, Operand(1));
__ SubP(r1, r1, r0);
__ SubS64(r1, r1, r0);
} else {
__ SubP(r1, r1, r3);
__ SubS64(r1, r1, r3);
}
// Store this value in a local variable, for use when clearing
// position registers.
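// Worked example: for a 10-character one-byte string matched from index 3,
// current_input_offset() starts at -7 (input start minus input end) and r1
// becomes -7 - 1 - 3 = -11: the offset, relative to the input end, of the
// character just before string position 0.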
@@ -786,13 +786,13 @@ Handle<HeapObject> RegExpMacroAssemblerS390::GetCode(Handle<String> source) {
__ LoadP(r0, MemOperand(frame_pointer(), kInputStart));
__ LoadP(r2, MemOperand(frame_pointer(), kRegisterOutput));
__ LoadP(r4, MemOperand(frame_pointer(), kStartIndex));
__ SubP(r0, end_of_input_address(), r0);
__ SubS64(r0, end_of_input_address(), r0);
// r0 is length of input in bytes.
if (mode_ == UC16) {
__ ShiftRightU64(r0, r0, Operand(1));
}
// r0 is length of input in characters.
__ AddP(r0, r4);
__ AddS64(r0, r4);
// r0 is length of string in characters.
DCHECK_EQ(0, num_saved_registers_ % 2);
@@ -810,10 +810,10 @@ Handle<HeapObject> RegExpMacroAssemblerS390::GetCode(Handle<String> source) {
__ ShiftRightS64(r5, r5, Operand(1));
__ ShiftRightS64(r6, r6, Operand(1));
}
__ AddP(r3, r0);
__ AddP(r4, r0);
__ AddP(r5, r0);
__ AddP(r6, r0);
__ AddS64(r3, r0);
__ AddS64(r4, r0);
__ AddS64(r5, r0);
__ AddS64(r6, r0);
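// The capture registers hold negative byte offsets from the input end.
// With a two-byte input of 10 characters and start index 0, r0 = 10; a
// capture starting 6 characters before the end is stored as -12, becomes
// -6 after the arithmetic shift, and -6 + 10 = 4, its character index.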
__ StoreU32(
r3, MemOperand(r2, -(num_saved_registers_ - i - 3) * kIntSize));
__ StoreU32(
@@ -829,8 +829,8 @@ Handle<HeapObject> RegExpMacroAssemblerS390::GetCode(Handle<String> source) {
__ ShiftRightS64(r3, r3, Operand(1));
__ ShiftRightS64(r4, r4, Operand(1));
}
__ AddP(r3, r0);
__ AddP(r4, r0);
__ AddS64(r3, r0);
__ AddS64(r4, r0);
__ StoreU32(
r3, MemOperand(r2, -(num_saved_registers_ - i - 1) * kIntSize));
__ StoreU32(r4,
@@ -850,18 +850,18 @@ Handle<HeapObject> RegExpMacroAssemblerS390::GetCode(Handle<String> source) {
__ LoadP(r3, MemOperand(frame_pointer(), kNumOutputRegisters));
__ LoadP(r4, MemOperand(frame_pointer(), kRegisterOutput));
// Increment success counter.
__ AddP(r2, Operand(1));
__ AddS64(r2, Operand(1));
__ StoreU64(r2, MemOperand(frame_pointer(), kSuccessfulCaptures));
// Capture results have been stored, so the number of remaining global
// output registers is reduced by the number of stored captures.
__ SubP(r3, Operand(num_saved_registers_));
__ SubS64(r3, Operand(num_saved_registers_));
// Check whether we have enough room for another set of capture results.
__ CmpP(r3, Operand(num_saved_registers_));
__ blt(&return_r2);
__ StoreU64(r3, MemOperand(frame_pointer(), kNumOutputRegisters));
// Advance the location for output.
__ AddP(r4, Operand(num_saved_registers_ * kIntSize));
__ AddS64(r4, Operand(num_saved_registers_ * kIntSize));
__ StoreU64(r4, MemOperand(frame_pointer(), kRegisterOutput));
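// Each successful global match consumes num_saved_registers_ output slots;
// the blt above bails out to return_r2 once less than one full set of
// capture slots remains in the output buffer.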
// Prepare r2 to initialize registers with its value in the next run.
@@ -879,7 +879,7 @@ Handle<HeapObject> RegExpMacroAssemblerS390::GetCode(Handle<String> source) {
// Advance current position after a zero-length match.
Label advance;
__ bind(&advance);
__ AddP(current_input_offset(), Operand((mode_ == UC16) ? 2 : 1));
__ AddS64(current_input_offset(), Operand((mode_ == UC16) ? 2 : 1));
if (global_unicode()) CheckNotInSurrogatePair(0, &advance);
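// Without this bump, a global regexp that matched the empty string would
// retry at the same position forever. Advancing by one code unit (two
// bytes in UC16 mode) guarantees progress; for unicode patterns the
// surrogate-pair check loops back to advance again rather than leaving
// the position between a lead and a trail surrogate.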
}
@@ -935,7 +935,7 @@ Handle<HeapObject> RegExpMacroAssemblerS390::GetCode(Handle<String> source) {
static const int num_arguments = 3;
__ PrepareCallCFunction(num_arguments, r2);
__ mov(r2, backtrack_stackpointer());
__ AddP(r3, frame_pointer(), Operand(kStackHighEnd));
__ AddS64(r3, frame_pointer(), Operand(kStackHighEnd));
__ mov(r4, Operand(ExternalReference::isolate_address(isolate())));
ExternalReference grow_stack = ExternalReference::re_grow_stack(isolate());
__ CallCFunction(grow_stack, num_arguments);
@@ -1039,7 +1039,7 @@ void RegExpMacroAssemblerS390::ReadCurrentPositionFromRegister(int reg) {
void RegExpMacroAssemblerS390::ReadStackPointerFromRegister(int reg) {
__ LoadP(backtrack_stackpointer(), register_location(reg), r0);
__ LoadP(r2, MemOperand(frame_pointer(), kStackHighEnd));
__ AddP(backtrack_stackpointer(), r2);
__ AddS64(backtrack_stackpointer(), r2);
}
void RegExpMacroAssemblerS390::SetCurrentPositionFromEnd(int by) {
@@ -1070,7 +1070,7 @@ void RegExpMacroAssemblerS390::WriteCurrentPositionToRegister(int reg,
if (cp_offset == 0) {
__ StoreU64(current_input_offset(), register_location(reg));
} else {
__ AddP(r2, current_input_offset(), Operand(cp_offset * char_size()));
__ AddS64(r2, current_input_offset(), Operand(cp_offset * char_size()));
__ StoreU64(r2, register_location(reg));
}
}
@@ -1085,7 +1085,7 @@ void RegExpMacroAssemblerS390::ClearRegisters(int reg_from, int reg_to) {
void RegExpMacroAssemblerS390::WriteStackPointerToRegister(int reg) {
__ LoadP(r3, MemOperand(frame_pointer(), kStackHighEnd));
__ SubP(r2, backtrack_stackpointer(), r3);
__ SubS64(r2, backtrack_stackpointer(), r3);
__ StoreU64(r2, register_location(reg));
}
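// Read/WriteStackPointerToRegister save the backtrack stack pointer as an
// offset from kStackHighEnd rather than as a raw address, so stored values
// stay valid if the regexp stack is grown (and possibly moved) mid-match.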
@@ -1167,7 +1167,7 @@ void RegExpMacroAssemblerS390::CheckPosition(int cp_offset,
BranchOrBacktrack(ge, on_outside_input);
} else {
__ LoadP(r3, MemOperand(frame_pointer(), kStringStartMinusOne));
__ AddP(r2, current_input_offset(), Operand(cp_offset * char_size()));
__ AddS64(r2, current_input_offset(), Operand(cp_offset * char_size()));
__ CmpP(r2, r3);
BranchOrBacktrack(le, on_outside_input);
}
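// For a negative cp_offset (reading backwards), the position is compared
// against the string-start-minus-one offset computed in GetCode; at or
// below that value the read would fall before position 0 of the string.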
@@ -1201,7 +1201,7 @@ void RegExpMacroAssemblerS390::SafeCall(Label* to, Condition cond,
void RegExpMacroAssemblerS390::SafeReturn() {
__ pop(r14);
__ mov(ip, Operand(masm_->CodeObject()));
__ AddP(r14, ip);
__ AddS64(r14, ip);
__ Ret();
}
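// SafeCallTarget (below) pushes the return address as an offset from the
// code object, and SafeReturn adds the code object base back before
// returning, so addresses on the stack survive a GC that moves this code.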
@@ -1210,7 +1210,7 @@ void RegExpMacroAssemblerS390::SafeCallTarget(Label* name) {
__ CleanseP(r14);
__ mov(r0, r14);
__ mov(ip, Operand(masm_->CodeObject()));
__ SubP(r0, r0, ip);
__ SubS64(r0, r0, ip);
__ push(r0);
}
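
Two branch-free idioms recur throughout this file: the unsigned range check
and the XOR fold for line terminators. A minimal standalone C++ sketch, not
part of the V8 sources (the function names are illustrative only):

#include <cstdint>

// 'a'..'z' plus the Latin-1 lowercase letters 224..254 except 247 (the
// division sign), as in CheckNotBackReferenceIgnoreCase above.
static bool IsLatin1Lower(uint32_t c) {
  if (c - 'a' <= uint32_t('z' - 'a')) return true;  // wraps for c < 'a'
  return c - 224 <= uint32_t(254 - 224) && c != 247;
}

// '\n', '\r', U+2028, U+2029: XOR with 0x01 maps 0x0A/0x0D to 0x0B/0x0C and
// swaps 0x2028/0x2029, so each pair collapses into one unsigned range.
static bool IsLineTerminator(uint32_t c) {
  uint32_t x = (c ^ 0x01) - 0x0B;
  return x <= 1 || x - uint32_t(0x2028 - 0x0B) <= 1;
}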


@@ -306,8 +306,8 @@ void LiftoffAssembler::FillStackSlotsWithZero(int start, int size) {
// Use r3 for start address (inclusive), r4 for end address (exclusive).
push(r3);
push(r4);
SubP(r3, fp, Operand(start + size));
SubP(r4, fp, Operand(start));
SubS64(r3, fp, Operand(start + size));
SubS64(r4, fp, Operand(start));
Label loop;
bind(&loop);
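// The loop body is cut off by this hunk; given the comment above, it
// presumably zeroes the slot at r3 and advances by one pointer until r3
// reaches r4. In rough C terms (a sketch, not the actual implementation):
//   for (uintptr_t p = fp - (start + size); p < fp - start;
//        p += kSystemPointerSize) {
//     *reinterpret_cast<uint64_t*>(p) = 0;
//   }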