ppc: rename StoreP/PX as StoreU64
Change-Id: Ia1b8bf3ada1957c27334e98207d57bb2e644ecdd
Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/2910796
Commit-Queue: Junliang Yan <junyan@redhat.com>
Commit-Queue: Milad Fa <mfarazma@redhat.com>
Auto-Submit: Junliang Yan <junyan@redhat.com>
Reviewed-by: Milad Fa <mfarazma@redhat.com>
Cr-Commit-Position: refs/heads/master@{#74702}
parent c55ee71f8b
commit 8a7db67d07
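Note on the change: the rename brings the PPC store helpers in line with the width-explicit LoadU64 naming that already appears throughout these files. Call sites that used StoreP or StorePX now go through a single StoreU64 entry point, and the helper itself (see the TurboAssembler::StoreU64 hunk in the macro-assembler changes below) picks between the displacement form (std) and the indexed form (stdx) of the 64-bit store. What follows is a minimal standalone sketch of that selection logic, using a simplified MemOperand struct rather than V8's real classes, so the dispatch is easier to see in isolation:

#include <cstdint>
#include <cstdio>

// Simplified stand-in for V8's MemOperand: a base register, an optional
// index register (kNoReg when absent), and a byte offset.
constexpr int kNoReg = -1;
struct MemOperand {
  int ra;
  int rb;
  int32_t offset;
};

bool IsInt16(int32_t value) { return value >= -32768 && value <= 32767; }

// Mirrors the branch structure of the renamed StoreU64 helper: with no index
// register, use the DS-form "std" when the offset is a 4-byte-aligned signed
// 16-bit value, otherwise materialize the offset in a scratch register and
// use "stdx"; with an index register, always use the indexed "stdx" form.
const char* SelectStore64Form(const MemOperand& mem) {
  if (mem.rb == kNoReg) {
    bool misaligned = (mem.offset & 3) != 0;
    if (!IsInt16(mem.offset) || misaligned) return "stdx (offset via scratch)";
    return "std";
  }
  return "stdx";
}

int main() {
  printf("%s\n", SelectStore64Form({1, kNoReg, 16}));       // std
  printf("%s\n", SelectStore64Form({1, kNoReg, 0x12345}));  // stdx via scratch
  printf("%s\n", SelectStore64Form({1, 2, 0}));             // stdx
  return 0;
}

For callers, the change is mechanical: every StoreP(...) or StorePX(...) call in the hunks below becomes StoreU64(...) with the same operands.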
@@ -518,7 +518,7 @@ void Generate_JSEntryVariant(MacroAssembler* masm, StackFrame::Type type,
   // PPC LINUX ABI:
   // preserve LR in pre-reserved slot in caller's frame
   __ mflr(r0);
-  __ StoreP(r0, MemOperand(sp, kStackFrameLRSlot * kSystemPointerSize));
+  __ StoreU64(r0, MemOperand(sp, kStackFrameLRSlot * kSystemPointerSize));

   // Save callee saved registers on the stack.
   __ MultiPush(kCalleeSaved);
@@ -559,7 +559,7 @@ void Generate_JSEntryVariant(MacroAssembler* masm, StackFrame::Type type,
   // SafeStackFrameIterator will assume we are executing C++ and miss the JS
   // frames on top.
   __ li(r0, Operand::Zero());
-  __ StoreP(r0, MemOperand(r3));
+  __ StoreU64(r0, MemOperand(r3));

   Register scratch = r9;
   // Set up frame pointer for the frame to be pushed.
@@ -574,7 +574,7 @@ void Generate_JSEntryVariant(MacroAssembler* masm, StackFrame::Type type,
   __ LoadU64(scratch, MemOperand(r3));
   __ cmpi(scratch, Operand::Zero());
   __ bne(&non_outermost_js);
-  __ StoreP(fp, MemOperand(r3));
+  __ StoreU64(fp, MemOperand(r3));
   __ mov(scratch, Operand(StackFrame::OUTERMOST_JSENTRY_FRAME));
   Label cont;
   __ b(&cont);
@@ -607,7 +607,7 @@ void Generate_JSEntryVariant(MacroAssembler* masm, StackFrame::Type type,
                 IsolateAddressId::kPendingExceptionAddress, masm->isolate()));
   }

-  __ StoreP(r3, MemOperand(scratch));
+  __ StoreU64(r3, MemOperand(scratch));
   __ LoadRoot(r3, RootIndex::kException);
   __ b(&exit);

@@ -641,14 +641,14 @@ void Generate_JSEntryVariant(MacroAssembler* masm, StackFrame::Type type,
   __ bne(&non_outermost_js_2);
   __ mov(scratch, Operand::Zero());
   __ Move(r8, js_entry_sp);
-  __ StoreP(scratch, MemOperand(r8));
+  __ StoreU64(scratch, MemOperand(r8));
   __ bind(&non_outermost_js_2);

   // Restore the top frame descriptors from the stack.
   __ pop(r6);
   __ Move(scratch, ExternalReference::Create(IsolateAddressId::kCEntryFPAddress,
                                              masm->isolate()));
-  __ StoreP(r6, MemOperand(scratch));
+  __ StoreU64(r6, MemOperand(scratch));

   // Reset the stack to the callee saved registers.
   __ addi(sp, sp, Operand(-EntryFrameConstants::kCallerFPOffset));
@@ -1169,7 +1169,7 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
   __ cmpi(r8, Operand::Zero());
   __ beq(&no_incoming_new_target_or_generator_register);
   __ ShiftLeftImm(r8, r8, Operand(kSystemPointerSizeLog2));
-  __ StorePX(r6, MemOperand(fp, r8));
+  __ StoreU64(r6, MemOperand(fp, r8));
   __ bind(&no_incoming_new_target_or_generator_register);

   // Perform interrupt stack check.
@@ -1228,8 +1228,8 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
   __ mov(kInterpreterBytecodeOffsetRegister,
          Operand(Smi::FromInt(BytecodeArray::kHeaderSize - kHeapObjectTag +
                               kFunctionEntryBytecodeOffset)));
-  __ StoreP(kInterpreterBytecodeOffsetRegister,
-            MemOperand(fp, InterpreterFrameConstants::kBytecodeOffsetFromFp));
+  __ StoreU64(kInterpreterBytecodeOffsetRegister,
+              MemOperand(fp, InterpreterFrameConstants::kBytecodeOffsetFromFp));
   __ CallRuntime(Runtime::kStackGuard);

   // After the call, restore the bytecode array, bytecode offset and accumulator
@@ -1242,8 +1242,8 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
   __ LoadRoot(kInterpreterAccumulatorRegister, RootIndex::kUndefinedValue);

   __ SmiTag(r0, kInterpreterBytecodeOffsetRegister);
-  __ StoreP(r0,
-            MemOperand(fp, InterpreterFrameConstants::kBytecodeOffsetFromFp));
+  __ StoreU64(r0,
+              MemOperand(fp, InterpreterFrameConstants::kBytecodeOffsetFromFp));

   __ jmp(&after_stack_check_interrupt);

@@ -1502,8 +1502,8 @@ void Builtins::Generate_InterpreterEnterAtNextBytecode(MacroAssembler* masm) {
   __ bind(&enter_bytecode);
   // Convert new bytecode offset to a Smi and save in the stackframe.
   __ SmiTag(r5, kInterpreterBytecodeOffsetRegister);
-  __ StoreP(r5,
-            MemOperand(fp, InterpreterFrameConstants::kBytecodeOffsetFromFp));
+  __ StoreU64(r5,
+              MemOperand(fp, InterpreterFrameConstants::kBytecodeOffsetFromFp));

   Generate_InterpreterEnterBytecode(masm);

@@ -1538,7 +1538,7 @@ void Generate_ContinueToBuiltinHelper(MacroAssembler* masm,
   } else {
     // Overwrite the hole inserted by the deoptimizer with the return value
     // from the LAZY deopt point.
-    __ StoreP(
+    __ StoreU64(
         r3, MemOperand(
                 sp, config->num_allocatable_general_registers() *
                         kSystemPointerSize +
@@ -1559,7 +1559,7 @@ void Generate_ContinueToBuiltinHelper(MacroAssembler* masm,
   __ addi(r3, r3,
           Operand(BuiltinContinuationFrameConstants::kFixedSlotCount));
   __ ShiftLeftImm(r0, r3, Operand(kSystemPointerSizeLog2));
-  __ StorePX(scratch, MemOperand(sp, r0));
+  __ StoreU64(scratch, MemOperand(sp, r0));
   // Recover arguments count.
   __ subi(r3, r3,
           Operand(BuiltinContinuationFrameConstants::kFixedSlotCount));
@@ -1685,7 +1685,7 @@ void Builtins::Generate_FunctionPrototypeApply(MacroAssembler* masm) {
     __ bind(&done);
     __ ShiftLeftImm(ip, r3, Operand(kSystemPointerSizeLog2));
     __ add(sp, sp, ip);
-    __ StoreP(r8, MemOperand(sp));
+    __ StoreU64(r8, MemOperand(sp));
   }

   // ----------- S t a t e -------------
@@ -1770,7 +1770,7 @@ void Builtins::Generate_ReflectApply(MacroAssembler* masm) {
     __ bind(&done);
     __ ShiftLeftImm(ip, r3, Operand(kSystemPointerSizeLog2));
     __ add(sp, sp, ip);
-    __ StoreP(r8, MemOperand(sp));
+    __ StoreU64(r8, MemOperand(sp));
   }

   // ----------- S t a t e -------------
@@ -1820,7 +1820,7 @@ void Builtins::Generate_ReflectConstruct(MacroAssembler* masm) {
     __ bind(&done);
     __ ShiftLeftImm(r0, r3, Operand(kSystemPointerSizeLog2));
     __ add(sp, sp, r0);
-    __ StoreP(r7, MemOperand(sp));
+    __ StoreU64(r7, MemOperand(sp));
   }

   // ----------- S t a t e -------------
@@ -2015,7 +2015,7 @@ void Builtins::Generate_CallOrConstructForwardVarargs(MacroAssembler* masm,
     __ subi(r8, r8, Operand(1));
     __ ShiftLeftImm(scratch, r8, Operand(kSystemPointerSizeLog2));
     __ LoadU64(r0, MemOperand(r7, scratch));
-    __ StorePX(r0, MemOperand(r5, scratch));
+    __ StoreU64(r0, MemOperand(r5, scratch));
     __ cmpi(r8, Operand::Zero());
     __ bne(&loop);
   }
@@ -2666,7 +2666,7 @@ void Builtins::Generate_CEntry(MacroAssembler* masm, int result_size,
   Label skip;
   __ cmpi(cp, Operand::Zero());
   __ beq(&skip);
-  __ StoreP(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
+  __ StoreU64(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
   __ bind(&skip);

   // Reset the masking register. This is done independent of the underlying
@@ -2681,7 +2681,7 @@ void Builtins::Generate_CEntry(MacroAssembler* masm, int result_size,
     __ Move(ip, ExternalReference::Create(IsolateAddressId::kCEntryFPAddress,
                                           masm->isolate()));
     __ mov(r0, Operand::Zero());
-    __ StoreP(r0, MemOperand(ip));
+    __ StoreU64(r0, MemOperand(ip));
   }

   // Compute the handler entry address and jump to it.
@@ -2806,7 +2806,7 @@ void Builtins::Generate_DoubleToI(MacroAssembler* masm) {
   argument_offset -= 2 * kSystemPointerSize;

   __ bind(&fastpath_done);
-  __ StoreP(result_reg, MemOperand(sp, argument_offset));
+  __ StoreU64(result_reg, MemOperand(sp, argument_offset));
   __ Pop(result_reg, scratch);

   __ Ret();
@@ -2893,7 +2893,7 @@ static void CallApiFunctionAndReturn(MacroAssembler* masm,
   __ bind(&return_value_loaded);
   // No more valid handles (the result handle was the last one). Restore
   // previous handle scope.
-  __ StoreP(r14, MemOperand(r17, kNextOffset));
+  __ StoreU64(r14, MemOperand(r17, kNextOffset));
   if (FLAG_debug_code) {
     __ lwz(r4, MemOperand(r17, kLevelOffset));
     __ cmp(r4, r16);
@@ -2930,7 +2930,7 @@ static void CallApiFunctionAndReturn(MacroAssembler* masm,

   // HandleScope limit has changed. Delete allocated extensions.
   __ bind(&delete_allocated_handles);
-  __ StoreP(r15, MemOperand(r17, kLimitOffset));
+  __ StoreU64(r15, MemOperand(r17, kLimitOffset));
   __ mr(r14, r3);
   __ PrepareCallCFunction(1, r15);
   __ Move(r3, ExternalReference::isolate_address(isolate));
@@ -2985,22 +2985,22 @@ void Builtins::Generate_CallApiCallback(MacroAssembler* masm) {
   __ subi(sp, sp, Operand(FCA::kArgsLength * kSystemPointerSize));

   // kHolder.
-  __ StoreP(holder, MemOperand(sp, 0 * kSystemPointerSize));
+  __ StoreU64(holder, MemOperand(sp, 0 * kSystemPointerSize));

   // kIsolate.
   __ Move(scratch, ExternalReference::isolate_address(masm->isolate()));
-  __ StoreP(scratch, MemOperand(sp, 1 * kSystemPointerSize));
+  __ StoreU64(scratch, MemOperand(sp, 1 * kSystemPointerSize));

   // kReturnValueDefaultValue and kReturnValue.
   __ LoadRoot(scratch, RootIndex::kUndefinedValue);
-  __ StoreP(scratch, MemOperand(sp, 2 * kSystemPointerSize));
-  __ StoreP(scratch, MemOperand(sp, 3 * kSystemPointerSize));
+  __ StoreU64(scratch, MemOperand(sp, 2 * kSystemPointerSize));
+  __ StoreU64(scratch, MemOperand(sp, 3 * kSystemPointerSize));

   // kData.
-  __ StoreP(call_data, MemOperand(sp, 4 * kSystemPointerSize));
+  __ StoreU64(call_data, MemOperand(sp, 4 * kSystemPointerSize));

   // kNewTarget.
-  __ StoreP(scratch, MemOperand(sp, 5 * kSystemPointerSize));
+  __ StoreU64(scratch, MemOperand(sp, 5 * kSystemPointerSize));

   // Keep a pointer to kHolder (= implicit_args) in a scratch register.
   // We use it below to set up the FunctionCallbackInfo object.
@@ -3022,15 +3022,15 @@ void Builtins::Generate_CallApiCallback(MacroAssembler* masm) {

   // FunctionCallbackInfo::implicit_args_ (points at kHolder as set up above).
   // Arguments are after the return address (pushed by EnterExitFrame()).
-  __ StoreP(scratch, MemOperand(sp, (kStackFrameExtraParamSlot + 1) *
-                                        kSystemPointerSize));
+  __ StoreU64(scratch, MemOperand(sp, (kStackFrameExtraParamSlot + 1) *
+                                          kSystemPointerSize));

   // FunctionCallbackInfo::values_ (points at the first varargs argument passed
   // on the stack).
   __ addi(scratch, scratch,
           Operand((FCA::kArgsLength + 1) * kSystemPointerSize));
-  __ StoreP(scratch, MemOperand(sp, (kStackFrameExtraParamSlot + 2) *
-                                        kSystemPointerSize));
+  __ StoreU64(scratch, MemOperand(sp, (kStackFrameExtraParamSlot + 2) *
+                                          kSystemPointerSize));

   // FunctionCallbackInfo::length_.
   __ stw(argc,
@@ -3042,8 +3042,8 @@ void Builtins::Generate_CallApiCallback(MacroAssembler* masm) {
           Operand((FCA::kArgsLength + 1 /* receiver */) * kSystemPointerSize));
   __ ShiftLeftImm(ip, argc, Operand(kSystemPointerSizeLog2));
   __ add(scratch, scratch, ip);
-  __ StoreP(scratch, MemOperand(sp, (kStackFrameExtraParamSlot + 4) *
-                                        kSystemPointerSize));
+  __ StoreU64(scratch, MemOperand(sp, (kStackFrameExtraParamSlot + 4) *
+                                          kSystemPointerSize));

   // v8::InvocationCallback's argument.
   __ addi(r3, sp,
@@ -3138,13 +3138,13 @@ void Builtins::Generate_CallApiGetter(MacroAssembler* masm) {

   if (!ABI_PASSES_HANDLES_IN_REGS) {
     // pass 1st arg by reference
-    __ StoreP(r3, MemOperand(sp, arg0Slot * kSystemPointerSize));
+    __ StoreU64(r3, MemOperand(sp, arg0Slot * kSystemPointerSize));
     __ addi(r3, sp, Operand(arg0Slot * kSystemPointerSize));
   }

   // Create v8::PropertyCallbackInfo object on the stack and initialize
   // it's args_ field.
-  __ StoreP(r4, MemOperand(sp, accessorInfoSlot * kSystemPointerSize));
+  __ StoreU64(r4, MemOperand(sp, accessorInfoSlot * kSystemPointerSize));
   __ addi(r4, sp, Operand(accessorInfoSlot * kSystemPointerSize));
   // r4 = v8::PropertyCallbackInfo&

@@ -3172,7 +3172,8 @@ void Builtins::Generate_DirectCEntry(MacroAssembler* masm) {
   // Place the return address on the stack, making the call
   // GC safe. The RegExp backend also relies on this.
   __ mflr(r0);
-  __ StoreP(r0, MemOperand(sp, kStackFrameExtraParamSlot * kSystemPointerSize));
+  __ StoreU64(r0,
+              MemOperand(sp, kStackFrameExtraParamSlot * kSystemPointerSize));

   if (ABI_USES_FUNCTION_DESCRIPTORS) {
     // AIX/PPC64BE Linux use a function descriptor;
@@ -3220,7 +3221,7 @@ void Generate_DeoptimizationEntry(MacroAssembler* masm,
   __ subi(sp, sp, Operand(kNumberOfRegisters * kSystemPointerSize));
   for (int16_t i = kNumberOfRegisters - 1; i >= 0; i--) {
     if ((saved_regs & (1 << i)) != 0) {
-      __ StoreP(ToRegister(i), MemOperand(sp, kSystemPointerSize * i));
+      __ StoreU64(ToRegister(i), MemOperand(sp, kSystemPointerSize * i));
     }
   }
   {
@@ -3228,7 +3229,7 @@ void Generate_DeoptimizationEntry(MacroAssembler* masm,
     Register scratch = temps.Acquire();
     __ Move(scratch, ExternalReference::Create(
                          IsolateAddressId::kCEntryFPAddress, isolate));
-    __ StoreP(fp, MemOperand(scratch));
+    __ StoreU64(fp, MemOperand(scratch));
   }
   const int kSavedRegistersAreaSize =
       (kNumberOfRegisters * kSystemPointerSize) + kDoubleRegsSize;
@@ -3272,7 +3273,7 @@ void Generate_DeoptimizationEntry(MacroAssembler* masm,
     int offset =
         (i * kSystemPointerSize) + FrameDescription::registers_offset();
     __ LoadU64(r5, MemOperand(sp, i * kSystemPointerSize));
-    __ StoreP(r5, MemOperand(r4, offset));
+    __ StoreU64(r5, MemOperand(r4, offset));
   }

   int double_regs_offset = FrameDescription::double_registers_offset();
@@ -3315,7 +3316,7 @@ void Generate_DeoptimizationEntry(MacroAssembler* masm,
   __ b(&pop_loop_header);
   __ bind(&pop_loop);
   __ pop(r7);
-  __ StoreP(r7, MemOperand(r6, 0));
+  __ StoreU64(r7, MemOperand(r6, 0));
   __ addi(r6, r6, Operand(kSystemPointerSize));
   __ bind(&pop_loop_header);
   __ cmp(r5, sp);
@@ -402,7 +402,7 @@ void TurboAssembler::MultiPush(RegList regs, Register location) {
   for (int16_t i = Register::kNumRegisters - 1; i >= 0; i--) {
     if ((regs & (1 << i)) != 0) {
       stack_offset -= kSystemPointerSize;
-      StoreP(ToRegister(i), MemOperand(location, stack_offset));
+      StoreU64(ToRegister(i), MemOperand(location, stack_offset));
     }
   }
 }
@@ -525,7 +525,7 @@ void TurboAssembler::StoreTaggedFieldX(const Register& value,
     stwx(value, dst_field_operand);
     RecordComment("]");
   } else {
-    StorePX(value, dst_field_operand);
+    StoreU64(value, dst_field_operand);
   }
 }

@@ -537,7 +537,7 @@ void TurboAssembler::StoreTaggedField(const Register& value,
     StoreWord(value, dst_field_operand, scratch);
     RecordComment("]");
   } else {
-    StoreP(value, dst_field_operand, scratch);
+    StoreU64(value, dst_field_operand, scratch);
   }
 }

@@ -1200,20 +1200,20 @@ void MacroAssembler::EnterExitFrame(bool save_doubles, int stack_space,

   if (FLAG_debug_code) {
     li(r8, Operand::Zero());
-    StoreP(r8, MemOperand(fp, ExitFrameConstants::kSPOffset));
+    StoreU64(r8, MemOperand(fp, ExitFrameConstants::kSPOffset));
   }
   if (FLAG_enable_embedded_constant_pool) {
-    StoreP(kConstantPoolRegister,
-           MemOperand(fp, ExitFrameConstants::kConstantPoolOffset));
+    StoreU64(kConstantPoolRegister,
+             MemOperand(fp, ExitFrameConstants::kConstantPoolOffset));
   }

   // Save the frame pointer and the context in top.
   Move(r8, ExternalReference::Create(IsolateAddressId::kCEntryFPAddress,
                                      isolate()));
-  StoreP(fp, MemOperand(r8));
+  StoreU64(fp, MemOperand(r8));
   Move(r8,
        ExternalReference::Create(IsolateAddressId::kContextAddress, isolate()));
-  StoreP(cp, MemOperand(r8));
+  StoreU64(cp, MemOperand(r8));

   // Optionally save all volatile double registers.
   if (save_doubles) {
@@ -1241,7 +1241,7 @@ void MacroAssembler::EnterExitFrame(bool save_doubles, int stack_space,
   // Set the exit frame sp value to point just before the return address
   // location.
   addi(r8, sp, Operand((kStackFrameExtraParamSlot + 1) * kSystemPointerSize));
-  StoreP(r8, MemOperand(fp, ExitFrameConstants::kSPOffset));
+  StoreU64(r8, MemOperand(fp, ExitFrameConstants::kSPOffset));
 }

 int TurboAssembler::ActivationFrameAlignment() {
@@ -1277,7 +1277,7 @@ void MacroAssembler::LeaveExitFrame(bool save_doubles, Register argument_count,
   li(r6, Operand::Zero());
   Move(ip, ExternalReference::Create(IsolateAddressId::kCEntryFPAddress,
                                      isolate()));
-  StoreP(r6, MemOperand(ip));
+  StoreU64(r6, MemOperand(ip));

   // Restore current context from top and clear it in debug mode.
   Move(ip,
@@ -1288,7 +1288,7 @@ void MacroAssembler::LeaveExitFrame(bool save_doubles, Register argument_count,
   mov(r6, Operand(Context::kInvalidContext));
   Move(ip,
        ExternalReference::Create(IsolateAddressId::kContextAddress, isolate()));
-  StoreP(r6, MemOperand(ip));
+  StoreU64(r6, MemOperand(ip));
 #endif

   // Tear down the exit frame, pop the arguments, and return.
@@ -1597,7 +1597,7 @@ void MacroAssembler::PushStackHandler() {
   push(r0);

   // Set this new handler as the current one.
-  StoreP(sp, MemOperand(r3));
+  StoreU64(sp, MemOperand(r3));
 }

 void MacroAssembler::PopStackHandler() {
@@ -1607,7 +1607,7 @@ void MacroAssembler::PopStackHandler() {
   pop(r4);
   Move(ip,
        ExternalReference::Create(IsolateAddressId::kHandlerAddress, isolate()));
-  StoreP(r4, MemOperand(ip));
+  StoreU64(r4, MemOperand(ip));

   Drop(1);  // Drop padding.
 }
@@ -2082,8 +2082,8 @@ void TurboAssembler::PrepareCallCFunction(int num_reg_arguments,
     DCHECK(base::bits::IsPowerOfTwo(frame_alignment));
     ClearRightImm(sp, sp,
                   Operand(base::bits::WhichPowerOfTwo(frame_alignment)));
-    StoreP(scratch,
-           MemOperand(sp, stack_passed_arguments * kSystemPointerSize));
+    StoreU64(scratch,
+             MemOperand(sp, stack_passed_arguments * kSystemPointerSize));
   } else {
     // Make room for stack arguments
     stack_space += stack_passed_arguments;
@@ -2158,10 +2158,10 @@ void TurboAssembler::CallCFunctionHelper(Register function,
   // See x64 code for reasoning about how to address the isolate data fields.
   if (root_array_available()) {
     LoadPC(r0);
-    StoreP(r0, MemOperand(kRootRegister,
-                          IsolateData::fast_c_call_caller_pc_offset()));
-    StoreP(fp, MemOperand(kRootRegister,
-                          IsolateData::fast_c_call_caller_fp_offset()));
+    StoreU64(r0, MemOperand(kRootRegister,
+                            IsolateData::fast_c_call_caller_pc_offset()));
+    StoreU64(fp, MemOperand(kRootRegister,
+                            IsolateData::fast_c_call_caller_fp_offset()));
   } else {
     DCHECK_NOT_NULL(isolate());
     Push(addr_scratch);
@@ -2169,10 +2169,10 @@ void TurboAssembler::CallCFunctionHelper(Register function,
     Move(addr_scratch,
          ExternalReference::fast_c_call_caller_pc_address(isolate()));
     LoadPC(r0);
-    StoreP(r0, MemOperand(addr_scratch));
+    StoreU64(r0, MemOperand(addr_scratch));
     Move(addr_scratch,
          ExternalReference::fast_c_call_caller_fp_address(isolate()));
-    StoreP(fp, MemOperand(addr_scratch));
+    StoreU64(fp, MemOperand(addr_scratch));
     Pop(addr_scratch);
   }
   mtlr(scratch);
@@ -2202,7 +2202,7 @@ void TurboAssembler::CallCFunctionHelper(Register function,
   mov(zero_scratch, Operand::Zero());

   if (root_array_available()) {
-    StoreP(
+    StoreU64(
         zero_scratch,
         MemOperand(kRootRegister, IsolateData::fast_c_call_caller_fp_offset()));
   } else {
@@ -2210,7 +2210,7 @@ void TurboAssembler::CallCFunctionHelper(Register function,
     Push(addr_scratch);
     Move(addr_scratch,
          ExternalReference::fast_c_call_caller_fp_address(isolate()));
-    StoreP(zero_scratch, MemOperand(addr_scratch));
+    StoreU64(zero_scratch, MemOperand(addr_scratch));
     Pop(addr_scratch);
   }

@@ -2730,35 +2730,33 @@ void TurboAssembler::LoadPU(Register dst, const MemOperand& mem,
 }

 // Store a "pointer" sized value to the memory location
-void TurboAssembler::StoreP(Register src, const MemOperand& mem,
-                            Register scratch) {
+void TurboAssembler::StoreU64(Register src, const MemOperand& mem,
+                              Register scratch) {
   int offset = mem.offset();
+  int misaligned = (offset & 3);

-  if (!is_int16(offset)) {
-    /* cannot use d-form */
-    DCHECK(scratch != no_reg);
-    mov(scratch, Operand(offset));
-    StorePX(src, MemOperand(mem.ra(), scratch));
-  } else {
-#if V8_TARGET_ARCH_PPC64
-    int misaligned = (offset & 3);
-    if (misaligned) {
-      // adjust base to conform to offset alignment requirements
-      // a suitable scratch is required here
-      DCHECK(scratch != no_reg);
-      if (scratch == r0) {
-        LoadIntLiteral(scratch, offset);
-        stdx(src, MemOperand(mem.ra(), scratch));
-      } else {
-        addi(scratch, mem.ra(), Operand((offset & 3) - 4));
-        std(src, MemOperand(scratch, (offset & ~3) + 4));
-      }
+  if (mem.rb() == no_reg) {
+    if (!is_int16(offset) || misaligned) {
+      /* cannot use d-form */
+      CHECK_NE(scratch, no_reg);
+      mov(scratch, Operand(offset));
+      stdx(src, MemOperand(mem.ra(), scratch));
     } else {
       std(src, mem);
     }
-#else
-    stw(src, mem);
-#endif
+  } else {
+    if (offset == 0) {
+      stdx(src, mem);
+    } else if (is_int16(offset)) {
+      CHECK_NE(scratch, no_reg);
+      addi(scratch, mem.rb(), Operand(offset));
+      stdx(src, MemOperand(mem.ra(), scratch));
+    } else {
+      CHECK_NE(scratch, no_reg);
+      mov(scratch, Operand(offset));
+      add(scratch, scratch, mem.rb());
+      stdx(src, MemOperand(mem.ra(), scratch));
+    }
   }
 }

@@ -3070,7 +3068,7 @@ void TurboAssembler::SwapP(Register src, MemOperand dst, Register scratch) {
   DCHECK(!AreAliased(src, scratch));
   mr(scratch, src);
   LoadU64(src, dst, r0);
-  StoreP(scratch, dst, r0);
+  StoreU64(scratch, dst, r0);
 }

 void TurboAssembler::SwapP(MemOperand src, MemOperand dst, Register scratch_0,
@@ -3093,15 +3091,15 @@ void TurboAssembler::SwapP(MemOperand src, MemOperand dst, Register scratch_0,
     }
     LoadU64(scratch_1, dst, scratch_0);
     LoadU64(scratch_0, src);
-    StoreP(scratch_1, src);
-    StoreP(scratch_0, dst, scratch_1);
+    StoreU64(scratch_1, src);
+    StoreU64(scratch_0, dst, scratch_1);
   } else {
     LoadU64(scratch_1, dst, scratch_0);
     push(scratch_1);
     LoadU64(scratch_0, src, scratch_1);
-    StoreP(scratch_0, dst, scratch_1);
+    StoreU64(scratch_0, dst, scratch_1);
     pop(scratch_1);
-    StoreP(scratch_1, src, scratch_0);
+    StoreU64(scratch_1, src, scratch_0);
   }
 }

@@ -3331,7 +3329,7 @@ void TurboAssembler::StoreReturnAddressAndCall(Register target) {
   LoadPC(r7);
   bind(&start_call);
   addi(r7, r7, Operand(after_call_offset));
-  StoreP(r7, MemOperand(sp, kStackFrameExtraParamSlot * kSystemPointerSize));
+  StoreU64(r7, MemOperand(sp, kStackFrameExtraParamSlot * kSystemPointerSize));
   Call(dest);

   DCHECK_EQ(after_call_offset - kInstrSize,
@@ -39,7 +39,6 @@ Register GetRegisterThatIsNotOneOf(Register reg1, Register reg2 = no_reg,
 // These exist to provide portability between 32 and 64bit
 #if V8_TARGET_ARCH_PPC64
 #define LoadPUX ldux
-#define StorePX stdx
 #define StorePUX stdux
 #define ShiftLeftImm sldi
 #define ShiftRightImm srdi
@@ -51,7 +50,6 @@ Register GetRegisterThatIsNotOneOf(Register reg1, Register reg2 = no_reg,
 #define ShiftRightArith srad
 #else
 #define LoadPUX lwzux
-#define StorePX stwx
 #define StorePUX stwux
 #define ShiftLeftImm slwi
 #define ShiftRightImm srwi
@@ -148,7 +146,7 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
   void LoadU64(Register dst, const MemOperand& mem, Register scratch = no_reg);
   void LoadPU(Register dst, const MemOperand& mem, Register scratch = no_reg);
   void LoadS32(Register dst, const MemOperand& mem, Register scratch = no_reg);
-  void StoreP(Register src, const MemOperand& mem, Register scratch = no_reg);
+  void StoreU64(Register src, const MemOperand& mem, Register scratch = no_reg);
   void StorePU(Register src, const MemOperand& mem, Register scratch = no_reg);

   void LoadDouble(DoubleRegister dst, const MemOperand& mem,
@@ -210,32 +208,32 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
   // Push two registers. Pushes leftmost register first (to highest address).
   void Push(Register src1, Register src2) {
     StorePU(src2, MemOperand(sp, -2 * kSystemPointerSize));
-    StoreP(src1, MemOperand(sp, kSystemPointerSize));
+    StoreU64(src1, MemOperand(sp, kSystemPointerSize));
   }

   // Push three registers. Pushes leftmost register first (to highest address).
   void Push(Register src1, Register src2, Register src3) {
     StorePU(src3, MemOperand(sp, -3 * kSystemPointerSize));
-    StoreP(src2, MemOperand(sp, kSystemPointerSize));
-    StoreP(src1, MemOperand(sp, 2 * kSystemPointerSize));
+    StoreU64(src2, MemOperand(sp, kSystemPointerSize));
+    StoreU64(src1, MemOperand(sp, 2 * kSystemPointerSize));
   }

   // Push four registers. Pushes leftmost register first (to highest address).
   void Push(Register src1, Register src2, Register src3, Register src4) {
     StorePU(src4, MemOperand(sp, -4 * kSystemPointerSize));
-    StoreP(src3, MemOperand(sp, kSystemPointerSize));
-    StoreP(src2, MemOperand(sp, 2 * kSystemPointerSize));
-    StoreP(src1, MemOperand(sp, 3 * kSystemPointerSize));
+    StoreU64(src3, MemOperand(sp, kSystemPointerSize));
+    StoreU64(src2, MemOperand(sp, 2 * kSystemPointerSize));
+    StoreU64(src1, MemOperand(sp, 3 * kSystemPointerSize));
   }

   // Push five registers. Pushes leftmost register first (to highest address).
   void Push(Register src1, Register src2, Register src3, Register src4,
             Register src5) {
     StorePU(src5, MemOperand(sp, -5 * kSystemPointerSize));
-    StoreP(src4, MemOperand(sp, kSystemPointerSize));
-    StoreP(src3, MemOperand(sp, 2 * kSystemPointerSize));
-    StoreP(src2, MemOperand(sp, 3 * kSystemPointerSize));
-    StoreP(src1, MemOperand(sp, 4 * kSystemPointerSize));
+    StoreU64(src4, MemOperand(sp, kSystemPointerSize));
+    StoreU64(src3, MemOperand(sp, 2 * kSystemPointerSize));
+    StoreU64(src2, MemOperand(sp, 3 * kSystemPointerSize));
+    StoreU64(src1, MemOperand(sp, 4 * kSystemPointerSize));
   }

   enum PushArrayOrder { kNormal, kReverse };
@@ -738,7 +736,7 @@ class V8_EXPORT_PRIVATE MacroAssembler : public TurboAssembler {
   }

   void StoreReceiver(Register rec, Register argc, Register scratch) {
-    StoreP(rec, MemOperand(sp, 0));
+    StoreU64(rec, MemOperand(sp, 0));
   }

   // ---------------------------------------------------------------------------
@@ -1040,8 +1040,8 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
         __ bind(&start_call);
         __ LoadPC(kScratchReg);
         __ addi(kScratchReg, kScratchReg, Operand(offset));
-        __ StoreP(kScratchReg,
-                  MemOperand(fp, WasmExitFrameConstants::kCallingPCOffset));
+        __ StoreU64(kScratchReg,
+                    MemOperand(fp, WasmExitFrameConstants::kCallingPCOffset));
         __ mtlr(r0);
       }
 #endif  // V8_ENABLE_WEBASSEMBLY
@@ -1795,8 +1795,8 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
           __ StoreSimd128(i.InputSimd128Register(0), MemOperand(ip, sp));
         }
       } else {
-        __ StoreP(i.InputRegister(0), MemOperand(sp, slot * kSystemPointerSize),
-                  r0);
+        __ StoreU64(i.InputRegister(0),
+                    MemOperand(sp, slot * kSystemPointerSize), r0);
       }
       break;
     }
@@ -4284,7 +4284,7 @@ void CodeGenerator::AssembleMove(InstructionOperand* source,
     if (destination->IsRegister()) {
       __ Move(g.ToRegister(destination), src);
     } else {
-      __ StoreP(src, g.ToMemOperand(destination), r0);
+      __ StoreU64(src, g.ToMemOperand(destination), r0);
     }
   } else if (source->IsStackSlot()) {
     DCHECK(destination->IsRegister() || destination->IsStackSlot());
@@ -4294,7 +4294,7 @@ void CodeGenerator::AssembleMove(InstructionOperand* source,
     } else {
       Register temp = kScratchReg;
       __ LoadU64(temp, src, r0);
-      __ StoreP(temp, g.ToMemOperand(destination), r0);
+      __ StoreU64(temp, g.ToMemOperand(destination), r0);
     }
   } else if (source->IsConstant()) {
     Constant src = g.ToConstant(source);
@@ -4361,7 +4361,7 @@ void CodeGenerator::AssembleMove(InstructionOperand* source,
           break;
       }
       if (destination->IsStackSlot()) {
-        __ StoreP(dst, g.ToMemOperand(destination), r0);
+        __ StoreU64(dst, g.ToMemOperand(destination), r0);
       }
     } else {
       DoubleRegister dst = destination->IsFPRegister()
@@ -165,7 +165,7 @@ void RegExpMacroAssemblerPPC::AdvanceRegister(int reg, int by) {
     __ LoadU64(r3, register_location(reg), r0);
     __ mov(r0, Operand(by));
     __ add(r3, r3, r0);
-    __ StoreP(r3, register_location(reg), r0);
+    __ StoreU64(r3, register_location(reg), r0);
   }
 }

@@ -176,7 +176,7 @@ void RegExpMacroAssemblerPPC::Backtrack() {
     Label next;
     __ LoadU64(r3, MemOperand(frame_pointer(), kBacktrackCount), r0);
     __ addi(r3, r3, Operand(1));
-    __ StoreP(r3, MemOperand(frame_pointer(), kBacktrackCount), r0);
+    __ StoreU64(r3, MemOperand(frame_pointer(), kBacktrackCount), r0);
     __ mov(r0, Operand(backtrack_limit()));
     __ cmp(r3, r0);
     __ bne(&next);
@@ -759,7 +759,7 @@ Handle<HeapObject> RegExpMacroAssemblerPPC::GetCode(Handle<String> source) {
     }
     // Store this value in a local variable, for use when clearing
    // position registers.
-    __ StoreP(r3, MemOperand(frame_pointer(), kStringStartMinusOne));
+    __ StoreU64(r3, MemOperand(frame_pointer(), kStringStartMinusOne));

     // Initialize code pointer register
     __ mov(code_pointer(), Operand(masm_->CodeObject()));
@@ -792,7 +792,7 @@ Handle<HeapObject> RegExpMacroAssemblerPPC::GetCode(Handle<String> source) {
       __ bdnz(&init_loop);
     } else {
       for (int i = 0; i < num_saved_registers_; i++) {
-        __ StoreP(r3, register_location(i), r0);
+        __ StoreU64(r3, register_location(i), r0);
       }
     }
   }
@@ -855,7 +855,7 @@ Handle<HeapObject> RegExpMacroAssemblerPPC::GetCode(Handle<String> source) {
       __ LoadU64(r5, MemOperand(frame_pointer(), kRegisterOutput));
       // Increment success counter.
       __ addi(r3, r3, Operand(1));
-      __ StoreP(r3, MemOperand(frame_pointer(), kSuccessfulCaptures));
+      __ StoreU64(r3, MemOperand(frame_pointer(), kSuccessfulCaptures));
       // Capture results have been stored, so the number of remaining global
       // output registers is reduced by the number of stored captures.
       __ subi(r4, r4, Operand(num_saved_registers_));
@@ -863,10 +863,10 @@ Handle<HeapObject> RegExpMacroAssemblerPPC::GetCode(Handle<String> source) {
       __ cmpi(r4, Operand(num_saved_registers_));
       __ blt(&return_r3);

-      __ StoreP(r4, MemOperand(frame_pointer(), kNumOutputRegisters));
+      __ StoreU64(r4, MemOperand(frame_pointer(), kNumOutputRegisters));
       // Advance the location for output.
       __ addi(r5, r5, Operand(num_saved_registers_ * kIntSize));
-      __ StoreP(r5, MemOperand(frame_pointer(), kRegisterOutput));
+      __ StoreU64(r5, MemOperand(frame_pointer(), kRegisterOutput));

       // Prepare r3 to initialize registers with its value in the next run.
       __ LoadU64(r3, MemOperand(frame_pointer(), kStringStartMinusOne));
@@ -1022,7 +1022,7 @@ void RegExpMacroAssemblerPPC::PopCurrentPosition() {

 void RegExpMacroAssemblerPPC::PopRegister(int register_index) {
   Pop(r3);
-  __ StoreP(r3, register_location(register_index), r0);
+  __ StoreU64(r3, register_location(register_index), r0);
 }


@@ -1074,7 +1074,7 @@ void RegExpMacroAssemblerPPC::SetCurrentPositionFromEnd(int by) {
 void RegExpMacroAssemblerPPC::SetRegister(int register_index, int to) {
   DCHECK(register_index >= num_saved_registers_);  // Reserved for positions!
   __ mov(r3, Operand(to));
-  __ StoreP(r3, register_location(register_index), r0);
+  __ StoreU64(r3, register_location(register_index), r0);
 }


@@ -1087,11 +1087,11 @@ bool RegExpMacroAssemblerPPC::Succeed() {
 void RegExpMacroAssemblerPPC::WriteCurrentPositionToRegister(int reg,
                                                              int cp_offset) {
   if (cp_offset == 0) {
-    __ StoreP(current_input_offset(), register_location(reg), r0);
+    __ StoreU64(current_input_offset(), register_location(reg), r0);
   } else {
     __ mov(r0, Operand(cp_offset * char_size()));
     __ add(r3, current_input_offset(), r0);
-    __ StoreP(r3, register_location(reg), r0);
+    __ StoreU64(r3, register_location(reg), r0);
   }
 }

@@ -1100,7 +1100,7 @@ void RegExpMacroAssemblerPPC::ClearRegisters(int reg_from, int reg_to) {
   DCHECK(reg_from <= reg_to);
   __ LoadU64(r3, MemOperand(frame_pointer(), kStringStartMinusOne));
   for (int reg = reg_from; reg <= reg_to; reg++) {
-    __ StoreP(r3, register_location(reg), r0);
+    __ StoreU64(r3, register_location(reg), r0);
   }
 }

@@ -1108,7 +1108,7 @@ void RegExpMacroAssemblerPPC::ClearRegisters(int reg_from, int reg_to) {
 void RegExpMacroAssemblerPPC::WriteStackPointerToRegister(int reg) {
   __ LoadU64(r4, MemOperand(frame_pointer(), kStackHighEnd));
   __ sub(r3, backtrack_stackpointer(), r4);
-  __ StoreP(r3, register_location(reg), r0);
+  __ StoreU64(r3, register_location(reg), r0);
 }


@@ -1134,8 +1134,8 @@ void RegExpMacroAssemblerPPC::CallCheckStackGuardState(Register scratch) {
     DCHECK(base::bits::IsPowerOfTwo(frame_alignment));
     __ ClearRightImm(sp, sp,
                      Operand(base::bits::WhichPowerOfTwo(frame_alignment)));
-    __ StoreP(scratch,
-              MemOperand(sp, stack_passed_arguments * kSystemPointerSize));
+    __ StoreU64(scratch,
+                MemOperand(sp, stack_passed_arguments * kSystemPointerSize));
   } else {
     // Make room for stack arguments
     stack_space += stack_passed_arguments;
@@ -352,7 +352,7 @@ void LiftoffAssembler::FillStackSlotsWithZero(int start, int size) {
     // Special straight-line code for up to nine words. Generates one
     // instruction per word.
     for (int offset = 4; offset <= size; offset += 4) {
-      StoreP(r0, liftoff::GetHalfStackSlot(start + offset, kLowWord));
+      StoreU64(r0, liftoff::GetHalfStackSlot(start + offset, kLowWord));
     }
   } else {
     // General case for bigger counts (9 instructions).
@@ -364,7 +364,7 @@ void LiftoffAssembler::FillStackSlotsWithZero(int start, int size) {

     Label loop;
     bind(&loop);
-    StoreP(r0, MemOperand(r0));
+    StoreU64(r0, MemOperand(r0));
     addi(r0, r0, Operand(kSystemPointerSize));
     cmp(r4, r5);
     bne(&loop);