[arm] Port native routines to use UseScratchRegisterScope

Make use of UseScratchRegisterScope instead of using the ip register directly in
code stubs, builtins and the deoptimizer. In a lot of cases, we can
simply use a different register rather than using the new scope.

Bug: v8:6553
Change-Id: Ibc8a9a78bb88f3850c6e8b45871cc3a5b3971b3b
Reviewed-on: https://chromium-review.googlesource.com/544837
Commit-Queue: Pierre Langlois <pierre.langlois@arm.com>
Reviewed-by: Benedikt Meurer <bmeurer@chromium.org>
Cr-Commit-Position: refs/heads/master@{#46429}
Author: Pierre Langlois <pierre.langlois@arm.com>
Date:   2017-06-09 11:15:48 +01:00 (committed by Commit Bot)
Commit: f6aed61992 (parent 6cb999b97b)
4 changed files with 203 additions and 148 deletions
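
The change applies two patterns. Where no allocatable register happens to be free, a
UseScratchRegisterScope is opened and a scratch register is acquired for the shortest
possible window; where a low register such as r5 or r6 is already free, the code simply
names it instead (see the `Register scratch = r6;` lines in the hunks below). A minimal
sketch of the scope-based pattern, assuming the usual V8 `#define __ masm->` shorthand and
the ARM MacroAssembler API of this revision; the function names are made up for
illustration and are not taken from the changed files:

#define __ masm->

// Old style: ip is named directly, which clashes with the assembler's own
// use of ip as a scratch register.
void PushBadFramePointerOld(MacroAssembler* masm) {
  __ mov(ip, Operand(-1));  // Push a bad frame pointer to fail if it is used.
  __ push(ip);
}

// New style: borrow a scratch register from the assembler instead; it is
// released again when 'temps' goes out of scope.
void PushBadFramePointerNew(MacroAssembler* masm) {
  UseScratchRegisterScope temps(masm);
  Register scratch = temps.Acquire();
  __ mov(scratch, Operand(-1));
  __ push(scratch);
}

#undef __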


@@ -1006,9 +1006,16 @@ void JSEntryStub::Generate(MacroAssembler* masm) {
__ mov(r5, Operand(ExternalReference(IsolateAddressId::kCEntryFPAddress,
isolate())));
__ ldr(r5, MemOperand(r5));
__ mov(ip, Operand(-1)); // Push a bad frame pointer to fail if it is used.
__ stm(db_w, sp, r5.bit() | r6.bit() | r7.bit() |
ip.bit());
{
UseScratchRegisterScope temps(masm);
Register scratch = temps.Acquire();
// Push a bad frame pointer to fail if it is used.
__ mov(scratch, Operand(-1));
__ stm(db_w, sp, r5.bit() | r6.bit() | r7.bit() | scratch.bit());
}
Register scratch = r6;
// Set up frame pointer for the frame to be pushed.
__ add(fp, sp, Operand(-EntryFrameConstants::kCallerFPOffset));
@@ -1017,17 +1024,17 @@ void JSEntryStub::Generate(MacroAssembler* masm) {
Label non_outermost_js;
ExternalReference js_entry_sp(IsolateAddressId::kJSEntrySPAddress, isolate());
__ mov(r5, Operand(ExternalReference(js_entry_sp)));
__ ldr(r6, MemOperand(r5));
__ cmp(r6, Operand::Zero());
__ ldr(scratch, MemOperand(r5));
__ cmp(scratch, Operand::Zero());
__ b(ne, &non_outermost_js);
__ str(fp, MemOperand(r5));
__ mov(ip, Operand(StackFrame::OUTERMOST_JSENTRY_FRAME));
__ mov(scratch, Operand(StackFrame::OUTERMOST_JSENTRY_FRAME));
Label cont;
__ b(&cont);
__ bind(&non_outermost_js);
__ mov(ip, Operand(StackFrame::INNER_JSENTRY_FRAME));
__ mov(scratch, Operand(StackFrame::INNER_JSENTRY_FRAME));
__ bind(&cont);
__ push(ip);
__ push(scratch);
// Jump to a faked try block that does the invoke, with a faked catch
// block that sets the pending exception.
@@ -1044,10 +1051,11 @@ void JSEntryStub::Generate(MacroAssembler* masm) {
// field in the JSEnv and return a failure sentinel. Coming in here the
// fp will be invalid because the PushStackHandler below sets it to 0 to
// signal the existence of the JSEntry frame.
__ mov(ip, Operand(ExternalReference(
IsolateAddressId::kPendingExceptionAddress, isolate())));
__ mov(scratch,
Operand(ExternalReference(IsolateAddressId::kPendingExceptionAddress,
isolate())));
}
__ str(r0, MemOperand(ip));
__ str(r0, MemOperand(scratch));
__ LoadRoot(r0, Heap::kExceptionRootIndex);
__ b(&exit);
@@ -1073,16 +1081,16 @@ void JSEntryStub::Generate(MacroAssembler* masm) {
if (type() == StackFrame::ENTRY_CONSTRUCT) {
ExternalReference construct_entry(Builtins::kJSConstructEntryTrampoline,
isolate());
__ mov(ip, Operand(construct_entry));
__ mov(scratch, Operand(construct_entry));
} else {
ExternalReference entry(Builtins::kJSEntryTrampoline, isolate());
__ mov(ip, Operand(entry));
__ mov(scratch, Operand(entry));
}
__ ldr(ip, MemOperand(ip)); // deref address
__ add(ip, ip, Operand(Code::kHeaderSize - kHeapObjectTag));
__ ldr(scratch, MemOperand(scratch)); // deref address
__ add(scratch, scratch, Operand(Code::kHeaderSize - kHeapObjectTag));
// Branch and link to JSEntryTrampoline.
__ Call(ip);
__ Call(scratch);
// Unlink this frame from the handler chain.
__ PopStackHandler();
@@ -1100,9 +1108,9 @@ void JSEntryStub::Generate(MacroAssembler* masm) {
// Restore the top frame descriptors from the stack.
__ pop(r3);
__ mov(ip, Operand(ExternalReference(IsolateAddressId::kCEntryFPAddress,
isolate())));
__ str(r3, MemOperand(ip));
__ mov(scratch, Operand(ExternalReference(IsolateAddressId::kCEntryFPAddress,
isolate())));
__ str(r3, MemOperand(scratch));
// Reset the stack to the callee saved registers.
__ add(sp, sp, Operand(-EntryFrameConstants::kCallerFPOffset));
@@ -1203,8 +1211,8 @@ static void GenerateRecordCallTarget(MacroAssembler* masm) {
// write-barrier is needed.
__ bind(&megamorphic);
__ add(r5, r2, Operand::PointerOffsetFromSmiKey(r3));
__ LoadRoot(ip, Heap::kmegamorphic_symbolRootIndex);
__ str(ip, FieldMemOperand(r5, FixedArray::kHeaderSize));
__ LoadRoot(r4, Heap::kmegamorphic_symbolRootIndex);
__ str(r4, FieldMemOperand(r5, FixedArray::kHeaderSize));
__ jmp(&done);
// An uninitialized cache is patched with the function
@@ -1296,8 +1304,8 @@ void StringCharCodeAtGenerator::GenerateFast(MacroAssembler* masm) {
__ bind(&got_smi_index_);
// Check for index out of range.
__ ldr(ip, FieldMemOperand(object_, String::kLengthOffset));
__ cmp(ip, Operand(index_));
__ ldr(result_, FieldMemOperand(object_, String::kLengthOffset));
__ cmp(result_, Operand(index_));
__ b(ls, index_out_of_range_);
__ SmiUntag(index_);
@@ -1796,22 +1804,22 @@ void CompareICStub::GenerateKnownReceivers(MacroAssembler* masm) {
void CompareICStub::GenerateMiss(MacroAssembler* masm) {
Register scratch = r2;
{
// Call the runtime system in a fresh internal frame.
FrameAndConstantPoolScope scope(masm, StackFrame::INTERNAL);
__ Push(r1, r0);
__ Push(lr, r1, r0);
__ mov(ip, Operand(Smi::FromInt(op())));
__ push(ip);
__ mov(scratch, Operand(Smi::FromInt(op())));
__ push(scratch);
__ CallRuntime(Runtime::kCompareIC_Miss);
// Compute the entry point of the rewritten stub.
__ add(r2, r0, Operand(Code::kHeaderSize - kHeapObjectTag));
__ add(scratch, r0, Operand(Code::kHeaderSize - kHeapObjectTag));
// Restore registers.
__ pop(lr);
__ Pop(r1, r0);
}
__ Jump(r2);
__ Jump(scratch);
}
@@ -2245,22 +2253,27 @@ void ProfileEntryHookStub::Generate(MacroAssembler* masm) {
__ and_(sp, sp, Operand(-frame_alignment));
}
#if V8_HOST_ARCH_ARM
int32_t entry_hook =
reinterpret_cast<int32_t>(isolate()->function_entry_hook());
__ mov(ip, Operand(entry_hook));
#else
// Under the simulator we need to indirect the entry hook through a
// trampoline function at a known address.
// It additionally takes an isolate as a third parameter
__ mov(r2, Operand(ExternalReference::isolate_address(isolate())));
{
UseScratchRegisterScope temps(masm);
Register scratch = temps.Acquire();
ApiFunction dispatcher(FUNCTION_ADDR(EntryHookTrampoline));
__ mov(ip, Operand(ExternalReference(&dispatcher,
ExternalReference::BUILTIN_CALL,
isolate())));
#if V8_HOST_ARCH_ARM
int32_t entry_hook =
reinterpret_cast<int32_t>(isolate()->function_entry_hook());
__ mov(scratch, Operand(entry_hook));
#else
// Under the simulator we need to indirect the entry hook through a
// trampoline function at a known address.
// It additionally takes an isolate as a third parameter
__ mov(r2, Operand(ExternalReference::isolate_address(isolate())));
ApiFunction dispatcher(FUNCTION_ADDR(EntryHookTrampoline));
__ mov(scratch,
Operand(ExternalReference(
&dispatcher, ExternalReference::BUILTIN_CALL, isolate())));
#endif
__ Call(ip);
__ Call(scratch);
}
// Restore the stack pointer if needed.
if (frame_alignment > kPointerSize) {
@@ -2648,8 +2661,8 @@ static void CallApiFunctionAndReturn(MacroAssembler* masm,
}
__ sub(r6, r6, Operand(1));
__ str(r6, MemOperand(r9, kLevelOffset));
__ ldr(ip, MemOperand(r9, kLimitOffset));
__ cmp(r5, ip);
__ ldr(r6, MemOperand(r9, kLimitOffset));
__ cmp(r5, r6);
__ b(ne, &delete_allocated_handles);
// Leave the API exit frame.
@@ -2668,8 +2681,8 @@ static void CallApiFunctionAndReturn(MacroAssembler* masm,
// Check if the function scheduled an exception.
__ LoadRoot(r4, Heap::kTheHoleValueRootIndex);
__ mov(ip, Operand(ExternalReference::scheduled_exception_address(isolate)));
__ ldr(r5, MemOperand(ip));
__ mov(r6, Operand(ExternalReference::scheduled_exception_address(isolate)));
__ ldr(r5, MemOperand(r6));
__ cmp(r4, r5);
__ b(ne, &promote_scheduled_exception);
@@ -2739,20 +2752,22 @@ void CallApiCallbackStub::Generate(MacroAssembler* masm) {
// call data
__ push(call_data);
Register scratch = call_data;
__ LoadRoot(scratch, Heap::kUndefinedValueRootIndex);
Register scratch0 = call_data;
Register scratch1 = r5;
__ LoadRoot(scratch0, Heap::kUndefinedValueRootIndex);
// return value
__ push(scratch);
__ push(scratch0);
// return value default
__ push(scratch);
__ push(scratch0);
// isolate
__ mov(scratch, Operand(ExternalReference::isolate_address(masm->isolate())));
__ push(scratch);
__ mov(scratch1,
Operand(ExternalReference::isolate_address(masm->isolate())));
__ push(scratch1);
// holder
__ push(holder);
// Prepare arguments.
__ mov(scratch, sp);
__ mov(scratch0, sp);
// Allocate the v8::Arguments structure in the arguments' space since
// it's not controlled by GC.
@@ -2761,18 +2776,19 @@ void CallApiCallbackStub::Generate(MacroAssembler* masm) {
FrameScope frame_scope(masm, StackFrame::MANUAL);
__ EnterExitFrame(false, kApiStackSpace);
DCHECK(!api_function_address.is(r0) && !scratch.is(r0));
DCHECK(!api_function_address.is(r0) && !scratch0.is(r0));
// r0 = FunctionCallbackInfo&
// Arguments is after the return address.
__ add(r0, sp, Operand(1 * kPointerSize));
// FunctionCallbackInfo::implicit_args_
__ str(scratch, MemOperand(r0, 0 * kPointerSize));
__ str(scratch0, MemOperand(r0, 0 * kPointerSize));
// FunctionCallbackInfo::values_
__ add(ip, scratch, Operand((FCA::kArgsLength - 1 + argc()) * kPointerSize));
__ str(ip, MemOperand(r0, 1 * kPointerSize));
__ add(scratch1, scratch0,
Operand((FCA::kArgsLength - 1 + argc()) * kPointerSize));
__ str(scratch1, MemOperand(r0, 1 * kPointerSize));
// FunctionCallbackInfo::length_ = argc
__ mov(ip, Operand(argc()));
__ str(ip, MemOperand(r0, 2 * kPointerSize));
__ mov(scratch0, Operand(argc()));
__ str(scratch0, MemOperand(r0, 2 * kPointerSize));
ExternalReference thunk_ref =
ExternalReference::invoke_function_callback(masm->isolate());


@@ -143,7 +143,8 @@ MemCopyUint8Function CreateMemCopyUint8Function(Isolate* isolate,
__ ldr(temp1, MemOperand(src, 4, PostIndex));
__ str(temp1, MemOperand(dest, 4, PostIndex));
} else {
Register temp2 = ip;
UseScratchRegisterScope temps(&masm);
Register temp2 = temps.Acquire();
Label loop;
__ bic(temp2, chars, Operand(0x3), SetCC);
@@ -219,8 +220,10 @@ MemCopyUint16Uint8Function CreateMemCopyUint16Uint8Function(
__ vst1(Neon16, NeonListOperand(d0, 2), NeonMemOperand(dest));
__ Ret();
} else {
UseScratchRegisterScope temps(&masm);
Register temp1 = r3;
Register temp2 = ip;
Register temp2 = temps.Acquire();
Register temp3 = lr;
Register temp4 = r4;
Label loop;


@@ -111,9 +111,11 @@ void Deoptimizer::TableEntryGenerator::Generate() {
// We use a run-time check for VFP32DREGS.
CpuFeatureScope scope(masm(), VFP32DREGS,
CpuFeatureScope::kDontCheckSupported);
UseScratchRegisterScope temps(masm());
Register scratch = temps.Acquire();
// Check CPU flags for number of registers, setting the Z condition flag.
__ CheckFor32DRegs(ip);
__ CheckFor32DRegs(scratch);
// Push registers d0-d15, and possibly d16-d31, on the stack.
// If d16-d31 are not pushed, decrease the stack pointer instead.
@@ -130,9 +132,13 @@ void Deoptimizer::TableEntryGenerator::Generate() {
// handle this a bit differently.
__ stm(db_w, sp, restored_regs | sp.bit() | lr.bit() | pc.bit());
__ mov(ip, Operand(ExternalReference(IsolateAddressId::kCEntryFPAddress,
isolate())));
__ str(fp, MemOperand(ip));
{
UseScratchRegisterScope temps(masm());
Register scratch = temps.Acquire();
__ mov(scratch, Operand(ExternalReference(
IsolateAddressId::kCEntryFPAddress, isolate())));
__ str(fp, MemOperand(scratch));
}
const int kSavedRegistersAreaSize =
(kNumberOfRegisters * kPointerSize) + kDoubleRegsSize + kFloatRegsSize;
@@ -294,15 +300,18 @@ void Deoptimizer::TableEntryGenerator::Generate() {
// Restore the registers from the stack.
__ ldm(ia_w, sp, restored_regs); // all but pc registers.
__ pop(ip); // remove sp
__ pop(ip); // remove lr
__ InitializeRootRegister();
__ pop(ip); // remove pc
__ pop(ip); // get continuation, leave pc on stack
__ pop(lr);
__ Jump(ip);
// Remove sp, lr and pc.
__ Drop(3);
{
UseScratchRegisterScope temps(masm());
Register scratch = temps.Acquire();
__ pop(scratch); // get continuation, leave pc on stack
__ pop(lr);
__ Jump(scratch);
}
__ stop("Unreachable.");
}
@@ -315,13 +324,15 @@ void Deoptimizer::TableEntryGenerator::GeneratePrologue() {
// ARMv7, we can use movw (with a maximum immediate of 0xffff). On ARMv6, we
// need two instructions.
STATIC_ASSERT((kMaxNumberOfEntries - 1) <= 0xffff);
UseScratchRegisterScope temps(masm());
Register scratch = temps.Acquire();
if (CpuFeatures::IsSupported(ARMv7)) {
CpuFeatureScope scope(masm(), ARMv7);
Label done;
for (int i = 0; i < count(); i++) {
int start = masm()->pc_offset();
USE(start);
__ movw(ip, i);
__ movw(scratch, i);
__ b(&done);
DCHECK_EQ(table_entry_size_, masm()->pc_offset() - start);
}
@@ -337,14 +348,14 @@ void Deoptimizer::TableEntryGenerator::GeneratePrologue() {
for (int i = 0; i < count(); i++) {
int start = masm()->pc_offset();
USE(start);
__ mov(ip, Operand(i & 0xff)); // Set the low byte.
__ mov(scratch, Operand(i & 0xff)); // Set the low byte.
__ b(&high_fixes[i >> 8]); // Jump to the secondary table.
DCHECK_EQ(table_entry_size_, masm()->pc_offset() - start);
}
// Generate the secondary table, to set the high byte.
for (int high = 1; high <= high_fix_max; high++) {
__ bind(&high_fixes[high]);
__ orr(ip, ip, Operand(high << 8));
__ orr(scratch, scratch, Operand(high << 8));
// If this isn't the last entry, emit a branch to the end of the table.
// The last entry can just fall through.
if (high < high_fix_max) __ b(&high_fixes[0]);
@@ -354,7 +365,7 @@ void Deoptimizer::TableEntryGenerator::GeneratePrologue() {
// through with no additional branch.
__ bind(&high_fixes[0]);
}
__ push(ip);
__ push(scratch);
}


@@ -439,6 +439,8 @@ void Generate_JSBuiltinsConstructStubHelper(MacroAssembler* masm) {
// -- sp[...]: constructor arguments
// -----------------------------------
Register scratch = r2;
// Enter a construct frame.
{
FrameAndConstantPoolScope scope(masm, StackFrame::CONSTRUCT);
@@ -469,8 +471,8 @@ void Generate_JSBuiltinsConstructStubHelper(MacroAssembler* masm) {
// -----------------------------------
__ b(&entry);
__ bind(&loop);
__ ldr(ip, MemOperand(r4, r5, LSL, kPointerSizeLog2));
__ push(ip);
__ ldr(scratch, MemOperand(r4, r5, LSL, kPointerSizeLog2));
__ push(scratch);
__ bind(&entry);
__ sub(r5, r5, Operand(1), SetCC);
__ b(ge, &loop);
@@ -486,13 +488,13 @@ void Generate_JSBuiltinsConstructStubHelper(MacroAssembler* masm) {
// Restore context from the frame.
__ ldr(cp, MemOperand(fp, ConstructFrameConstants::kContextOffset));
// Restore smi-tagged arguments count from the frame.
__ ldr(r1, MemOperand(fp, ConstructFrameConstants::kLengthOffset));
__ ldr(scratch, MemOperand(fp, ConstructFrameConstants::kLengthOffset));
// Leave construct frame.
}
// Remove caller arguments from the stack and return.
STATIC_ASSERT(kSmiTagSize == 1 && kSmiTag == 0);
__ add(sp, sp, Operand(r1, LSL, kPointerSizeLog2 - kSmiTagSize));
__ add(sp, sp, Operand(scratch, LSL, kPointerSizeLog2 - kSmiTagSize));
__ add(sp, sp, Operand(kPointerSize));
__ Jump(lr);
}
@@ -592,9 +594,10 @@ void Generate_JSConstructStubGeneric(MacroAssembler* masm,
// -- sp[4*kPointerSize]: context
// -----------------------------------
__ b(&entry);
__ bind(&loop);
__ ldr(ip, MemOperand(r4, r5, LSL, kPointerSizeLog2));
__ push(ip);
__ ldr(r6, MemOperand(r4, r5, LSL, kPointerSizeLog2));
__ push(r6);
__ bind(&entry);
__ sub(r5, r5, Operand(1), SetCC);
__ b(ge, &loop);
@@ -731,28 +734,31 @@ void Builtins::Generate_ResumeGeneratorTrampoline(MacroAssembler* masm) {
__ ldr(r4, FieldMemOperand(r1, JSGeneratorObject::kFunctionOffset));
__ ldr(cp, FieldMemOperand(r4, JSFunction::kContextOffset));
// Flood function if we are stepping.
Label prepare_step_in_if_stepping, prepare_step_in_suspended_generator;
Label stepping_prepared;
Register scratch = r5;
// Flood function if we are stepping.
ExternalReference debug_hook =
ExternalReference::debug_hook_on_function_call_address(masm->isolate());
__ mov(ip, Operand(debug_hook));
__ ldrsb(ip, MemOperand(ip));
__ cmp(ip, Operand(0));
__ mov(scratch, Operand(debug_hook));
__ ldrsb(scratch, MemOperand(scratch));
__ cmp(scratch, Operand(0));
__ b(ne, &prepare_step_in_if_stepping);
// Flood function if we need to continue stepping in the suspended generator.
// Flood function if we need to continue stepping in the suspended
// generator.
ExternalReference debug_suspended_generator =
ExternalReference::debug_suspended_generator_address(masm->isolate());
__ mov(ip, Operand(debug_suspended_generator));
__ ldr(ip, MemOperand(ip));
__ cmp(ip, Operand(r1));
__ mov(scratch, Operand(debug_suspended_generator));
__ ldr(scratch, MemOperand(scratch));
__ cmp(scratch, Operand(r1));
__ b(eq, &prepare_step_in_suspended_generator);
__ bind(&stepping_prepared);
// Push receiver.
__ ldr(ip, FieldMemOperand(r1, JSGeneratorObject::kReceiverOffset));
__ Push(ip);
__ ldr(scratch, FieldMemOperand(r1, JSGeneratorObject::kReceiverOffset));
__ Push(scratch);
// ----------- S t a t e -------------
// -- r1 : the JSGeneratorObject to resume
@@ -798,8 +804,8 @@ void Builtins::Generate_ResumeGeneratorTrampoline(MacroAssembler* masm) {
// undefined because generator functions are non-constructable.
__ Move(r3, r1);
__ Move(r1, r4);
__ ldr(r5, FieldMemOperand(r1, JSFunction::kCodeEntryOffset));
__ Jump(r5);
__ ldr(scratch, FieldMemOperand(r1, JSFunction::kCodeEntryOffset));
__ Jump(scratch);
}
__ bind(&prepare_step_in_if_stepping);
@@ -1056,14 +1062,13 @@ static void MaybeTailCallOptimizedCodeSlot(MacroAssembler* masm,
Operand(Smi::FromEnum(OptimizationMarker::kInOptimizationQueue)));
__ Assert(eq, kExpectedOptimizationSentinel);
}
// Checking whether the queued function is ready for install is optional,
// since we come across interrupts and stack checks elsewhere. However,
// not checking may delay installing ready functions, and always checking
// would be quite expensive. A good compromise is to first check against
// stack limit as a cue for an interrupt signal.
__ LoadRoot(ip, Heap::kStackLimitRootIndex);
__ cmp(sp, Operand(ip));
// Checking whether the queued function is ready for install is
// optional, since we come across interrupts and stack checks elsewhere.
// However, not checking may delay installing ready functions, and
// always checking would be quite expensive. A good compromise is to
// first check against stack limit as a cue for an interrupt signal.
__ LoadRoot(scratch2, Heap::kStackLimitRootIndex);
__ cmp(sp, Operand(scratch2));
__ b(hs, &fallthrough);
GenerateTailCallToReturnedCode(masm, Runtime::kTryInstallOptimizedCode);
}
@@ -1229,9 +1234,9 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
// Dispatch to the first bytecode handler for the function.
__ ldrb(r1, MemOperand(kInterpreterBytecodeArrayRegister,
kInterpreterBytecodeOffsetRegister));
__ ldr(ip, MemOperand(kInterpreterDispatchTableRegister, r1, LSL,
__ ldr(r4, MemOperand(kInterpreterDispatchTableRegister, r1, LSL,
kPointerSizeLog2));
__ Call(ip);
__ Call(r4);
masm->isolate()->heap()->SetInterpreterEntryReturnPCOffset(masm->pc_offset());
// The return value is in r0.
@@ -1361,8 +1366,8 @@ void Builtins::Generate_InterpreterPushArgsThenConstructImpl(
Label stack_overflow;
// Push a slot for the receiver to be constructed.
__ mov(ip, Operand::Zero());
__ push(ip);
__ mov(r5, Operand::Zero());
__ push(r5);
Generate_StackOverflowCheck(masm, r0, r5, &stack_overflow);
@@ -1415,8 +1420,8 @@ void Builtins::Generate_InterpreterPushArgsThenConstructArray(
Label stack_overflow;
// Push a slot for the receiver to be constructed.
__ mov(ip, Operand::Zero());
__ push(ip);
__ mov(r5, Operand::Zero());
__ push(r5);
Generate_StackOverflowCheck(masm, r0, r5, &stack_overflow);
@@ -1473,9 +1478,11 @@ static void Generate_InterpreterEnterBytecode(MacroAssembler* masm) {
// Dispatch to the target bytecode.
__ ldrb(r1, MemOperand(kInterpreterBytecodeArrayRegister,
kInterpreterBytecodeOffsetRegister));
__ ldr(ip, MemOperand(kInterpreterDispatchTableRegister, r1, LSL,
kPointerSizeLog2));
__ mov(pc, ip);
UseScratchRegisterScope temps(masm);
Register scratch = temps.Acquire();
__ ldr(scratch, MemOperand(kInterpreterDispatchTableRegister, r1, LSL,
kPointerSizeLog2));
__ Jump(scratch);
}
void Builtins::Generate_InterpreterEnterBytecodeAdvance(MacroAssembler* masm) {
@@ -1741,11 +1748,14 @@ void Generate_ContinueToBuiltinHelper(MacroAssembler* masm,
}
__ ldr(fp, MemOperand(
sp, BuiltinContinuationFrameConstants::kFixedFrameSizeFromFp));
__ Pop(ip);
UseScratchRegisterScope temps(masm);
Register scratch = temps.Acquire();
__ Pop(scratch);
__ add(sp, sp,
Operand(BuiltinContinuationFrameConstants::kFixedFrameSizeFromFp));
__ Pop(lr);
__ add(pc, ip, Operand(Code::kHeaderSize - kHeapObjectTag));
__ add(pc, scratch, Operand(Code::kHeaderSize - kHeapObjectTag));
}
} // namespace
@@ -1949,13 +1959,14 @@ void Builtins::Generate_FunctionPrototypeCall(MacroAssembler* masm) {
// r0: actual number of arguments
// r1: callable
{
Register scratch = r3;
Label loop;
// Calculate the copy start address (destination). Copy end address is sp.
__ add(r2, sp, Operand(r0, LSL, kPointerSizeLog2));
__ bind(&loop);
__ ldr(ip, MemOperand(r2, -kPointerSize));
__ str(ip, MemOperand(r2));
__ ldr(scratch, MemOperand(r2, -kPointerSize));
__ str(scratch, MemOperand(r2));
__ sub(r2, r2, Operand(kPointerSize));
__ cmp(r2, sp);
__ b(ne, &loop);
@@ -2092,17 +2103,19 @@ void Builtins::Generate_CallOrConstructVarargs(MacroAssembler* masm,
// -----------------------------------
__ AssertFixedArray(r2);
Register scratch = r8;
// Check for stack overflow.
{
// Check the stack for overflow. We are not trying to catch interruptions
// (i.e. debug break and preemption) here, so check the "real stack limit".
Label done;
__ LoadRoot(ip, Heap::kRealStackLimitRootIndex);
// Make ip the space we have left. The stack might already be overflowed
// here which will cause ip to become negative.
__ sub(ip, sp, ip);
__ LoadRoot(scratch, Heap::kRealStackLimitRootIndex);
// The stack might already be overflowed here which will cause 'scratch' to
// become negative.
__ sub(scratch, sp, scratch);
// Check if the arguments will overflow the stack.
__ cmp(ip, Operand(r4, LSL, kPointerSizeLog2));
__ cmp(scratch, Operand(r4, LSL, kPointerSizeLog2));
__ b(gt, &done); // Signed comparison.
__ TailCallRuntime(Runtime::kThrowStackOverflow);
__ bind(&done);
@@ -2116,11 +2129,11 @@ void Builtins::Generate_CallOrConstructVarargs(MacroAssembler* masm,
__ bind(&loop);
__ cmp(r6, r4);
__ b(eq, &done);
__ add(ip, r2, Operand(r6, LSL, kPointerSizeLog2));
__ ldr(ip, FieldMemOperand(ip, FixedArray::kHeaderSize));
__ cmp(ip, r5);
__ LoadRoot(ip, Heap::kUndefinedValueRootIndex, eq);
__ Push(ip);
__ add(scratch, r2, Operand(r6, LSL, kPointerSizeLog2));
__ ldr(scratch, FieldMemOperand(scratch, FixedArray::kHeaderSize));
__ cmp(scratch, r5);
__ LoadRoot(scratch, Heap::kUndefinedValueRootIndex, eq);
__ Push(scratch);
__ add(r6, r6, Operand(1));
__ b(&loop);
__ bind(&done);
@@ -2141,11 +2154,15 @@ void Builtins::Generate_CallOrConstructForwardVarargs(MacroAssembler* masm,
// -- r2 : start index (to support rest parameters)
// -----------------------------------
Register scratch = r6;
// Check if we have an arguments adaptor frame below the function frame.
Label arguments_adaptor, arguments_done;
__ ldr(r4, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
__ ldr(ip, MemOperand(r4, CommonFrameConstants::kContextOrFrameTypeOffset));
__ cmp(ip, Operand(StackFrame::TypeToMarker(StackFrame::ARGUMENTS_ADAPTOR)));
__ ldr(scratch,
MemOperand(r4, CommonFrameConstants::kContextOrFrameTypeOffset));
__ cmp(scratch,
Operand(StackFrame::TypeToMarker(StackFrame::ARGUMENTS_ADAPTOR)));
__ b(eq, &arguments_adaptor);
{
__ ldr(r5, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
@@ -2177,8 +2194,8 @@ void Builtins::Generate_CallOrConstructForwardVarargs(MacroAssembler* masm,
__ add(r0, r0, r5);
__ bind(&loop);
{
__ ldr(ip, MemOperand(r4, r5, LSL, kPointerSizeLog2));
__ push(ip);
__ ldr(scratch, MemOperand(r4, r5, LSL, kPointerSizeLog2));
__ push(scratch);
__ sub(r5, r5, Operand(1), SetCC);
__ b(ne, &loop);
}
@@ -2438,6 +2455,8 @@ void Generate_PushBoundArguments(MacroAssembler* masm) {
__ bind(&done);
}
Register scratch = r6;
// Relocate arguments down the stack.
{
Label loop, done_loop;
@@ -2445,8 +2464,8 @@ void Generate_PushBoundArguments(MacroAssembler* masm) {
__ bind(&loop);
__ cmp(r5, r0);
__ b(gt, &done_loop);
__ ldr(ip, MemOperand(sp, r4, LSL, kPointerSizeLog2));
__ str(ip, MemOperand(sp, r5, LSL, kPointerSizeLog2));
__ ldr(scratch, MemOperand(sp, r4, LSL, kPointerSizeLog2));
__ str(scratch, MemOperand(sp, r5, LSL, kPointerSizeLog2));
__ add(r4, r4, Operand(1));
__ add(r5, r5, Operand(1));
__ b(&loop);
@@ -2461,8 +2480,8 @@ void Generate_PushBoundArguments(MacroAssembler* masm) {
__ add(r2, r2, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
__ bind(&loop);
__ sub(r4, r4, Operand(1), SetCC);
__ ldr(ip, MemOperand(r2, r4, LSL, kPointerSizeLog2));
__ str(ip, MemOperand(sp, r0, LSL, kPointerSizeLog2));
__ ldr(scratch, MemOperand(r2, r4, LSL, kPointerSizeLog2));
__ str(scratch, MemOperand(sp, r0, LSL, kPointerSizeLog2));
__ add(r0, r0, Operand(1));
__ b(gt, &loop);
}
@@ -2486,18 +2505,19 @@ void Builtins::Generate_CallBoundFunctionImpl(MacroAssembler* masm,
}
// Patch the receiver to [[BoundThis]].
__ ldr(ip, FieldMemOperand(r1, JSBoundFunction::kBoundThisOffset));
__ str(ip, MemOperand(sp, r0, LSL, kPointerSizeLog2));
__ ldr(r3, FieldMemOperand(r1, JSBoundFunction::kBoundThisOffset));
__ str(r3, MemOperand(sp, r0, LSL, kPointerSizeLog2));
// Push the [[BoundArguments]] onto the stack.
Generate_PushBoundArguments(masm);
// Call the [[BoundTargetFunction]] via the Call builtin.
__ ldr(r1, FieldMemOperand(r1, JSBoundFunction::kBoundTargetFunctionOffset));
__ mov(ip, Operand(ExternalReference(Builtins::kCall_ReceiverIsAny,
__ mov(r3, Operand(ExternalReference(Builtins::kCall_ReceiverIsAny,
masm->isolate())));
__ ldr(ip, MemOperand(ip));
__ add(pc, ip, Operand(Code::kHeaderSize - kHeapObjectTag));
__ ldr(r3, MemOperand(r3));
__ add(pc, r3, Operand(Code::kHeaderSize - kHeapObjectTag));
}
// static
@@ -2599,9 +2619,10 @@ void Builtins::Generate_ConstructBoundFunction(MacroAssembler* masm) {
// Construct the [[BoundTargetFunction]] via the Construct builtin.
__ ldr(r1, FieldMemOperand(r1, JSBoundFunction::kBoundTargetFunctionOffset));
__ mov(ip, Operand(ExternalReference(Builtins::kConstruct, masm->isolate())));
__ ldr(ip, MemOperand(ip));
__ add(pc, ip, Operand(Code::kHeaderSize - kHeapObjectTag));
__ mov(r2, Operand(ExternalReference(Builtins::kConstruct, masm->isolate())));
__ ldr(r2, MemOperand(r2));
__ add(pc, r2, Operand(Code::kHeaderSize - kHeapObjectTag));
}
// static
@@ -2726,10 +2747,12 @@ void Builtins::Generate_ArgumentsAdaptorTrampoline(MacroAssembler* masm) {
__ cmp(r2, Operand(SharedFunctionInfo::kDontAdaptArgumentsSentinel));
__ b(eq, &dont_adapt_arguments);
Register scratch = r5;
{ // Enough parameters: actual >= expected
__ bind(&enough);
EnterArgumentsAdaptorFrame(masm);
Generate_StackOverflowCheck(masm, r2, r5, &stack_overflow);
Generate_StackOverflowCheck(masm, r2, scratch, &stack_overflow);
// Calculate copy start address into r0 and copy end address into r4.
// r0: actual number of arguments as a smi
@@ -2750,8 +2773,8 @@ void Builtins::Generate_ArgumentsAdaptorTrampoline(MacroAssembler* masm) {
Label copy;
__ bind(&copy);
__ ldr(ip, MemOperand(r0, 0));
__ push(ip);
__ ldr(scratch, MemOperand(r0, 0));
__ push(scratch);
__ cmp(r0, r4); // Compare before moving to next argument.
__ sub(r0, r0, Operand(kPointerSize));
__ b(ne, &copy);
@@ -2762,7 +2785,7 @@ void Builtins::Generate_ArgumentsAdaptorTrampoline(MacroAssembler* masm) {
{ // Too few parameters: Actual < expected
__ bind(&too_few);
EnterArgumentsAdaptorFrame(masm);
Generate_StackOverflowCheck(masm, r2, r5, &stack_overflow);
Generate_StackOverflowCheck(masm, r2, scratch, &stack_overflow);
// Calculate copy start address into r0 and copy end address is fp.
// r0: actual number of arguments as a smi
@@ -2778,9 +2801,11 @@ void Builtins::Generate_ArgumentsAdaptorTrampoline(MacroAssembler* masm) {
// r3: new target (passed through to callee)
Label copy;
__ bind(&copy);
// Adjust load for return address and receiver.
__ ldr(ip, MemOperand(r0, 2 * kPointerSize));
__ push(ip);
__ ldr(scratch, MemOperand(r0, 2 * kPointerSize));
__ push(scratch);
__ cmp(r0, fp); // Compare before moving to next argument.
__ sub(r0, r0, Operand(kPointerSize));
__ b(ne, &copy);
@@ -2789,7 +2814,7 @@ void Builtins::Generate_ArgumentsAdaptorTrampoline(MacroAssembler* masm) {
// r1: function
// r2: expected number of arguments
// r3: new target (passed through to callee)
__ LoadRoot(ip, Heap::kUndefinedValueRootIndex);
__ LoadRoot(scratch, Heap::kUndefinedValueRootIndex);
__ sub(r4, fp, Operand(r2, LSL, kPointerSizeLog2));
// Adjust for frame.
__ sub(r4, r4, Operand(StandardFrameConstants::kFixedFrameSizeFromFp +
@@ -2797,7 +2822,7 @@ void Builtins::Generate_ArgumentsAdaptorTrampoline(MacroAssembler* masm) {
Label fill;
__ bind(&fill);
__ push(ip);
__ push(scratch);
__ cmp(sp, r4);
__ b(ne, &fill);
}