From 72c370767226cf573d316655b1d3e3d3d699cc9b Mon Sep 17 00:00:00 2001
From: rmcilroy
Date: Tue, 3 Jan 2017 07:18:21 -0800
Subject: [PATCH] [Ignition] Teach CompileLazy about interpreted functions.

Currently the CompileLazy builtin explicitly checks the SFI for FCG
code. This means that if the SFI has bytecode, we have to go through
the runtime to install the interpreter entry trampoline in the
JSFunction object. Modify the builtin to always put the SFI's code
object into the JSFunction, unless the code on the SFI is the lazy
compile stub as well.

BUG=v8:4380

Review-Url: https://codereview.chromium.org/2583693003
Cr-Commit-Position: refs/heads/master@{#42034}
---
 src/builtins/arm/builtins-arm.cc       | 12 ++++++------
 src/builtins/arm64/builtins-arm64.cc   | 12 ++++++------
 src/builtins/ia32/builtins-ia32.cc     | 12 ++++++------
 src/builtins/mips/builtins-mips.cc     | 12 ++++++------
 src/builtins/mips64/builtins-mips64.cc | 12 ++++++------
 src/builtins/x64/builtins-x64.cc       | 12 ++++++------
 6 files changed, 36 insertions(+), 36 deletions(-)

diff --git a/src/builtins/arm/builtins-arm.cc b/src/builtins/arm/builtins-arm.cc
index a60ab391a9..2fd8541bea 100644
--- a/src/builtins/arm/builtins-arm.cc
+++ b/src/builtins/arm/builtins-arm.cc
@@ -1442,14 +1442,14 @@ void Builtins::Generate_CompileLazy(MacroAssembler* masm) {
                               SharedFunctionInfo::kMarkedForTierUpByteOffset));
   __ tst(r5, Operand(1 << SharedFunctionInfo::kMarkedForTierUpBitWithinByte));
   __ b(ne, &gotta_call_runtime_no_stack);
-  // Is the full code valid?
+
+  // If SFI points to anything other than CompileLazy, install that.
   __ ldr(entry, FieldMemOperand(entry, SharedFunctionInfo::kCodeOffset));
-  __ ldr(r5, FieldMemOperand(entry, Code::kFlagsOffset));
-  __ and_(r5, r5, Operand(Code::KindField::kMask));
-  __ mov(r5, Operand(r5, LSR, Code::KindField::kShift));
-  __ cmp(r5, Operand(Code::BUILTIN));
+  __ Move(r5, masm->CodeObject());
+  __ cmp(entry, r5);
   __ b(eq, &gotta_call_runtime_no_stack);
-  // Yes, install the full code.
+
+  // Install the SFI's code entry.
   __ add(entry, entry, Operand(Code::kHeaderSize - kHeapObjectTag));
   __ str(entry, FieldMemOperand(closure, JSFunction::kCodeEntryOffset));
   __ RecordWriteCodeEntryField(closure, entry, r5);
diff --git a/src/builtins/arm64/builtins-arm64.cc b/src/builtins/arm64/builtins-arm64.cc
index 13a2e33f50..de227f266d 100644
--- a/src/builtins/arm64/builtins-arm64.cc
+++ b/src/builtins/arm64/builtins-arm64.cc
@@ -1435,14 +1435,14 @@ void Builtins::Generate_CompileLazy(MacroAssembler* masm) {
   __ TestAndBranchIfAnySet(
       temp, 1 << SharedFunctionInfo::kMarkedForTierUpBitWithinByte,
       &gotta_call_runtime);
-  // Is the full code valid?
+
+  // If SFI points to anything other than CompileLazy, install that.
   __ Ldr(entry, FieldMemOperand(entry, SharedFunctionInfo::kCodeOffset));
-  __ Ldr(x5, FieldMemOperand(entry, Code::kFlagsOffset));
-  __ and_(x5, x5, Operand(Code::KindField::kMask));
-  __ Mov(x5, Operand(x5, LSR, Code::KindField::kShift));
-  __ Cmp(x5, Operand(Code::BUILTIN));
+  __ Move(temp, masm->CodeObject());
+  __ Cmp(entry, temp);
   __ B(eq, &gotta_call_runtime);
-  // Yes, install the full code.
+
+  // Install the SFI's code entry.
   __ Add(entry, entry, Operand(Code::kHeaderSize - kHeapObjectTag));
   __ Str(entry, FieldMemOperand(closure, JSFunction::kCodeEntryOffset));
   __ RecordWriteCodeEntryField(closure, entry, x5);
diff --git a/src/builtins/ia32/builtins-ia32.cc b/src/builtins/ia32/builtins-ia32.cc
index 0f23ec1ebb..bd638672b1 100644
--- a/src/builtins/ia32/builtins-ia32.cc
+++ b/src/builtins/ia32/builtins-ia32.cc
@@ -1122,14 +1122,14 @@ void Builtins::Generate_CompileLazy(MacroAssembler* masm) {
   __ test_b(FieldOperand(entry, SharedFunctionInfo::kMarkedForTierUpByteOffset),
             Immediate(1 << SharedFunctionInfo::kMarkedForTierUpBitWithinByte));
   __ j(not_zero, &gotta_call_runtime_no_stack);
-  // Is the full code valid?
+
+  // If SFI points to anything other than CompileLazy, install that.
   __ mov(entry, FieldOperand(entry, SharedFunctionInfo::kCodeOffset));
-  __ mov(ebx, FieldOperand(entry, Code::kFlagsOffset));
-  __ and_(ebx, Code::KindField::kMask);
-  __ shr(ebx, Code::KindField::kShift);
-  __ cmp(ebx, Immediate(Code::BUILTIN));
+  __ Move(ebx, masm->CodeObject());
+  __ cmp(entry, ebx);
   __ j(equal, &gotta_call_runtime_no_stack);
-  // Yes, install the full code.
+
+  // Install the SFI's code entry.
   __ lea(entry, FieldOperand(entry, Code::kHeaderSize));
   __ mov(FieldOperand(closure, JSFunction::kCodeEntryOffset), entry);
   __ RecordWriteCodeEntryField(closure, entry, ebx);
diff --git a/src/builtins/mips/builtins-mips.cc b/src/builtins/mips/builtins-mips.cc
index 2586a6c081..fb08099ab3 100644
--- a/src/builtins/mips/builtins-mips.cc
+++ b/src/builtins/mips/builtins-mips.cc
@@ -1452,13 +1452,13 @@ void Builtins::Generate_CompileLazy(MacroAssembler* masm) {
   __ And(t1, t1,
          Operand(1 << SharedFunctionInfo::kMarkedForTierUpBitWithinByte));
   __ Branch(&gotta_call_runtime_no_stack, ne, t1, Operand(zero_reg));
-  // Is the full code valid?
+
+  // If SFI points to anything other than CompileLazy, install that.
   __ lw(entry, FieldMemOperand(entry, SharedFunctionInfo::kCodeOffset));
-  __ lw(t1, FieldMemOperand(entry, Code::kFlagsOffset));
-  __ And(t1, t1, Operand(Code::KindField::kMask));
-  __ srl(t1, t1, Code::KindField::kShift);
-  __ Branch(&gotta_call_runtime_no_stack, eq, t1, Operand(Code::BUILTIN));
-  // Yes, install the full code.
+  __ Move(t1, masm->CodeObject());
+  __ Branch(&gotta_call_runtime_no_stack, eq, entry, Operand(t1));
+
+  // Install the SFI's code entry.
   __ Addu(entry, entry, Operand(Code::kHeaderSize - kHeapObjectTag));
   __ sw(entry, FieldMemOperand(closure, JSFunction::kCodeEntryOffset));
   __ RecordWriteCodeEntryField(closure, entry, t1);
diff --git a/src/builtins/mips64/builtins-mips64.cc b/src/builtins/mips64/builtins-mips64.cc
index bf023c0674..e90801d420 100644
--- a/src/builtins/mips64/builtins-mips64.cc
+++ b/src/builtins/mips64/builtins-mips64.cc
@@ -1443,13 +1443,13 @@ void Builtins::Generate_CompileLazy(MacroAssembler* masm) {
   __ And(a5, a5,
          Operand(1 << SharedFunctionInfo::kMarkedForTierUpBitWithinByte));
   __ Branch(&gotta_call_runtime_no_stack, ne, a5, Operand(zero_reg));
-  // Is the full code valid?
+
+  // If SFI points to anything other than CompileLazy, install that.
   __ ld(entry, FieldMemOperand(entry, SharedFunctionInfo::kCodeOffset));
-  __ lw(a5, FieldMemOperand(entry, Code::kFlagsOffset));
-  __ And(a5, a5, Operand(Code::KindField::kMask));
-  __ dsrl(a5, a5, Code::KindField::kShift);
-  __ Branch(&gotta_call_runtime_no_stack, eq, a5, Operand(Code::BUILTIN));
-  // Yes, install the full code.
+  __ Move(t1, masm->CodeObject());
+  __ Branch(&gotta_call_runtime_no_stack, eq, entry, Operand(t1));
+
+  // Install the SFI's code entry.
   __ Daddu(entry, entry, Operand(Code::kHeaderSize - kHeapObjectTag));
   __ sd(entry, FieldMemOperand(closure, JSFunction::kCodeEntryOffset));
   __ RecordWriteCodeEntryField(closure, entry, a5);
diff --git a/src/builtins/x64/builtins-x64.cc b/src/builtins/x64/builtins-x64.cc
index 0f524e656f..87dfc7d3a6 100644
--- a/src/builtins/x64/builtins-x64.cc
+++ b/src/builtins/x64/builtins-x64.cc
@@ -1076,14 +1076,14 @@ void Builtins::Generate_CompileLazy(MacroAssembler* masm) {
   __ testb(FieldOperand(entry, SharedFunctionInfo::kMarkedForTierUpByteOffset),
            Immediate(1 << SharedFunctionInfo::kMarkedForTierUpBitWithinByte));
   __ j(not_zero, &gotta_call_runtime);
-  // Is the full code valid?
+
+  // If SFI points to anything other than CompileLazy, install that.
   __ movp(entry, FieldOperand(entry, SharedFunctionInfo::kCodeOffset));
-  __ movl(rbx, FieldOperand(entry, Code::kFlagsOffset));
-  __ andl(rbx, Immediate(Code::KindField::kMask));
-  __ shrl(rbx, Immediate(Code::KindField::kShift));
-  __ cmpl(rbx, Immediate(Code::BUILTIN));
+  __ Move(rbx, masm->CodeObject());
+  __ cmpp(entry, rbx);
   __ j(equal, &gotta_call_runtime);
-  // Yes, install the full code.
+
+  // Install the SFI's code entry.
   __ leap(entry, FieldOperand(entry, Code::kHeaderSize));
   __ movp(FieldOperand(closure, JSFunction::kCodeEntryOffset), entry);
   __ RecordWriteCodeEntryField(closure, entry, r15);
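
Note (not part of the patch): every per-architecture hunk above implements the
same fast path. A C++-style sketch of that logic follows; sfi, closure and
set_code_entry are illustrative names for this sketch, not V8's actual API:

  // Pseudocode for Generate_CompileLazy's new fast path.
  Code* code = sfi->code();  // load SharedFunctionInfo::kCodeOffset
  if (code == compile_lazy_builtin) {  // masm->CodeObject() in the patch
    // The SFI itself still points at CompileLazy, so nothing has been
    // compiled yet; fall through to the runtime to compile the function.
    goto gotta_call_runtime;
  }
  // The SFI holds real code (FCG code or the interpreter entry trampoline);
  // install its entry address directly on the JSFunction.
  closure->set_code_entry(reinterpret_cast<Address>(code) +
                          Code::kHeaderSize - kHeapObjectTag);

The old sequence instead loaded Code::kFlagsOffset and bailed out to the
runtime whenever the code's kind was BUILTIN. Comparing the code object
directly against CompileLazy lets the interpreter entry trampoline, which is
also a builtin, be installed without a runtime call.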