[sparkplug] Upstream Sparkplug
Sparkplug is a new baseline, non-optimising second-tier compiler, designed
to fit in the compiler trade-off space between Ignition and
TurboProp/TurboFan.

Design doc: https://docs.google.com/document/d/13c-xXmFOMcpUQNqo66XWQt3u46TsBjXrHrh4c045l-A/edit?usp=sharing

Bug: v8:11420
Change-Id: Ideb7270db3d6548eedd8337a3f596eb6f8fea6b1
Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/2667514
Commit-Queue: Leszek Swirski <leszeks@chromium.org>
Reviewed-by: Michael Stanton <mvstanton@chromium.org>
Reviewed-by: Ross McIlroy <rmcilroy@chromium.org>
Reviewed-by: Jakob Gruber <jgruber@chromium.org>
Reviewed-by: Michael Achenbach <machenbach@chromium.org>
Reviewed-by: Hannes Payer <hpayer@chromium.org>
Cr-Commit-Position: refs/heads/master@{#72686}
This commit is contained in:
parent 27b8ad2077
commit c053419e8c
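
For orientation, a minimal sketch of the tiering trade-off the commit message describes: Sparkplug produces cheap, unoptimised native code from bytecode, sitting between the Ignition interpreter and the optimising tiers. All names and thresholds below are invented for illustration; none of them are real V8 APIs.

// Hypothetical sketch only; invented names/thresholds, not V8 code.
#include <cstdint>

enum class Tier { kIgnition, kSparkplug, kTurboFan };

constexpr int32_t kBaselineBudget = 8;      // invented: tier up early, cheaply
constexpr int32_t kOptimizingBudget = 1000; // invented: optimise only hot code

Tier ChooseTier(int32_t invocations, bool has_baseline_code) {
  if (invocations >= kOptimizingBudget) return Tier::kTurboFan;
  if (has_baseline_code || invocations >= kBaselineBudget)
    return Tier::kSparkplug;  // non-optimising native code, fast to produce
  return Tier::kIgnition;     // keep interpreting bytecode
}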

BUILD.gn | 13
@@ -2520,6 +2520,10 @@ v8_source_set("v8_base_without_compiler") {
    "src/ast/source-range-ast-visitor.h",
    "src/ast/variables.cc",
    "src/ast/variables.h",
    "src/baseline/baseline-compiler.cc",
    "src/baseline/baseline-compiler.h",
    "src/baseline/baseline.cc",
    "src/baseline/baseline.h",
    "src/builtins/accessors.cc",
    "src/builtins/accessors.h",
    "src/builtins/builtins-api.cc",
@@ -3683,6 +3687,7 @@ v8_source_set("v8_base_without_compiler") {

  if (v8_current_cpu == "x86") {
    sources += [ ### gcmole(arch:ia32) ###
      "src/baseline/ia32/baseline-compiler-ia32-inl.h",
      "src/codegen/ia32/assembler-ia32-inl.h",
      "src/codegen/ia32/assembler-ia32.cc",
      "src/codegen/ia32/assembler-ia32.h",
@@ -3709,6 +3714,7 @@ v8_source_set("v8_base_without_compiler") {
    ]
  } else if (v8_current_cpu == "x64") {
    sources += [ ### gcmole(arch:x64) ###
      "src/baseline/x64/baseline-compiler-x64-inl.h",
      "src/codegen/x64/assembler-x64-inl.h",
      "src/codegen/x64/assembler-x64.cc",
      "src/codegen/x64/assembler-x64.h",
@@ -3759,6 +3765,7 @@ v8_source_set("v8_base_without_compiler") {
    }
  } else if (v8_current_cpu == "arm") {
    sources += [ ### gcmole(arch:arm) ###
      "src/baseline/arm/baseline-compiler-arm-inl.h",
      "src/codegen/arm/assembler-arm-inl.h",
      "src/codegen/arm/assembler-arm.cc",
      "src/codegen/arm/assembler-arm.h",
@@ -3790,6 +3797,7 @@ v8_source_set("v8_base_without_compiler") {
    ]
  } else if (v8_current_cpu == "arm64") {
    sources += [ ### gcmole(arch:arm64) ###
      "src/baseline/arm64/baseline-compiler-arm64-inl.h",
      "src/codegen/arm64/assembler-arm64-inl.h",
      "src/codegen/arm64/assembler-arm64.cc",
      "src/codegen/arm64/assembler-arm64.h",
@@ -3849,6 +3857,7 @@ v8_source_set("v8_base_without_compiler") {
    }
  } else if (v8_current_cpu == "mips" || v8_current_cpu == "mipsel") {
    sources += [ ### gcmole(arch:mipsel) ###
      "src/baseline/mips/baseline-compiler-mips-inl.h",
      "src/codegen/mips/assembler-mips-inl.h",
      "src/codegen/mips/assembler-mips.cc",
      "src/codegen/mips/assembler-mips.h",
@@ -3877,6 +3886,7 @@ v8_source_set("v8_base_without_compiler") {
    ]
  } else if (v8_current_cpu == "mips64" || v8_current_cpu == "mips64el") {
    sources += [ ### gcmole(arch:mips64el) ###
      "src/baseline/mips64/baseline-compiler-mips64-inl.h",
      "src/codegen/mips64/assembler-mips64-inl.h",
      "src/codegen/mips64/assembler-mips64.cc",
      "src/codegen/mips64/assembler-mips64.h",
@@ -3905,6 +3915,7 @@ v8_source_set("v8_base_without_compiler") {
    ]
  } else if (v8_current_cpu == "ppc") {
    sources += [ ### gcmole(arch:ppc) ###
      "src/baseline/ppc/baseline-compiler-ppc-inl.h",
      "src/codegen/ppc/assembler-ppc-inl.h",
      "src/codegen/ppc/assembler-ppc.cc",
      "src/codegen/ppc/assembler-ppc.h",
@@ -3936,6 +3947,7 @@ v8_source_set("v8_base_without_compiler") {
    ]
  } else if (v8_current_cpu == "ppc64") {
    sources += [ ### gcmole(arch:ppc64) ###
      "src/baseline/ppc/baseline-compiler-ppc-inl.h",
      "src/codegen/ppc/assembler-ppc-inl.h",
      "src/codegen/ppc/assembler-ppc.cc",
      "src/codegen/ppc/assembler-ppc.h",
@@ -3967,6 +3979,7 @@ v8_source_set("v8_base_without_compiler") {
    ]
  } else if (v8_current_cpu == "s390" || v8_current_cpu == "s390x") {
    sources += [ ### gcmole(arch:s390) ###
      "src/baseline/s390/baseline-compiler-s390-inl.h",
      "src/codegen/s390/assembler-s390-inl.h",
      "src/codegen/s390/assembler-s390.cc",
      "src/codegen/s390/assembler-s390.h",

src/baseline/DEPS | 5 (new file)
@@ -0,0 +1,5 @@
specific_include_rules = {
  "baseline-compiler\.h": [
    "+src/interpreter/interpreter-intrinsics.h",
  ],
}

src/baseline/OWNERS | 6 (new file)
@@ -0,0 +1,6 @@
cbruni@chromium.org
leszeks@chromium.org
marja@chromium.org
pthier@chromium.org
verwaest@chromium.org
victorgomes@chromium.org

src/baseline/arm64/baseline-compiler-arm64-inl.h | 619 (new file)
@@ -0,0 +1,619 @@
// Copyright 2021 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#ifndef V8_BASELINE_ARM64_BASELINE_COMPILER_ARM64_INL_H_
#define V8_BASELINE_ARM64_BASELINE_COMPILER_ARM64_INL_H_

#include "src/baseline/baseline-compiler.h"

namespace v8 {
namespace internal {
namespace baseline {

class BaselineAssembler::ScratchRegisterScope {
 public:
  explicit ScratchRegisterScope(BaselineAssembler* assembler)
      : assembler_(assembler),
        prev_scope_(assembler->scratch_register_scope_),
        wrapped_scope_(assembler->masm()) {
    if (!assembler_->scratch_register_scope_) {
      // If we haven't opened a scratch scope yet, for the first one add a
      // couple of extra registers.
      wrapped_scope_.Include(x14, x15);
    }
    assembler_->scratch_register_scope_ = this;
  }
  ~ScratchRegisterScope() { assembler_->scratch_register_scope_ = prev_scope_; }

  Register AcquireScratch() { return wrapped_scope_.AcquireX(); }

 private:
  BaselineAssembler* assembler_;
  ScratchRegisterScope* prev_scope_;
  UseScratchRegisterScope wrapped_scope_;
};
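
// Annotation (not part of the commit): a usage sketch. Scopes nest, and the
// outermost scope also widens the scratch pool with x14/x15, so code like
//
//   BaselineAssembler::ScratchRegisterScope temps(&basm);
//   Register a = temps.AcquireScratch();      // from the wrapped arm64 pool
//   {
//     BaselineAssembler::ScratchRegisterScope inner(&basm);
//     Register b = inner.AcquireScratch();    // distinct from `a`
//   }                                         // `b` released here
//
// is safe, because UseScratchRegisterScope hands registers back on scope exit.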

// TODO(v8:11429,leszeks): Unify condition names in the MacroAssembler.
enum class Condition : uint8_t {
  kEqual = eq,
  kNotEqual = ne,

  kLessThan = lt,
  kGreaterThan = gt,
  kLessThanEqual = le,
  kGreaterThanEqual = ge,

  kUnsignedLessThan = lo,
  kUnsignedGreaterThan = hi,
  kUnsignedLessThanEqual = ls,
  kUnsignedGreaterThanEqual = hs,

  kOverflow = vs,
  kNoOverflow = vc,

  kZero = eq,
  kNotZero = ne,
};

internal::Condition AsMasmCondition(Condition cond) {
  return static_cast<internal::Condition>(cond);
}

namespace detail {

#ifdef DEBUG
bool Clobbers(Register target, MemOperand op) {
  return op.base() == target || op.regoffset() == target;
}
#endif

}  // namespace detail

#define __ masm_->

MemOperand BaselineAssembler::RegisterFrameOperand(
    interpreter::Register interpreter_register) {
  return MemOperand(fp, interpreter_register.ToOperand() * kSystemPointerSize);
}

void BaselineAssembler::Jump(Label* target, Label::Distance distance) {
  __ B(target);
}
void BaselineAssembler::JumpIf(Condition cc, Label* target, Label::Distance) {
  __ B(AsMasmCondition(cc), target);
}
void BaselineAssembler::JumpIfRoot(Register value, RootIndex index,
                                   Label* target, Label::Distance) {
  __ JumpIfRoot(value, index, target);
}
void BaselineAssembler::JumpIfNotRoot(Register value, RootIndex index,
                                      Label* target, Label::Distance) {
  __ JumpIfNotRoot(value, index, target);
}
void BaselineAssembler::JumpIfSmi(Register value, Label* target,
                                  Label::Distance) {
  __ JumpIfSmi(value, target);
}
void BaselineAssembler::JumpIfNotSmi(Register value, Label* target,
                                     Label::Distance) {
  __ JumpIfNotSmi(value, target);
}

void BaselineAssembler::CallBuiltin(Builtins::Name builtin) {
  ScratchRegisterScope temps(this);
  Register temp = temps.AcquireScratch();
  __ LoadEntryFromBuiltinIndex(builtin, temp);
  __ Call(temp);
}

void BaselineAssembler::TailCallBuiltin(Builtins::Name builtin) {
  ScratchRegisterScope temps(this);
  Register temp = temps.AcquireScratch();
  __ LoadEntryFromBuiltinIndex(builtin, temp);
  __ Jump(temp);
}

void BaselineAssembler::Test(Register value, int mask) {
  __ Tst(value, Immediate(mask));
}

void BaselineAssembler::CmpObjectType(Register object,
                                      InstanceType instance_type,
                                      Register map) {
  ScratchRegisterScope temps(this);
  Register type = temps.AcquireScratch();
  __ CompareObjectType(object, map, type, instance_type);
}
void BaselineAssembler::CmpInstanceType(Register value,
                                        InstanceType instance_type) {
  ScratchRegisterScope temps(this);
  Register type = temps.AcquireScratch();
  __ CompareInstanceType(value, type, instance_type);
}
void BaselineAssembler::Cmp(Register value, Smi smi) { __ Cmp(value, smi); }
void BaselineAssembler::ComparePointer(Register value, MemOperand operand) {
  ScratchRegisterScope temps(this);
  Register tmp = temps.AcquireScratch();
  __ Ldr(tmp, operand);
  __ Cmp(value, tmp);
}
void BaselineAssembler::SmiCompare(Register lhs, Register rhs) {
  __ AssertSmi(lhs);
  __ AssertSmi(rhs);
  __ CmpTagged(lhs, rhs);
}
void BaselineAssembler::CompareTagged(Register value, MemOperand operand) {
  ScratchRegisterScope temps(this);
  Register tmp = temps.AcquireScratch();
  __ Ldr(tmp, operand);
  __ CmpTagged(value, tmp);
}
void BaselineAssembler::CompareTagged(MemOperand operand, Register value) {
  ScratchRegisterScope temps(this);
  Register tmp = temps.AcquireScratch();
  __ Ldr(tmp, operand);
  __ CmpTagged(tmp, value);
}
void BaselineAssembler::CompareByte(Register value, int32_t byte) {
  __ Cmp(value, Immediate(byte));
}

void BaselineAssembler::Move(interpreter::Register output, Register source) {
  Move(RegisterFrameOperand(output), source);
}
void BaselineAssembler::Move(Register output, TaggedIndex value) {
  __ Mov(output, Immediate(value.ptr()));
}
void BaselineAssembler::Move(MemOperand output, Register source) {
  __ Str(source, output);
}
void BaselineAssembler::Move(Register output, ExternalReference reference) {
  __ Mov(output, Operand(reference));
}
void BaselineAssembler::Move(Register output, Handle<HeapObject> value) {
  __ Mov(output, Operand(value));
}
void BaselineAssembler::Move(Register output, int32_t value) {
  __ Mov(output, Immediate(value));
}
void BaselineAssembler::MoveMaybeSmi(Register output, Register source) {
  __ Mov(output, source);
}
void BaselineAssembler::MoveSmi(Register output, Register source) {
  __ Mov(output, source);
}

namespace detail {

template <typename Arg>
Register ToRegister(BaselineAssembler* basm,
                    BaselineAssembler::ScratchRegisterScope* scope, Arg arg) {
  Register reg = scope->AcquireScratch();
  basm->Move(reg, arg);
  return reg;
}
Register ToRegister(BaselineAssembler* basm,
                    BaselineAssembler::ScratchRegisterScope* scope,
                    Register reg) {
  return reg;
}

template <typename... Args>
struct CountPushHelper;
template <>
struct CountPushHelper<> {
  static int Count() { return 0; }
};
template <typename Arg, typename... Args>
struct CountPushHelper<Arg, Args...> {
  static int Count(Arg arg, Args... args) {
    return 1 + CountPushHelper<Args...>::Count(args...);
  }
};
template <typename... Args>
struct CountPushHelper<interpreter::RegisterList, Args...> {
  static int Count(interpreter::RegisterList list, Args... args) {
    return list.register_count() + CountPushHelper<Args...>::Count(args...);
  }
};

template <typename... Args>
struct PushAllHelper;
template <typename... Args>
void PushAll(BaselineAssembler* basm, Args... args) {
  PushAllHelper<Args...>::Push(basm, args...);
}
template <typename... Args>
void PushAllReverse(BaselineAssembler* basm, Args... args) {
  PushAllHelper<Args...>::PushReverse(basm, args...);
}

template <>
struct PushAllHelper<> {
  static void Push(BaselineAssembler* basm) {}
  static void PushReverse(BaselineAssembler* basm) {}
};
template <typename Arg>
struct PushAllHelper<Arg> {
  static void Push(BaselineAssembler* basm, Arg) { FATAL("Unaligned push"); }
  static void PushReverse(BaselineAssembler* basm, Arg arg) {
    // Push the padding register to round up the number of values pushed.
    return PushAllReverse(basm, arg, padreg);
  }
};
template <typename Arg1, typename Arg2, typename... Args>
struct PushAllHelper<Arg1, Arg2, Args...> {
  static void Push(BaselineAssembler* basm, Arg1 arg1, Arg2 arg2,
                   Args... args) {
    {
      BaselineAssembler::ScratchRegisterScope scope(basm);
      basm->masm()->Push(ToRegister(basm, &scope, arg1),
                         ToRegister(basm, &scope, arg2));
    }
    PushAll(basm, args...);
  }
  static void PushReverse(BaselineAssembler* basm, Arg1 arg1, Arg2 arg2,
                          Args... args) {
    PushAllReverse(basm, args...);
    {
      BaselineAssembler::ScratchRegisterScope scope(basm);
      basm->masm()->Push(ToRegister(basm, &scope, arg2),
                         ToRegister(basm, &scope, arg1));
    }
  }
};
// Currently RegisterLists are always the last argument, so we don't
// specialize for the case where they're not. We do still specialize for the
// aligned and unaligned cases.
template <typename Arg>
struct PushAllHelper<Arg, interpreter::RegisterList> {
  static void Push(BaselineAssembler* basm, Arg arg,
                   interpreter::RegisterList list) {
    DCHECK_EQ(list.register_count() % 2, 1);
    PushAll(basm, arg, list[0], list.PopLeft());
  }
  static void PushReverse(BaselineAssembler* basm, Arg arg,
                          interpreter::RegisterList list) {
    if (list.register_count() == 0) {
      PushAllReverse(basm, arg);
    } else {
      PushAllReverse(basm, arg, list[0], list.PopLeft());
    }
  }
};
template <>
struct PushAllHelper<interpreter::RegisterList> {
  static void Push(BaselineAssembler* basm, interpreter::RegisterList list) {
    DCHECK_EQ(list.register_count() % 2, 0);
    for (int reg_index = 0; reg_index < list.register_count(); reg_index += 2) {
      PushAll(basm, list[reg_index], list[reg_index + 1]);
    }
  }
  static void PushReverse(BaselineAssembler* basm,
                          interpreter::RegisterList list) {
    int reg_index = list.register_count() - 1;
    if (reg_index % 2 == 0) {
      // Push the padding register to round up the number of values pushed.
      PushAllReverse(basm, list[reg_index], padreg);
      reg_index--;
    }
    for (; reg_index >= 1; reg_index -= 2) {
      PushAllReverse(basm, list[reg_index - 1], list[reg_index]);
    }
  }
};

template <typename... T>
struct PopAllHelper;
template <>
struct PopAllHelper<> {
  static void Pop(BaselineAssembler* basm) {}
};
template <>
struct PopAllHelper<Register> {
  static void Pop(BaselineAssembler* basm, Register reg) {
    basm->masm()->Pop(reg, padreg);
  }
};
template <typename... T>
struct PopAllHelper<Register, Register, T...> {
  static void Pop(BaselineAssembler* basm, Register reg1, Register reg2,
                  T... tail) {
    basm->masm()->Pop(reg1, reg2);
    PopAllHelper<T...>::Pop(basm, tail...);
  }
};

}  // namespace detail

template <typename... T>
int BaselineAssembler::Push(T... vals) {
  // We have to count the pushes first, to decide whether to add padding before
  // the first push.
  int push_count = detail::CountPushHelper<T...>::Count(vals...);
  if (push_count % 2 == 0) {
    detail::PushAll(this, vals...);
  } else {
    detail::PushAll(this, padreg, vals...);
  }
  return push_count;
}
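
// Annotation (not part of the commit): a worked example of the padding rule.
// Push(x0, x1, x2) counts three values, so padreg is pushed up front:
//   Push(padreg, x0); Push(x1, x2);   // four slots, sp stays 16-byte aligned
// An even count, e.g. Push(x0, x1), needs no pad. After padding, pushes
// always proceed in pairs, so the one-argument PushAllHelper<Arg>::Push is
// unreachable and deliberately FATALs.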

template <typename... T>
void BaselineAssembler::PushReverse(T... vals) {
  detail::PushAllReverse(this, vals...);
}

template <typename... T>
void BaselineAssembler::Pop(T... registers) {
  detail::PopAllHelper<T...>::Pop(this, registers...);
}

void BaselineAssembler::LoadTaggedPointerField(Register output, Register source,
                                               int offset) {
  __ LoadTaggedPointerField(output, FieldMemOperand(source, offset));
}
void BaselineAssembler::LoadTaggedSignedField(Register output, Register source,
                                              int offset) {
  __ LoadTaggedSignedField(output, FieldMemOperand(source, offset));
}
void BaselineAssembler::LoadTaggedAnyField(Register output, Register source,
                                           int offset) {
  __ LoadAnyTaggedField(output, FieldMemOperand(source, offset));
}
void BaselineAssembler::LoadByteField(Register output, Register source,
                                      int offset) {
  __ Ldrb(output, FieldMemOperand(source, offset));
}
void BaselineAssembler::StoreTaggedSignedField(Register target, int offset,
                                               Smi value) {
  ScratchRegisterScope temps(this);
  Register tmp = temps.AcquireScratch();
  __ Mov(tmp, Operand(value));
  __ StoreTaggedField(tmp, FieldMemOperand(target, offset));
}
void BaselineAssembler::StoreTaggedFieldWithWriteBarrier(Register target,
                                                         int offset,
                                                         Register value) {
  __ StoreTaggedField(value, FieldMemOperand(target, offset));
  __ RecordWriteField(target, offset, value, kLRHasNotBeenSaved,
                      kDontSaveFPRegs);
}
void BaselineAssembler::StoreTaggedFieldNoWriteBarrier(Register target,
                                                       int offset,
                                                       Register value) {
  __ StoreTaggedField(value, FieldMemOperand(target, offset));
}

void BaselineAssembler::AddToInterruptBudget(int32_t weight) {
  ScratchRegisterScope scratch_scope(this);
  Register feedback_cell = scratch_scope.AcquireScratch();
  LoadFunction(feedback_cell);
  LoadTaggedPointerField(feedback_cell, feedback_cell,
                         JSFunction::kFeedbackCellOffset);

  Register interrupt_budget = scratch_scope.AcquireScratch().W();
  __ Ldr(interrupt_budget,
         FieldMemOperand(feedback_cell, FeedbackCell::kInterruptBudgetOffset));
  // Remember to set flags as part of the add!
  __ Adds(interrupt_budget, interrupt_budget, weight);
  __ Str(interrupt_budget,
         FieldMemOperand(feedback_cell, FeedbackCell::kInterruptBudgetOffset));
}

void BaselineAssembler::AddToInterruptBudget(Register weight) {
  ScratchRegisterScope scratch_scope(this);
  Register feedback_cell = scratch_scope.AcquireScratch();
  LoadFunction(feedback_cell);
  LoadTaggedPointerField(feedback_cell, feedback_cell,
                         JSFunction::kFeedbackCellOffset);

  Register interrupt_budget = scratch_scope.AcquireScratch().W();
  __ Ldr(interrupt_budget,
         FieldMemOperand(feedback_cell, FeedbackCell::kInterruptBudgetOffset));
  // Remember to set flags as part of the add!
  __ Adds(interrupt_budget, interrupt_budget, weight.W());
  __ Str(interrupt_budget,
         FieldMemOperand(feedback_cell, FeedbackCell::kInterruptBudgetOffset));
}
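
// Annotation (not part of the commit): Adds (rather than Add) updates the
// NZCV flags, so callers can branch on the sign of the new budget without a
// separate compare. EmitReturn below relies on exactly this, following the
// budget update with JumpIf(Condition::kGreaterThanEqual, ...).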

void BaselineAssembler::AddSmi(Register lhs, Smi rhs) {
  if (SmiValuesAre31Bits()) {
    __ Add(lhs.W(), lhs.W(), Immediate(rhs));
  } else {
    DCHECK(lhs.IsX());
    __ Add(lhs, lhs, Immediate(rhs));
  }
}

void BaselineAssembler::Switch(Register reg, int case_value_base,
                               Label** labels, int num_labels) {
  Label fallthrough;
  if (case_value_base > 0) {
    __ Sub(reg, reg, Immediate(case_value_base));
  }

  // Mostly copied from code-generator-arm64.cc
  ScratchRegisterScope scope(this);
  Register temp = scope.AcquireScratch();
  Label table;
  __ Cmp(reg, num_labels);
  JumpIf(Condition::kUnsignedGreaterThanEqual, &fallthrough);
  __ Adr(temp, &table);
  int entry_size_log2 = 2;
#ifdef V8_ENABLE_CONTROL_FLOW_INTEGRITY
  ++entry_size_log2;  // Account for BTI.
#endif
  __ Add(temp, temp, Operand(reg, UXTW, entry_size_log2));
  __ Br(temp);
  {
    TurboAssembler::BlockPoolsScope block_pools(masm_, num_labels * kInstrSize);
    __ Bind(&table);
    for (int i = 0; i < num_labels; ++i) {
      __ JumpTarget();
      __ B(labels[i]);
    }
    __ JumpTarget();
    __ Bind(&fallthrough);
  }
}
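
// Annotation (not part of the commit): each jump-table entry is a single B
// instruction, i.e. 4 bytes, hence entry_size_log2 == 2. With control-flow
// integrity enabled, every entry also gets a BTI landing pad (the JumpTarget
// calls above), doubling the entry to 8 bytes; that is why entry_size_log2
// is incremented and why `reg` is scaled by it when computing the target.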

#undef __

#define __ basm_.

void BaselineCompiler::Prologue() {
  __ masm()->Mov(kInterpreterBytecodeArrayRegister, Operand(bytecode_));
  DCHECK_EQ(kJSFunctionRegister, kJavaScriptCallTargetRegister);
  // Enter the frame here, since CallBuiltin will override lr.
  __ masm()->EnterFrame(StackFrame::MANUAL);
  CallBuiltin(Builtins::kBaselineOutOfLinePrologue, kContextRegister,
              kJSFunctionRegister, kJavaScriptCallArgCountRegister,
              kInterpreterBytecodeArrayRegister);

  __ masm()->AssertSpAligned();
  PrologueFillFrame();
  __ masm()->AssertSpAligned();
}

void BaselineCompiler::PrologueFillFrame() {
  __ RecordComment("[ Fill frame");
  // Inlined register frame fill
  interpreter::Register new_target_or_generator_register =
      bytecode_->incoming_new_target_or_generator_register();
  __ LoadRoot(kInterpreterAccumulatorRegister, RootIndex::kUndefinedValue);
  int register_count = bytecode_->register_count();
  // Magic value
  const int kLoopUnrollSize = 8;
  const int new_target_index = new_target_or_generator_register.index();
  const bool has_new_target = new_target_index != kMaxInt;
  // BaselineOutOfLinePrologue already pushed one undefined.
  int i = 1;
  if (has_new_target) {
    if (new_target_index == 0) {
      // Oops, need to fix up that undefined that BaselineOutOfLinePrologue
      // pushed.
      __ masm()->Poke(kJavaScriptCallNewTargetRegister, Operand(0));
    } else {
      DCHECK_LE(new_target_index, register_count);
      for (; i + 2 <= new_target_index; i += 2) {
        __ masm()->Push(kInterpreterAccumulatorRegister,
                        kInterpreterAccumulatorRegister);
      }
      if (i == new_target_index) {
        __ masm()->Push(kJavaScriptCallNewTargetRegister,
                        kInterpreterAccumulatorRegister);
      } else {
        DCHECK_EQ(i, new_target_index + 1);
        __ masm()->Push(kInterpreterAccumulatorRegister,
                        kJavaScriptCallNewTargetRegister);
      }
      i += 2;
    }
  }
  if (register_count < 2 * kLoopUnrollSize) {
    // If the frame is small enough, just unroll the frame fill completely.
    for (; i < register_count; i += 2) {
      __ masm()->Push(kInterpreterAccumulatorRegister,
                      kInterpreterAccumulatorRegister);
    }
  } else {
    BaselineAssembler::ScratchRegisterScope temps(&basm_);
    Register scratch = temps.AcquireScratch();

    register_count -= i;
    i = 0;
    // Extract the first few registers to round to the unroll size.
    int first_registers = register_count % kLoopUnrollSize;
    for (; i < first_registers; i += 2) {
      __ masm()->Push(kInterpreterAccumulatorRegister,
                      kInterpreterAccumulatorRegister);
    }
    __ Move(scratch, register_count / kLoopUnrollSize);
    // We enter the loop unconditionally, so make sure we need to loop at least
    // once.
    DCHECK_GT(register_count / kLoopUnrollSize, 0);
    Label loop;
    __ Bind(&loop);
    for (int j = 0; j < kLoopUnrollSize; j += 2) {
      __ masm()->Push(kInterpreterAccumulatorRegister,
                      kInterpreterAccumulatorRegister);
    }
    __ masm()->Subs(scratch, scratch, 1);
    __ JumpIf(Condition::kGreaterThan, &loop);
  }
  __ RecordComment("]");
}
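
// Annotation (not part of the commit): a worked example of the fill loop.
// If 20 registers remain to be filled once the already-pushed slots are
// subtracted, then 20 % 8 == 4 slots are pushed by the unrolled prelude
// (two paired pushes), and the loop body pushes 8 undefineds per iteration
// for 20 / 8 == 2 iterations, counted down in `scratch`.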

void BaselineCompiler::VerifyFrameSize() {
  __ masm()->Add(x15, sp,
                 RoundUp(InterpreterFrameConstants::kFixedFrameSizeFromFp +
                             bytecode_->frame_size(),
                         2 * kSystemPointerSize));
  __ masm()->Cmp(x15, fp);
  __ masm()->Assert(eq, AbortReason::kUnexpectedStackPointer);
}
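
// Annotation (not part of the commit): the expected frame size is rounded up
// to 2 * kSystemPointerSize == 16 bytes because the arm64 stack pointer must
// stay 16-byte aligned; sp plus the rounded size should land exactly on fp.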

#undef __

#define __ basm.

void BaselineAssembler::EmitReturn(MacroAssembler* masm) {
  BaselineAssembler basm(masm);

  Register weight = BaselineLeaveFrameDescriptor::WeightRegister();
  Register params_size = BaselineLeaveFrameDescriptor::ParamsSizeRegister();

  __ RecordComment("[ Update Interrupt Budget");
  __ AddToInterruptBudget(weight);

  // Use compare flags set by add
  Label skip_interrupt_label;
  __ JumpIf(Condition::kGreaterThanEqual, &skip_interrupt_label);
  {
    __ masm()->SmiTag(params_size);
    __ masm()->Push(params_size, kInterpreterAccumulatorRegister);

    __ LoadContext(kContextRegister);
    __ LoadFunction(kJSFunctionRegister);
    __ masm()->PushArgument(kJSFunctionRegister);
    __ CallRuntime(Runtime::kBytecodeBudgetInterruptFromBytecode, 1);

    __ masm()->Pop(kInterpreterAccumulatorRegister, params_size);
    __ masm()->SmiUntag(params_size);
  }
  __ RecordComment("]");

  __ Bind(&skip_interrupt_label);

  BaselineAssembler::ScratchRegisterScope temps(&basm);
  Register actual_params_size = temps.AcquireScratch();
  // Compute the size of the actual parameters + receiver (in bytes).
  __ Move(actual_params_size,
          MemOperand(fp, StandardFrameConstants::kArgCOffset));

  // If actual is bigger than formal, then we should use it to free up the
  // stack arguments.
  Label corrected_args_count;
  __ masm()->Cmp(params_size, actual_params_size);
  __ JumpIf(Condition::kGreaterThanEqual, &corrected_args_count);
  __ masm()->Mov(params_size, actual_params_size);
  __ Bind(&corrected_args_count);

  // Leave the frame (also dropping the register file).
  __ masm()->LeaveFrame(StackFrame::MANUAL);

  // Drop receiver + arguments.
  __ masm()->Add(params_size, params_size, 1);  // Include the receiver.
  __ masm()->DropArguments(params_size);
  __ masm()->Ret();
}

#undef __

}  // namespace baseline
}  // namespace internal
}  // namespace v8

#endif  // V8_BASELINE_ARM64_BASELINE_COMPILER_ARM64_INL_H_

src/baseline/baseline-compiler.cc | 2277 (new file)
(File diff suppressed because it is too large.)

src/baseline/baseline-compiler.h | 356 (new file)
@@ -0,0 +1,356 @@
// Copyright 2021 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#ifndef V8_BASELINE_BASELINE_COMPILER_H_
#define V8_BASELINE_BASELINE_COMPILER_H_

// TODO(v8:11421): Remove #if once baseline compiler is ported to other
// architectures.
#if V8_TARGET_ARCH_X64 || V8_TARGET_ARCH_ARM64

#include <unordered_map>

#include "src/base/logging.h"
#include "src/codegen/macro-assembler.h"
#include "src/handles/handles.h"
#include "src/interpreter/bytecode-array-iterator.h"
#include "src/interpreter/bytecode-register.h"
#include "src/interpreter/interpreter-intrinsics.h"
#include "src/logging/counters.h"
#include "src/objects/map.h"
#include "src/objects/tagged-index.h"
#include "src/zone/zone-containers.h"

namespace v8 {
namespace internal {

class BytecodeArray;

namespace baseline {

enum class Condition : uint8_t;

class BytecodeOffsetTableBuilder {
 public:
  void AddPosition(size_t pc_offset, size_t bytecode_offset) {
    WriteUint(pc_offset - previous_pc_);
    WriteUint(bytecode_offset - previous_bytecode_);
    previous_pc_ = pc_offset;
    previous_bytecode_ = bytecode_offset;
  }

  template <typename LocalIsolate>
  Handle<ByteArray> ToBytecodeOffsetTable(LocalIsolate* isolate);

 private:
  void WriteUint(size_t value) {
    bool has_next;
    do {
      uint8_t byte = value & ((1 << 7) - 1);
      value >>= 7;
      has_next = value != 0;
      byte |= (has_next << 7);
      bytes_.push_back(byte);
    } while (has_next);
  }

  size_t previous_pc_ = 0;
  size_t previous_bytecode_ = 0;
  std::vector<byte> bytes_;
};
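
// Annotation (not part of the commit): WriteUint emits a base-128 varint,
// low seven bits first, with the top bit of each byte flagging "more bytes
// follow". For example, WriteUint(300):
//   300 & 0x7f = 0x2c, 300 >> 7 = 2 (non-zero), so emit 0x2c | 0x80 = 0xac
//   2   & 0x7f = 0x02, 2   >> 7 = 0,            so emit 0x02
// appending {0xac, 0x02} to bytes_. AddPosition stores deltas, so offsets
// that grow slowly cost one byte per entry.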

class BaselineAssembler {
 public:
  class ScratchRegisterScope;

  explicit BaselineAssembler(MacroAssembler* masm) : masm_(masm) {}
  static MemOperand RegisterFrameOperand(
      interpreter::Register interpreter_register);
  MemOperand ContextOperand();
  MemOperand FunctionOperand();
  MemOperand FeedbackVectorOperand();

  void GetCode(Isolate* isolate, CodeDesc* desc);
  int pc_offset() const;
  bool emit_debug_code() const;
  void RecordComment(const char* string);
  void Trap();
  void DebugBreak();

  void Bind(Label* label);
  void JumpIf(Condition cc, Label* target,
              Label::Distance distance = Label::kFar);
  void Jump(Label* target, Label::Distance distance = Label::kFar);
  void JumpIfRoot(Register value, RootIndex index, Label* target,
                  Label::Distance distance = Label::kFar);
  void JumpIfNotRoot(Register value, RootIndex index, Label* target,
                     Label::Distance distance = Label::kFar);
  void JumpIfSmi(Register value, Label* target,
                 Label::Distance distance = Label::kFar);
  void JumpIfNotSmi(Register value, Label* target,
                    Label::Distance distance = Label::kFar);

  void Test(Register value, int mask);

  void CmpObjectType(Register object, InstanceType instance_type, Register map);
  void CmpInstanceType(Register value, InstanceType instance_type);
  void Cmp(Register value, Smi smi);
  void ComparePointer(Register value, MemOperand operand);
  Condition CheckSmi(Register value);
  void SmiCompare(Register lhs, Register rhs);
  void CompareTagged(Register value, MemOperand operand);
  void CompareTagged(MemOperand operand, Register value);
  void CompareByte(Register value, int32_t byte);

  void LoadMap(Register output, Register value);
  void LoadRoot(Register output, RootIndex index);
  void LoadNativeContextSlot(Register output, uint32_t index);

  void Move(Register output, Register source);
  void Move(Register output, MemOperand operand);
  void Move(Register output, Smi value);
  void Move(Register output, TaggedIndex value);
  void Move(Register output, interpreter::Register source);
  void Move(interpreter::Register output, Register source);
  void Move(Register output, RootIndex source);
  void Move(MemOperand output, Register source);
  void Move(Register output, ExternalReference reference);
  void Move(Register output, Handle<HeapObject> value);
  void Move(Register output, int32_t immediate);
  void MoveMaybeSmi(Register output, Register source);
  void MoveSmi(Register output, Register source);

  // Push the given values, in the given order. If the stack needs alignment
  // (looking at you Arm64), the stack is padded from the front (i.e. before
  // the first value is pushed).
  //
  // This supports pushing a RegisterList as the last value -- the list is
  // iterated and each interpreter Register is pushed.
  //
  // The total number of values pushed is returned. Note that this might be
  // different from sizeof(T...), specifically if there was a RegisterList.
  template <typename... T>
  int Push(T... vals);

  // Like Push(vals...), but pushes in reverse order, to support our reversed
  // order argument JS calling convention. Doesn't return the number of
  // arguments pushed though.
  //
  // Note that padding is still inserted before the first pushed value (i.e.
  // the last value).
  template <typename... T>
  void PushReverse(T... vals);

  // Pop values off the stack into the given registers.
  //
  // Note that this inserts into registers in the given order, i.e. in reverse
  // order if the registers were pushed. This means that to spill registers,
  // push and pop have to be in reverse order, e.g.
  //
  //     Push(r1, r2, ..., rN);
  //     ClobberRegisters();
  //     Pop(rN, ..., r2, r1);
  //
  // On stack-alignment architectures, any padding is popped off after the
  // last register. This is the behaviour of Push, which means that the above
  // code still works even if the number of registers doesn't match stack
  // alignment.
  template <typename... T>
  void Pop(T... registers);

  void CallBuiltin(Builtins::Name builtin);
  void TailCallBuiltin(Builtins::Name builtin);
  void CallRuntime(Runtime::FunctionId function, int nargs);

  void LoadTaggedPointerField(Register output, Register source, int offset);
  void LoadTaggedSignedField(Register output, Register source, int offset);
  void LoadTaggedAnyField(Register output, Register source, int offset);
  void LoadByteField(Register output, Register source, int offset);
  void StoreTaggedSignedField(Register target, int offset, Smi value);
  void StoreTaggedFieldWithWriteBarrier(Register target, int offset,
                                        Register value);
  void StoreTaggedFieldNoWriteBarrier(Register target, int offset,
                                      Register value);
  void LoadFixedArrayElement(Register output, Register array, int32_t index);
  void LoadPrototype(Register prototype, Register object);

  // Loads the feedback cell from the function, and sets flags on the add so
  // that we can compare afterward.
  void AddToInterruptBudget(int32_t weight);
  void AddToInterruptBudget(Register weight);

  void AddSmi(Register lhs, Smi rhs);
  void SmiUntag(Register value);
  void SmiUntag(Register output, Register value);

  void Switch(Register reg, int case_value_base, Label** labels,
              int num_labels);

  // Register operands.
  void LoadRegister(Register output, interpreter::Register source);
  void StoreRegister(interpreter::Register output, Register value);

  // Frame values
  void LoadFunction(Register output);
  void LoadContext(Register output);
  void StoreContext(Register context);

  static void EmitReturn(MacroAssembler* masm);

  MacroAssembler* masm() { return masm_; }

 private:
  MacroAssembler* masm_;
  ScratchRegisterScope* scratch_register_scope_ = nullptr;
};

class SaveAccumulatorScope final {
 public:
  explicit SaveAccumulatorScope(BaselineAssembler* assembler)
      : assembler_(assembler) {
    assembler_->Push(kInterpreterAccumulatorRegister);
  }

  ~SaveAccumulatorScope() { assembler_->Pop(kInterpreterAccumulatorRegister); }

 private:
  BaselineAssembler* assembler_;
};

class BaselineCompiler {
 public:
  explicit BaselineCompiler(Isolate* isolate,
                            Handle<SharedFunctionInfo> shared_function_info,
                            Handle<BytecodeArray> bytecode);

  void GenerateCode();
  Handle<Code> Build(Isolate* isolate);

 private:
  void Prologue();
  void PrologueFillFrame();
  void PrologueHandleOptimizationState(Register feedback_vector);

  void PreVisitSingleBytecode();
  void VisitSingleBytecode();

  void VerifyFrame();
  void VerifyFrameSize();

  // Register operands.
  interpreter::Register RegisterOperand(int operand_index);
  void LoadRegister(Register output, int operand_index);
  void StoreRegister(int operand_index, Register value);
  void StoreRegisterPair(int operand_index, Register val0, Register val1);

  // Constant pool operands.
  template <typename Type>
  Handle<Type> Constant(int operand_index);
  Smi ConstantSmi(int operand_index);
  template <typename Type>
  void LoadConstant(Register output, int operand_index);

  // Immediate value operands.
  uint32_t Uint(int operand_index);
  int32_t Int(int operand_index);
  uint32_t Index(int operand_index);
  uint32_t Flag(int operand_index);
  uint32_t RegisterCount(int operand_index);
  TaggedIndex IndexAsTagged(int operand_index);
  TaggedIndex UintAsTagged(int operand_index);
  Smi IndexAsSmi(int operand_index);
  Smi IntAsSmi(int operand_index);
  Smi FlagAsSmi(int operand_index);

  // Jump helpers.
  Label* NewLabel();
  Label* BuildForwardJumpLabel();
  void UpdateInterruptBudgetAndJumpToLabel(int weight, Label* label,
                                           Label* skip_interrupt_label);
  void UpdateInterruptBudgetAndDoInterpreterJump();
  void UpdateInterruptBudgetAndDoInterpreterJumpIfRoot(RootIndex root);
  void UpdateInterruptBudgetAndDoInterpreterJumpIfNotRoot(RootIndex root);

  // Feedback vector.
  MemOperand FeedbackVector();
  void LoadFeedbackVector(Register output);
  void LoadClosureFeedbackArray(Register output, Register closure);

  // Position mapping.
  void AddPosition();

  // Misc. helpers.

  // Select the root boolean constant based on the jump in the given
  // `jump_func` -- the function should jump to the given label if we want to
  // select "true", otherwise it should fall through.
  void SelectBooleanConstant(
      Register output, std::function<void(Label*, Label::Distance)> jump_func);

  // Returns the ToBoolean result in kInterpreterAccumulatorRegister.
  void JumpIfToBoolean(bool do_jump_if_true, Register reg, Label* label,
                       Label::Distance distance = Label::kFar);

  // Call helpers.
  template <typename... Args>
  void CallBuiltin(Builtins::Name builtin, Args... args);
  template <typename... Args>
  void CallRuntime(Runtime::FunctionId function, Args... args);

  template <typename... Args>
  void TailCallBuiltin(Builtins::Name builtin, Args... args);

  void BuildBinop(
      Builtins::Name builtin_name, bool fast_path = false,
      bool check_overflow = false,
      std::function<void(Register, Register)> instruction = [](Register,
                                                               Register) {});
  void BuildUnop(Builtins::Name builtin_name);
  void BuildCompare(Builtins::Name builtin_name);
  void BuildBinopWithConstant(Builtins::Name builtin_name);

  template <typename... Args>
  void BuildCall(ConvertReceiverMode mode, uint32_t slot, uint32_t arg_count,
                 Args... args);

#ifdef V8_TRACE_IGNITION
  void TraceBytecode(Runtime::FunctionId function_id);
#endif

  // Single bytecode visitors.
#define DECLARE_VISITOR(name, ...) void Visit##name();
  BYTECODE_LIST(DECLARE_VISITOR)
#undef DECLARE_VISITOR

  // Intrinsic call visitors.
#define DECLARE_VISITOR(name, ...) \
  void VisitIntrinsic##name(interpreter::RegisterList args);
  INTRINSICS_LIST(DECLARE_VISITOR)
#undef DECLARE_VISITOR

  const interpreter::BytecodeArrayAccessor& accessor() { return iterator_; }

  Isolate* isolate_;
  RuntimeCallStats* stats_;
  Handle<SharedFunctionInfo> shared_function_info_;
  Handle<BytecodeArray> bytecode_;
  MacroAssembler masm_;
  BaselineAssembler basm_;
  interpreter::BytecodeArrayIterator iterator_;
  BytecodeOffsetTableBuilder bytecode_offset_table_builder_;
  Zone zone_;

  // TODO(v8:11429,leszeks): Consider using a sorted vector or similar, instead
  // of a map.
  ZoneMap<int, ZoneVector<Label*>> linked_labels_;
  ZoneMap<int, Label*> unlinked_labels_;
  ZoneSet<int> handler_offsets_;
};

}  // namespace baseline
}  // namespace internal
}  // namespace v8

#endif

#endif  // V8_BASELINE_BASELINE_COMPILER_H_

src/baseline/baseline.cc | 99 (new file)
@@ -0,0 +1,99 @@
// Copyright 2020 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "src/baseline/baseline.h"

// TODO(v8:11421): Remove #if once baseline compiler is ported to other
// architectures.
#if V8_TARGET_ARCH_X64 || V8_TARGET_ARCH_ARM64

#include "src/baseline/baseline-compiler.h"
#include "src/heap/factory-inl.h"
#include "src/logging/counters.h"
#include "src/objects/script-inl.h"
#include "src/objects/shared-function-info-inl.h"

namespace v8 {
namespace internal {

Handle<Code> CompileWithBaseline(
    Isolate* isolate, Handle<SharedFunctionInfo> shared_function_info,
    Handle<BytecodeArray> bytecode) {
  RuntimeCallTimerScope runtimeTimer(isolate,
                                     RuntimeCallCounterId::kCompileBaseline);
  baseline::BaselineCompiler compiler(isolate, shared_function_info, bytecode);

  compiler.GenerateCode();

  return compiler.Build(isolate);
}

// TODO(v8:11429): This can be the basis of Compiler::CompileBaseline
Handle<Code> CompileWithBaseline(Isolate* isolate,
                                 Handle<SharedFunctionInfo> shared) {
  if (shared->HasBaselineData()) {
    return handle(shared->baseline_data().baseline_code(), isolate);
  }

  if (FLAG_trace_opt) {
    PrintF("[compiling method ");
    shared->ShortPrint();
    PrintF(" using Sparkplug]\n");
  }

  base::ElapsedTimer timer;
  timer.Start();

  Handle<Code> code = CompileWithBaseline(
      isolate, shared, handle(shared->GetBytecodeArray(isolate), isolate));

  // TODO(v8:11429): Extract to Factory::NewBaselineData
  Handle<BaselineData> baseline_data = Handle<BaselineData>::cast(
      isolate->factory()->NewStruct(BASELINE_DATA_TYPE, AllocationType::kOld));
  baseline_data->set_baseline_code(*code);
  baseline_data->set_data(
      HeapObject::cast(shared->function_data(kAcquireLoad)));

  shared->set_baseline_data(*baseline_data);

  if (FLAG_print_code) {
    code->Print();
  }

  if (shared->script().IsScript()) {
    Compiler::LogFunctionCompilation(
        isolate, CodeEventListener::FUNCTION_TAG, shared,
        handle(Script::cast(shared->script()), isolate),
        Handle<AbstractCode>::cast(code), CodeKind::SPARKPLUG,
        timer.Elapsed().InMillisecondsF());
  }

  if (FLAG_trace_opt) {
    // TODO(v8:11429): Move to Compiler.
    PrintF("[completed compiling ");
    shared->ShortPrint();
    PrintF(" using Sparkplug - took %0.3f ms]\n",
           timer.Elapsed().InMillisecondsF());
  }

  return code;
}

}  // namespace internal
}  // namespace v8

#else

namespace v8 {
namespace internal {

Handle<Code> CompileWithBaseline(Isolate* isolate,
                                 Handle<SharedFunctionInfo> shared) {
  UNREACHABLE();
}

}  // namespace internal
}  // namespace v8

#endif

src/baseline/baseline.h | 24 (new file)
@@ -0,0 +1,24 @@
// Copyright 2020 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#ifndef V8_BASELINE_BASELINE_H_
#define V8_BASELINE_BASELINE_H_

#include "src/handles/handles.h"

namespace v8 {
namespace internal {

class Code;
class SharedFunctionInfo;
class BytecodeArray;

// TODO(v8:11429): Restrict header visibility to just this file.
Handle<Code> CompileWithBaseline(Isolate* isolate,
                                 Handle<SharedFunctionInfo> shared);

}  // namespace internal
}  // namespace v8

#endif  // V8_BASELINE_BASELINE_H_

src/baseline/x64/baseline-compiler-x64-inl.h | 504 (new file)
@@ -0,0 +1,504 @@
// Copyright 2021 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#ifndef V8_BASELINE_X64_BASELINE_COMPILER_X64_INL_H_
#define V8_BASELINE_X64_BASELINE_COMPILER_X64_INL_H_

#include "src/base/macros.h"
#include "src/baseline/baseline-compiler.h"
#include "src/codegen/interface-descriptors.h"
#include "src/codegen/x64/register-x64.h"
#include "src/objects/code-inl.h"

namespace v8 {
namespace internal {
namespace baseline {

namespace detail {

// TODO(v8:11429,verwaest): For now this avoids using kScratchRegister (== r10)
// since the macro-assembler doesn't use this scope and would conflict.
static constexpr Register kScratchRegisters[] = {r8, r9, r11, r12, r14, r15};
static constexpr int kNumScratchRegisters = arraysize(kScratchRegisters);

}  // namespace detail

// TODO(v8:11429): Move BaselineAssembler to baseline-assembler-<arch>-inl.h
class BaselineAssembler::ScratchRegisterScope {
 public:
  explicit ScratchRegisterScope(BaselineAssembler* assembler)
      : assembler_(assembler),
        prev_scope_(assembler->scratch_register_scope_),
        registers_used_(prev_scope_ == nullptr ? 0
                                               : prev_scope_->registers_used_) {
    assembler_->scratch_register_scope_ = this;
  }
  ~ScratchRegisterScope() { assembler_->scratch_register_scope_ = prev_scope_; }

  Register AcquireScratch() {
    DCHECK_LT(registers_used_, detail::kNumScratchRegisters);
    return detail::kScratchRegisters[registers_used_++];
  }

 private:
  BaselineAssembler* assembler_;
  ScratchRegisterScope* prev_scope_;
  int registers_used_;
};
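
// Annotation (not part of the commit): nested scopes share registers_used_,
// so an inner scope continues where the outer one stopped instead of
// re-handing out a live register:
//
//   BaselineAssembler::ScratchRegisterScope outer(&basm);
//   Register a = outer.AcquireScratch();     // r8
//   {
//     BaselineAssembler::ScratchRegisterScope inner(&basm);
//     Register b = inner.AcquireScratch();   // r9, not r8
//   }                                        // r9 becomes free again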

// TODO(v8:11429,leszeks): Unify condition names in the MacroAssembler.
enum class Condition : uint8_t {
  kEqual = equal,
  kNotEqual = not_equal,

  kLessThan = less,
  kGreaterThan = greater,
  kLessThanEqual = less_equal,
  kGreaterThanEqual = greater_equal,

  kUnsignedLessThan = below,
  kUnsignedGreaterThan = above,
  kUnsignedLessThanEqual = below_equal,
  kUnsignedGreaterThanEqual = above_equal,

  kOverflow = overflow,
  kNoOverflow = no_overflow,

  kZero = zero,
  kNotZero = not_zero,
};

internal::Condition AsMasmCondition(Condition cond) {
  return static_cast<internal::Condition>(cond);
}

namespace detail {

#define __ masm_->

#ifdef DEBUG
bool Clobbers(Register target, MemOperand op) {
  return op.AddressUsesRegister(target);
}
#endif

}  // namespace detail

MemOperand BaselineAssembler::RegisterFrameOperand(
    interpreter::Register interpreter_register) {
  return MemOperand(rbp, interpreter_register.ToOperand() * kSystemPointerSize);
}

void BaselineAssembler::Jump(Label* target, Label::Distance distance) {
  __ jmp(target, distance);
}
void BaselineAssembler::JumpIf(Condition cc, Label* target,
                               Label::Distance distance) {
  __ j(AsMasmCondition(cc), target, distance);
}
void BaselineAssembler::JumpIfRoot(Register value, RootIndex index,
                                   Label* target, Label::Distance distance) {
  __ JumpIfRoot(value, index, target, distance);
}
void BaselineAssembler::JumpIfNotRoot(Register value, RootIndex index,
                                      Label* target, Label::Distance distance) {
  __ JumpIfNotRoot(value, index, target, distance);
}
void BaselineAssembler::JumpIfSmi(Register value, Label* target,
                                  Label::Distance distance) {
  __ JumpIfSmi(value, target, distance);
}
void BaselineAssembler::JumpIfNotSmi(Register value, Label* target,
                                     Label::Distance distance) {
  __ JumpIfNotSmi(value, target, distance);
}

void BaselineAssembler::CallBuiltin(Builtins::Name builtin) {
  __ RecordCommentForOffHeapTrampoline(builtin);
  __ Call(__ EntryFromBuiltinIndexAsOperand(builtin));
  if (FLAG_code_comments) __ RecordComment("]");
}

void BaselineAssembler::TailCallBuiltin(Builtins::Name builtin) {
  __ RecordCommentForOffHeapTrampoline(builtin);
  __ Jump(__ EntryFromBuiltinIndexAsOperand(builtin));
  if (FLAG_code_comments) __ RecordComment("]");
}

void BaselineAssembler::Test(Register value, int mask) {
  if ((mask & 0xff) == mask) {
    __ testb(value, Immediate(mask));
  } else {
    __ testl(value, Immediate(mask));
  }
}
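
// Annotation (not part of the commit): masks that fit entirely in the low
// byte take the shorter testb encoding; e.g. Test(rax, 0x3) emits testb,
// while Test(rax, 0x300) falls back to testl.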
|
||||
void BaselineAssembler::CmpObjectType(Register object,
|
||||
InstanceType instance_type,
|
||||
Register map) {
|
||||
__ CmpObjectType(object, instance_type, map);
|
||||
}
|
||||
void BaselineAssembler::CmpInstanceType(Register value,
|
||||
InstanceType instance_type) {
|
||||
__ CmpInstanceType(value, instance_type);
|
||||
}
|
||||
void BaselineAssembler::Cmp(Register value, Smi smi) { __ Cmp(value, smi); }
|
||||
void BaselineAssembler::ComparePointer(Register value, MemOperand operand) {
|
||||
__ cmpq(value, operand);
|
||||
}
|
||||
void BaselineAssembler::SmiCompare(Register lhs, Register rhs) {
|
||||
__ SmiCompare(lhs, rhs);
|
||||
}
|
||||
// cmp_tagged
|
||||
void BaselineAssembler::CompareTagged(Register value, MemOperand operand) {
|
||||
__ cmp_tagged(value, operand);
|
||||
}
|
||||
void BaselineAssembler::CompareTagged(MemOperand operand, Register value) {
|
||||
__ cmp_tagged(operand, value);
|
||||
}
|
||||
void BaselineAssembler::CompareByte(Register value, int32_t byte) {
|
||||
__ cmpb(value, Immediate(byte));
|
||||
}
|
||||
|
||||
void BaselineAssembler::Move(interpreter::Register output, Register source) {
|
||||
return __ movq(RegisterFrameOperand(output), source);
|
||||
}
|
||||
void BaselineAssembler::Move(Register output, TaggedIndex value) {
|
||||
__ Move(output, value);
|
||||
}
|
||||
void BaselineAssembler::Move(MemOperand output, Register source) {
|
||||
__ movq(output, source);
|
||||
}
|
||||
void BaselineAssembler::Move(Register output, ExternalReference reference) {
|
||||
__ Move(output, reference);
|
||||
}
|
||||
void BaselineAssembler::Move(Register output, Handle<HeapObject> value) {
|
||||
__ Move(output, value);
|
||||
}
|
||||
void BaselineAssembler::Move(Register output, int32_t value) {
|
||||
__ Move(output, Immediate(value));
|
||||
}
|
||||
void BaselineAssembler::MoveMaybeSmi(Register output, Register source) {
|
||||
__ mov_tagged(output, source);
|
||||
}
|
||||
void BaselineAssembler::MoveSmi(Register output, Register source) {
|
||||
__ mov_tagged(output, source);
|
||||
}
namespace detail {
void PushSingle(MacroAssembler* masm, RootIndex source) {
  masm->PushRoot(source);
}
void PushSingle(MacroAssembler* masm, Register reg) { masm->Push(reg); }
void PushSingle(MacroAssembler* masm, TaggedIndex value) { masm->Push(value); }
void PushSingle(MacroAssembler* masm, Smi value) { masm->Push(value); }
void PushSingle(MacroAssembler* masm, Handle<HeapObject> object) {
  masm->Push(object);
}
void PushSingle(MacroAssembler* masm, int32_t immediate) {
  masm->Push(Immediate(immediate));
}
void PushSingle(MacroAssembler* masm, MemOperand operand) {
  masm->Push(operand);
}
void PushSingle(MacroAssembler* masm, interpreter::Register source) {
  return PushSingle(masm, BaselineAssembler::RegisterFrameOperand(source));
}

template <typename Arg>
struct PushHelper {
  static int Push(BaselineAssembler* basm, Arg arg) {
    PushSingle(basm->masm(), arg);
    return 1;
  }
  static int PushReverse(BaselineAssembler* basm, Arg arg) {
    return Push(basm, arg);
  }
};

template <>
struct PushHelper<interpreter::RegisterList> {
  static int Push(BaselineAssembler* basm, interpreter::RegisterList list) {
    for (int reg_index = 0; reg_index < list.register_count(); ++reg_index) {
      PushSingle(basm->masm(), list[reg_index]);
    }
    return list.register_count();
  }
  static int PushReverse(BaselineAssembler* basm,
                         interpreter::RegisterList list) {
    for (int reg_index = list.register_count() - 1; reg_index >= 0;
         --reg_index) {
      PushSingle(basm->masm(), list[reg_index]);
    }
    return list.register_count();
  }
};

template <typename... Args>
struct PushAllHelper;
template <>
struct PushAllHelper<> {
  static int Push(BaselineAssembler* masm) { return 0; }
  static int PushReverse(BaselineAssembler* masm) { return 0; }
};
template <typename Arg, typename... Args>
struct PushAllHelper<Arg, Args...> {
  static int Push(BaselineAssembler* masm, Arg arg, Args... args) {
    int nargs = PushHelper<Arg>::Push(masm, arg);
    return nargs + PushAllHelper<Args...>::Push(masm, args...);
  }
  static int PushReverse(BaselineAssembler* masm, Arg arg, Args... args) {
    int nargs = PushAllHelper<Args...>::PushReverse(masm, args...);
    return nargs + PushHelper<Arg>::PushReverse(masm, arg);
  }
};

}  // namespace detail

template <typename... T>
int BaselineAssembler::Push(T... vals) {
  return detail::PushAllHelper<T...>::Push(this, vals...);
}

template <typename... T>
void BaselineAssembler::PushReverse(T... vals) {
  detail::PushAllHelper<T...>::PushReverse(this, vals...);
}

template <typename... T>
void BaselineAssembler::Pop(T... registers) {
  ITERATE_PACK(__ Pop(registers));
}
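
// Illustrative aside: the PushAllHelper recursion above flattens a variadic
// Push into single pushes and counts the slots written; PushReverse recurses
// into the tail first so arguments land in reverse. The same recursion over
// plain ints, as a standalone sketch (names only mimic the real helpers):
#include <iostream>
#include <vector>

std::vector<int> stack;  // stands in for the machine stack

int PushOne(int v) {
  stack.push_back(v);
  return 1;
}

int PushAll() { return 0; }
template <typename... Rest>
int PushAll(int first, Rest... rest) {
  int n = PushOne(first);       // head first...
  return n + PushAll(rest...);  // ...then the tail
}

int PushAllReverse() { return 0; }
template <typename... Rest>
int PushAllReverse(int first, Rest... rest) {
  int n = PushAllReverse(rest...);  // tail first...
  return n + PushOne(first);        // ...then the head
}

int main() {
  PushAll(1, 2, 3);         // stack: 1 2 3
  PushAllReverse(4, 5, 6);  // stack: 1 2 3 6 5 4
  for (int v : stack) std::cout << v << ' ';
  std::cout << '\n';
}
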

void BaselineAssembler::LoadTaggedPointerField(Register output, Register source,
                                               int offset) {
  __ LoadTaggedPointerField(output, FieldOperand(source, offset));
}
void BaselineAssembler::LoadTaggedSignedField(Register output, Register source,
                                              int offset) {
  __ LoadTaggedSignedField(output, FieldOperand(source, offset));
}
void BaselineAssembler::LoadTaggedAnyField(Register output, Register source,
                                           int offset) {
  __ LoadAnyTaggedField(output, FieldOperand(source, offset));
}
void BaselineAssembler::LoadByteField(Register output, Register source,
                                      int offset) {
  __ movb(output, FieldOperand(source, offset));
}
void BaselineAssembler::StoreTaggedSignedField(Register target, int offset,
                                               Smi value) {
  __ StoreTaggedSignedField(FieldOperand(target, offset), value);
}
void BaselineAssembler::StoreTaggedFieldWithWriteBarrier(Register target,
                                                         int offset,
                                                         Register value) {
  BaselineAssembler::ScratchRegisterScope scratch_scope(this);
  Register scratch = scratch_scope.AcquireScratch();
  DCHECK_NE(target, scratch);
  DCHECK_NE(value, scratch);
  __ StoreTaggedField(FieldOperand(target, offset), value);
  __ RecordWriteField(target, offset, value, scratch, kDontSaveFPRegs);
}
void BaselineAssembler::StoreTaggedFieldNoWriteBarrier(Register target,
                                                       int offset,
                                                       Register value) {
  __ StoreTaggedField(FieldOperand(target, offset), value);
}

void BaselineAssembler::AddToInterruptBudget(int32_t weight) {
  ScratchRegisterScope scratch_scope(this);
  Register feedback_cell = scratch_scope.AcquireScratch();
  LoadFunction(feedback_cell);
  LoadTaggedPointerField(feedback_cell, feedback_cell,
                         JSFunction::kFeedbackCellOffset);
  __ addl(FieldOperand(feedback_cell, FeedbackCell::kInterruptBudgetOffset),
          Immediate(weight));
}

void BaselineAssembler::AddToInterruptBudget(Register weight) {
  ScratchRegisterScope scratch_scope(this);
  Register feedback_cell = scratch_scope.AcquireScratch();
  LoadFunction(feedback_cell);
  LoadTaggedPointerField(feedback_cell, feedback_cell,
                         JSFunction::kFeedbackCellOffset);
  __ addl(FieldOperand(feedback_cell, FeedbackCell::kInterruptBudgetOffset),
          weight);
}

void BaselineAssembler::AddSmi(Register lhs, Smi rhs) {
  if (rhs.value() == 0) return;
  if (SmiValuesAre31Bits()) {
    __ addl(lhs, Immediate(rhs));
  } else {
    ScratchRegisterScope scratch_scope(this);
    Register rhs_reg = scratch_scope.AcquireScratch();
    __ Move(rhs_reg, rhs);
    __ addq(lhs, rhs_reg);
  }
}
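
// Illustrative aside: AddSmi can use a 32-bit addl only because a 31-bit Smi
// keeps its payload in the low half of the word (payload << 1); with 32-bit
// Smis the payload sits in the high half (payload << 32) and no longer fits
// an instruction immediate. A standalone sketch of the two encodings, under
// the standard assumption that the Smi tag bit is 0:
#include <cstdint>
#include <iostream>

int64_t TagSmi31(int32_t v) { return static_cast<int64_t>(v) << 1; }
int64_t TagSmi32(int32_t v) { return static_cast<int64_t>(v) << 32; }

int main() {
  std::cout << TagSmi31(5) << '\n';  // 10: fits a 32-bit immediate add
  std::cout << TagSmi32(5) << '\n';  // 21474836480: needs a scratch register
}
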

void BaselineAssembler::Switch(Register reg, int case_value_base,
                               Label** labels, int num_labels) {
  ScratchRegisterScope scope(this);
  Register table = scope.AcquireScratch();
  Label fallthrough, jump_table;
  if (case_value_base > 0) {
    __ subq(reg, Immediate(case_value_base));
  }
  __ cmpq(reg, Immediate(num_labels));
  __ j(above_equal, &fallthrough);
  __ leaq(table, MemOperand(&jump_table));
  __ jmp(MemOperand(table, reg, times_8, 0));
  // Emit the jump table inline, under the assumption that it's not too big.
  __ Align(kSystemPointerSize);
  __ bind(&jump_table);
  for (int i = 0; i < num_labels; ++i) {
    __ dq(labels[i]);
  }
  __ bind(&fallthrough);
}
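
// Illustrative aside: Switch emits a classic bounds-checked jump table --
// rebase the case value, branch to fallthrough on an unsigned out-of-range
// compare, then one indexed jump through an inline array of 8-byte label
// addresses. The same control flow in portable form, with function pointers
// standing in for the emitted labels (all names here are invented):
#include <cstdio>

void CaseA() { std::puts("case A"); }
void CaseB() { std::puts("case B"); }
void Fallthrough() { std::puts("fallthrough"); }

void Dispatch(int value, int case_value_base) {
  static void (*const table[])() = {CaseA, CaseB};  // the inline jump table
  constexpr unsigned kNumLabels = 2;
  unsigned index = static_cast<unsigned>(value - case_value_base);
  if (index >= kNumLabels) {  // unsigned compare == the above_equal branch
    Fallthrough();
    return;
  }
  table[index]();
}

int main() {
  Dispatch(7, 7);   // case A
  Dispatch(8, 7);   // case B
  Dispatch(42, 7);  // fallthrough
}
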

#undef __

#define __ basm_.

void BaselineCompiler::Prologue() {
  __ Move(kInterpreterBytecodeArrayRegister, bytecode_);
  DCHECK_EQ(kJSFunctionRegister, kJavaScriptCallTargetRegister);
  CallBuiltin(Builtins::kBaselineOutOfLinePrologue, kContextRegister,
              kJSFunctionRegister, kJavaScriptCallArgCountRegister,
              kInterpreterBytecodeArrayRegister);

  PrologueFillFrame();
}

void BaselineCompiler::PrologueFillFrame() {
  __ RecordComment("[ Fill frame");
  // Inlined register frame fill
  interpreter::Register new_target_or_generator_register =
      bytecode_->incoming_new_target_or_generator_register();
  __ LoadRoot(kInterpreterAccumulatorRegister, RootIndex::kUndefinedValue);
  int register_count = bytecode_->register_count();
  // Magic value
  const int kLoopUnrollSize = 8;
  const int new_target_index = new_target_or_generator_register.index();
  const bool has_new_target = new_target_index != kMaxInt;
  int i = 0;
  if (has_new_target) {
    DCHECK_LE(new_target_index, register_count);
    for (; i < new_target_index; i++) {
      __ Push(kInterpreterAccumulatorRegister);
    }
    // Push new_target_or_generator.
    __ Push(kJavaScriptCallNewTargetRegister);
    i++;
  }
  if (register_count < 2 * kLoopUnrollSize) {
    // If the frame is small enough, just unroll the frame fill completely.
    for (; i < register_count; ++i) {
      __ Push(kInterpreterAccumulatorRegister);
    }
  } else {
    register_count -= i;
    i = 0;
    // Extract the first few registers to round to the unroll size.
    int first_registers = register_count % kLoopUnrollSize;
    for (; i < first_registers; ++i) {
      __ Push(kInterpreterAccumulatorRegister);
    }
    BaselineAssembler::ScratchRegisterScope scope(&basm_);
    Register scratch = scope.AcquireScratch();
    __ Move(scratch, register_count / kLoopUnrollSize);
    // We enter the loop unconditionally, so make sure we need to loop at
    // least once.
    DCHECK_GT(register_count / kLoopUnrollSize, 0);
    Label loop;
    __ Bind(&loop);
    for (int j = 0; j < kLoopUnrollSize; ++j) {
      __ Push(kInterpreterAccumulatorRegister);
    }
    __ masm()->decl(scratch);
    __ JumpIf(Condition::kGreaterThan, &loop);
  }
  __ RecordComment("]");
}
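
// Illustrative aside: for frames with at least 2 * kLoopUnrollSize registers,
// the fill above pushes the remainder up front so the loop body can stay a
// fixed eight pushes. A quick standalone check of that split for an assumed
// frame of 23 registers:
#include <cassert>

int main() {
  const int kLoopUnrollSize = 8;
  int register_count = 23;                                 // assumed example
  int first_registers = register_count % kLoopUnrollSize;  // 7 pushed up front
  int loop_iterations = register_count / kLoopUnrollSize;  // 2 loop trips
  assert(first_registers + loop_iterations * kLoopUnrollSize ==
         register_count);
}
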

void BaselineCompiler::VerifyFrameSize() {
  __ Move(kScratchRegister, rsp);
  __ masm()->addq(kScratchRegister,
                  Immediate(InterpreterFrameConstants::kFixedFrameSizeFromFp +
                            bytecode_->frame_size()));
  __ masm()->cmpq(kScratchRegister, rbp);
  __ masm()->Assert(equal, AbortReason::kUnexpectedStackPointer);
}

#undef __

#define __ basm.

void BaselineAssembler::EmitReturn(MacroAssembler* masm) {
  BaselineAssembler basm(masm);

  Register weight = BaselineLeaveFrameDescriptor::WeightRegister();
  Register params_size = BaselineLeaveFrameDescriptor::ParamsSizeRegister();

  __ RecordComment("[ Update Interrupt Budget");
  __ AddToInterruptBudget(weight);

  // Use the compare flags set by the add above.
  // TODO(v8:11429,leszeks): This might be trickier cross-arch.
  Label skip_interrupt_label;
  __ JumpIf(Condition::kGreaterThanEqual, &skip_interrupt_label);
  {
    __ masm()->SmiTag(params_size);
    __ Push(params_size, kInterpreterAccumulatorRegister);

    __ LoadContext(kContextRegister);
    __ Push(MemOperand(rbp, InterpreterFrameConstants::kFunctionOffset));
    __ CallRuntime(Runtime::kBytecodeBudgetInterruptFromBytecode, 1);

    __ Pop(kInterpreterAccumulatorRegister, params_size);
    __ masm()->SmiUntag(params_size);
  }
  __ RecordComment("]");

  __ Bind(&skip_interrupt_label);

  BaselineAssembler::ScratchRegisterScope scope(&basm);
  Register scratch = scope.AcquireScratch();

  Register actual_params_size = scratch;
  // Compute the size of the actual parameters + receiver (in bytes).
  __ masm()->movq(actual_params_size,
                  MemOperand(rbp, StandardFrameConstants::kArgCOffset));

  // If the actual argument count is bigger than the formal parameter count,
  // use it to free up the stack arguments.
  Label corrected_args_count;
  __ masm()->cmpq(params_size, actual_params_size);
  __ JumpIf(Condition::kGreaterThanEqual, &corrected_args_count, Label::kNear);
  __ masm()->movq(params_size, actual_params_size);
  __ Bind(&corrected_args_count);

  // Leave the frame (also dropping the register file).
  __ masm()->LeaveFrame(StackFrame::MANUAL);

  // Drop receiver + arguments.
  Register return_pc = scratch;
  __ masm()->PopReturnAddressTo(return_pc);
  __ masm()->leaq(rsp, MemOperand(rsp, params_size, times_system_pointer_size,
                                  kSystemPointerSize));
  __ masm()->PushReturnAddressFrom(return_pc);
  __ masm()->Ret();
}
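
// Illustrative aside: the final leaq drops params_size arguments plus the
// receiver in one address computation -- rsp += params_size * 8 + 8 -- with
// the return address popped first and re-pushed after, so Ret still works.
// A standalone sketch of the arithmetic, assuming 8-byte stack slots:
#include <cstdint>
#include <iostream>

int main() {
  const uint64_t kSystemPointerSize = 8;
  uint64_t rsp = 0x7fff0000;  // assumed stack pointer after LeaveFrame
  uint64_t params_size = 3;   // assumed argument count
  rsp += params_size * kSystemPointerSize + kSystemPointerSize;
  std::cout << std::hex << rsp << '\n';  // 7fff0020
}
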

#undef __

}  // namespace baseline
}  // namespace internal
}  // namespace v8

#endif  // V8_BASELINE_X64_BASELINE_COMPILER_X64_INL_H_

@@ -5,6 +5,7 @@
#if V8_TARGET_ARCH_ARM64

#include "src/api/api-arguments.h"
#include "src/baseline/baseline-compiler.h"
#include "src/codegen/code-factory.h"
// For interpreter_entry_return_pc_offset. TODO(jkummerow): Drop.
#include "src/codegen/macro-assembler-inl.h"
@@ -18,6 +19,7 @@
#include "src/objects/cell.h"
#include "src/objects/foreign.h"
#include "src/objects/heap-number.h"
#include "src/objects/instance-type.h"
#include "src/objects/js-generator.h"
#include "src/objects/objects-inl.h"
#include "src/objects/smi.h"
@@ -407,11 +409,16 @@ void Builtins::Generate_ConstructedNonConstructable(MacroAssembler* masm) {
  __ CallRuntime(Runtime::kThrowConstructedNonConstructable);
}

static void GetSharedFunctionInfoBytecode(MacroAssembler* masm,
                                          Register sfi_data,
                                          Register scratch1) {
// TODO(v8:11429): Add a path for "not_compiled" and unify the two uses under
// the more general dispatch.
static void GetSharedFunctionInfoBytecodeOrBaseline(MacroAssembler* masm,
                                                    Register sfi_data,
                                                    Register scratch1,
                                                    Label* is_baseline) {
  Label done;
  __ CompareObjectType(sfi_data, scratch1, scratch1, INTERPRETER_DATA_TYPE);
  __ CompareObjectType(sfi_data, scratch1, scratch1, BASELINE_DATA_TYPE);
  __ B(eq, is_baseline);
  __ Cmp(scratch1, INTERPRETER_DATA_TYPE);
  __ B(ne, &done);
  __ LoadTaggedPointerField(
      sfi_data,
@@ -514,13 +521,15 @@ void Builtins::Generate_ResumeGeneratorTrampoline(MacroAssembler* masm) {

  // Underlying function needs to have bytecode available.
  if (FLAG_debug_code) {
    Label is_baseline;
    __ LoadTaggedPointerField(
        x3, FieldMemOperand(x4, JSFunction::kSharedFunctionInfoOffset));
    __ LoadTaggedPointerField(
        x3, FieldMemOperand(x3, SharedFunctionInfo::kFunctionDataOffset));
    GetSharedFunctionInfoBytecode(masm, x3, x0);
    GetSharedFunctionInfoBytecodeOrBaseline(masm, x3, x0, &is_baseline);
    __ CompareObjectType(x3, x3, x3, BYTECODE_ARRAY_TYPE);
    __ Assert(eq, AbortReason::kMissingBytecodeArray);
    __ bind(&is_baseline);
  }

  // Resume (Ignition/TurboFan) generator object.
@@ -1153,6 +1162,174 @@ static void AdvanceBytecodeOffsetOrReturn(MacroAssembler* masm,
  __ Bind(&end);
}

// static
void Builtins::Generate_BaselineOutOfLinePrologue(MacroAssembler* masm) {
  UseScratchRegisterScope temps(masm);
  // Need a few extra registers
  temps.Include(x14, x15);

  auto descriptor = Builtins::CallInterfaceDescriptorFor(
      Builtins::kBaselineOutOfLinePrologue);
  Register closure = descriptor.GetRegisterParameter(
      BaselineOutOfLinePrologueDescriptor::kClosure);
  // Load the feedback vector from the closure.
  Register feedback_vector = temps.AcquireX();
  __ LoadTaggedPointerField(
      feedback_vector,
      FieldMemOperand(closure, JSFunction::kFeedbackCellOffset));
  __ LoadTaggedPointerField(
      feedback_vector, FieldMemOperand(feedback_vector, Cell::kValueOffset));
  if (__ emit_debug_code()) {
    __ CompareObjectType(feedback_vector, x4, x4, FEEDBACK_VECTOR_TYPE);
    __ Assert(eq, AbortReason::kExpectedFeedbackVector);
  }

  __ RecordComment("[ Check optimization state");

  // Read off the optimization state in the feedback vector.
  Register optimization_state = temps.AcquireW();
  __ Ldr(optimization_state,
         FieldMemOperand(feedback_vector, FeedbackVector::kFlagsOffset));

  // Check if there is optimized code or an optimization marker that needs to
  // be processed.
  Label has_optimized_code_or_marker;
  __ TestAndBranchIfAnySet(
      optimization_state,
      FeedbackVector::kHasOptimizedCodeOrCompileOptimizedMarkerMask,
      &has_optimized_code_or_marker);

  // Increment invocation count for the function.
  {
    UseScratchRegisterScope temps(masm);
    Register invocation_count = temps.AcquireW();
    __ Ldr(invocation_count,
           FieldMemOperand(feedback_vector,
                           FeedbackVector::kInvocationCountOffset));
    __ Add(invocation_count, invocation_count, Operand(1));
    __ Str(invocation_count,
           FieldMemOperand(feedback_vector,
                           FeedbackVector::kInvocationCountOffset));
  }

  __ RecordComment("[ Frame Setup");
  FrameScope frame_scope(masm, StackFrame::MANUAL);
  // Normally the first thing we'd do here is Push(lr, fp), but we already
  // entered the frame in BaselineCompiler::Prologue, as we had to use the
  // value lr had before the call to this BaselineOutOfLinePrologue builtin.

  Register callee_context = descriptor.GetRegisterParameter(
      BaselineOutOfLinePrologueDescriptor::kCalleeContext);
  Register callee_js_function = descriptor.GetRegisterParameter(
      BaselineOutOfLinePrologueDescriptor::kClosure);
  __ Push(callee_context, callee_js_function);
  DCHECK_EQ(callee_js_function, kJavaScriptCallTargetRegister);
  DCHECK_EQ(callee_js_function, kJSFunctionRegister);

  Register argc = descriptor.GetRegisterParameter(
      BaselineOutOfLinePrologueDescriptor::kJavaScriptCallArgCount);
  // We'll use the bytecode for both code age/OSR resetting, and pushing onto
  // the frame, so load it into a register.
  Register bytecodeArray = descriptor.GetRegisterParameter(
      BaselineOutOfLinePrologueDescriptor::kInterpreterBytecodeArray);

  // Reset code age and the OSR arming. The OSR field and BytecodeAgeOffset
  // are 8-bit fields next to each other, so we can optimize this by writing
  // a single 16-bit value. These static asserts guard that this assumption
  // is valid.
  STATIC_ASSERT(BytecodeArray::kBytecodeAgeOffset ==
                BytecodeArray::kOsrNestingLevelOffset + kCharSize);
  STATIC_ASSERT(BytecodeArray::kNoAgeBytecodeAge == 0);
  __ Strh(wzr, FieldMemOperand(bytecodeArray,
                               BytecodeArray::kOsrNestingLevelOffset));

  __ Push(argc, bytecodeArray);

  // Horrible hack: This should be the bytecode offset, but we calculate that
  // from the PC, so we cache the feedback vector in there instead.
  // TODO(v8:11429): Make this less of a horrible hack, and more of a simple
  // frame difference, by improving how Ignition and Sparkplug frames are
  // distinguished.
  if (__ emit_debug_code()) {
    __ CompareObjectType(feedback_vector, x4, x4, FEEDBACK_VECTOR_TYPE);
    __ Assert(eq, AbortReason::kExpectedFeedbackVector);
  }
  // Our stack is currently aligned. We have to push something along with the
  // feedback vector to keep it that way -- we may as well start initialising
  // the register frame.
  // TODO(v8:11429,leszeks): Consider guaranteeing that this call leaves
  // `undefined` in the accumulator register, to skip the load in the baseline
  // code.
  __ LoadRoot(kInterpreterAccumulatorRegister, RootIndex::kUndefinedValue);
  __ Push(feedback_vector, kInterpreterAccumulatorRegister);
  __ RecordComment("]");

  __ RecordComment("[ Stack/interrupt check");
  Label call_stack_guard;
  {
    // Stack check. This folds the checks for both the interrupt stack limit
    // check and the real stack limit into one by just checking for the
    // interrupt limit. The interrupt limit is either equal to the real stack
    // limit or tighter. By ensuring we have space until that limit after
    // building the frame we can quickly precheck both at once.
    UseScratchRegisterScope temps(masm);

    Register frame_size = temps.AcquireW();
    __ Ldr(frame_size,
           FieldMemOperand(bytecodeArray, BytecodeArray::kFrameSizeOffset));
    Register sp_minus_frame_size = frame_size.X();
    __ Sub(sp_minus_frame_size, sp, frame_size.X());
    Register interrupt_limit = temps.AcquireX();
    __ LoadStackLimit(interrupt_limit, StackLimitKind::kInterruptStackLimit);
    __ Cmp(sp_minus_frame_size, interrupt_limit);
    __ B(lo, &call_stack_guard);
    __ RecordComment("]");
  }

  // Do "fast" return to the caller pc in lr.
  // TODO(v8:11429): Document this frame setup better.
  __ Ret();

  __ RecordComment("[ Optimized marker check");
  // TODO(v8:11429): Share this code with the InterpreterEntryTrampoline.
  __ bind(&has_optimized_code_or_marker);
  {
    Label maybe_has_optimized_code;
    // Drop the frame created by the baseline call.
    __ Pop<TurboAssembler::kAuthLR>(fp, lr);
    // Check if optimized code is available.
    __ TestAndBranchIfAllClear(
        optimization_state,
        FeedbackVector::kHasCompileOptimizedOrLogFirstExecutionMarker,
        &maybe_has_optimized_code);

    Register optimization_marker = optimization_state.X();
    __ DecodeField<FeedbackVector::OptimizationMarkerBits>(optimization_marker);
    MaybeOptimizeCode(masm, feedback_vector, optimization_marker);

    __ bind(&maybe_has_optimized_code);
    Register optimized_code_entry = optimization_state.X();
    __ LoadAnyTaggedField(
        optimized_code_entry,
        FieldMemOperand(feedback_vector,
                        FeedbackVector::kMaybeOptimizedCodeOffset));
    TailCallOptimizedCodeSlot(masm, optimized_code_entry, x4);
    __ Trap();
  }
  __ RecordComment("]");

  __ bind(&call_stack_guard);
  {
    FrameScope frame_scope(masm, StackFrame::INTERNAL);
    __ RecordComment("[ Stack/interrupt call");
    // Save incoming new target or generator.
    __ Push(padreg, kJavaScriptCallNewTargetRegister);
    __ CallRuntime(Runtime::kStackGuard);
    __ Pop(kJavaScriptCallNewTargetRegister, padreg);
    __ RecordComment("]");
  }
  __ Ret();
}
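
// Illustrative aside: after this prologue the baseline frame holds, from the
// caller downwards, the saved fp/lr pair, context and closure, argc and the
// bytecode array, then the cached feedback vector next to the first register
// slot. A sketch of that slot order as a plain struct (illustrative only,
// not a real V8 type; each field stands for one 8-byte slot):
#include <cstdint>
#include <iostream>

struct BaselineFrameSketch {
  uint64_t saved_fp;         // pushed by BaselineCompiler::Prologue
  uint64_t saved_lr;
  uint64_t callee_context;
  uint64_t callee_js_function;
  uint64_t argc;
  uint64_t bytecode_array;
  uint64_t feedback_vector;  // cached where the bytecode offset would live
  uint64_t register_slot_0;  // undefined, starts the register frame
};

static_assert(sizeof(BaselineFrameSketch) % 16 == 0,
              "paired pushes keep the arm64 stack 16-byte aligned");

int main() { std::cout << sizeof(BaselineFrameSketch) << " bytes\n"; }
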

// Generate code for entering a JS function with the interpreter.
// On entry to the function the receiver and arguments have been pushed on the
// stack left to right.
@@ -1178,7 +1355,10 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
  __ LoadTaggedPointerField(
      kInterpreterBytecodeArrayRegister,
      FieldMemOperand(x4, SharedFunctionInfo::kFunctionDataOffset));
  GetSharedFunctionInfoBytecode(masm, kInterpreterBytecodeArrayRegister, x11);

  Label is_baseline;
  GetSharedFunctionInfoBytecodeOrBaseline(
      masm, kInterpreterBytecodeArrayRegister, x11, &is_baseline);

  // The bytecode array could have been flushed from the shared function info,
  // if so, call into CompileLazy.
@@ -1393,6 +1573,49 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
                          FeedbackVector::kMaybeOptimizedCodeOffset));
  TailCallOptimizedCodeSlot(masm, optimized_code_entry, x4);

  __ bind(&is_baseline);
  {
    // Load the feedback vector from the closure.
    __ LoadTaggedPointerField(
        feedback_vector,
        FieldMemOperand(closure, JSFunction::kFeedbackCellOffset));
    __ LoadTaggedPointerField(
        feedback_vector, FieldMemOperand(feedback_vector, Cell::kValueOffset));

    Label prepare_for_baseline;
    // Check if feedback vector is valid. If not, call prepare for baseline to
    // allocate it.
    __ LoadTaggedPointerField(
        x7, FieldMemOperand(feedback_vector, HeapObject::kMapOffset));
    __ Ldrh(x7, FieldMemOperand(x7, Map::kInstanceTypeOffset));
    __ Cmp(x7, FEEDBACK_VECTOR_TYPE);
    __ B(ne, &prepare_for_baseline);

    // Read off the optimization state in the feedback vector.
    // TODO(v8:11429): Is this worth doing here? Baseline code will check it
    // anyway...
    __ Ldr(optimization_state,
           FieldMemOperand(feedback_vector, FeedbackVector::kFlagsOffset));

    // Check if there is optimized code or an optimization marker that needs
    // to be processed.
    __ TestAndBranchIfAnySet(
        optimization_state,
        FeedbackVector::kHasOptimizedCodeOrCompileOptimizedMarkerMask,
        &has_optimized_code_or_marker);

    // Load the baseline code into the closure.
    __ LoadTaggedPointerField(
        x2, FieldMemOperand(kInterpreterBytecodeArrayRegister,
                            BaselineData::kBaselineCodeOffset));
    static_assert(kJavaScriptCallCodeStartRegister == x2, "ABI mismatch");
    ReplaceClosureCodeWithOptimizedCode(masm, x2, closure);
    __ JumpCodeObject(x2);

    __ bind(&prepare_for_baseline);
    GenerateTailCallToReturnedCode(masm, Runtime::kPrepareForBaseline);
  }

  __ bind(&compile_lazy);
  GenerateTailCallToReturnedCode(masm, Runtime::kCompileLazy);
  __ Unreachable();  // Should not return.
@@ -1792,7 +2015,14 @@ void Builtins::Generate_NotifyDeoptimized(MacroAssembler* masm) {
  __ Ret();
}

void Builtins::Generate_InterpreterOnStackReplacement(MacroAssembler* masm) {
void Builtins::Generate_TailCallOptimizedCodeSlot(MacroAssembler* masm) {
  UseScratchRegisterScope temps(masm);
  Register optimized_code_entry = kJavaScriptCallCodeStartRegister;
  TailCallOptimizedCodeSlot(masm, optimized_code_entry, temps.AcquireX());
}

namespace {
void OnStackReplacement(MacroAssembler* masm, bool is_interpreter) {
  {
    FrameScope scope(masm, StackFrame::INTERNAL);
    __ CallRuntime(Runtime::kCompileForOnStackReplacement);
@@ -1805,9 +2035,11 @@ void Builtins::Generate_InterpreterOnStackReplacement(MacroAssembler* masm) {

  __ Bind(&skip);

  // Drop the handler frame that is sitting on top of the actual
  // JavaScript frame. This is the case when OSR is triggered from bytecode.
  __ LeaveFrame(StackFrame::STUB);
  if (is_interpreter) {
    // Drop the handler frame that is sitting on top of the actual
    // JavaScript frame. This is the case when OSR is triggered from bytecode.
    __ LeaveFrame(StackFrame::STUB);
  }

  // Load deoptimization data from the code object.
  // <deopt_data> = <code>[#deoptimization_data_offset]
@@ -1828,6 +2060,15 @@ void Builtins::Generate_InterpreterOnStackReplacement(MacroAssembler* masm) {
  // And "return" to the OSR entry point of the function.
  __ Ret();
}
}  // namespace

void Builtins::Generate_InterpreterOnStackReplacement(MacroAssembler* masm) {
  return OnStackReplacement(masm, true);
}

void Builtins::Generate_BaselineOnStackReplacement(MacroAssembler* masm) {
  return OnStackReplacement(masm, false);
}

// static
void Builtins::Generate_FunctionPrototypeApply(MacroAssembler* masm) {

@@ -64,15 +64,51 @@ void Builtins::Generate_CallFunctionForwardVarargs(MacroAssembler* masm) {
      masm->isolate()->builtins()->CallFunction());
}

TF_BUILTIN(Call_ReceiverIsNullOrUndefined_Baseline,
           CallOrConstructBuiltinsAssembler) {
  auto target = Parameter<Object>(Descriptor::kFunction);
  auto argc = UncheckedParameter<Int32T>(Descriptor::kActualArgumentsCount);
  auto context = LoadContextFromBaseline();
  auto feedback_vector = LoadFeedbackVectorFromBaseline();
  auto slot = UncheckedParameter<Int32T>(Descriptor::kSlot);
  CollectCallFeedback(target, context, feedback_vector,
                      Unsigned(ChangeInt32ToIntPtr(slot)));
  TailCallBuiltin(Builtins::kCall_ReceiverIsNullOrUndefined, context, target,
                  argc);
}

TF_BUILTIN(Call_ReceiverIsNotNullOrUndefined_Baseline,
           CallOrConstructBuiltinsAssembler) {
  auto target = Parameter<Object>(Descriptor::kFunction);
  auto argc = UncheckedParameter<Int32T>(Descriptor::kActualArgumentsCount);
  auto context = LoadContextFromBaseline();
  auto feedback_vector = LoadFeedbackVectorFromBaseline();
  auto slot = UncheckedParameter<Int32T>(Descriptor::kSlot);
  CollectCallFeedback(target, context, feedback_vector,
                      Unsigned(ChangeInt32ToIntPtr(slot)));
  TailCallBuiltin(Builtins::kCall_ReceiverIsNotNullOrUndefined, context, target,
                  argc);
}

TF_BUILTIN(Call_ReceiverIsAny_Baseline, CallOrConstructBuiltinsAssembler) {
  auto target = Parameter<Object>(Descriptor::kFunction);
  auto argc = UncheckedParameter<Int32T>(Descriptor::kActualArgumentsCount);
  auto context = LoadContextFromBaseline();
  auto feedback_vector = LoadFeedbackVectorFromBaseline();
  auto slot = UncheckedParameter<Int32T>(Descriptor::kSlot);
  CollectCallFeedback(target, context, feedback_vector,
                      Unsigned(ChangeInt32ToIntPtr(slot)));
  TailCallBuiltin(Builtins::kCall_ReceiverIsAny, context, target, argc);
}
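
// Illustrative aside: all three _Baseline call builtins share one shape --
// fetch context and feedback vector from the baseline frame instead of taking
// them as parameters, record call feedback, then tail-call the plain Call
// builtin. A standalone toy model of that shape (every name here is invented):
#include <functional>
#include <iostream>
#include <string>

struct BaselineFrame {  // supplies what the real frame loads would return
  std::string context = "ctx";
  std::string feedback_vector = "fbv";
};

void CollectCallFeedback(const std::string& target, const std::string& fbv,
                         int slot) {
  std::cout << "feedback: " << target << " @" << slot << " in " << fbv << '\n';
}

void CallBaselineVariant(const BaselineFrame& frame, const std::string& target,
                         int slot,
                         const std::function<void(const std::string&)>& tail) {
  CollectCallFeedback(target, frame.feedback_vector, slot);
  tail(target);  // stands in for TailCallBuiltin(kCall_ReceiverIs..., ...)
}

int main() {
  BaselineFrame frame;
  CallBaselineVariant(frame, "f", 3, [](const std::string& t) {
    std::cout << "tail-call Call_ReceiverIsAny(" << t << ")\n";
  });
}
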

TF_BUILTIN(Call_ReceiverIsNullOrUndefined_WithFeedback,
           CallOrConstructBuiltinsAssembler) {
  auto target = Parameter<Object>(Descriptor::kFunction);
  auto argc = UncheckedParameter<Int32T>(Descriptor::kActualArgumentsCount);
  auto context = Parameter<Context>(Descriptor::kContext);
  auto maybe_feedback_vector =
      Parameter<HeapObject>(Descriptor::kMaybeFeedbackVector);
  auto feedback_vector = Parameter<FeedbackVector>(Descriptor::kFeedbackVector);
  auto slot = UncheckedParameter<Int32T>(Descriptor::kSlot);
  CollectCallFeedback(target, context, maybe_feedback_vector,
  CollectCallFeedback(target, context, feedback_vector,
                      Unsigned(ChangeInt32ToIntPtr(slot)));
  TailCallBuiltin(Builtins::kCall_ReceiverIsNullOrUndefined, context, target,
                  argc);
@@ -83,10 +119,9 @@ TF_BUILTIN(Call_ReceiverIsNotNullOrUndefined_WithFeedback,
  auto target = Parameter<Object>(Descriptor::kFunction);
  auto argc = UncheckedParameter<Int32T>(Descriptor::kActualArgumentsCount);
  auto context = Parameter<Context>(Descriptor::kContext);
  auto maybe_feedback_vector =
      Parameter<HeapObject>(Descriptor::kMaybeFeedbackVector);
  auto feedback_vector = Parameter<FeedbackVector>(Descriptor::kFeedbackVector);
  auto slot = UncheckedParameter<Int32T>(Descriptor::kSlot);
  CollectCallFeedback(target, context, maybe_feedback_vector,
  CollectCallFeedback(target, context, feedback_vector,
                      Unsigned(ChangeInt32ToIntPtr(slot)));
  TailCallBuiltin(Builtins::kCall_ReceiverIsNotNullOrUndefined, context, target,
                  argc);
@@ -96,10 +131,9 @@ TF_BUILTIN(Call_ReceiverIsAny_WithFeedback, CallOrConstructBuiltinsAssembler) {
  auto target = Parameter<Object>(Descriptor::kFunction);
  auto argc = UncheckedParameter<Int32T>(Descriptor::kActualArgumentsCount);
  auto context = Parameter<Context>(Descriptor::kContext);
  auto maybe_feedback_vector =
      Parameter<HeapObject>(Descriptor::kMaybeFeedbackVector);
  auto feedback_vector = Parameter<FeedbackVector>(Descriptor::kFeedbackVector);
  auto slot = UncheckedParameter<Int32T>(Descriptor::kSlot);
  CollectCallFeedback(target, context, maybe_feedback_vector,
  CollectCallFeedback(target, context, feedback_vector,
                      Unsigned(ChangeInt32ToIntPtr(slot)));
  TailCallBuiltin(Builtins::kCall_ReceiverIsAny, context, target, argc);
}
@@ -434,10 +468,9 @@ TF_BUILTIN(CallWithArrayLike_WithFeedback, CallOrConstructBuiltinsAssembler) {
  base::Optional<TNode<Object>> new_target = base::nullopt;
  auto arguments_list = Parameter<Object>(Descriptor::kArgumentsList);
  auto context = Parameter<Context>(Descriptor::kContext);
  auto maybe_feedback_vector =
      Parameter<HeapObject>(Descriptor::kMaybeFeedbackVector);
  auto feedback_vector = Parameter<FeedbackVector>(Descriptor::kFeedbackVector);
  auto slot = UncheckedParameter<Int32T>(Descriptor::kSlot);
  CollectCallFeedback(target, context, maybe_feedback_vector,
  CollectCallFeedback(target, context, feedback_vector,
                      Unsigned(ChangeInt32ToIntPtr(slot)));
  CallOrConstructWithArrayLike(target, new_target, arguments_list, context);
}
@@ -451,16 +484,28 @@ TF_BUILTIN(CallWithSpread, CallOrConstructBuiltinsAssembler) {
  CallOrConstructWithSpread(target, new_target, spread, args_count, context);
}

TF_BUILTIN(CallWithSpread_Baseline, CallOrConstructBuiltinsAssembler) {
  auto target = Parameter<Object>(Descriptor::kTarget);
  base::Optional<TNode<Object>> new_target = base::nullopt;
  auto spread = Parameter<Object>(Descriptor::kSpread);
  auto args_count = UncheckedParameter<Int32T>(Descriptor::kArgumentsCount);
  auto context = LoadContextFromBaseline();
  auto feedback_vector = LoadFeedbackVectorFromBaseline();
  auto slot = UncheckedParameter<Int32T>(Descriptor::kSlot);
  CollectCallFeedback(target, context, feedback_vector,
                      Unsigned(ChangeInt32ToIntPtr(slot)));
  CallOrConstructWithSpread(target, new_target, spread, args_count, context);
}

TF_BUILTIN(CallWithSpread_WithFeedback, CallOrConstructBuiltinsAssembler) {
  auto target = Parameter<Object>(Descriptor::kTarget);
  base::Optional<TNode<Object>> new_target = base::nullopt;
  auto spread = Parameter<Object>(Descriptor::kSpread);
  auto args_count = UncheckedParameter<Int32T>(Descriptor::kArgumentsCount);
  auto context = Parameter<Context>(Descriptor::kContext);
  auto maybe_feedback_vector =
      Parameter<HeapObject>(Descriptor::kMaybeFeedbackVector);
  auto feedback_vector = Parameter<FeedbackVector>(Descriptor::kFeedbackVector);
  auto slot = UncheckedParameter<Int32T>(Descriptor::kSlot);
  CollectCallFeedback(target, context, maybe_feedback_vector,
  CollectCallFeedback(target, context, feedback_vector,
                      Unsigned(ChangeInt32ToIntPtr(slot)));
  CallOrConstructWithSpread(target, new_target, spread, args_count, context);
}
@@ -37,18 +37,46 @@ void Builtins::Generate_ConstructFunctionForwardVarargs(MacroAssembler* masm) {
      BUILTIN_CODE(masm->isolate(), ConstructFunction));
}

// TODO(v8:11429): Here and below, consider sharing code with Foo_WithFeedback,
// or removing the latter entirely.
TF_BUILTIN(Construct_Baseline, CallOrConstructBuiltinsAssembler) {
  auto target = Parameter<Object>(Descriptor::kTarget);
  auto new_target = Parameter<Object>(Descriptor::kNewTarget);
  auto argc = UncheckedParameter<Int32T>(Descriptor::kActualArgumentsCount);
  auto slot = UncheckedParameter<Int32T>(Descriptor::kSlot);

  // TODO(v8:11429,verwaest): Only emit context loads where necessary
  auto context = LoadContextFromBaseline();
  // TODO(v8:11429,verwaest): Make sure CollectConstructFeedback knows we have a
  // feedback vector.
  auto feedback_vector = LoadFeedbackVectorFromBaseline();

  TVARIABLE(AllocationSite, allocation_site);
  Label if_construct_generic(this), if_construct_array(this);
  CollectConstructFeedback(context, target, new_target, feedback_vector,
                           Unsigned(ChangeInt32ToIntPtr(slot)),
                           &if_construct_generic, &if_construct_array,
                           &allocation_site);

  BIND(&if_construct_generic);
  TailCallBuiltin(Builtins::kConstruct, context, target, new_target, argc);

  BIND(&if_construct_array);
  TailCallBuiltin(Builtins::kArrayConstructorImpl, context, target, new_target,
                  argc, allocation_site.value());
}

TF_BUILTIN(Construct_WithFeedback, CallOrConstructBuiltinsAssembler) {
  auto target = Parameter<Object>(Descriptor::kTarget);
  auto new_target = Parameter<Object>(Descriptor::kNewTarget);
  auto argc = UncheckedParameter<Int32T>(Descriptor::kActualArgumentsCount);
  auto context = Parameter<Context>(Descriptor::kContext);
  auto maybe_feedback_vector =
      Parameter<HeapObject>(Descriptor::kMaybeFeedbackVector);
  auto feedback_vector = Parameter<FeedbackVector>(Descriptor::kFeedbackVector);
  auto slot = UncheckedParameter<Int32T>(Descriptor::kSlot);

  TVARIABLE(AllocationSite, allocation_site);
  Label if_construct_generic(this), if_construct_array(this);
  CollectConstructFeedback(context, target, new_target, maybe_feedback_vector,
  CollectConstructFeedback(context, target, new_target, feedback_vector,
                           Unsigned(ChangeInt32ToIntPtr(slot)),
                           &if_construct_generic, &if_construct_array,
                           &allocation_site);
@@ -75,13 +103,12 @@ TF_BUILTIN(ConstructWithArrayLike_WithFeedback,
  auto new_target = Parameter<Object>(Descriptor::kNewTarget);
  auto arguments_list = Parameter<Object>(Descriptor::kArgumentsList);
  auto context = Parameter<Context>(Descriptor::kContext);
  auto maybe_feedback_vector =
      Parameter<HeapObject>(Descriptor::kMaybeFeedbackVector);
  auto feedback_vector = Parameter<FeedbackVector>(Descriptor::kFeedbackVector);
  auto slot = UncheckedParameter<Int32T>(Descriptor::kSlot);

  TVARIABLE(AllocationSite, allocation_site);
  Label if_construct_generic(this), if_construct_array(this);
  CollectConstructFeedback(context, target, new_target, maybe_feedback_vector,
  CollectConstructFeedback(context, target, new_target, feedback_vector,
                           Unsigned(ChangeInt32ToIntPtr(slot)),
                           &if_construct_generic, &if_construct_array,
                           &allocation_site);
@@ -103,6 +130,34 @@ TF_BUILTIN(ConstructWithSpread, CallOrConstructBuiltinsAssembler) {
  CallOrConstructWithSpread(target, new_target, spread, args_count, context);
}

TF_BUILTIN(ConstructWithSpread_Baseline, CallOrConstructBuiltinsAssembler) {
  auto target = Parameter<Object>(Descriptor::kTarget);
  auto new_target = Parameter<Object>(Descriptor::kNewTarget);
  auto spread = Parameter<Object>(Descriptor::kSpread);
  auto args_count =
      UncheckedParameter<Int32T>(Descriptor::kActualArgumentsCount);
  auto slot = UncheckedParameter<Int32T>(Descriptor::kSlot);

  // TODO(v8:11429,verwaest): Only emit context loads where necessary
  auto context = LoadContextFromBaseline();
  // TODO(v8:11429,verwaest): Make sure CollectConstructFeedback knows we have a
  // feedback vector.
  auto feedback_vector = LoadFeedbackVectorFromBaseline();

  TVARIABLE(AllocationSite, allocation_site);
  Label if_construct_generic(this), if_construct_array(this);
  CollectConstructFeedback(context, target, new_target, feedback_vector,
                           Unsigned(ChangeInt32ToIntPtr(slot)),
                           &if_construct_generic, &if_construct_array,
                           &allocation_site);

  BIND(&if_construct_array);
  Goto(&if_construct_generic);  // Not implemented.

  BIND(&if_construct_generic);
  CallOrConstructWithSpread(target, new_target, spread, args_count, context);
}

TF_BUILTIN(ConstructWithSpread_WithFeedback, CallOrConstructBuiltinsAssembler) {
  auto target = Parameter<Object>(Descriptor::kTarget);
  auto new_target = Parameter<Object>(Descriptor::kNewTarget);
@@ -110,13 +165,12 @@ TF_BUILTIN(ConstructWithSpread_WithFeedback, CallOrConstructBuiltinsAssembler) {
  auto args_count =
      UncheckedParameter<Int32T>(Descriptor::kActualArgumentsCount);
  auto context = Parameter<Context>(Descriptor::kContext);
  auto maybe_feedback_vector =
      Parameter<HeapObject>(Descriptor::kMaybeFeedbackVector);
  auto feedback_vector = Parameter<HeapObject>(Descriptor::kFeedbackVector);
  auto slot = UncheckedParameter<Int32T>(Descriptor::kSlot);

  TVARIABLE(AllocationSite, allocation_site);
  Label if_construct_generic(this), if_construct_array(this);
  CollectConstructFeedback(context, target, new_target, maybe_feedback_vector,
  CollectConstructFeedback(context, target, new_target, feedback_vector,
                           Unsigned(ChangeInt32ToIntPtr(slot)),
                           &if_construct_generic, &if_construct_array,
                           &allocation_site);
@ -50,6 +50,9 @@ namespace internal {
|
||||
ASM(Call_ReceiverIsNullOrUndefined, CallTrampoline) \
|
||||
ASM(Call_ReceiverIsNotNullOrUndefined, CallTrampoline) \
|
||||
ASM(Call_ReceiverIsAny, CallTrampoline) \
|
||||
TFC(Call_ReceiverIsNullOrUndefined_Baseline, CallTrampoline_Baseline) \
|
||||
TFC(Call_ReceiverIsNotNullOrUndefined_Baseline, CallTrampoline_Baseline) \
|
||||
TFC(Call_ReceiverIsAny_Baseline, CallTrampoline_Baseline) \
|
||||
TFC(Call_ReceiverIsNullOrUndefined_WithFeedback, \
|
||||
CallTrampoline_WithFeedback) \
|
||||
TFC(Call_ReceiverIsNotNullOrUndefined_WithFeedback, \
|
||||
@ -60,6 +63,7 @@ namespace internal {
|
||||
TFC(CallProxy, CallTrampoline) \
|
||||
ASM(CallVarargs, CallVarargs) \
|
||||
TFC(CallWithSpread, CallWithSpread) \
|
||||
TFC(CallWithSpread_Baseline, CallWithSpread_Baseline) \
|
||||
TFC(CallWithSpread_WithFeedback, CallWithSpread_WithFeedback) \
|
||||
TFC(CallWithArrayLike, CallWithArrayLike) \
|
||||
TFC(CallWithArrayLike_WithFeedback, CallWithArrayLike_WithFeedback) \
|
||||
@ -82,12 +86,14 @@ namespace internal {
|
||||
ASM(Construct, JSTrampoline) \
|
||||
ASM(ConstructVarargs, ConstructVarargs) \
|
||||
TFC(ConstructWithSpread, ConstructWithSpread) \
|
||||
TFC(ConstructWithSpread_Baseline, ConstructWithSpread_Baseline) \
|
||||
TFC(ConstructWithSpread_WithFeedback, ConstructWithSpread_WithFeedback) \
|
||||
TFC(ConstructWithArrayLike, ConstructWithArrayLike) \
|
||||
TFC(ConstructWithArrayLike_WithFeedback, \
|
||||
ConstructWithArrayLike_WithFeedback) \
|
||||
ASM(ConstructForwardVarargs, ConstructForwardVarargs) \
|
||||
ASM(ConstructFunctionForwardVarargs, ConstructForwardVarargs) \
|
||||
TFC(Construct_Baseline, Construct_Baseline) \
|
||||
TFC(Construct_WithFeedback, Construct_WithFeedback) \
|
||||
ASM(JSConstructStubGeneric, Dummy) \
|
||||
ASM(JSBuiltinsConstructStub, Dummy) \
|
||||
@ -131,6 +137,11 @@ namespace internal {
|
||||
ASM(InterpreterEnterBytecodeDispatch, Dummy) \
|
||||
ASM(InterpreterOnStackReplacement, ContextOnly) \
|
||||
\
|
||||
/* Baseline Compiler */ \
|
||||
ASM(BaselineOutOfLinePrologue, BaselineOutOfLinePrologue) \
|
||||
ASM(BaselineOnStackReplacement, ContextOnly) \
|
||||
ASM(BaselineLeaveFrame, BaselineLeaveFrame) \
|
||||
\
|
||||
/* Code life-cycle */ \
|
||||
TFC(CompileLazy, JSTrampoline) \
|
||||
TFC(CompileLazyDeoptimizedCode, JSTrampoline) \
|
||||
@ -202,6 +213,8 @@ namespace internal {
|
||||
/* Type conversions continuations */ \
|
||||
TFC(ToBooleanLazyDeoptContinuation, SingleParameterOnStack) \
|
||||
\
|
||||
ASM(TailCallOptimizedCodeSlot, TailCallOptimizedCodeSlot) \
|
||||
\
|
||||
/* Handlers */ \
|
||||
TFH(KeyedLoadIC_PolymorphicName, LoadWithVector) \
|
||||
TFH(KeyedStoreIC_Megamorphic, Store) \
|
||||
@ -541,26 +554,41 @@ namespace internal {
|
||||
TFH(LoadIC_Megamorphic, LoadWithVector) \
|
||||
TFH(LoadIC_Noninlined, LoadWithVector) \
|
||||
TFH(LoadICTrampoline, Load) \
|
||||
TFH(LoadICBaseline, LoadBaseline) \
|
||||
TFH(LoadICTrampoline_Megamorphic, Load) \
|
||||
TFH(LoadSuperIC, LoadWithReceiverAndVector) \
|
||||
TFH(LoadSuperICBaseline, LoadWithReceiverBaseline) \
|
||||
TFH(KeyedLoadIC, LoadWithVector) \
|
||||
TFH(KeyedLoadIC_Megamorphic, LoadWithVector) \
|
||||
TFH(KeyedLoadICTrampoline, Load) \
|
||||
TFH(KeyedLoadICBaseline, LoadBaseline) \
|
||||
TFH(KeyedLoadICTrampoline_Megamorphic, Load) \
|
||||
TFH(StoreGlobalIC, StoreGlobalWithVector) \
|
||||
TFH(StoreGlobalICTrampoline, StoreGlobal) \
|
||||
TFH(StoreGlobalICBaseline, StoreGlobalBaseline) \
|
||||
TFH(StoreIC, StoreWithVector) \
|
||||
TFH(StoreICTrampoline, Store) \
|
||||
TFH(StoreICBaseline, StoreBaseline) \
|
||||
TFH(KeyedStoreIC, StoreWithVector) \
|
||||
TFH(KeyedStoreICTrampoline, Store) \
|
||||
TFH(KeyedStoreICBaseline, StoreBaseline) \
|
||||
TFH(StoreInArrayLiteralIC, StoreWithVector) \
|
||||
TFH(StoreInArrayLiteralICBaseline, StoreBaseline) \
|
||||
TFH(LookupContextBaseline, LookupBaseline) \
|
||||
TFH(LookupContextInsideTypeofBaseline, LookupBaseline) \
|
||||
TFH(LoadGlobalIC, LoadGlobalWithVector) \
|
||||
TFH(LoadGlobalICInsideTypeof, LoadGlobalWithVector) \
|
||||
TFH(LoadGlobalICTrampoline, LoadGlobal) \
|
||||
TFH(LoadGlobalICBaseline, LoadGlobalBaseline) \
|
||||
TFH(LoadGlobalICInsideTypeofTrampoline, LoadGlobal) \
|
||||
TFH(LoadGlobalICInsideTypeofBaseline, LoadGlobalBaseline) \
|
||||
TFH(LookupGlobalICBaseline, LookupBaseline) \
|
||||
TFH(LookupGlobalICInsideTypeofBaseline, LookupBaseline) \
|
||||
TFH(CloneObjectIC, CloneObjectWithVector) \
|
||||
TFH(CloneObjectICBaseline, CloneObjectBaseline) \
|
||||
TFH(CloneObjectIC_Slow, CloneObjectWithVector) \
|
||||
TFH(KeyedHasIC, LoadWithVector) \
|
||||
TFH(KeyedHasICBaseline, LoadBaseline) \
|
||||
TFH(KeyedHasIC_Megamorphic, LoadWithVector) \
|
||||
\
|
||||
/* IterableToList */ \
|
||||
@ -606,6 +634,19 @@ namespace internal {
|
||||
TFC(SameValueNumbersOnly, Compare) \
|
||||
\
|
||||
/* Binary ops with feedback collection */ \
|
||||
TFC(Add_Baseline, BinaryOp_Baseline) \
|
||||
TFC(Subtract_Baseline, BinaryOp_Baseline) \
|
||||
TFC(Multiply_Baseline, BinaryOp_Baseline) \
|
||||
TFC(Divide_Baseline, BinaryOp_Baseline) \
|
||||
TFC(Modulus_Baseline, BinaryOp_Baseline) \
|
||||
TFC(Exponentiate_Baseline, BinaryOp_Baseline) \
|
||||
TFC(BitwiseAnd_Baseline, BinaryOp_Baseline) \
|
||||
TFC(BitwiseOr_Baseline, BinaryOp_Baseline) \
|
||||
TFC(BitwiseXor_Baseline, BinaryOp_Baseline) \
|
||||
TFC(ShiftLeft_Baseline, BinaryOp_Baseline) \
|
||||
TFC(ShiftRight_Baseline, BinaryOp_Baseline) \
|
||||
TFC(ShiftRightLogical_Baseline, BinaryOp_Baseline) \
|
||||
\
|
||||
TFC(Add_WithFeedback, BinaryOp_WithFeedback) \
|
||||
TFC(Subtract_WithFeedback, BinaryOp_WithFeedback) \
|
||||
TFC(Multiply_WithFeedback, BinaryOp_WithFeedback) \
|
||||
@ -620,6 +661,13 @@ namespace internal {
|
||||
TFC(ShiftRightLogical_WithFeedback, BinaryOp_WithFeedback) \
|
||||
\
|
||||
/* Compare ops with feedback collection */ \
|
||||
TFC(Equal_Baseline, Compare_Baseline) \
|
||||
TFC(StrictEqual_Baseline, Compare_Baseline) \
|
||||
TFC(LessThan_Baseline, Compare_Baseline) \
|
||||
TFC(GreaterThan_Baseline, Compare_Baseline) \
|
||||
TFC(LessThanOrEqual_Baseline, Compare_Baseline) \
|
||||
TFC(GreaterThanOrEqual_Baseline, Compare_Baseline) \
|
||||
\
|
||||
TFC(Equal_WithFeedback, Compare_WithFeedback) \
|
||||
TFC(StrictEqual_WithFeedback, Compare_WithFeedback) \
|
||||
TFC(LessThan_WithFeedback, Compare_WithFeedback) \
|
||||
@ -628,6 +676,10 @@ namespace internal {
|
||||
TFC(GreaterThanOrEqual_WithFeedback, Compare_WithFeedback) \
|
||||
\
|
||||
/* Unary ops with feedback collection */ \
|
||||
TFC(BitwiseNot_Baseline, UnaryOp_Baseline) \
|
||||
TFC(Decrement_Baseline, UnaryOp_Baseline) \
|
||||
TFC(Increment_Baseline, UnaryOp_Baseline) \
|
||||
TFC(Negate_Baseline, UnaryOp_Baseline) \
|
||||
TFC(BitwiseNot_WithFeedback, UnaryOp_WithFeedback) \
|
||||
TFC(Decrement_WithFeedback, UnaryOp_WithFeedback) \
|
||||
TFC(Increment_WithFeedback, UnaryOp_WithFeedback) \
|
||||
@ -668,9 +720,11 @@ namespace internal {
|
||||
TFC(OrdinaryHasInstance, Compare) \
|
||||
TFC(InstanceOf, Compare) \
|
||||
TFC(InstanceOf_WithFeedback, Compare_WithFeedback) \
|
||||
TFC(InstanceOf_Baseline, Compare_Baseline) \
|
||||
\
|
||||
/* for-in */ \
|
||||
TFS(ForInEnumerate, kReceiver) \
|
||||
TFC(ForInPrepare, ForInPrepare) \
|
||||
TFS(ForInFilter, kKey, kObject) \
|
||||
\
|
||||
/* Reflect */ \
|
||||
|
@@ -27,24 +27,33 @@ IC_BUILTIN(LoadIC_Megamorphic)
IC_BUILTIN(LoadIC_Noninlined)
IC_BUILTIN(LoadIC_NoFeedback)
IC_BUILTIN(LoadICTrampoline)
IC_BUILTIN(LoadICBaseline)
IC_BUILTIN(LoadICTrampoline_Megamorphic)
IC_BUILTIN(LoadSuperIC)
IC_BUILTIN(LoadSuperICBaseline)
IC_BUILTIN(KeyedLoadIC)
IC_BUILTIN(KeyedLoadIC_Megamorphic)
IC_BUILTIN(KeyedLoadIC_PolymorphicName)
IC_BUILTIN(KeyedLoadICTrampoline)
IC_BUILTIN(KeyedLoadICBaseline)
IC_BUILTIN(KeyedLoadICTrampoline_Megamorphic)
IC_BUILTIN(LoadGlobalIC_NoFeedback)
IC_BUILTIN(StoreGlobalIC)
IC_BUILTIN(StoreGlobalICTrampoline)
IC_BUILTIN(StoreGlobalICBaseline)
IC_BUILTIN(StoreIC)
IC_BUILTIN(StoreICTrampoline)
IC_BUILTIN(StoreICBaseline)
IC_BUILTIN(KeyedStoreIC)
IC_BUILTIN(KeyedStoreICTrampoline)
IC_BUILTIN(KeyedStoreICBaseline)
IC_BUILTIN(StoreInArrayLiteralIC)
IC_BUILTIN(StoreInArrayLiteralICBaseline)
IC_BUILTIN(CloneObjectIC)
IC_BUILTIN(CloneObjectICBaseline)
IC_BUILTIN(CloneObjectIC_Slow)
IC_BUILTIN(KeyedHasIC)
IC_BUILTIN(KeyedHasICBaseline)
IC_BUILTIN(KeyedHasIC_Megamorphic)
IC_BUILTIN(KeyedHasIC_PolymorphicName)

@@ -54,6 +63,17 @@ IC_BUILTIN_PARAM(LoadGlobalICTrampoline, LoadGlobalICTrampoline,
                 NOT_INSIDE_TYPEOF)
IC_BUILTIN_PARAM(LoadGlobalICInsideTypeofTrampoline, LoadGlobalICTrampoline,
                 INSIDE_TYPEOF)
IC_BUILTIN_PARAM(LoadGlobalICBaseline, LoadGlobalICBaseline, NOT_INSIDE_TYPEOF)
IC_BUILTIN_PARAM(LoadGlobalICInsideTypeofBaseline, LoadGlobalICBaseline,
                 INSIDE_TYPEOF)
IC_BUILTIN_PARAM(LookupGlobalICBaseline, LookupGlobalICBaseline,
                 NOT_INSIDE_TYPEOF)
IC_BUILTIN_PARAM(LookupGlobalICInsideTypeofBaseline, LookupGlobalICBaseline,
                 INSIDE_TYPEOF)
IC_BUILTIN_PARAM(LookupContextBaseline, LookupContextBaseline,
                 NOT_INSIDE_TYPEOF)
IC_BUILTIN_PARAM(LookupContextInsideTypeofBaseline, LookupContextBaseline,
                 INSIDE_TYPEOF)

TF_BUILTIN(DynamicCheckMaps, CodeStubAssembler) {
  auto map = Parameter<Map>(Descriptor::kMap);
@@ -3,6 +3,7 @@
// found in the LICENSE file.

#include "src/api/api.h"
#include "src/baseline/baseline-compiler.h"
#include "src/builtins/builtins-utils-gen.h"
#include "src/builtins/builtins.h"
#include "src/codegen/code-stub-assembler.h"
@@ -687,6 +688,20 @@ TF_BUILTIN(ForInEnumerate, CodeStubAssembler) {
  TailCallRuntime(Runtime::kForInEnumerate, context, receiver);
}

TF_BUILTIN(ForInPrepare, CodeStubAssembler) {
  // The {enumerator} is either a Map or a FixedArray.
  auto enumerator = Parameter<HeapObject>(Descriptor::kEnumerator);
  auto index = Parameter<TaggedIndex>(Descriptor::kVectorIndex);
  auto feedback_vector = Parameter<FeedbackVector>(Descriptor::kFeedbackVector);
  TNode<UintPtrT> vector_index = Unsigned(TaggedIndexToIntPtr(index));

  TNode<FixedArray> cache_array;
  TNode<Smi> cache_length;
  ForInPrepare(enumerator, vector_index, feedback_vector, &cache_array,
               &cache_length, true);
  Return(cache_array, cache_length);
}

TF_BUILTIN(ForInFilter, CodeStubAssembler) {
  auto key = Parameter<String>(Descriptor::kKey);
  auto object = Parameter<HeapObject>(Descriptor::kObject);
@@ -909,6 +924,28 @@ void Builtins::Generate_MemMove(MacroAssembler* masm) {
}
#endif  // V8_TARGET_ARCH_IA32

// TODO(v8:11421): Remove #if once baseline compiler is ported to other
// architectures.
#if V8_TARGET_ARCH_X64 || V8_TARGET_ARCH_ARM64
void Builtins::Generate_BaselineLeaveFrame(MacroAssembler* masm) {
  baseline::BaselineAssembler::EmitReturn(masm);
}
#else
// Stub out implementations of arch-specific baseline builtins.
void Builtins::Generate_BaselineOutOfLinePrologue(MacroAssembler* masm) {
  masm->Trap();
}
void Builtins::Generate_BaselineLeaveFrame(MacroAssembler* masm) {
  masm->Trap();
}
void Builtins::Generate_BaselineOnStackReplacement(MacroAssembler* masm) {
  masm->Trap();
}
void Builtins::Generate_TailCallOptimizedCodeSlot(MacroAssembler* masm) {
  masm->Trap();
}
#endif

// ES6 [[Get]] operation.
TF_BUILTIN(GetProperty, CodeStubAssembler) {
  auto object = Parameter<Object>(Descriptor::kObject);
@@ -143,13 +143,28 @@ void LazyBuiltinsAssembler::CompileLazy(TNode<JSFunction> function) {
                                  isolate(), CompileLazy))));
  StoreObjectField(function, JSFunction::kCodeOffset, sfi_code);

  Label tailcall_code(this);
  Label baseline(this);

  TVARIABLE(Code, code);

  // Check if we have baseline code.
  // TODO(v8:11429): We already know if we have baseline code in
  // GetSharedFunctionInfoCode, make that jump to here.
  TNode<Uint32T> code_flags =
      LoadObjectField<Uint32T>(sfi_code, Code::kFlagsOffset);
  TNode<Uint32T> code_kind = DecodeWord32<Code::KindField>(code_flags);
  TNode<BoolT> is_baseline =
      IsEqualInWord32<Code::KindField>(code_kind, CodeKind::SPARKPLUG);
  GotoIf(is_baseline, &baseline);

  // Finally, check for presence of an NCI cached Code object - if an entry
  // possibly exists, call into runtime to query the cache.
  TNode<Uint8T> flags2 =
      LoadObjectField<Uint8T>(shared, SharedFunctionInfo::kFlags2Offset);
  TNode<BoolT> may_have_cached_code =
      IsSetWord32<SharedFunctionInfo::MayHaveCachedCodeBit>(flags2);
  TNode<Code> code = Select<Code>(
  code = Select<Code>(
      may_have_cached_code,
      [=]() {
        return CAST(CallRuntime(Runtime::kTryInstallNCICode,
@@ -157,9 +172,21 @@ void LazyBuiltinsAssembler::CompileLazy(TNode<JSFunction> function) {
                                function));
      },
      [=]() { return sfi_code; });
  Goto(&tailcall_code);

  BIND(&baseline);
  // Ensure we have a feedback vector.
  code = Select<Code>(
      IsFeedbackVector(feedback_cell_value), [=]() { return sfi_code; },
      [=]() {
        return CAST(CallRuntime(Runtime::kPrepareForBaseline,
                                Parameter<Context>(Descriptor::kContext),
                                function));
      });
  Goto(&tailcall_code);
  BIND(&tailcall_code);
  // Jump to the selected code entry.
  GenerateTailCallToJSCode(code, function);
  GenerateTailCallToJSCode(code.value(), function);

  BIND(&compile_function);
  GenerateTailCallToReturnedCode(Runtime::kCompileLazy, function);

@@ -14,20 +14,21 @@ namespace internal {
// -----------------------------------------------------------------------------
// ES6 section 20.1 Number Objects

#define DEF_BINOP(Name, Generator) \
  TF_BUILTIN(Name, CodeStubAssembler) { \
    auto lhs = Parameter<Object>(Descriptor::kLeft); \
    auto rhs = Parameter<Object>(Descriptor::kRight); \
    auto context = Parameter<Context>(Descriptor::kContext); \
    auto maybe_feedback_vector = \
        Parameter<HeapObject>(Descriptor::kMaybeFeedbackVector); \
    auto slot = UncheckedParameter<UintPtrT>(Descriptor::kSlot); \
    \
    BinaryOpAssembler binop_asm(state()); \
    TNode<Object> result = binop_asm.Generator(context, lhs, rhs, slot, \
                                               maybe_feedback_vector, false); \
    \
    Return(result); \
#define DEF_BINOP(Name, Generator) \
  TF_BUILTIN(Name, CodeStubAssembler) { \
    auto lhs = Parameter<Object>(Descriptor::kLeft); \
    auto rhs = Parameter<Object>(Descriptor::kRight); \
    auto context = Parameter<Context>(Descriptor::kContext); \
    auto feedback_vector = \
        Parameter<FeedbackVector>(Descriptor::kFeedbackVector); \
    auto slot = UncheckedParameter<UintPtrT>(Descriptor::kSlot); \
    \
    BinaryOpAssembler binop_asm(state()); \
    TNode<Object> result = \
        binop_asm.Generator([&]() { return context; }, lhs, rhs, slot, \
                            [&]() { return feedback_vector; }, true, false); \
    \
    Return(result); \
  }
DEF_BINOP(Add_WithFeedback, Generate_AddWithFeedback)
DEF_BINOP(Subtract_WithFeedback, Generate_SubtractWithFeedback)
@@ -44,17 +45,44 @@ DEF_BINOP(ShiftRightLogical_WithFeedback,
          Generate_ShiftRightLogicalWithFeedback)
#undef DEF_BINOP
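
// Illustrative aside: the Generator signature change above passes context and
// feedback vector as thunks, so callers that already hold them wrap values in
// trivial lambdas while _Baseline builtins can defer the frame loads until a
// slow path needs them. A standalone sketch of the pattern (invented names):
#include <functional>
#include <iostream>

int AddWithFeedback(int lhs, int rhs,
                    const std::function<int()>& lazy_feedback_vector) {
  int result = lhs + rhs;
  bool needs_feedback_update = true;  // assume the slow path for the demo
  if (needs_feedback_update) {
    // The lookup only happens here, on demand.
    std::cout << "loaded vector " << lazy_feedback_vector() << '\n';
  }
  return result;
}

int main() {
  AddWithFeedback(1, 2, [] { return 42; });  // eager caller wraps a value
  AddWithFeedback(3, 4, [] {                 // baseline caller defers a load
    std::cout << "(loading from baseline frame) ";
    return 7;
  });
}
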
|
||||
|
||||
#define DEF_BINOP(Name, Generator) \
|
||||
TF_BUILTIN(Name, CodeStubAssembler) { \
|
||||
auto lhs = Parameter<Object>(Descriptor::kLeft); \
|
||||
auto rhs = Parameter<Object>(Descriptor::kRight); \
|
||||
auto slot = UncheckedParameter<UintPtrT>(Descriptor::kSlot); \
|
||||
\
|
||||
BinaryOpAssembler binop_asm(state()); \
|
||||
TNode<Object> result = binop_asm.Generator( \
|
||||
[&]() { return LoadContextFromBaseline(); }, lhs, rhs, slot, \
|
||||
[&]() { return LoadFeedbackVectorFromBaseline(); }, true, false); \
|
||||
\
|
||||
Return(result); \
|
||||
}
|
||||
DEF_BINOP(Add_Baseline, Generate_AddWithFeedback)
|
||||
DEF_BINOP(Subtract_Baseline, Generate_SubtractWithFeedback)
|
||||
DEF_BINOP(Multiply_Baseline, Generate_MultiplyWithFeedback)
|
||||
DEF_BINOP(Divide_Baseline, Generate_DivideWithFeedback)
|
||||
DEF_BINOP(Modulus_Baseline, Generate_ModulusWithFeedback)
|
||||
DEF_BINOP(Exponentiate_Baseline, Generate_ExponentiateWithFeedback)
|
||||
DEF_BINOP(BitwiseOr_Baseline, Generate_BitwiseOrWithFeedback)
|
||||
DEF_BINOP(BitwiseXor_Baseline, Generate_BitwiseXorWithFeedback)
|
||||
DEF_BINOP(BitwiseAnd_Baseline, Generate_BitwiseAndWithFeedback)
|
||||
DEF_BINOP(ShiftLeft_Baseline, Generate_ShiftLeftWithFeedback)
|
||||
DEF_BINOP(ShiftRight_Baseline, Generate_ShiftRightWithFeedback)
|
||||
DEF_BINOP(ShiftRightLogical_Baseline, Generate_ShiftRightLogicalWithFeedback)
|
||||
#undef DEF_BINOP
|
||||
|
||||
#define DEF_UNOP(Name, Generator)                                  \
  TF_BUILTIN(Name, CodeStubAssembler) {                            \
    auto value = Parameter<Object>(Descriptor::kValue);            \
    auto context = Parameter<Context>(Descriptor::kContext);       \
    auto maybe_feedback_vector =                                   \
        Parameter<HeapObject>(Descriptor::kMaybeFeedbackVector);   \
    auto feedback_vector =                                         \
        Parameter<FeedbackVector>(Descriptor::kFeedbackVector);    \
    auto slot = UncheckedParameter<UintPtrT>(Descriptor::kSlot);   \
                                                                   \
    UnaryOpAssembler a(state());                                   \
    TNode<Object> result =                                         \
        a.Generator(context, value, slot, maybe_feedback_vector);  \
        a.Generator(context, value, slot, feedback_vector, true);  \
                                                                   \
    Return(result);                                                \
  }
@ -64,19 +92,38 @@ DEF_UNOP(Increment_WithFeedback, Generate_IncrementWithFeedback)
DEF_UNOP(Negate_WithFeedback, Generate_NegateWithFeedback)
#undef DEF_UNOP

#define DEF_UNOP(Name, Generator)                                  \
  TF_BUILTIN(Name, CodeStubAssembler) {                            \
    auto value = Parameter<Object>(Descriptor::kValue);            \
    auto context = LoadContextFromBaseline();                      \
    auto feedback_vector = LoadFeedbackVectorFromBaseline();       \
    auto slot = UncheckedParameter<UintPtrT>(Descriptor::kSlot);   \
                                                                   \
    UnaryOpAssembler a(state());                                   \
    TNode<Object> result =                                         \
        a.Generator(context, value, slot, feedback_vector, true);  \
                                                                   \
    Return(result);                                                \
  }
DEF_UNOP(BitwiseNot_Baseline, Generate_BitwiseNotWithFeedback)
DEF_UNOP(Decrement_Baseline, Generate_DecrementWithFeedback)
DEF_UNOP(Increment_Baseline, Generate_IncrementWithFeedback)
DEF_UNOP(Negate_Baseline, Generate_NegateWithFeedback)
#undef DEF_UNOP

#define DEF_COMPARE(Name)                                                      \
  TF_BUILTIN(Name##_WithFeedback, CodeStubAssembler) {                         \
    auto lhs = Parameter<Object>(Descriptor::kLeft);                           \
    auto rhs = Parameter<Object>(Descriptor::kRight);                          \
    auto context = Parameter<Context>(Descriptor::kContext);                   \
    auto maybe_feedback_vector =                                               \
        Parameter<HeapObject>(Descriptor::kMaybeFeedbackVector);               \
    auto feedback_vector =                                                     \
        Parameter<FeedbackVector>(Descriptor::kFeedbackVector);                \
    auto slot = UncheckedParameter<UintPtrT>(Descriptor::kSlot);               \
                                                                               \
    TVARIABLE(Smi, var_type_feedback);                                         \
    TNode<Oddball> result = RelationalComparison(Operation::k##Name, lhs, rhs, \
                                                 context, &var_type_feedback); \
    UpdateFeedback(var_type_feedback.value(), maybe_feedback_vector, slot);    \
    UpdateFeedback(var_type_feedback.value(), feedback_vector, slot);          \
                                                                               \
    Return(result);                                                            \
  }
@ -86,17 +133,38 @@ DEF_COMPARE(GreaterThan)
DEF_COMPARE(GreaterThanOrEqual)
#undef DEF_COMPARE

#define DEF_COMPARE(Name)                                                 \
  TF_BUILTIN(Name##_Baseline, CodeStubAssembler) {                        \
    auto lhs = Parameter<Object>(Descriptor::kLeft);                      \
    auto rhs = Parameter<Object>(Descriptor::kRight);                     \
    auto slot = UncheckedParameter<UintPtrT>(Descriptor::kSlot);          \
                                                                          \
    TVARIABLE(Smi, var_type_feedback);                                    \
    TNode<Oddball> result = RelationalComparison(                         \
        Operation::k##Name, lhs, rhs,                                     \
        [&]() { return LoadContextFromBaseline(); }, &var_type_feedback); \
    auto feedback_vector = LoadFeedbackVectorFromBaseline();              \
    UpdateFeedback(var_type_feedback.value(), feedback_vector, slot);     \
                                                                          \
    Return(result);                                                       \
  }
DEF_COMPARE(LessThan)
DEF_COMPARE(LessThanOrEqual)
DEF_COMPARE(GreaterThan)
DEF_COMPARE(GreaterThanOrEqual)
#undef DEF_COMPARE

TF_BUILTIN(Equal_WithFeedback, CodeStubAssembler) {
  auto lhs = Parameter<Object>(Descriptor::kLeft);
  auto rhs = Parameter<Object>(Descriptor::kRight);
  auto context = Parameter<Context>(Descriptor::kContext);
  auto maybe_feedback_vector =
      Parameter<HeapObject>(Descriptor::kMaybeFeedbackVector);
  auto feedback_vector = Parameter<FeedbackVector>(Descriptor::kFeedbackVector);
  auto slot = UncheckedParameter<UintPtrT>(Descriptor::kSlot);

  TVARIABLE(Smi, var_type_feedback);
  TNode<Oddball> result = Equal(lhs, rhs, context, &var_type_feedback);
  UpdateFeedback(var_type_feedback.value(), maybe_feedback_vector, slot);
  TNode<Oddball> result = Equal(
      lhs, rhs, [&]() { return context; }, &var_type_feedback);
  UpdateFeedback(var_type_feedback.value(), feedback_vector, slot);

  Return(result);
}
@ -104,13 +172,40 @@ TF_BUILTIN(Equal_WithFeedback, CodeStubAssembler) {
TF_BUILTIN(StrictEqual_WithFeedback, CodeStubAssembler) {
  auto lhs = Parameter<Object>(Descriptor::kLeft);
  auto rhs = Parameter<Object>(Descriptor::kRight);
  auto maybe_feedback_vector =
      Parameter<HeapObject>(Descriptor::kMaybeFeedbackVector);
  auto feedback_vector = Parameter<FeedbackVector>(Descriptor::kFeedbackVector);
  auto slot = UncheckedParameter<UintPtrT>(Descriptor::kSlot);

  TVARIABLE(Smi, var_type_feedback);
  TNode<Oddball> result = StrictEqual(lhs, rhs, &var_type_feedback);
  UpdateFeedback(var_type_feedback.value(), maybe_feedback_vector, slot);
  UpdateFeedback(var_type_feedback.value(), feedback_vector, slot);

  Return(result);
}

TF_BUILTIN(Equal_Baseline, CodeStubAssembler) {
  auto lhs = Parameter<Object>(Descriptor::kLeft);
  auto rhs = Parameter<Object>(Descriptor::kRight);
  auto slot = UncheckedParameter<UintPtrT>(Descriptor::kSlot);

  TVARIABLE(Smi, var_type_feedback);
  TNode<Oddball> result = Equal(
      lhs, rhs, [&]() { return LoadContextFromBaseline(); },
      &var_type_feedback);
  auto feedback_vector = LoadFeedbackVectorFromBaseline();
  UpdateFeedback(var_type_feedback.value(), feedback_vector, slot);

  Return(result);
}

TF_BUILTIN(StrictEqual_Baseline, CodeStubAssembler) {
  auto lhs = Parameter<Object>(Descriptor::kLeft);
  auto rhs = Parameter<Object>(Descriptor::kRight);
  auto slot = UncheckedParameter<UintPtrT>(Descriptor::kSlot);

  TVARIABLE(Smi, var_type_feedback);
  TNode<Oddball> result = StrictEqual(lhs, rhs, &var_type_feedback);
  auto feedback_vector = LoadFeedbackVectorFromBaseline();
  UpdateFeedback(var_type_feedback.value(), feedback_vector, slot);

  Return(result);
}

@ -1171,11 +1171,21 @@ TF_BUILTIN(InstanceOf_WithFeedback, ObjectBuiltinsAssembler) {
  auto object = Parameter<Object>(Descriptor::kLeft);
  auto callable = Parameter<Object>(Descriptor::kRight);
  auto context = Parameter<Context>(Descriptor::kContext);
  auto maybe_feedback_vector =
      Parameter<HeapObject>(Descriptor::kMaybeFeedbackVector);
  auto feedback_vector = Parameter<HeapObject>(Descriptor::kFeedbackVector);
  auto slot = UncheckedParameter<UintPtrT>(Descriptor::kSlot);

  CollectInstanceOfFeedback(callable, context, maybe_feedback_vector, slot);
  CollectInstanceOfFeedback(callable, context, feedback_vector, slot);
  Return(InstanceOf(object, callable, context));
}

TF_BUILTIN(InstanceOf_Baseline, ObjectBuiltinsAssembler) {
  auto object = Parameter<Object>(Descriptor::kLeft);
  auto callable = Parameter<Object>(Descriptor::kRight);
  auto context = LoadContextFromBaseline();
  auto feedback_vector = LoadFeedbackVectorFromBaseline();
  auto slot = UncheckedParameter<UintPtrT>(Descriptor::kSlot);

  CollectInstanceOfFeedback(callable, context, feedback_vector, slot);
  Return(InstanceOf(object, callable, context));
}

@ -6,11 +6,11 @@

namespace runtime {
extern runtime CreateArrayLiteral(
    Context, FeedbackVector, TaggedIndex, ArrayBoilerplateDescription,
    Smi): HeapObject;
    Context, Undefined | FeedbackVector, TaggedIndex,
    ArrayBoilerplateDescription, Smi): HeapObject;
extern runtime CreateObjectLiteral(
    Context, FeedbackVector, TaggedIndex, ObjectBoilerplateDescription,
    Smi): HeapObject;
    Context, Undefined | FeedbackVector, TaggedIndex,
    ObjectBoilerplateDescription, Smi): HeapObject;
}

namespace constructor {
@ -22,8 +22,8 @@ extern enum AllocationSiteMode {
  TRACK_ALLOCATION_SITE
}

const kIsShallowAndDisableMementos: constexpr int31
    generates 'AggregateLiteral::Flags::kIsShallowAndDisableMementos';
const kIsShallow: constexpr int31
    generates 'AggregateLiteral::Flags::kIsShallow';
const kEvalScope: constexpr ScopeType generates 'ScopeType::EVAL_SCOPE';
const kFunctionScope:
    constexpr ScopeType generates 'ScopeType::FUNCTION_SCOPE';
@ -60,17 +60,18 @@ builtin CreateRegExpLiteral(implicit context: Context)(
}

builtin CreateShallowArrayLiteral(implicit context: Context)(
    feedbackVector: FeedbackVector, slot: TaggedIndex,
    maybeFeedbackVector: Undefined|FeedbackVector, slot: TaggedIndex,
    constantElements: ArrayBoilerplateDescription): HeapObject {
  try {
    const vector = Cast<FeedbackVector>(maybeFeedbackVector)
        otherwise CallRuntime;
    return CreateShallowArrayLiteral(
        feedbackVector, slot, context,
        AllocationSiteMode::DONT_TRACK_ALLOCATION_SITE)
        vector, slot, context, AllocationSiteMode::TRACK_ALLOCATION_SITE)
        otherwise CallRuntime;
  } label CallRuntime deferred {
    tail runtime::CreateArrayLiteral(
        context, feedbackVector, slot, constantElements,
        SmiConstant(kIsShallowAndDisableMementos));
        context, maybeFeedbackVector, slot, constantElements,
        SmiConstant(kIsShallow));
  }
}

@ -80,14 +81,16 @@ builtin CreateEmptyArrayLiteral(implicit context: Context)(
}

builtin CreateShallowObjectLiteral(implicit context: Context)(
    feedbackVector: FeedbackVector, slot: TaggedIndex,
    maybeFeedbackVector: Undefined|FeedbackVector, slot: TaggedIndex,
    desc: ObjectBoilerplateDescription, flags: Smi): HeapObject {
  try {
    const feedbackVector = Cast<FeedbackVector>(maybeFeedbackVector)
        otherwise CallRuntime;
    return CreateShallowObjectLiteral(feedbackVector, slot)
        otherwise CallRuntime;
  } label CallRuntime deferred {
    tail runtime::CreateObjectLiteral(
        context, feedbackVector, slot, desc, flags);
        context, maybeFeedbackVector, slot, desc, flags);
  }
}

@ -47,28 +47,23 @@ builtin BytecodeBudgetInterruptFromCode(implicit context: Context)(
  tail runtime::BytecodeBudgetInterruptFromCode(feedbackCell);
}

extern transitioning macro ForInPrepareForTorque(
    Map | FixedArray, uintptr, Undefined | FeedbackVector): FixedArray;

transitioning builtin ForInPrepare(implicit _context: Context)(
    enumerator: Map|FixedArray, slot: uintptr,
    maybeFeedbackVector: Undefined|FeedbackVector): FixedArray {
  return ForInPrepareForTorque(enumerator, slot, maybeFeedbackVector);
}

extern transitioning builtin ForInFilter(implicit context: Context)(
    JSAny, HeapObject): JSAny;
extern enum ForInFeedback extends uint31 { kAny, ...}
extern macro UpdateFeedback(
    SmiTagged<ForInFeedback>, Undefined | FeedbackVector, uintptr);
extern macro UpdateFeedback(SmiTagged<ForInFeedback>, FeedbackVector, uintptr);
extern macro MaybeUpdateFeedback(
    SmiTagged<ForInFeedback>, Undefined | FeedbackVector, uintptr,
    constexpr bool);

@export
transitioning macro ForInNextSlow(
    context: Context, slot: uintptr, receiver: JSAnyNotSmi, key: JSAny,
    cacheType: Object, maybeFeedbackVector: Undefined|FeedbackVector): JSAny {
    cacheType: Object, maybeFeedbackVector: Undefined|FeedbackVector,
    guaranteedFeedback: constexpr bool): JSAny {
  assert(receiver.map != cacheType);  // Handled on the fast path.
  UpdateFeedback(
      SmiTag<ForInFeedback>(ForInFeedback::kAny), maybeFeedbackVector, slot);
  MaybeUpdateFeedback(
      SmiTag<ForInFeedback>(ForInFeedback::kAny), maybeFeedbackVector, slot,
      guaranteedFeedback);
  return ForInFilter(key, receiver);
}

@ -77,7 +72,7 @@ transitioning macro ForInNextSlow(
transitioning builtin ForInNext(
    context: Context, slot: uintptr, receiver: JSAnyNotSmi,
    cacheArray: FixedArray, cacheType: Object, cacheIndex: Smi,
    maybeFeedbackVector: Undefined|FeedbackVector): JSAny {
    feedbackVector: FeedbackVector): JSAny {
  // Load the next key from the enumeration array.
  const key = UnsafeCast<JSAny>(cacheArray.objects[cacheIndex]);

@ -87,7 +82,7 @@ transitioning builtin ForInNext(
  }

  return ForInNextSlow(
      context, slot, receiver, key, cacheType, maybeFeedbackVector);
      context, slot, receiver, key, cacheType, feedbackVector, true);
}

}  // namespace internal

@ -74,6 +74,20 @@ transitioning builtin GetIteratorWithFeedback(
      context, receiver, iteratorMethod, callSlotSmi, maybeFeedbackVector);
}

extern macro LoadFeedbackVectorFromBaseline(): FeedbackVector;

transitioning builtin GetIteratorBaseline(
    context: Context, receiver: JSAny, loadSlot: TaggedIndex,
    callSlot: TaggedIndex): JSAny {
  const feedback: FeedbackVector = LoadFeedbackVectorFromBaseline();
  const iteratorMethod: JSAny =
      LoadIC(context, receiver, IteratorSymbolConstant(), loadSlot, feedback);
  // TODO(v8:10047): Use TaggedIndex here once TurboFan supports it.
  const callSlotSmi: Smi = TaggedIndexToSmi(callSlot);
  return CallIteratorWithFeedback(
      context, receiver, iteratorMethod, callSlotSmi, feedback);
}

transitioning builtin CallIteratorWithFeedback(
    context: Context, receiver: JSAny, iteratorMethod: JSAny, callSlot: Smi,
    feedback: Undefined|FeedbackVector): JSAny {
@ -7,7 +7,10 @@
#include "src/api/api-arguments.h"
#include "src/base/bits-iterator.h"
#include "src/base/iterator.h"
#include "src/baseline/baseline-compiler.h"
#include "src/codegen/code-factory.h"
#include "src/common/globals.h"
#include "src/objects/code.h"
// For interpreter_entry_return_pc_offset. TODO(jkummerow): Drop.
#include "src/codegen/macro-assembler-inl.h"
#include "src/codegen/register-configuration.h"
@ -642,12 +645,18 @@ void Builtins::Generate_RunMicrotasksTrampoline(MacroAssembler* masm) {
  __ Jump(BUILTIN_CODE(masm->isolate(), RunMicrotasks), RelocInfo::CODE_TARGET);
}

static void GetSharedFunctionInfoBytecode(MacroAssembler* masm,
                                          Register sfi_data,
                                          Register scratch1) {
static void GetSharedFunctionInfoBytecodeOrBaseline(MacroAssembler* masm,
                                                    Register sfi_data,
                                                    Register scratch1,
                                                    Label* is_baseline) {
  Label done;

  __ CmpObjectType(sfi_data, INTERPRETER_DATA_TYPE, scratch1);
  __ LoadMap(scratch1, sfi_data);

  __ CmpInstanceType(scratch1, BASELINE_DATA_TYPE);
  __ j(equal, is_baseline);

  __ CmpInstanceType(scratch1, INTERPRETER_DATA_TYPE);
  __ j(not_equal, &done, Label::kNear);

  __ LoadTaggedPointerField(
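As a sketch, the control flow the renamed helper implements (C++-like pseudocode, not literal source):

// GetSharedFunctionInfoBytecodeOrBaseline(sfi_data):
//   map = sfi_data->map()
//   if (map->instance_type() == BASELINE_DATA_TYPE) goto *is_baseline
//   if (map->instance_type() == INTERPRETER_DATA_TYPE)
//     sfi_data = sfi_data->bytecode_array()  // unwrap InterpreterData
//   // otherwise sfi_data is assumed to already be the BytecodeArray
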
@ -745,13 +754,22 @@ void Builtins::Generate_ResumeGeneratorTrampoline(MacroAssembler* masm) {

  // Underlying function needs to have bytecode available.
  if (FLAG_debug_code) {
    Label is_baseline, ok;
    __ LoadTaggedPointerField(
        rcx, FieldOperand(rdi, JSFunction::kSharedFunctionInfoOffset));
    __ LoadTaggedPointerField(
        rcx, FieldOperand(rcx, SharedFunctionInfo::kFunctionDataOffset));
    GetSharedFunctionInfoBytecode(masm, rcx, kScratchRegister);
    GetSharedFunctionInfoBytecodeOrBaseline(masm, rcx, kScratchRegister,
                                            &is_baseline);
    __ CmpObjectType(rcx, BYTECODE_ARRAY_TYPE, rcx);
    __ Assert(equal, AbortReason::kMissingBytecodeArray);
    __ jmp(&ok);

    __ bind(&is_baseline);
    __ CmpObjectType(rcx, BASELINE_DATA_TYPE, rcx);
    __ Assert(equal, AbortReason::kMissingBytecodeArray);

    __ bind(&ok);
  }

  // Resume (Ignition/TurboFan) generator object.
@ -1046,8 +1064,10 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
  __ LoadTaggedPointerField(
      kInterpreterBytecodeArrayRegister,
      FieldOperand(kScratchRegister, SharedFunctionInfo::kFunctionDataOffset));
  GetSharedFunctionInfoBytecode(masm, kInterpreterBytecodeArrayRegister,
                                kScratchRegister);

  Label is_baseline;
  GetSharedFunctionInfoBytecodeOrBaseline(
      masm, kInterpreterBytecodeArrayRegister, kScratchRegister, &is_baseline);

  // The bytecode array could have been flushed from the shared function info;
  // if so, call into CompileLazy.
@ -1065,8 +1085,7 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
  Label push_stack_frame;
  // Check if the feedback vector is valid. If valid, check for optimized code
  // and update the invocation count. Otherwise, set up the stack frame.
  __ LoadTaggedPointerField(
      rcx, FieldOperand(feedback_vector, HeapObject::kMapOffset));
  __ LoadMap(rcx, feedback_vector);
  __ CmpInstanceType(rcx, FEEDBACK_VECTOR_TYPE);
  __ j(not_equal, &push_stack_frame);

@ -1253,6 +1272,51 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
      FieldOperand(feedback_vector, FeedbackVector::kMaybeOptimizedCodeOffset));
  TailCallOptimizedCodeSlot(masm, optimized_code_entry, r11, r15);

  __ bind(&is_baseline);
  {
    // Load the feedback vector from the closure.
    __ LoadTaggedPointerField(
        feedback_vector,
        FieldOperand(closure, JSFunction::kFeedbackCellOffset));
    __ LoadTaggedPointerField(
        feedback_vector, FieldOperand(feedback_vector, Cell::kValueOffset));

    Label prepare_for_baseline;
    // Check if the feedback vector is valid. If not, call PrepareForBaseline
    // to allocate it.
    __ LoadMap(rcx, feedback_vector);
    __ CmpInstanceType(rcx, FEEDBACK_VECTOR_TYPE);
    __ j(not_equal, &prepare_for_baseline);

    // Read off the optimization state in the feedback vector.
    // TODO(v8:11429): Is this worth doing here? Baseline code will check it
    // anyway...
    optimization_state = rcx;
    __ movl(optimization_state,
            FieldOperand(feedback_vector, FeedbackVector::kFlagsOffset));

    // Check if there is optimized code or an optimization marker that needs
    // to be processed.
    __ testl(
        optimization_state,
        Immediate(
            FeedbackVector::kHasOptimizedCodeOrCompileOptimizedMarkerMask));
    __ j(not_zero, &has_optimized_code_or_marker);

    // Load the baseline code into the closure.
    __ LoadTaggedPointerField(rcx,
                              FieldOperand(kInterpreterBytecodeArrayRegister,
                                           BaselineData::kBaselineCodeOffset));
    static_assert(kJavaScriptCallCodeStartRegister == rcx, "ABI mismatch");
    ReplaceClosureCodeWithOptimizedCode(masm, rcx, closure,
                                        kInterpreterBytecodeArrayRegister,
                                        kInterpreterBytecodeOffsetRegister);
    __ JumpCodeObject(rcx);

    __ bind(&prepare_for_baseline);
    GenerateTailCallToReturnedCode(masm, Runtime::kPrepareForBaseline);
  }

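Summarizing the is_baseline block above as pseudocode (a sketch; names follow the assembly):

// vector = closure->feedback_cell()->value()
// if (!IsFeedbackVector(vector))
//   tail-call Runtime::kPrepareForBaseline  // allocate the vector first
// else if (vector->flags() & kHasOptimizedCodeOrCompileOptimizedMarkerMask)
//   goto has_optimized_code_or_marker
// else
//   closure->code = baseline_data->baseline_code; jump to it
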
  __ bind(&stack_overflow);
  __ CallRuntime(Runtime::kThrowStackOverflow);
  __ int3();  // Should not return.
@ -1536,6 +1600,161 @@ void Builtins::Generate_InterpreterEnterBytecodeDispatch(MacroAssembler* masm) {
  Generate_InterpreterEnterBytecode(masm);
}

// static
void Builtins::Generate_BaselineOutOfLinePrologue(MacroAssembler* masm) {
  auto descriptor = Builtins::CallInterfaceDescriptorFor(
      Builtins::kBaselineOutOfLinePrologue);
  Register closure = descriptor.GetRegisterParameter(
      BaselineOutOfLinePrologueDescriptor::kClosure);
  // Load the feedback vector from the closure.
  Register feedback_vector = rbx;
  __ LoadTaggedPointerField(
      feedback_vector, FieldOperand(closure, JSFunction::kFeedbackCellOffset));
  __ LoadTaggedPointerField(feedback_vector,
                            FieldOperand(feedback_vector, Cell::kValueOffset));
  if (__ emit_debug_code()) {
    __ CmpObjectType(feedback_vector, FEEDBACK_VECTOR_TYPE, kScratchRegister);
    __ Assert(equal, AbortReason::kExpectedFeedbackVector);
  }

  __ RecordComment("[ Check optimization state");

  // Read off the optimization state in the feedback vector.
  Register optimization_state = rcx;
  __ movl(optimization_state,
          FieldOperand(feedback_vector, FeedbackVector::kFlagsOffset));

  // Check if there is optimized code or an optimization marker that needs to
  // be processed.
  Label has_optimized_code_or_marker;
  __ testl(
      optimization_state,
      Immediate(FeedbackVector::kHasOptimizedCodeOrCompileOptimizedMarkerMask));
  __ j(not_zero, &has_optimized_code_or_marker);
  __ RecordComment("]");

  // Increment the invocation count for the function.
  __ incl(
      FieldOperand(feedback_vector, FeedbackVector::kInvocationCountOffset));

  // Normally r12 is callee saved, but since this isn't a "real" call, we know
  // that the baseline code doesn't care about r12, so we can reuse it here.
  Register return_address = r12;

  __ RecordComment("[ Frame Setup");
  // Save the return address, so that we can push it to the end of the newly
  // set-up frame once we're done setting it up.
  __ PopReturnAddressTo(return_address);
  FrameScope frame_scope(masm, StackFrame::MANUAL);
  __ EnterFrame(StackFrame::MANUAL);

  __ Push(descriptor.GetRegisterParameter(
      BaselineOutOfLinePrologueDescriptor::kCalleeContext));  // Callee's
                                                              // context.
  Register callee_js_function = descriptor.GetRegisterParameter(
      BaselineOutOfLinePrologueDescriptor::kClosure);
  DCHECK_EQ(callee_js_function, kJavaScriptCallTargetRegister);
  DCHECK_EQ(callee_js_function, kJSFunctionRegister);
  __ Push(callee_js_function);  // Callee's JS function.
  __ Push(descriptor.GetRegisterParameter(
      BaselineOutOfLinePrologueDescriptor::
          kJavaScriptCallArgCount));  // Actual argument
                                      // count.

  // We'll use the bytecode array both for code age/OSR resetting and for
  // pushing onto the frame, so load it into a register.
  Register bytecode_array = descriptor.GetRegisterParameter(
      BaselineOutOfLinePrologueDescriptor::kInterpreterBytecodeArray);

  // Reset code age and the OSR arming. The OSR field and BytecodeAgeOffset
  // are 8-bit fields next to each other, so we can optimize by doing a single
  // 16-bit write. These static asserts guard that this assumption is valid.
  STATIC_ASSERT(BytecodeArray::kBytecodeAgeOffset ==
                BytecodeArray::kOsrNestingLevelOffset + kCharSize);
  STATIC_ASSERT(BytecodeArray::kNoAgeBytecodeAge == 0);
  __ movw(FieldOperand(bytecode_array, BytecodeArray::kOsrNestingLevelOffset),
          Immediate(0));
  __ Push(bytecode_array);

  // Horrible hack: This should be the bytecode offset, but we calculate that
  // from the PC, so we cache the feedback vector in there instead.
  __ Push(feedback_vector);

  __ RecordComment("]");

  __ RecordComment("[ Stack/interrupt check");
  Label call_stack_guard;
  {
    // Stack check. This folds the checks for both the interrupt stack limit
    // check and the real stack limit into one by just checking for the
    // interrupt limit. The interrupt limit is either equal to the real stack
    // limit or tighter. By ensuring we have space until that limit after
    // building the frame we can quickly precheck both at once.
    //
    // TODO(v8:11429): Backport this folded check to the
    // InterpreterEntryTrampoline.
    Register frame_size = r11;
    __ movzxwl(frame_size,
               FieldOperand(bytecode_array, BytecodeArray::kFrameSizeOffset));
    __ Move(kScratchRegister, rsp);
    DCHECK_NE(frame_size, kJavaScriptCallNewTargetRegister);
    __ subq(kScratchRegister, frame_size);
    __ cmpq(kScratchRegister,
            __ StackLimitAsOperand(StackLimitKind::kInterruptStackLimit));
    __ j(below, &call_stack_guard);
    __ RecordComment("]");
  }

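The folded check boils down to one unsigned comparison. A minimal standalone sketch in plain C++, assuming (as the comment states) that the interrupt limit is always at or above the real stack limit:

#include <cstdint>

// If sp - frame_size stays above the interrupt limit, it is also above the
// (equal or lower) real stack limit, so one comparison prechecks both.
bool NeedsStackGuard(uintptr_t sp, uintptr_t frame_size,
                     uintptr_t interrupt_limit) {
  return sp - frame_size < interrupt_limit;
}
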
  // Push the return address back onto the stack for return.
  __ PushReturnAddressFrom(return_address);
  // Do a "fast" return to the caller-pushed pc.
  __ Ret();

  __ RecordComment("[ Optimized marker check");
  __ bind(&has_optimized_code_or_marker);
  {
    // TODO(v8:11429,verwaest): Overwrite the return address instead.
    // Drop the return address.
    __ Drop(1);
    Register optimization_state = rcx;

    // TODO(v8:11429): Extract to helper.
    Label maybe_has_optimized_code;
    __ testl(
        optimization_state,
        Immediate(
            FeedbackVector::kHasCompileOptimizedOrLogFirstExecutionMarker));
    __ j(zero, &maybe_has_optimized_code);

    Register optimization_marker = optimization_state;
    __ DecodeField<FeedbackVector::OptimizationMarkerBits>(optimization_marker);
    MaybeOptimizeCode(masm, feedback_vector, optimization_marker);

    __ bind(&maybe_has_optimized_code);
    Register optimized_code_entry = optimization_state;
    __ LoadAnyTaggedField(
        optimized_code_entry,
        FieldOperand(feedback_vector,
                     FeedbackVector::kMaybeOptimizedCodeOffset));
    TailCallOptimizedCodeSlot(masm, optimized_code_entry, r11, r15);
    __ Trap();
  }

  __ bind(&call_stack_guard);
  {
    __ RecordComment("[ Stack/interrupt call");
    // Save the incoming new target or generator.
    __ Push(kJavaScriptCallNewTargetRegister);
    __ CallRuntime(Runtime::kStackGuard, 0);
    __ Pop(kJavaScriptCallNewTargetRegister);

    // Push the return address back onto the stack for return.
    __ PushReturnAddressFrom(return_address);
    __ Ret();
    __ RecordComment("]");
  }
}

namespace {
void Generate_ContinueToBuiltinHelper(MacroAssembler* masm,
                                      bool java_script_builtin,
@ -1623,6 +1842,11 @@ void Builtins::Generate_NotifyDeoptimized(MacroAssembler* masm) {
  __ ret(1 * kSystemPointerSize);  // Remove rax.
}

void Builtins::Generate_TailCallOptimizedCodeSlot(MacroAssembler* masm) {
  Register optimized_code_entry = kJavaScriptCallCodeStartRegister;
  TailCallOptimizedCodeSlot(masm, optimized_code_entry, r11, r15);
}

// static
void Builtins::Generate_FunctionPrototypeApply(MacroAssembler* masm) {
  // ----------- S t a t e -------------
@ -2409,7 +2633,8 @@ void Builtins::Generate_Construct(MacroAssembler* masm) {
                          RelocInfo::CODE_TARGET);
}

void Builtins::Generate_InterpreterOnStackReplacement(MacroAssembler* masm) {
namespace {
void OnStackReplacement(MacroAssembler* masm, bool is_interpreter) {
  {
    FrameScope scope(masm, StackFrame::INTERNAL);
    __ CallRuntime(Runtime::kCompileForOnStackReplacement);
@ -2423,9 +2648,11 @@ void Builtins::Generate_InterpreterOnStackReplacement(MacroAssembler* masm) {

  __ bind(&skip);

  // Drop the handler frame that is sitting on top of the actual
  // JavaScript frame. This is the case when OSR is triggered from bytecode.
  __ leave();
  if (is_interpreter) {
    // Drop the handler frame that is sitting on top of the actual
    // JavaScript frame. This is the case when OSR is triggered from bytecode.
    __ leave();
  }

  // Load deoptimization data from the code object.
  __ LoadTaggedPointerField(rbx,
@ -2445,6 +2672,15 @@ void Builtins::Generate_InterpreterOnStackReplacement(MacroAssembler* masm) {
  // And "return" to the OSR entry point of the function.
  __ ret(0);
}
}  // namespace

void Builtins::Generate_InterpreterOnStackReplacement(MacroAssembler* masm) {
  return OnStackReplacement(masm, true);
}

void Builtins::Generate_BaselineOnStackReplacement(MacroAssembler* masm) {
  return OnStackReplacement(masm, false);
}

void Builtins::Generate_WasmCompileLazy(MacroAssembler* masm) {
  // The function index was pushed to the stack by the caller as int32.
@ -86,6 +86,15 @@ const Register ApiGetterDescriptor::CallbackRegister() { return r3; }
const Register GrowArrayElementsDescriptor::ObjectRegister() { return r0; }
const Register GrowArrayElementsDescriptor::KeyRegister() { return r3; }

const Register BaselineLeaveFrameDescriptor::ParamsSizeRegister() {
  // TODO(v8:11421): Implement on this platform.
  UNREACHABLE();
}
const Register BaselineLeaveFrameDescriptor::WeightRegister() {
  // TODO(v8:11421): Implement on this platform.
  UNREACHABLE();
}

// static
const Register TypeConversionDescriptor::ArgumentRegister() { return r0; }

@ -209,12 +218,24 @@ void CompareDescriptor::InitializePlatformSpecific(
  data->InitializePlatformSpecific(arraysize(registers), registers);
}

void Compare_BaselineDescriptor::InitializePlatformSpecific(
    CallInterfaceDescriptorData* data) {
  // TODO(v8:11421): Implement on this platform.
  InitializePlatformUnimplemented(data, kParameterCount);
}

void BinaryOpDescriptor::InitializePlatformSpecific(
    CallInterfaceDescriptorData* data) {
  Register registers[] = {r1, r0};
  data->InitializePlatformSpecific(arraysize(registers), registers);
}

void BinaryOp_BaselineDescriptor::InitializePlatformSpecific(
    CallInterfaceDescriptorData* data) {
  // TODO(v8:11421): Implement on this platform.
  InitializePlatformUnimplemented(data, kParameterCount);
}

void ApiCallbackDescriptor::InitializePlatformSpecific(
    CallInterfaceDescriptorData* data) {
  Register registers[] = {
@ -86,6 +86,9 @@ const Register ApiGetterDescriptor::CallbackRegister() { return x3; }
const Register GrowArrayElementsDescriptor::ObjectRegister() { return x0; }
const Register GrowArrayElementsDescriptor::KeyRegister() { return x3; }

const Register BaselineLeaveFrameDescriptor::ParamsSizeRegister() { return x3; }
const Register BaselineLeaveFrameDescriptor::WeightRegister() { return x4; }

// static
const Register TypeConversionDescriptor::ArgumentRegister() { return x0; }

@ -211,6 +214,15 @@ void CompareDescriptor::InitializePlatformSpecific(
  data->InitializePlatformSpecific(arraysize(registers), registers);
}

void Compare_BaselineDescriptor::InitializePlatformSpecific(
    CallInterfaceDescriptorData* data) {
  // x1: left operand
  // x0: right operand
  // x2: feedback slot
  Register registers[] = {x1, x0, x2};
  data->InitializePlatformSpecific(arraysize(registers), registers);
}

void BinaryOpDescriptor::InitializePlatformSpecific(
    CallInterfaceDescriptorData* data) {
  // x1: left operand
@ -219,6 +231,15 @@ void BinaryOpDescriptor::InitializePlatformSpecific(
  data->InitializePlatformSpecific(arraysize(registers), registers);
}

void BinaryOp_BaselineDescriptor::InitializePlatformSpecific(
    CallInterfaceDescriptorData* data) {
  // x1: left operand
  // x0: right operand
  // x2: feedback slot
  Register registers[] = {x1, x0, x2};
  data->InitializePlatformSpecific(arraysize(registers), registers);
}

void ApiCallbackDescriptor::InitializePlatformSpecific(
    CallInterfaceDescriptorData* data) {
  Register registers[] = {
@ -1277,23 +1277,6 @@ void TurboAssembler::PopCPURegList(CPURegList registers) {
#endif
}

void TurboAssembler::Push(Handle<HeapObject> handle) {
  UseScratchRegisterScope temps(this);
  Register tmp = temps.AcquireX();
  Mov(tmp, Operand(handle));
  // This is only used in test-heap.cc, for generating code that is not
  // executed. Push a padding slot together with the handle here, to
  // satisfy the alignment requirement.
  Push(padreg, tmp);
}

void TurboAssembler::Push(Smi smi) {
  UseScratchRegisterScope temps(this);
  Register tmp = temps.AcquireX();
  Mov(tmp, Operand(smi));
  Push(tmp);
}

void TurboAssembler::Claim(int64_t count, uint64_t unit_size) {
  DCHECK_GE(count, 0);
  uint64_t size = count * unit_size;

@ -1410,7 +1410,19 @@ void TurboAssembler::LoadRoot(Register destination, RootIndex index) {
      MemOperand(kRootRegister, RootRegisterOffsetForRootIndex(index)));
}

void TurboAssembler::PushRoot(RootIndex index) {
  UseScratchRegisterScope temps(this);
  Register tmp = temps.AcquireX();
  LoadRoot(tmp, index);
  Push(tmp);
}

void TurboAssembler::Move(Register dst, Smi src) { Mov(dst, src); }
void TurboAssembler::Move(Register dst, MemOperand src) { Ldr(dst, src); }
void TurboAssembler::Move(Register dst, Register src) {
  if (dst == src) return;
  Mov(dst, src);
}

void TurboAssembler::MovePair(Register dst0, Register src0, Register dst1,
                              Register src1) {
@ -1889,9 +1901,14 @@ void TurboAssembler::LoadEntryFromBuiltinIndex(Register builtin_index) {

void TurboAssembler::LoadEntryFromBuiltinIndex(Builtins::Name builtin_index,
                                               Register destination) {
  Ldr(destination,
      MemOperand(kRootRegister,
                 IsolateData::builtin_entry_slot_offset(builtin_index)));
  Ldr(destination, EntryFromBuiltinIndexAsOperand(builtin_index));
}

MemOperand TurboAssembler::EntryFromBuiltinIndexAsOperand(
    Builtins::Name builtin_index) {
  DCHECK(root_array_available());
  return MemOperand(kRootRegister,
                    IsolateData::builtin_entry_slot_offset(builtin_index));
}

void TurboAssembler::CallBuiltinByIndex(Register builtin_index) {
@ -2462,8 +2479,7 @@ void TurboAssembler::EnterFrame(StackFrame::Type type) {
    // sp[2] : fp
    // sp[1] : type
    // sp[0] : for alignment
  } else {
    DCHECK_EQ(type, StackFrame::CONSTRUCT);
  } else if (type == StackFrame::CONSTRUCT) {
    Register type_reg = temps.AcquireX();
    Mov(type_reg, StackFrame::TypeToMarker(type));

@ -2479,6 +2495,14 @@ void TurboAssembler::EnterFrame(StackFrame::Type type) {
    // sp[2] : fp
    // sp[1] : type
    // sp[0] : cp
  } else {
    DCHECK_EQ(type, StackFrame::MANUAL);
    // Just push a minimal "machine frame", saving the frame pointer and return
    // address, without any markers.
    Push<TurboAssembler::kSignLR>(lr, fp);
    Mov(fp, sp);
    // sp[1] : lr
    // sp[0] : fp
  }
}

@ -2740,6 +2764,15 @@ void TurboAssembler::LoadAnyTaggedField(const Register& destination,
  }
}

void TurboAssembler::LoadTaggedSignedField(const Register& destination,
                                           const MemOperand& field_operand) {
  if (COMPRESS_POINTERS_BOOL) {
    DecompressTaggedSigned(destination, field_operand);
  } else {
    Ldr(destination, field_operand);
  }
}

void TurboAssembler::SmiUntagField(Register dst, const MemOperand& src) {
  SmiUntag(dst, src);
}

@ -15,6 +15,7 @@
#include "src/codegen/arm64/assembler-arm64.h"
#include "src/codegen/bailout-reason.h"
#include "src/common/globals.h"
#include "src/objects/tagged-index.h"

// Simulator specific helpers.
#if USE_SIMULATOR
@ -200,9 +201,11 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
    mov(rd, vn, vn_index);
  }

  // This is required for compatibility with architecture independent code.
  // These are required for compatibility with architecture independent code.
  // Remove if not needed.
  void Move(Register dst, Smi src);
  void Move(Register dst, MemOperand src);
  void Move(Register dst, Register src);

  // Move src0 to dst0 and src1 to dst1, handling possible overlaps.
  void MovePair(Register dst0, Register src0, Register dst1, Register src1);
@ -834,14 +837,6 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
  template <StoreLRMode lr_mode = kDontStoreLR>
  void Push(const Register& src0, const VRegister& src1);

  // This is a convenience method for pushing a single Handle<Object>.
  inline void Push(Handle<HeapObject> object);
  inline void Push(Smi smi);

  // Aliases of Push and Pop, required for V8 compatibility.
  inline void push(Register src) { Push(src); }
  inline void pop(Register dst) { Pop(dst); }

  void SaveRegisters(RegList registers);
  void RestoreRegisters(RegList registers);

@ -978,6 +973,7 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
  void LoadEntryFromBuiltinIndex(Register builtin_index);
  void LoadEntryFromBuiltinIndex(Builtins::Name builtin_index,
                                 Register destination);
  MemOperand EntryFromBuiltinIndexAsOperand(Builtins::Name builtin_index);
  void CallBuiltinByIndex(Register builtin_index) override;
  void CallBuiltin(int builtin_index);

@ -1271,6 +1267,7 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {

  // Load an object from the root table.
  void LoadRoot(Register destination, RootIndex index) override;
  void PushRoot(RootIndex index);

  inline void Ret(const Register& xn = lr);

@ -1351,6 +1348,11 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
  void LoadAnyTaggedField(const Register& destination,
                          const MemOperand& field_operand);

  // Loads a field containing a tagged signed value and decompresses it if
  // necessary.
  void LoadTaggedSignedField(const Register& destination,
                             const MemOperand& field_operand);

  // Loads a field containing a smi value and untags it.
  void SmiUntagField(Register dst, const MemOperand& src);

@ -21,6 +21,7 @@ namespace internal {
  V(kExpectedOptimizationSentinel,                                            \
    "Expected optimized code cell or optimization sentinel")                  \
  V(kExpectedUndefinedOrCell, "Expected undefined or cell in register")       \
  V(kExpectedFeedbackVector, "Expected feedback vector")                      \
  V(kFunctionDataShouldBeBytecodeArrayOnInterpreterEntry,                     \
    "The function_data field should be a BytecodeArray on interpreter entry") \
  V(kInputStringTooLong, "Input string too long")                             \
@ -87,20 +87,5 @@ uint32_t CodeCommentsWriter::section_size() const {
  return kOffsetToFirstCommentEntry + static_cast<uint32_t>(byte_count_);
}

void PrintCodeCommentsSection(std::ostream& out, Address code_comments_start,
                              uint32_t code_comments_size) {
  CodeCommentsIterator it(code_comments_start, code_comments_size);
  out << "CodeComments (size = " << it.size() << ")\n";
  if (it.HasCurrent()) {
    out << std::setw(6) << "pc" << std::setw(6) << "len"
        << " comment\n";
  }
  for (; it.HasCurrent(); it.Next()) {
    out << std::hex << std::setw(6) << it.GetPCOffset() << std::dec
        << std::setw(6) << it.GetCommentSize() << " (" << it.GetComment()
        << ")\n";
  }
}

}  // namespace internal
}  // namespace v8
@ -62,9 +62,6 @@ class V8_EXPORT_PRIVATE CodeCommentsIterator {
  Address current_entry_;
};

void PrintCodeCommentsSection(std::ostream& out, Address code_comments_start,
                              uint32_t code_comments_size);

}  // namespace internal
}  // namespace v8

@ -2739,11 +2739,22 @@ TNode<BytecodeArray> CodeStubAssembler::LoadSharedFunctionInfoBytecodeArray(
      shared, SharedFunctionInfo::kFunctionDataOffset);

  TVARIABLE(HeapObject, var_result, function_data);

  Label check_for_interpreter_data(this, &var_result);
  Label done(this, &var_result);

  GotoIfNot(HasInstanceType(function_data, INTERPRETER_DATA_TYPE), &done);
  GotoIfNot(HasInstanceType(var_result.value(), BASELINE_DATA_TYPE),
            &check_for_interpreter_data);
  TNode<HeapObject> baseline_data = LoadObjectField<HeapObject>(
      var_result.value(), BaselineData::kDataOffset);
  var_result = baseline_data;
  Goto(&check_for_interpreter_data);

  BIND(&check_for_interpreter_data);

  GotoIfNot(HasInstanceType(var_result.value(), INTERPRETER_DATA_TYPE), &done);
  TNode<BytecodeArray> bytecode_array = LoadObjectField<BytecodeArray>(
      function_data, InterpreterData::kBytecodeArrayOffset);
      var_result.value(), InterpreterData::kBytecodeArrayOffset);
  var_result = bytecode_array;
  Goto(&done);

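After this change, function_data can carry one extra level of wrapping; a sketch of the chain the load unwraps:

// function_data is one of:
//   BytecodeArray
//   InterpreterData { bytecode_array }
//   BaselineData    { data }  // data: BytecodeArray or InterpreterData
// so the lookup is, in pseudocode:
//   if (IsBaselineData(d))    d = BaselineData::cast(d)->data();
//   if (IsInterpreterData(d)) d = InterpreterData::cast(d)->bytecode_array();
//   // d is now the BytecodeArray.
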
@ -2921,7 +2932,8 @@ void CodeStubAssembler::StoreFeedbackVectorSlot(
  // Check that slot <= feedback_vector.length.
  CSA_ASSERT(this,
             IsOffsetInBounds(offset, LoadFeedbackVectorLength(feedback_vector),
                              FeedbackVector::kHeaderSize));
                              FeedbackVector::kHeaderSize),
             SmiFromIntPtr(offset), feedback_vector);
  if (barrier_mode == SKIP_WRITE_BARRIER) {
    StoreNoWriteBarrier(MachineRepresentation::kTagged, feedback_vector, offset,
                        value);
@ -9629,6 +9641,15 @@ TNode<FeedbackVector> CodeStubAssembler::LoadFeedbackVectorForStub() {
  return CAST(LoadFeedbackVector(function));
}

TNode<FeedbackVector> CodeStubAssembler::LoadFeedbackVectorFromBaseline() {
  return CAST(
      LoadFromParentFrame(InterpreterFrameConstants::kBytecodeOffsetFromFp));
}

TNode<Context> CodeStubAssembler::LoadContextFromBaseline() {
  return CAST(LoadFromParentFrame(InterpreterFrameConstants::kContextOffset));
}

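Note that LoadFeedbackVectorFromBaseline reads the kBytecodeOffsetFromFp slot: the baseline prologue above deliberately pushes the feedback vector into the frame position where an interpreter frame keeps its bytecode offset (the "horrible hack" comment), since baseline code recovers the bytecode offset from the return PC instead. A sketch of the two caller-frame reads, assuming the standard interpreter frame constants:

// fp' = caller's frame pointer (LoadParentFramePointer)
// feedback_vector = Memory[fp' + kBytecodeOffsetFromFp]  // reused slot
// context         = Memory[fp' + kContextOffset]
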
TNode<FeedbackVector>
CodeStubAssembler::LoadFeedbackVectorForStubWithTrampoline() {
  TNode<RawPtrT> frame_pointer = LoadParentFramePointer();
@ -9639,17 +9660,28 @@ CodeStubAssembler::LoadFeedbackVectorForStubWithTrampoline() {
  return CAST(LoadFeedbackVector(function));
}

void CodeStubAssembler::UpdateFeedback(TNode<Smi> feedback,
                                       TNode<HeapObject> maybe_vector,
                                       TNode<UintPtrT> slot_id) {
void CodeStubAssembler::MaybeUpdateFeedback(TNode<Smi> feedback,
                                            TNode<HeapObject> maybe_vector,
                                            TNode<UintPtrT> slot_id,
                                            bool guaranteed_feedback) {
  Label end(this);
  // If the feedback vector is not valid, there is nothing to do.
  // TODO(v8:11429): Use guaranteed_feedback to skip this Goto.
  GotoIf(IsUndefined(maybe_vector), &end);
  {
    UpdateFeedback(feedback, CAST(maybe_vector), slot_id);
    Goto(&end);
  }
  BIND(&end);
}
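The TODO suggests letting guaranteed_feedback elide the undefined check entirely. One possible shape, hypothetical and not part of this change:

void CodeStubAssembler::MaybeUpdateFeedback(TNode<Smi> feedback,
                                            TNode<HeapObject> maybe_vector,
                                            TNode<UintPtrT> slot_id,
                                            bool guaranteed_feedback) {
  if (guaranteed_feedback) {
    // Baseline callers always pass a real vector, so skip the check.
    UpdateFeedback(feedback, CAST(maybe_vector), slot_id);
    return;
  }
  Label end(this);
  GotoIf(IsUndefined(maybe_vector), &end);
  UpdateFeedback(feedback, CAST(maybe_vector), slot_id);
  Goto(&end);
  BIND(&end);
}
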
void CodeStubAssembler::UpdateFeedback(TNode<Smi> feedback,
                                       TNode<FeedbackVector> feedback_vector,
                                       TNode<UintPtrT> slot_id) {
  Label end(this);

  // This method is used for binary op and compare feedback. These
  // vector nodes are initialized with a smi 0, so we can simply OR
  // our new feedback in place.
  TNode<FeedbackVector> feedback_vector = CAST(maybe_vector);
  TNode<MaybeObject> feedback_element =
      LoadFeedbackVectorSlot(feedback_vector, slot_id);
  TNode<Smi> previous_feedback = CAST(feedback_element);
@ -10892,9 +10924,48 @@ Operation Reverse(Operation op) {
}
}  // anonymous namespace

TNode<Context> CodeStubAssembler::GotoIfHasContextExtensionUpToDepth(
    TNode<Context> context, TNode<Uint32T> depth, Label* target) {
  TVARIABLE(Context, cur_context, context);
  TVARIABLE(Uint32T, cur_depth, depth);

  Label context_search(this, {&cur_depth, &cur_context});
  Label exit_loop(this);
  Label no_extension(this);

  // Loop until the depth is 0.
  // TODO(v8:11429): Assert that cur_depth isn't zero to start with.
  Goto(&context_search);
  BIND(&context_search);
  {
    // Check if context has an extension slot.
    TNode<BoolT> has_extension =
        LoadScopeInfoHasExtensionField(LoadScopeInfo(cur_context.value()));
    GotoIfNot(has_extension, &no_extension);

    // Jump to the target if the extension slot is not an undefined value.
    TNode<Object> extension_slot =
        LoadContextElement(cur_context.value(), Context::EXTENSION_INDEX);
    Branch(TaggedNotEqual(extension_slot, UndefinedConstant()), target,
           &no_extension);

    BIND(&no_extension);
    {
      cur_depth = Unsigned(Int32Sub(cur_depth.value(), Int32Constant(1)));
      cur_context = CAST(
          LoadContextElement(cur_context.value(), Context::PREVIOUS_INDEX));

      Branch(Word32NotEqual(cur_depth.value(), Int32Constant(0)),
             &context_search, &exit_loop);
    }
  }
  BIND(&exit_loop);
  return cur_context.value();
}

|
||||
Operation op, TNode<Object> left, TNode<Object> right,
|
||||
TNode<Context> context, TVariable<Smi>* var_type_feedback) {
|
||||
const LazyNode<Context>& context, TVariable<Smi>* var_type_feedback) {
|
||||
Label return_true(this), return_false(this), do_float_comparison(this),
|
||||
end(this);
|
||||
TVARIABLE(Oddball, var_result); // Actually only "true" or "false".
|
||||
@ -10986,7 +11057,8 @@ TNode<Oddball> CodeStubAssembler::RelationalComparison(
|
||||
// dedicated ToPrimitive(right, hint Number) operation, as the
|
||||
// ToNumeric(right) will by itself already invoke ToPrimitive with
|
||||
// a Number hint.
|
||||
var_right = CallBuiltin(Builtins::kNonNumberToNumeric, context, right);
|
||||
var_right =
|
||||
CallBuiltin(Builtins::kNonNumberToNumeric, context(), right);
|
||||
Goto(&loop);
|
||||
}
|
||||
}
|
||||
@ -11031,7 +11103,8 @@ TNode<Oddball> CodeStubAssembler::RelationalComparison(
|
||||
// dedicated ToPrimitive(left, hint Number) operation, as the
|
||||
// ToNumeric(left) will by itself already invoke ToPrimitive with
|
||||
// a Number hint.
|
||||
var_left = CallBuiltin(Builtins::kNonNumberToNumeric, context, left);
|
||||
var_left =
|
||||
CallBuiltin(Builtins::kNonNumberToNumeric, context(), left);
|
||||
Goto(&loop);
|
||||
}
|
||||
}
|
||||
@ -11087,7 +11160,7 @@ TNode<Oddball> CodeStubAssembler::RelationalComparison(
|
||||
// ToNumeric(right) will by itself already invoke ToPrimitive with
|
||||
// a Number hint.
|
||||
var_right =
|
||||
CallBuiltin(Builtins::kNonNumberToNumeric, context, right);
|
||||
CallBuiltin(Builtins::kNonNumberToNumeric, context(), right);
|
||||
Goto(&loop);
|
||||
}
|
||||
}
|
||||
@ -11142,7 +11215,7 @@ TNode<Oddball> CodeStubAssembler::RelationalComparison(
|
||||
// ToNumeric(right) will by itself already invoke ToPrimitive with
|
||||
// a Number hint.
|
||||
var_right =
|
||||
CallBuiltin(Builtins::kNonNumberToNumeric, context, right);
|
||||
CallBuiltin(Builtins::kNonNumberToNumeric, context(), right);
|
||||
Goto(&loop);
|
||||
}
|
||||
}
|
||||
@ -11174,7 +11247,7 @@ TNode<Oddball> CodeStubAssembler::RelationalComparison(
|
||||
default:
|
||||
UNREACHABLE();
|
||||
}
|
||||
var_result = CAST(CallBuiltin(builtin, context, left, right));
|
||||
var_result = CAST(CallBuiltin(builtin, context(), left, right));
|
||||
Goto(&end);
|
||||
|
||||
BIND(&if_right_not_string);
|
||||
@ -11193,8 +11266,8 @@ TNode<Oddball> CodeStubAssembler::RelationalComparison(
|
||||
&if_right_receiver);
|
||||
|
||||
var_left =
|
||||
CallBuiltin(Builtins::kNonNumberToNumeric, context, left);
|
||||
var_right = CallBuiltin(Builtins::kToNumeric, context, right);
|
||||
CallBuiltin(Builtins::kNonNumberToNumeric, context(), left);
|
||||
var_right = CallBuiltin(Builtins::kToNumeric, context(), right);
|
||||
Goto(&loop);
|
||||
|
||||
BIND(&if_right_bigint);
|
||||
@ -11209,7 +11282,7 @@ TNode<Oddball> CodeStubAssembler::RelationalComparison(
|
||||
{
|
||||
Callable callable = CodeFactory::NonPrimitiveToPrimitive(
|
||||
isolate(), ToPrimitiveHint::kNumber);
|
||||
var_right = CallStub(callable, context, right);
|
||||
var_right = CallStub(callable, context(), right);
|
||||
Goto(&loop);
|
||||
}
|
||||
}
|
||||
@ -11257,15 +11330,16 @@ TNode<Oddball> CodeStubAssembler::RelationalComparison(
|
||||
GotoIf(IsJSReceiverInstanceType(left_instance_type),
|
||||
&if_left_receiver);
|
||||
|
||||
var_right = CallBuiltin(Builtins::kToNumeric, context, right);
|
||||
var_left = CallBuiltin(Builtins::kNonNumberToNumeric, context, left);
|
||||
var_right = CallBuiltin(Builtins::kToNumeric, context(), right);
|
||||
var_left =
|
||||
CallBuiltin(Builtins::kNonNumberToNumeric, context(), left);
|
||||
Goto(&loop);
|
||||
|
||||
BIND(&if_left_receiver);
|
||||
{
|
||||
Callable callable = CodeFactory::NonPrimitiveToPrimitive(
|
||||
isolate(), ToPrimitiveHint::kNumber);
|
||||
var_left = CallStub(callable, context, left);
|
||||
var_left = CallStub(callable, context(), left);
|
||||
Goto(&loop);
|
||||
}
|
||||
}
|
||||
@ -11422,7 +11496,7 @@ void CodeStubAssembler::GenerateEqual_Same(SloppyTNode<Object> value,
|
||||
// ES6 section 7.2.12 Abstract Equality Comparison
|
||||
TNode<Oddball> CodeStubAssembler::Equal(SloppyTNode<Object> left,
|
||||
SloppyTNode<Object> right,
|
||||
TNode<Context> context,
|
||||
const LazyNode<Context>& context,
|
||||
TVariable<Smi>* var_type_feedback) {
|
||||
// This is a slightly optimized version of Object::Equals. Whenever you
|
||||
// change something functionality wise in here, remember to update the
|
||||
@ -11537,7 +11611,7 @@ TNode<Oddball> CodeStubAssembler::Equal(SloppyTNode<Object> left,
|
||||
CombineFeedback(var_type_feedback,
|
||||
CompareOperationFeedback::kReceiver);
|
||||
Callable callable = CodeFactory::NonPrimitiveToPrimitive(isolate());
|
||||
var_right = CallStub(callable, context, right);
|
||||
var_right = CallStub(callable, context(), right);
|
||||
Goto(&loop);
|
||||
}
|
||||
}
|
||||
@ -11568,7 +11642,7 @@ TNode<Oddball> CodeStubAssembler::Equal(SloppyTNode<Object> left,
|
||||
{
|
||||
GotoIfNot(IsStringInstanceType(right_type), &use_symmetry);
|
||||
result =
|
||||
CAST(CallBuiltin(Builtins::kStringEqual, context, left, right));
|
||||
CAST(CallBuiltin(Builtins::kStringEqual, context(), left, right));
|
||||
CombineFeedback(var_type_feedback,
|
||||
SmiOr(CollectFeedbackForString(left_type),
|
||||
CollectFeedbackForString(right_type)));
|
||||
@ -11813,7 +11887,7 @@ TNode<Oddball> CodeStubAssembler::Equal(SloppyTNode<Object> left,
|
||||
// convert {left} to Primitive too.
|
||||
CombineFeedback(var_type_feedback, CompareOperationFeedback::kAny);
|
||||
Callable callable = CodeFactory::NonPrimitiveToPrimitive(isolate());
|
||||
var_left = CallStub(callable, context, left);
|
||||
var_left = CallStub(callable, context(), left);
|
||||
Goto(&loop);
|
||||
}
|
||||
}
|
||||
@ -11828,7 +11902,7 @@ TNode<Oddball> CodeStubAssembler::Equal(SloppyTNode<Object> left,
|
||||
CombineFeedback(var_type_feedback,
|
||||
CollectFeedbackForString(right_type));
|
||||
}
|
||||
var_right = CallBuiltin(Builtins::kStringToNumber, context, right);
|
||||
var_right = CallBuiltin(Builtins::kStringToNumber, context(), right);
|
||||
Goto(&loop);
|
||||
}
|
||||
|
||||
@ -12455,7 +12529,8 @@ void CodeStubAssembler::ForInPrepare(TNode<HeapObject> enumerator,
                                     TNode<UintPtrT> slot,
                                     TNode<HeapObject> maybe_feedback_vector,
                                     TNode<FixedArray>* cache_array_out,
                                     TNode<Smi>* cache_length_out) {
                                     TNode<Smi>* cache_length_out,
                                     bool guaranteed_feedback) {
  // Check if we're using an enum cache.
  TVARIABLE(FixedArray, cache_array);
  TVARIABLE(Smi, cache_length);
@ -12484,7 +12559,8 @@ void CodeStubAssembler::ForInPrepare(TNode<HeapObject> enumerator,
        IntPtrLessThanOrEqual(enum_length, enum_indices_length),
        static_cast<int>(ForInFeedback::kEnumCacheKeysAndIndices),
        static_cast<int>(ForInFeedback::kEnumCacheKeys));
    UpdateFeedback(feedback, maybe_feedback_vector, slot);
    MaybeUpdateFeedback(feedback, maybe_feedback_vector, slot,
                        guaranteed_feedback);

    cache_array = enum_keys;
    cache_length = SmiTag(Signed(enum_length));
@ -12497,8 +12573,8 @@ void CodeStubAssembler::ForInPrepare(TNode<HeapObject> enumerator,
    TNode<FixedArray> array_enumerator = CAST(enumerator);

    // Record the fact that we hit the for-in slow-path.
    UpdateFeedback(SmiConstant(ForInFeedback::kAny), maybe_feedback_vector,
                   slot);
    MaybeUpdateFeedback(SmiConstant(ForInFeedback::kAny), maybe_feedback_vector,
                        slot, guaranteed_feedback);

    cache_array = array_enumerator;
    cache_length = LoadFixedArrayBaseLength(array_enumerator);
@ -12510,21 +12586,6 @@ void CodeStubAssembler::ForInPrepare(TNode<HeapObject> enumerator,
  *cache_length_out = cache_length.value();
}

TNode<FixedArray> CodeStubAssembler::ForInPrepareForTorque(
    TNode<HeapObject> enumerator, TNode<UintPtrT> slot,
    TNode<HeapObject> maybe_feedback_vector) {
  TNode<FixedArray> cache_array;
  TNode<Smi> cache_length;
  ForInPrepare(enumerator, slot, maybe_feedback_vector, &cache_array,
               &cache_length);

  TNode<FixedArray> result = AllocateUninitializedFixedArray(2);
  StoreFixedArrayElement(result, 0, cache_array);
  StoreFixedArrayElement(result, 1, cache_length);

  return result;
}

TNode<String> CodeStubAssembler::Typeof(SloppyTNode<Object> value) {
  TVARIABLE(String, result_var);

@ -13284,6 +13345,7 @@ TNode<Code> CodeStubAssembler::GetSharedFunctionInfoCode(
  TNode<Uint16T> data_type = LoadInstanceType(CAST(sfi_data));

  int32_t case_values[] = {BYTECODE_ARRAY_TYPE,
                           BASELINE_DATA_TYPE,
                           WASM_EXPORTED_FUNCTION_DATA_TYPE,
                           ASM_WASM_DATA_TYPE,
                           UNCOMPILED_DATA_WITHOUT_PREPARSE_DATA_TYPE,
@ -13292,6 +13354,7 @@ TNode<Code> CodeStubAssembler::GetSharedFunctionInfoCode(
                           WASM_JS_FUNCTION_DATA_TYPE,
                           WASM_CAPI_FUNCTION_DATA_TYPE};
  Label check_is_bytecode_array(this);
  Label check_is_baseline_data(this);
  Label check_is_exported_function_data(this);
  Label check_is_asm_wasm_data(this);
  Label check_is_uncompiled_data_without_preparse_data(this);
@ -13301,6 +13364,7 @@ TNode<Code> CodeStubAssembler::GetSharedFunctionInfoCode(
  Label check_is_wasm_js_function_data(this);
  Label check_is_wasm_capi_function_data(this);
  Label* case_labels[] = {&check_is_bytecode_array,
                          &check_is_baseline_data,
                          &check_is_exported_function_data,
                          &check_is_asm_wasm_data,
                          &check_is_uncompiled_data_without_preparse_data,
@ -13317,6 +13381,14 @@ TNode<Code> CodeStubAssembler::GetSharedFunctionInfoCode(
  sfi_code = HeapConstant(BUILTIN_CODE(isolate(), InterpreterEntryTrampoline));
  Goto(&done);

  // IsBaselineData: Execute baseline code
  BIND(&check_is_baseline_data);
  TNode<BaselineData> baseline_data = CAST(sfi_data);
  TNode<Code> baseline_code =
      CAST(LoadObjectField(baseline_data, BaselineData::kBaselineCodeOffset));
  sfi_code = baseline_code;
  Goto(&done);

  // IsWasmExportedFunctionData: Use the wrapper code
  BIND(&check_is_exported_function_data);
  sfi_code = CAST(LoadObjectField(
@ -3144,6 +3144,8 @@ class V8_EXPORT_PRIVATE CodeStubAssembler

  // Load type feedback vector from the stub caller's frame.
  TNode<FeedbackVector> LoadFeedbackVectorForStub();
  TNode<FeedbackVector> LoadFeedbackVectorFromBaseline();
  TNode<Context> LoadContextFromBaseline();
  // Load type feedback vector from the stub caller's frame, skipping an
  // intermediate trampoline frame.
  TNode<FeedbackVector> LoadFeedbackVectorForStubWithTrampoline();
@ -3163,9 +3165,13 @@ class V8_EXPORT_PRIVATE CodeStubAssembler
  TNode<ClosureFeedbackCellArray> LoadClosureFeedbackArray(
      TNode<JSFunction> closure);

  // TODO(v8:11429): Change bool to enum.
  void MaybeUpdateFeedback(TNode<Smi> feedback,
                           TNode<HeapObject> maybe_feedback_vector,
                           TNode<UintPtrT> slot_id, bool guaranteed_feedback);
  // Update the type feedback vector.
  void UpdateFeedback(TNode<Smi> feedback,
                      TNode<HeapObject> maybe_feedback_vector,
                      TNode<FeedbackVector> feedback_vector,
                      TNode<UintPtrT> slot_id);

  // Report that there was a feedback update, performing any tasks that should
@ -3315,9 +3321,25 @@ class V8_EXPORT_PRIVATE CodeStubAssembler
                          TNode<IntPtrT> start_offset,
                          TNode<IntPtrT> end_offset, RootIndex root);

  // Goto the given |target| if the context chain starting at |context| has any
  // extensions up to the given |depth|. Returns the Context with the
  // extensions if there was one, otherwise returns the Context at the given
  // |depth|.
  TNode<Context> GotoIfHasContextExtensionUpToDepth(TNode<Context> context,
                                                    TNode<Uint32T> depth,
                                                    Label* target);

  TNode<Oddball> RelationalComparison(
      Operation op, TNode<Object> left, TNode<Object> right,
      TNode<Context> context, TVariable<Smi>* var_type_feedback = nullptr);
      TNode<Context> context, TVariable<Smi>* var_type_feedback = nullptr) {
    return RelationalComparison(
        op, left, right, [=]() { return context; }, var_type_feedback);
  }

  TNode<Oddball> RelationalComparison(
      Operation op, TNode<Object> left, TNode<Object> right,
      const LazyNode<Context>& context,
      TVariable<Smi>* var_type_feedback = nullptr);

void BranchIfNumberRelationalComparison(Operation op, TNode<Number> left,
|
||||
TNode<Number> right, Label* if_true,
|
||||
@ -3369,6 +3391,12 @@ class V8_EXPORT_PRIVATE CodeStubAssembler
|
||||
|
||||
TNode<Oddball> Equal(SloppyTNode<Object> lhs, SloppyTNode<Object> rhs,
|
||||
TNode<Context> context,
|
||||
TVariable<Smi>* var_type_feedback = nullptr) {
|
||||
return Equal(
|
||||
lhs, rhs, [=]() { return context; }, var_type_feedback);
|
||||
}
|
||||
TNode<Oddball> Equal(SloppyTNode<Object> lhs, SloppyTNode<Object> rhs,
|
||||
const LazyNode<Context>& context,
|
||||
TVariable<Smi>* var_type_feedback = nullptr);
|
||||
|
||||
TNode<Oddball> StrictEqual(SloppyTNode<Object> lhs, SloppyTNode<Object> rhs,
|
||||
@ -3404,14 +3432,7 @@ class V8_EXPORT_PRIVATE CodeStubAssembler
|
||||
void ForInPrepare(TNode<HeapObject> enumerator, TNode<UintPtrT> slot,
|
||||
TNode<HeapObject> maybe_feedback_vector,
|
||||
TNode<FixedArray>* cache_array_out,
|
||||
TNode<Smi>* cache_length_out);
|
||||
// Returns {cache_array} and {cache_length} in a fixed array of length 2.
|
||||
// TODO(jgruber): Tuple2 would be a slightly better fit as the return type,
|
||||
// but FixedArray has better support and there are no effective drawbacks to
|
||||
// using it instead of Tuple2 in practice.
|
||||
TNode<FixedArray> ForInPrepareForTorque(
|
||||
TNode<HeapObject> enumerator, TNode<UintPtrT> slot,
|
||||
TNode<HeapObject> maybe_feedback_vector);
|
||||
TNode<Smi>* cache_length_out, bool guaranteed_feedback);
|
||||
|
||||
TNode<String> Typeof(SloppyTNode<Object> value);
|
||||
|
||||
|
@ -13,6 +13,7 @@
|
||||
#include "src/ast/scopes.h"
|
||||
#include "src/base/logging.h"
|
||||
#include "src/base/optional.h"
|
||||
#include "src/baseline/baseline.h"
|
||||
#include "src/codegen/assembler-inl.h"
|
||||
#include "src/codegen/compilation-cache.h"
|
||||
#include "src/codegen/optimized-compilation-info.h"
|
||||
@ -171,13 +172,13 @@ struct ScopedTimer {
|
||||
base::TimeDelta* location_;
|
||||
};
|
||||
|
||||
namespace {
|
||||
|
||||
void LogFunctionCompilation(CodeEventListener::LogEventsAndTags tag,
|
||||
Handle<SharedFunctionInfo> shared,
|
||||
Handle<Script> script,
|
||||
Handle<AbstractCode> abstract_code, bool optimizing,
|
||||
double time_taken_ms, Isolate* isolate) {
|
||||
// static
|
||||
void Compiler::LogFunctionCompilation(Isolate* isolate,
|
||||
CodeEventListener::LogEventsAndTags tag,
|
||||
Handle<SharedFunctionInfo> shared,
|
||||
Handle<Script> script,
|
||||
Handle<AbstractCode> abstract_code,
|
||||
CodeKind kind, double time_taken_ms) {
|
||||
DCHECK(!abstract_code.is_null());
|
||||
DCHECK(!abstract_code.is_identical_to(BUILTIN_CODE(isolate, CompileLazy)));
|
||||
|
||||
@ -202,7 +203,23 @@ void LogFunctionCompilation(CodeEventListener::LogEventsAndTags tag,
|
||||
line_num, column_num));
|
||||
if (!FLAG_log_function_events) return;
|
||||
|
||||
std::string name = optimizing ? "optimize" : "compile";
|
||||
std::string name;
|
||||
switch (kind) {
|
||||
case CodeKind::INTERPRETED_FUNCTION:
|
||||
name = "interpreter";
|
||||
break;
|
||||
case CodeKind::SPARKPLUG:
|
||||
name = "baseline";
|
||||
break;
|
||||
case CodeKind::TURBOPROP:
|
||||
name = "turboprop";
|
||||
break;
|
||||
case CodeKind::TURBOFAN:
|
||||
name = "optimize";
|
||||
break;
|
||||
default:
|
||||
UNREACHABLE();
|
||||
}
|
||||
switch (tag) {
|
||||
case CodeEventListener::EVAL_TAG:
|
||||
name += "-eval";
|
||||
@ -225,6 +242,8 @@ void LogFunctionCompilation(CodeEventListener::LogEventsAndTags tag,
|
||||
*debug_name));
|
||||
}
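
A hypothetical baseline-tier call site would record its compile the same way
as the interpreted and optimized call sites that follow (sketch; tag and
timing values are illustrative):

  Compiler::LogFunctionCompilation(isolate, CodeEventListener::FUNCTION_TAG,
                                   shared, script, abstract_code,
                                   CodeKind::SPARKPLUG, time_taken_ms);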

namespace {

ScriptOriginOptions OriginOptionsForEval(Object script) {
  if (!script.IsScript()) return ScriptOriginOptions();

@@ -304,8 +323,9 @@ void RecordUnoptimizedFunctionCompilation(
      time_taken_to_finalize.InMillisecondsF();

  Handle<Script> script(Script::cast(shared->script()), isolate);
  LogFunctionCompilation(tag, shared, script, abstract_code, false,
                         time_taken_ms, isolate);
  Compiler::LogFunctionCompilation(isolate, tag, shared, script, abstract_code,
                                   CodeKind::INTERPRETED_FUNCTION,
                                   time_taken_ms);
}

}  // namespace
@@ -439,8 +459,9 @@ void OptimizedCompilationJob::RecordFunctionCompilation(

  Handle<Script> script(
      Script::cast(compilation_info()->shared_info()->script()), isolate);
  LogFunctionCompilation(tag, compilation_info()->shared_info(), script,
                         abstract_code, true, time_taken_ms, isolate);
  Compiler::LogFunctionCompilation(
      isolate, tag, compilation_info()->shared_info(), script, abstract_code,
      compilation_info()->code_kind(), time_taken_ms);
}

// ----------------------------------------------------------------------------
@@ -1036,7 +1057,12 @@ Handle<Code> ContinuationForConcurrentOptimization(
      function->set_code(function->feedback_vector().optimized_code());
    }
    return handle(function->code(), isolate);
  } else if (function->shared().HasBaselineData()) {
    Code baseline_code = function->shared().baseline_data().baseline_code();
    function->set_code(baseline_code);
    return handle(baseline_code, isolate);
  }
  DCHECK(function->ActiveTierIsIgnition());
  return BUILTIN_CODE(isolate, InterpreterEntryTrampoline);
}
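
Restated in isolation, the continuation now prefers any already-present lower
tier over re-entering the interpreter (sketch; has_optimized_code() stands in
for the feedback-vector check above):

  Handle<Code> ContinuationSketch(Handle<JSFunction> f, Isolate* isolate) {
    if (f->feedback_vector().has_optimized_code())      // 1. TurboFan code
      return handle(f->feedback_vector().optimized_code(), isolate);
    if (f->shared().HasBaselineData())                  // 2. Sparkplug code
      return handle(f->shared().baseline_data().baseline_code(), isolate);
    return BUILTIN_CODE(isolate, InterpreterEntryTrampoline);  // 3. Ignition
  }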

@@ -1211,6 +1237,11 @@ void FinalizeUnoptimizedCompilation(
    if (FLAG_interpreted_frames_native_stack) {
      InstallInterpreterTrampolineCopy(isolate, shared_info);
    }
    if (FLAG_always_sparkplug && shared_info->HasBytecodeArray() &&
        !shared_info->HasBreakInfo()) {
      // TODO(v8:11429) Extract to Compiler::CompileX
      CompileWithBaseline(isolate, shared_info);
    }
    Handle<CoverageInfo> coverage_info;
    if (finalize_data.coverage_info().ToHandle(&coverage_info)) {
      isolate->debug()->InstallCoverageInfo(shared_info, coverage_info);
@@ -1818,6 +1849,7 @@ bool Compiler::Compile(Handle<JSFunction> function, ClearExceptionFlag flag,
      !Compile(shared_info, flag, is_compiled_scope)) {
    return false;
  }

  DCHECK(is_compiled_scope->is_compiled());
  Handle<Code> code = handle(shared_info->GetCode(), isolate);

@@ -1829,6 +1861,14 @@ bool Compiler::Compile(Handle<JSFunction> function, ClearExceptionFlag flag,
  // immediately after a flush would be better.
  JSFunction::InitializeFeedbackCell(function, is_compiled_scope, true);

  // If --always-sparkplug is enabled, make sure we have sparkplug code.
  // TODO(v8:11429): Extract out the rest of the if into a "can baseline
  // compile" predicate, or similar.
  if (FLAG_always_sparkplug && !function->shared().HasAsmWasmData() &&
      !function->shared().HasDebugInfo()) {
    DCHECK(shared_info->HasBaselineData());
  }

  // Optimize now if --always-opt is enabled.
  if (FLAG_always_opt && !function->shared().HasAsmWasmData()) {
    CompilerTracer::TraceOptimizeForAlwaysOpt(isolate, function,
@@ -1845,6 +1885,11 @@ bool Compiler::Compile(Handle<JSFunction> function, ClearExceptionFlag flag,
  // Install code on closure.
  function->set_code(*code);

  // Install a feedback vector if necessary.
  if (code->kind() == CodeKind::SPARKPLUG) {
    JSFunction::EnsureFeedbackVector(function, is_compiled_scope);
  }
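
Sparkplug code reads and writes the feedback vector directly (the *_Baseline
descriptors later in this CL omit the vector parameter and load it from the
frame), so installing SPARKPLUG code requires the vector to exist up front;
interpreted functions can keep allocating it lazily. A sketch of the
invariant this maintains:

  DCHECK_IMPLIES(function->code().kind() == CodeKind::SPARKPLUG,
                 function->has_feedback_vector());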

  // Check postconditions on success.
  DCHECK(!isolate->has_pending_exception());
  DCHECK(function->shared().is_compiled());

src/codegen/compiler.h
@@ -73,6 +73,12 @@ class V8_EXPORT_PRIVATE Compiler : public AllStatic {
  static bool CompileOptimized(Handle<JSFunction> function,
                               ConcurrencyMode mode, CodeKind code_kind);

  static void LogFunctionCompilation(Isolate* isolate,
                                     CodeEventListener::LogEventsAndTags tag,
                                     Handle<SharedFunctionInfo> shared,
                                     Handle<Script> script,
                                     Handle<AbstractCode> abstract_code,
                                     CodeKind kind, double time_taken_ms);
  // Collect source positions for a function that has already been compiled to
  // bytecode, but for which source positions were not collected (e.g. because
  // they were not immediately needed).

src/codegen/ia32/interface-descriptors-ia32.cc
@@ -89,6 +89,15 @@ const Register ApiGetterDescriptor::CallbackRegister() { return eax; }
const Register GrowArrayElementsDescriptor::ObjectRegister() { return eax; }
const Register GrowArrayElementsDescriptor::KeyRegister() { return ecx; }

const Register BaselineLeaveFrameDescriptor::ParamsSizeRegister() {
  // TODO(v8:11421): Implement on this platform.
  UNREACHABLE();
}
const Register BaselineLeaveFrameDescriptor::WeightRegister() {
  // TODO(v8:11421): Implement on this platform.
  UNREACHABLE();
}

// static
const Register TypeConversionDescriptor::ArgumentRegister() { return eax; }

@@ -213,12 +222,24 @@ void CompareDescriptor::InitializePlatformSpecific(
  data->InitializePlatformSpecific(arraysize(registers), registers);
}

void Compare_BaselineDescriptor::InitializePlatformSpecific(
    CallInterfaceDescriptorData* data) {
  // TODO(v8:11421): Implement on this platform.
  InitializePlatformUnimplemented(data, kParameterCount);
}

void BinaryOpDescriptor::InitializePlatformSpecific(
    CallInterfaceDescriptorData* data) {
  Register registers[] = {edx, eax};
  data->InitializePlatformSpecific(arraysize(registers), registers);
}

void BinaryOp_BaselineDescriptor::InitializePlatformSpecific(
    CallInterfaceDescriptorData* data) {
  // TODO(v8:11421): Implement on this platform.
  InitializePlatformUnimplemented(data, kParameterCount);
}

void ApiCallbackDescriptor::InitializePlatformSpecific(
    CallInterfaceDescriptorData* data) {
  Register registers[] = {

src/codegen/interface-descriptors.cc
@@ -197,12 +197,26 @@ const Register FastNewObjectDescriptor::NewTargetRegister() {
  return kJavaScriptCallNewTargetRegister;
}

void TailCallOptimizedCodeSlotDescriptor::InitializePlatformSpecific(
    CallInterfaceDescriptorData* data) {
  Register registers[] = {kJavaScriptCallCodeStartRegister};
  data->InitializePlatformSpecific(arraysize(registers), registers);
}

void LoadDescriptor::InitializePlatformSpecific(
    CallInterfaceDescriptorData* data) {
  Register registers[] = {ReceiverRegister(), NameRegister(), SlotRegister()};
  data->InitializePlatformSpecific(arraysize(registers), registers);
}

void LoadBaselineDescriptor::InitializePlatformSpecific(
    CallInterfaceDescriptorData* data) {
  Register registers[] = {LoadDescriptor::ReceiverRegister(),
                          LoadDescriptor::NameRegister(),
                          LoadDescriptor::SlotRegister()};
  data->InitializePlatformSpecific(arraysize(registers), registers);
}

void LoadNoFeedbackDescriptor::InitializePlatformSpecific(
    CallInterfaceDescriptorData* data) {
  Register registers[] = {ReceiverRegister(), NameRegister(), ICKindRegister()};
@@ -215,6 +229,18 @@ void LoadGlobalDescriptor::InitializePlatformSpecific(
  data->InitializePlatformSpecific(arraysize(registers), registers);
}

void LoadGlobalBaselineDescriptor::InitializePlatformSpecific(
    CallInterfaceDescriptorData* data) {
  Register registers[] = {LoadGlobalDescriptor::NameRegister(),
                          LoadGlobalDescriptor::SlotRegister()};
  data->InitializePlatformSpecific(arraysize(registers), registers);
}

void LookupBaselineDescriptor::InitializePlatformSpecific(
    CallInterfaceDescriptorData* data) {
  DefaultInitializePlatformSpecific(data, kParameterCount);
}

void LoadGlobalNoFeedbackDescriptor::InitializePlatformSpecific(
    CallInterfaceDescriptorData* data) {
  Register registers[] = {NameRegister(), ICKindRegister()};
@@ -237,6 +263,16 @@ void LoadWithReceiverAndVectorDescriptor::InitializePlatformSpecific(
  data->InitializePlatformSpecific(len, registers);
}

void LoadWithReceiverBaselineDescriptor::InitializePlatformSpecific(
    CallInterfaceDescriptorData* data) {
  Register registers[] = {
      LoadWithReceiverAndVectorDescriptor::ReceiverRegister(),
      LoadWithReceiverAndVectorDescriptor::LookupStartObjectRegister(),
      LoadWithReceiverAndVectorDescriptor::NameRegister(),
      LoadWithReceiverAndVectorDescriptor::SlotRegister()};
  data->InitializePlatformSpecific(arraysize(registers), registers);
}

void StoreGlobalDescriptor::InitializePlatformSpecific(
    CallInterfaceDescriptorData* data) {
  Register registers[] = {NameRegister(), ValueRegister(), SlotRegister()};
@@ -245,6 +281,16 @@ void StoreGlobalDescriptor::InitializePlatformSpecific(
  data->InitializePlatformSpecific(len, registers);
}

void StoreGlobalBaselineDescriptor::InitializePlatformSpecific(
    CallInterfaceDescriptorData* data) {
  Register registers[] = {StoreGlobalDescriptor::NameRegister(),
                          StoreGlobalDescriptor::ValueRegister(),
                          StoreGlobalDescriptor::SlotRegister()};

  int len = arraysize(registers) - kStackArgumentsCount;
  data->InitializePlatformSpecific(len, registers);
}

void StoreGlobalWithVectorDescriptor::InitializePlatformSpecific(
    CallInterfaceDescriptorData* data) {
  Register registers[] = {NameRegister(), ValueRegister(), SlotRegister(),
@@ -262,6 +308,16 @@ void StoreDescriptor::InitializePlatformSpecific(
  data->InitializePlatformSpecific(len, registers);
}

void StoreBaselineDescriptor::InitializePlatformSpecific(
    CallInterfaceDescriptorData* data) {
  Register registers[] = {
      StoreDescriptor::ReceiverRegister(), StoreDescriptor::NameRegister(),
      StoreDescriptor::ValueRegister(), StoreDescriptor::SlotRegister()};

  int len = arraysize(registers) - kStackArgumentsCount;
  data->InitializePlatformSpecific(len, registers);
}

void StoreTransitionDescriptor::InitializePlatformSpecific(
    CallInterfaceDescriptorData* data) {
  Register registers[] = {
@@ -272,6 +328,33 @@ void StoreTransitionDescriptor::InitializePlatformSpecific(
  data->InitializePlatformSpecific(len, registers);
}

void BaselineOutOfLinePrologueDescriptor::InitializePlatformSpecific(
    CallInterfaceDescriptorData* data) {
  // TODO(v8:11421): Implement on other platforms.
#if V8_TARGET_ARCH_X64 || V8_TARGET_ARCH_ARM64
  Register registers[] = {
      kContextRegister,
      kJSFunctionRegister,
      kJavaScriptCallArgCountRegister,
      kInterpreterBytecodeArrayRegister,
  };
  data->InitializePlatformSpecific(kParameterCount, registers);
#else
  InitializePlatformUnimplemented(data, kParameterCount);
#endif
}

void BaselineLeaveFrameDescriptor::InitializePlatformSpecific(
    CallInterfaceDescriptorData* data) {
  // TODO(v8:11421): Implement on other platforms.
#if V8_TARGET_ARCH_X64 || V8_TARGET_ARCH_ARM64
  Register registers[] = {ParamsSizeRegister(), WeightRegister()};
  data->InitializePlatformSpecific(kParameterCount, registers);
#else
  InitializePlatformUnimplemented(data, kParameterCount);
#endif
}

void StringAtDescriptor::InitializePlatformSpecific(
    CallInterfaceDescriptorData* data) {
  DefaultInitializePlatformSpecific(data, kParameterCount);
@@ -422,6 +505,11 @@ void CloneObjectWithVectorDescriptor::InitializePlatformSpecific(
  DefaultInitializePlatformSpecific(data, kParameterCount);
}

void CloneObjectBaselineDescriptor::InitializePlatformSpecific(
    CallInterfaceDescriptorData* data) {
  DefaultInitializePlatformSpecific(data, kParameterCount);
}

// static
Register RunMicrotasksDescriptor::MicrotaskQueueRegister() {
  return CallDescriptors::call_descriptor_data(CallDescriptors::RunMicrotasks)
@@ -458,6 +546,11 @@ void BinaryOp_WithFeedbackDescriptor::InitializePlatformSpecific(
  DefaultInitializePlatformSpecific(data, 4);
}

void CallTrampoline_BaselineDescriptor::InitializePlatformSpecific(
    CallInterfaceDescriptorData* data) {
  DefaultInitializePlatformSpecific(data, kParameterCount);
}

void CallTrampoline_WithFeedbackDescriptor::InitializePlatformSpecific(
    CallInterfaceDescriptorData* data) {
  DefaultInitializePlatformSpecific(data, 4);
@@ -468,6 +561,11 @@ void CallWithArrayLike_WithFeedbackDescriptor::InitializePlatformSpecific(
  DefaultInitializePlatformSpecific(data, 4);
}

void CallWithSpread_BaselineDescriptor::InitializePlatformSpecific(
    CallInterfaceDescriptorData* data) {
  DefaultInitializePlatformSpecific(data, kParameterCount);
}

void CallWithSpread_WithFeedbackDescriptor::InitializePlatformSpecific(
    CallInterfaceDescriptorData* data) {
  DefaultInitializePlatformSpecific(data, 4);
@@ -478,6 +576,12 @@ void ConstructWithArrayLike_WithFeedbackDescriptor::InitializePlatformSpecific(
  DefaultInitializePlatformSpecific(data, 4);
}

void ConstructWithSpread_BaselineDescriptor::InitializePlatformSpecific(
    CallInterfaceDescriptorData* data) {
  DefaultInitializePlatformSpecific(data,
                                    kParameterCount - kStackArgumentsCount);
}

void ConstructWithSpread_WithFeedbackDescriptor::InitializePlatformSpecific(
    CallInterfaceDescriptorData* data) {
  DefaultInitializePlatformSpecific(data, 4);
@@ -493,5 +597,15 @@ void UnaryOp_WithFeedbackDescriptor::InitializePlatformSpecific(
  DefaultInitializePlatformSpecific(data, 3);
}

void UnaryOp_BaselineDescriptor::InitializePlatformSpecific(
    CallInterfaceDescriptorData* data) {
  DefaultInitializePlatformSpecific(data, 2);
}

void ForInPrepareDescriptor::InitializePlatformSpecific(
    CallInterfaceDescriptorData* data) {
  DefaultInitializePlatformSpecific(data, kParameterCount);
}

}  // namespace internal
}  // namespace v8

src/codegen/interface-descriptors.h
@@ -33,19 +33,24 @@ namespace internal {
  V(BigIntToI32Pair) \
  V(BigIntToI64) \
  V(BinaryOp) \
  V(BinaryOp_Baseline) \
  V(BinaryOp_WithFeedback) \
  V(CallForwardVarargs) \
  V(CallFunctionTemplate) \
  V(CallTrampoline) \
  V(CallTrampoline_Baseline) \
  V(CallTrampoline_WithFeedback) \
  V(CallVarargs) \
  V(CallWithArrayLike) \
  V(CallWithArrayLike_WithFeedback) \
  V(CallWithSpread) \
  V(CallWithSpread_Baseline) \
  V(CallWithSpread_WithFeedback) \
  V(CEntry1ArgvOnStack) \
  V(CloneObjectBaseline) \
  V(CloneObjectWithVector) \
  V(Compare) \
  V(Compare_Baseline) \
  V(Compare_WithFeedback) \
  V(ConstructForwardVarargs) \
  V(ConstructStub) \
@@ -53,13 +58,16 @@ namespace internal {
  V(ConstructWithArrayLike) \
  V(ConstructWithArrayLike_WithFeedback) \
  V(Construct_WithFeedback) \
  V(Construct_Baseline) \
  V(ConstructWithSpread) \
  V(ConstructWithSpread_Baseline) \
  V(ConstructWithSpread_WithFeedback) \
  V(ContextOnly) \
  V(CppBuiltinAdaptor) \
  V(DynamicCheckMaps) \
  V(EphemeronKeyBarrier) \
  V(FastNewObject) \
  V(ForInPrepare) \
  V(FrameDropperTrampoline) \
  V(GetIteratorStackParameter) \
  V(GetProperty) \
@@ -69,16 +77,23 @@ namespace internal {
  V(InterpreterCEntry1) \
  V(InterpreterCEntry2) \
  V(InterpreterDispatch) \
  V(TailCallOptimizedCodeSlot) \
  V(InterpreterPushArgsThenCall) \
  V(InterpreterPushArgsThenConstruct) \
  V(JSTrampoline) \
  V(BaselineOutOfLinePrologue) \
  V(BaselineLeaveFrame) \
  V(Load) \
  V(LoadBaseline) \
  V(LoadGlobal) \
  V(LoadGlobalBaseline) \
  V(LoadGlobalNoFeedback) \
  V(LoadGlobalWithVector) \
  V(LoadNoFeedback) \
  V(LoadWithVector) \
  V(LoadWithReceiverAndVector) \
  V(LoadWithReceiverBaseline) \
  V(LookupBaseline) \
  V(NoContext) \
  V(RecordWrite) \
  V(ResumeGenerator) \
@@ -86,7 +101,9 @@ namespace internal {
  V(RunMicrotasksEntry) \
  V(SingleParameterOnStack) \
  V(Store) \
  V(StoreBaseline) \
  V(StoreGlobal) \
  V(StoreGlobalBaseline) \
  V(StoreGlobalWithVector) \
  V(StoreTransition) \
  V(StoreWithVector) \
@@ -96,6 +113,7 @@ namespace internal {
  V(TypeConversion) \
  V(TypeConversionNoContext) \
  V(Typeof) \
  V(UnaryOp_Baseline) \
  V(UnaryOp_WithFeedback) \
  V(Void) \
  V(WasmFloat32ToNumber) \
@@ -327,6 +345,14 @@ class V8_EXPORT_PRIVATE CallInterfaceDescriptor {
    UNREACHABLE();
  }

  // Initializes |data| to an unspecified state, for platforms that haven't
  // implemented a given builtin.
  static void InitializePlatformUnimplemented(CallInterfaceDescriptorData* data,
                                              int register_parameter_count) {
    DefaultInitializePlatformSpecific(data,
                                      std::min(register_parameter_count, 4));
  }

  virtual void InitializePlatformIndependent(
      CallInterfaceDescriptorData* data) {
    // Default descriptor configuration: one result, all parameters are passed
@@ -520,11 +546,25 @@ STATIC_ASSERT(kMaxTFSBuiltinRegisterParams <= kMaxBuiltinRegisterParams);
    kNewTarget, \
    kActualArgumentsCount, \
    ##__VA_ARGS__, \
    \
    kParameterCount, \
    kContext = kParameterCount /* implicit parameter */ \
  };

#define DEFINE_JS_PARAMETERS_NO_CONTEXT(...) \
  static constexpr int kDescriptorFlags = \
      CallInterfaceDescriptorData::kAllowVarArgs | \
      CallInterfaceDescriptorData::kNoContext; \
  static constexpr int kReturnCount = 1; \
  static constexpr StackArgumentOrder kStackArgumentOrder = \
      StackArgumentOrder::kJS; \
  enum ParameterIndices { \
    kTarget, \
    kNewTarget, \
    kActualArgumentsCount, \
    ##__VA_ARGS__, \
    kParameterCount, \
  };

#define DEFINE_JS_PARAMETER_TYPES(...) \
  DEFINE_PARAMETER_TYPES(MachineType::AnyTagged(), /* kTarget */ \
                         MachineType::AnyTagged(), /* kNewTarget */ \
@@ -652,6 +692,17 @@ class LoadDescriptor : public CallInterfaceDescriptor {
  static const Register SlotRegister();
};

// LoadBaselineDescriptor is a load descriptor that does not take a context as
// input.
class LoadBaselineDescriptor : public CallInterfaceDescriptor {
 public:
  DEFINE_PARAMETERS_NO_CONTEXT(kReceiver, kName, kSlot)
  DEFINE_PARAMETER_TYPES(MachineType::AnyTagged(),     // kReceiver
                         MachineType::AnyTagged(),     // kName
                         MachineType::TaggedSigned())  // kSlot
  DECLARE_DESCRIPTOR(LoadBaselineDescriptor, CallInterfaceDescriptor)
};
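
The *Baseline descriptors drop both the context and the feedback-vector
parameters: baseline code keeps them in its frame, and a handler recovers
them with the CSA helpers declared earlier in this CL. A sketch of a handler
body (hypothetical builtin; the tail-called target is illustrative):

  TF_BUILTIN(LoadIC_BaselineSketch, CodeStubAssembler) {
    auto receiver = Parameter<Object>(Descriptor::kReceiver);
    auto name = Parameter<Name>(Descriptor::kName);
    auto slot = Parameter<Smi>(Descriptor::kSlot);
    // No context or vector parameters: load both from the baseline frame.
    TNode<Context> context = LoadContextFromBaseline();
    TNode<FeedbackVector> vector = LoadFeedbackVectorFromBaseline();
    TailCallBuiltin(Builtins::kLoadIC, context, receiver, name, slot, vector);
  }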

class LoadGlobalNoFeedbackDescriptor : public CallInterfaceDescriptor {
 public:
  DEFINE_PARAMETERS(kName, kICKind)
@@ -705,6 +756,23 @@ class LoadGlobalDescriptor : public CallInterfaceDescriptor {
  }
};

class LoadGlobalBaselineDescriptor : public CallInterfaceDescriptor {
 public:
  DEFINE_PARAMETERS_NO_CONTEXT(kName, kSlot)
  DEFINE_PARAMETER_TYPES(MachineType::AnyTagged(),     // kName
                         MachineType::TaggedSigned())  // kSlot
  DECLARE_DESCRIPTOR(LoadGlobalBaselineDescriptor, CallInterfaceDescriptor)
};

class LookupBaselineDescriptor : public CallInterfaceDescriptor {
 public:
  DEFINE_PARAMETERS_NO_CONTEXT(kName, kDepth, kSlot)
  DEFINE_PARAMETER_TYPES(MachineType::AnyTagged(),  // kName
                         MachineType::AnyTagged(),  // kDepth
                         MachineType::AnyTagged())  // kSlot
  DECLARE_DESCRIPTOR(LookupBaselineDescriptor, CallInterfaceDescriptor)
};

class StoreDescriptor : public CallInterfaceDescriptor {
 public:
  DEFINE_PARAMETERS(kReceiver, kName, kValue, kSlot)
@@ -729,6 +797,25 @@ class StoreDescriptor : public CallInterfaceDescriptor {
  static const int kStackArgumentsCount = kPassLastArgsOnStack ? 2 : 0;
};

class StoreBaselineDescriptor : public CallInterfaceDescriptor {
 public:
  DEFINE_PARAMETERS_NO_CONTEXT(kReceiver, kName, kValue, kSlot)
  DEFINE_PARAMETER_TYPES(MachineType::AnyTagged(),     // kReceiver
                         MachineType::AnyTagged(),     // kName
                         MachineType::AnyTagged(),     // kValue
                         MachineType::TaggedSigned())  // kSlot
  DECLARE_DESCRIPTOR(StoreBaselineDescriptor, CallInterfaceDescriptor)

#if V8_TARGET_ARCH_IA32
  static const bool kPassLastArgsOnStack = true;
#else
  static const bool kPassLastArgsOnStack = false;
#endif

  // Pass value and slot through the stack.
  static const int kStackArgumentsCount = kPassLastArgsOnStack ? 2 : 0;
};

class StoreTransitionDescriptor : public StoreDescriptor {
 public:
  DEFINE_PARAMETERS(kReceiver, kName, kMap, kValue, kSlot, kVector)
@@ -790,6 +877,20 @@ class StoreGlobalDescriptor : public CallInterfaceDescriptor {
  }
};

class StoreGlobalBaselineDescriptor : public CallInterfaceDescriptor {
 public:
  DEFINE_PARAMETERS_NO_CONTEXT(kName, kValue, kSlot)
  DEFINE_PARAMETER_TYPES(MachineType::AnyTagged(),     // kName
                         MachineType::AnyTagged(),     // kValue
                         MachineType::TaggedSigned())  // kSlot
  DECLARE_DESCRIPTOR(StoreGlobalBaselineDescriptor, CallInterfaceDescriptor)

  static const bool kPassLastArgsOnStack =
      StoreDescriptor::kPassLastArgsOnStack;
  // Pass value and slot through the stack.
  static const int kStackArgumentsCount = kPassLastArgsOnStack ? 2 : 0;
};

class StoreGlobalWithVectorDescriptor : public StoreGlobalDescriptor {
 public:
  DEFINE_PARAMETERS(kName, kValue, kSlot, kVector)
@@ -858,6 +959,18 @@ class LoadWithReceiverAndVectorDescriptor : public LoadWithVectorDescriptor {
  static const int kStackArgumentsCount = kPassLastArgsOnStack ? 1 : 0;
};

class LoadWithReceiverBaselineDescriptor : public LoadBaselineDescriptor {
 public:
  // TODO(v8:9497): Revert the Machine type for kSlot to the
  // TaggedSigned once Torque can emit better call descriptors
  DEFINE_PARAMETERS_NO_CONTEXT(kReceiver, kLookupStartObject, kName, kSlot)
  DEFINE_PARAMETER_TYPES(MachineType::AnyTagged(),  // kReceiver
                         MachineType::AnyTagged(),  // kLookupStartObject
                         MachineType::AnyTagged(),  // kName
                         MachineType::AnyTagged())  // kSlot
  DECLARE_DESCRIPTOR(LoadWithReceiverBaselineDescriptor, LoadBaselineDescriptor)
};

class LoadGlobalWithVectorDescriptor : public LoadGlobalDescriptor {
 public:
  DEFINE_PARAMETERS(kName, kSlot, kVector)
@@ -1019,16 +1132,27 @@ class CallWithSpreadDescriptor : public CallInterfaceDescriptor {
  DECLARE_DESCRIPTOR(CallWithSpreadDescriptor, CallInterfaceDescriptor)
};

// TODO(v8:11429,jgruber): Pass the slot as UintPtr.
class CallWithSpread_BaselineDescriptor : public CallInterfaceDescriptor {
 public:
  DEFINE_PARAMETERS_VARARGS(kTarget, kArgumentsCount, kSpread, kSlot)
  DEFINE_PARAMETER_TYPES(MachineType::AnyTagged(),  // kTarget
                         MachineType::Int32(),      // kArgumentsCount
                         MachineType::AnyTagged(),  // kSpread
                         MachineType::Int32())      // kSlot
  DECLARE_DESCRIPTOR(CallWithSpread_BaselineDescriptor, CallInterfaceDescriptor)
};

// TODO(jgruber): Pass the slot as UintPtr.
class CallWithSpread_WithFeedbackDescriptor : public CallInterfaceDescriptor {
 public:
  DEFINE_PARAMETERS_VARARGS(kTarget, kArgumentsCount, kSpread, kSlot,
                            kMaybeFeedbackVector)
                            kFeedbackVector)
  DEFINE_PARAMETER_TYPES(MachineType::AnyTagged(),  // kTarget
                         MachineType::Int32(),      // kArgumentsCount
                         MachineType::AnyTagged(),  // kSpread
                         MachineType::Int32(),      // kSlot
                         MachineType::AnyTagged())  // kMaybeFeedbackVector
                         MachineType::AnyTagged())  // kFeedbackVector
  DECLARE_DESCRIPTOR(CallWithSpread_WithFeedbackDescriptor,
                     CallInterfaceDescriptor)
};
@@ -1045,11 +1169,11 @@ class CallWithArrayLikeDescriptor : public CallInterfaceDescriptor {
class CallWithArrayLike_WithFeedbackDescriptor
    : public CallInterfaceDescriptor {
 public:
  DEFINE_PARAMETERS(kTarget, kArgumentsList, kSlot, kMaybeFeedbackVector)
  DEFINE_PARAMETERS(kTarget, kArgumentsList, kSlot, kFeedbackVector)
  DEFINE_PARAMETER_TYPES(MachineType::AnyTagged(),  // kTarget
                         MachineType::AnyTagged(),  // kArgumentsList
                         MachineType::Int32(),      // kSlot
                         MachineType::AnyTagged())  // kMaybeFeedbackVector
                         MachineType::AnyTagged())  // kFeedbackVector
  DECLARE_DESCRIPTOR(CallWithArrayLike_WithFeedbackDescriptor,
                     CallInterfaceDescriptor)
};
@@ -1077,16 +1201,37 @@ class ConstructWithSpreadDescriptor : public CallInterfaceDescriptor {
  DECLARE_DESCRIPTOR(ConstructWithSpreadDescriptor, CallInterfaceDescriptor)
};

// TODO(v8:11429,jgruber): Pass the slot as UintPtr.
class ConstructWithSpread_BaselineDescriptor : public CallInterfaceDescriptor {
 public:
  // Note: kSlot comes before kSpread since as an untagged value it must be
  // passed in a register.
  DEFINE_JS_PARAMETERS(kSlot, kSpread)
  DEFINE_JS_PARAMETER_TYPES(MachineType::Int32(),      // kSlot
                            MachineType::AnyTagged())  // kSpread
  DECLARE_DESCRIPTOR(ConstructWithSpread_BaselineDescriptor,
                     CallInterfaceDescriptor)

#if V8_TARGET_ARCH_IA32
  static const bool kPassLastArgsOnStack = true;
#else
  static const bool kPassLastArgsOnStack = false;
#endif

  // Pass spread through the stack.
  static const int kStackArgumentsCount = kPassLastArgsOnStack ? 1 : 0;
};

// TODO(jgruber): Pass the slot as UintPtr.
class ConstructWithSpread_WithFeedbackDescriptor
    : public CallInterfaceDescriptor {
 public:
  // Note: kSlot comes before kSpread since as an untagged value it must be
  // passed in a register.
  DEFINE_JS_PARAMETERS(kSlot, kSpread, kMaybeFeedbackVector)
  DEFINE_JS_PARAMETERS(kSlot, kSpread, kFeedbackVector)
  DEFINE_JS_PARAMETER_TYPES(MachineType::Int32(),      // kSlot
                            MachineType::AnyTagged(),  // kSpread
                            MachineType::AnyTagged())  // kMaybeFeedbackVector
                            MachineType::AnyTagged())  // kFeedbackVector
  DECLARE_DESCRIPTOR(ConstructWithSpread_WithFeedbackDescriptor,
                     CallInterfaceDescriptor)
};
@@ -1104,13 +1249,12 @@ class ConstructWithArrayLikeDescriptor : public CallInterfaceDescriptor {
class ConstructWithArrayLike_WithFeedbackDescriptor
    : public CallInterfaceDescriptor {
 public:
  DEFINE_PARAMETERS(kTarget, kNewTarget, kArgumentsList, kSlot,
                    kMaybeFeedbackVector)
  DEFINE_PARAMETERS(kTarget, kNewTarget, kArgumentsList, kSlot, kFeedbackVector)
  DEFINE_PARAMETER_TYPES(MachineType::AnyTagged(),  // kTarget
                         MachineType::AnyTagged(),  // kNewTarget
                         MachineType::AnyTagged(),  // kArgumentsList
                         MachineType::Int32(),      // kSlot
                         MachineType::AnyTagged())  // kMaybeFeedbackVector
                         MachineType::AnyTagged())  // kFeedbackVector
  DECLARE_DESCRIPTOR(ConstructWithArrayLike_WithFeedbackDescriptor,
                     CallInterfaceDescriptor)
};
@@ -1202,6 +1346,15 @@ class BinaryOpDescriptor : public CallInterfaceDescriptor {
  DECLARE_DESCRIPTOR(BinaryOpDescriptor, CallInterfaceDescriptor)
};

class BinaryOp_BaselineDescriptor : public CallInterfaceDescriptor {
 public:
  DEFINE_PARAMETERS_NO_CONTEXT(kLeft, kRight, kSlot)
  DEFINE_PARAMETER_TYPES(MachineType::AnyTagged(),  // kLeft
                         MachineType::AnyTagged(),  // kRight
                         MachineType::UintPtr())    // kSlot
  DECLARE_DESCRIPTOR(BinaryOp_BaselineDescriptor, CallInterfaceDescriptor)
};
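
Note the UintPtr slot here and in Compare_Baseline/UnaryOp_Baseline, while
the call/construct Baseline variants still pass the slot as Int32 (see their
TODOs). A handler can feed it straight into the feedback API added in this CL
(sketch; 'feedback' is the TNode<Smi> produced by the operation itself, and
guaranteed_feedback holds for baseline code, which always has a vector):

  TNode<UintPtrT> slot = UncheckedParameter<UintPtrT>(Descriptor::kSlot);
  TNode<FeedbackVector> vector = LoadFeedbackVectorFromBaseline();
  MaybeUpdateFeedback(feedback, vector, slot, /* guaranteed_feedback */ true);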

// This descriptor is shared among String.p.charAt/charCodeAt/codePointAt
// as they all have the same interface.
class StringAtDescriptor final : public CallInterfaceDescriptor {
@@ -1299,6 +1452,39 @@ class GrowArrayElementsDescriptor : public CallInterfaceDescriptor {
  static const Register KeyRegister();
};

class V8_EXPORT_PRIVATE TailCallOptimizedCodeSlotDescriptor
    : public CallInterfaceDescriptor {
 public:
  DEFINE_PARAMETERS(kOptimizedCodeEntry)
  DEFINE_PARAMETER_TYPES(MachineType::AnyTagged())  // kOptimizedCodeEntry
  DECLARE_DESCRIPTOR(TailCallOptimizedCodeSlotDescriptor,
                     CallInterfaceDescriptor)
};

class BaselineOutOfLinePrologueDescriptor : public CallInterfaceDescriptor {
 public:
  DEFINE_PARAMETERS_NO_CONTEXT(kCalleeContext, kClosure,
                               kJavaScriptCallArgCount,
                               kInterpreterBytecodeArray)
  DEFINE_PARAMETER_TYPES(MachineType::AnyTagged(),  // kCalleeContext
                         MachineType::AnyTagged(),  // kClosure
                         MachineType::Int32(),      // kJavaScriptCallArgCount
                         MachineType::AnyTagged())  // kInterpreterBytecodeArray
  DECLARE_DESCRIPTOR(BaselineOutOfLinePrologueDescriptor,
                     CallInterfaceDescriptor)
};

class BaselineLeaveFrameDescriptor : public CallInterfaceDescriptor {
 public:
  DEFINE_PARAMETERS_NO_CONTEXT(kParamsSize, kWeight)
  DEFINE_PARAMETER_TYPES(MachineType::Int32(),  // kParamsSize
                         MachineType::Int32())  // kWeight
  DECLARE_DESCRIPTOR(BaselineLeaveFrameDescriptor, CallInterfaceDescriptor)

  static const Register ParamsSizeRegister();
  static const Register WeightRegister();
};
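
The two parameters pair with the frame-exit duties of baseline code:
kParamsSize is the argument count to pop, and kWeight is presumably the
amount to charge against the interrupt budget on function exit (cf.
kFunctionExitBytecodeOffset added in globals.h below). A sketch of an
epilogue call site, using the x64 registers assigned in this CL and the
Move(Register, Immediate) overload it adds (the builtin id is hypothetical):

  __ Move(rbx, Immediate(params_size));  // ParamsSizeRegister()
  __ Move(rcx, Immediate(weight));       // WeightRegister()
  __ CallBuiltin(Builtins::kBaselineLeaveFrame);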

class V8_EXPORT_PRIVATE InterpreterDispatchDescriptor
    : public CallInterfaceDescriptor {
 public:
@@ -1367,6 +1553,18 @@ class InterpreterCEntry2Descriptor : public CallInterfaceDescriptor {
  DECLARE_DESCRIPTOR(InterpreterCEntry2Descriptor, CallInterfaceDescriptor)
};

class ForInPrepareDescriptor : public CallInterfaceDescriptor {
 public:
  DEFINE_RESULT_AND_PARAMETERS(2, kEnumerator, kVectorIndex, kFeedbackVector)
  DEFINE_RESULT_AND_PARAMETER_TYPES(
      MachineType::AnyTagged(),     // result 1 (cache array)
      MachineType::AnyTagged(),     // result 2 (cache length)
      MachineType::AnyTagged(),     // kEnumerator
      MachineType::TaggedSigned(),  // kVectorIndex
      MachineType::AnyTagged())     // kFeedbackVector
  DECLARE_DESCRIPTOR(ForInPrepareDescriptor, CallInterfaceDescriptor)
};

class ResumeGeneratorDescriptor final : public CallInterfaceDescriptor {
 public:
  DEFINE_PARAMETERS(kValue, kGenerator)
@@ -1501,59 +1699,104 @@ class CloneObjectWithVectorDescriptor final : public CallInterfaceDescriptor {
  DECLARE_DESCRIPTOR(CloneObjectWithVectorDescriptor, CallInterfaceDescriptor)
};

class CloneObjectBaselineDescriptor final : public CallInterfaceDescriptor {
 public:
  DEFINE_PARAMETERS_NO_CONTEXT(kSource, kFlags, kSlot)
  DEFINE_PARAMETER_TYPES(MachineType::AnyTagged(),     // kSource
                         MachineType::TaggedSigned(),  // kFlags
                         MachineType::TaggedSigned())  // kSlot
  DECLARE_DESCRIPTOR(CloneObjectBaselineDescriptor, CallInterfaceDescriptor)
};

class BinaryOp_WithFeedbackDescriptor : public CallInterfaceDescriptor {
 public:
  DEFINE_PARAMETERS(kLeft, kRight, kSlot, kMaybeFeedbackVector)
  DEFINE_PARAMETERS(kLeft, kRight, kSlot, kFeedbackVector)
  DEFINE_PARAMETER_TYPES(MachineType::AnyTagged(),  // kLeft
                         MachineType::AnyTagged(),  // kRight
                         MachineType::UintPtr(),    // kSlot
                         MachineType::AnyTagged())  // kMaybeFeedbackVector
                         MachineType::AnyTagged())  // kFeedbackVector
  DECLARE_DESCRIPTOR(BinaryOp_WithFeedbackDescriptor, CallInterfaceDescriptor)
};

// TODO(v8:11429,jgruber): Pass the slot as UintPtr.
class CallTrampoline_BaselineDescriptor : public CallInterfaceDescriptor {
 public:
  DEFINE_PARAMETERS_VARARGS(kFunction, kActualArgumentsCount, kSlot)
  DEFINE_PARAMETER_TYPES(MachineType::AnyTagged(),  // kFunction
                         MachineType::Int32(),      // kActualArgumentsCount
                         MachineType::Int32())      // kSlot
  DECLARE_DESCRIPTOR(CallTrampoline_BaselineDescriptor, CallInterfaceDescriptor)
};

// TODO(jgruber): Pass the slot as UintPtr.
class CallTrampoline_WithFeedbackDescriptor : public CallInterfaceDescriptor {
 public:
  DEFINE_PARAMETERS_VARARGS(kFunction, kActualArgumentsCount, kSlot,
                            kMaybeFeedbackVector)
                            kFeedbackVector)
  DEFINE_PARAMETER_TYPES(MachineType::AnyTagged(),  // kFunction
                         MachineType::Int32(),      // kActualArgumentsCount
                         MachineType::Int32(),      // kSlot
                         MachineType::AnyTagged())  // kMaybeFeedbackVector
                         MachineType::AnyTagged())  // kFeedbackVector
  DECLARE_DESCRIPTOR(CallTrampoline_WithFeedbackDescriptor,
                     CallInterfaceDescriptor)
};

class Compare_WithFeedbackDescriptor : public CallInterfaceDescriptor {
 public:
  DEFINE_PARAMETERS(kLeft, kRight, kSlot, kMaybeFeedbackVector)
  DEFINE_PARAMETERS(kLeft, kRight, kSlot, kFeedbackVector)
  DEFINE_PARAMETER_TYPES(MachineType::AnyTagged(),  // kLeft
                         MachineType::AnyTagged(),  // kRight
                         MachineType::UintPtr(),    // kSlot
                         MachineType::AnyTagged())  // kMaybeFeedbackVector
                         MachineType::AnyTagged())  // kFeedbackVector
  DECLARE_DESCRIPTOR(Compare_WithFeedbackDescriptor, CallInterfaceDescriptor)
};

class Compare_BaselineDescriptor : public CallInterfaceDescriptor {
 public:
  DEFINE_PARAMETERS_NO_CONTEXT(kLeft, kRight, kSlot)
  DEFINE_PARAMETER_TYPES(MachineType::AnyTagged(),  // kLeft
                         MachineType::AnyTagged(),  // kRight
                         MachineType::UintPtr())    // kSlot
  DECLARE_DESCRIPTOR(Compare_BaselineDescriptor, CallInterfaceDescriptor)
};

// TODO(v8:11429,jgruber): Pass the slot as UintPtr.
class Construct_BaselineDescriptor : public CallInterfaceDescriptor {
 public:
  DEFINE_JS_PARAMETERS_NO_CONTEXT(kSlot)
  DEFINE_JS_PARAMETER_TYPES(MachineType::Int32())  // kSlot
  DECLARE_JS_COMPATIBLE_DESCRIPTOR(Construct_BaselineDescriptor,
                                   CallInterfaceDescriptor, 1)
};

// TODO(jgruber): Pass the slot as UintPtr.
class Construct_WithFeedbackDescriptor : public CallInterfaceDescriptor {
 public:
  // kSlot is passed in a register, kMaybeFeedbackVector on the stack.
  DEFINE_JS_PARAMETERS(kSlot, kMaybeFeedbackVector)
  // kSlot is passed in a register, kFeedbackVector on the stack.
  DEFINE_JS_PARAMETERS(kSlot, kFeedbackVector)
  DEFINE_JS_PARAMETER_TYPES(MachineType::Int32(),      // kSlot
                            MachineType::AnyTagged())  // kMaybeFeedbackVector
                            MachineType::AnyTagged())  // kFeedbackVector
  DECLARE_JS_COMPATIBLE_DESCRIPTOR(Construct_WithFeedbackDescriptor,
                                   CallInterfaceDescriptor, 1)
};

class UnaryOp_WithFeedbackDescriptor : public CallInterfaceDescriptor {
 public:
  DEFINE_PARAMETERS(kValue, kSlot, kMaybeFeedbackVector)
  DEFINE_PARAMETERS(kValue, kSlot, kFeedbackVector)
  DEFINE_PARAMETER_TYPES(MachineType::AnyTagged(),  // kValue
                         MachineType::UintPtr(),    // kSlot
                         MachineType::AnyTagged())  // kMaybeFeedbackVector
                         MachineType::AnyTagged())  // kFeedbackVector
  DECLARE_DESCRIPTOR(UnaryOp_WithFeedbackDescriptor, CallInterfaceDescriptor)
};

class UnaryOp_BaselineDescriptor : public CallInterfaceDescriptor {
 public:
  DEFINE_PARAMETERS(kValue, kSlot)
  DEFINE_PARAMETER_TYPES(MachineType::AnyTagged(),  // kValue
                         MachineType::UintPtr())    // kSlot
  DECLARE_DESCRIPTOR(UnaryOp_BaselineDescriptor, CallInterfaceDescriptor)
};

#define DEFINE_TFS_BUILTIN_DESCRIPTOR(Name, ...) \
  class Name##Descriptor : public CallInterfaceDescriptor { \
   public: \

src/codegen/mips/interface-descriptors-mips.cc
@@ -112,6 +112,15 @@ const Register ApiGetterDescriptor::CallbackRegister() { return a3; }
const Register GrowArrayElementsDescriptor::ObjectRegister() { return a0; }
const Register GrowArrayElementsDescriptor::KeyRegister() { return a3; }

const Register BaselineLeaveFrameDescriptor::ParamsSizeRegister() {
  // TODO(v8:11421): Implement on this platform.
  UNREACHABLE();
}
const Register BaselineLeaveFrameDescriptor::WeightRegister() {
  // TODO(v8:11421): Implement on this platform.
  UNREACHABLE();
}

// static
const Register TypeConversionDescriptor::ArgumentRegister() { return a0; }

@@ -235,12 +244,24 @@ void CompareDescriptor::InitializePlatformSpecific(
  data->InitializePlatformSpecific(arraysize(registers), registers);
}

void Compare_BaselineDescriptor::InitializePlatformSpecific(
    CallInterfaceDescriptorData* data) {
  // TODO(v8:11421): Implement on this platform.
  InitializePlatformUnimplemented(data, kParameterCount);
}

void BinaryOpDescriptor::InitializePlatformSpecific(
    CallInterfaceDescriptorData* data) {
  Register registers[] = {a1, a0};
  data->InitializePlatformSpecific(arraysize(registers), registers);
}

void BinaryOp_BaselineDescriptor::InitializePlatformSpecific(
    CallInterfaceDescriptorData* data) {
  // TODO(v8:11421): Implement on this platform.
  InitializePlatformUnimplemented(data, kParameterCount);
}

void ApiCallbackDescriptor::InitializePlatformSpecific(
    CallInterfaceDescriptorData* data) {
  Register registers[] = {

src/codegen/mips64/interface-descriptors-mips64.cc
@@ -112,6 +112,15 @@ const Register ApiGetterDescriptor::CallbackRegister() { return a3; }
const Register GrowArrayElementsDescriptor::ObjectRegister() { return a0; }
const Register GrowArrayElementsDescriptor::KeyRegister() { return a3; }

const Register BaselineLeaveFrameDescriptor::ParamsSizeRegister() {
  // TODO(v8:11421): Implement on this platform.
  UNREACHABLE();
}
const Register BaselineLeaveFrameDescriptor::WeightRegister() {
  // TODO(v8:11421): Implement on this platform.
  UNREACHABLE();
}

// static
const Register TypeConversionDescriptor::ArgumentRegister() { return a0; }

@@ -235,12 +244,24 @@ void CompareDescriptor::InitializePlatformSpecific(
  data->InitializePlatformSpecific(arraysize(registers), registers);
}

void Compare_BaselineDescriptor::InitializePlatformSpecific(
    CallInterfaceDescriptorData* data) {
  // TODO(v8:11421): Implement on this platform.
  InitializePlatformUnimplemented(data, kParameterCount);
}

void BinaryOpDescriptor::InitializePlatformSpecific(
    CallInterfaceDescriptorData* data) {
  Register registers[] = {a1, a0};
  data->InitializePlatformSpecific(arraysize(registers), registers);
}

void BinaryOp_BaselineDescriptor::InitializePlatformSpecific(
    CallInterfaceDescriptorData* data) {
  // TODO(v8:11421): Implement on this platform.
  InitializePlatformUnimplemented(data, kParameterCount);
}

void ApiCallbackDescriptor::InitializePlatformSpecific(
    CallInterfaceDescriptorData* data) {
  Register registers[] = {

src/codegen/ppc/interface-descriptors-ppc.cc
@@ -86,6 +86,15 @@ const Register ApiGetterDescriptor::CallbackRegister() { return r6; }
const Register GrowArrayElementsDescriptor::ObjectRegister() { return r3; }
const Register GrowArrayElementsDescriptor::KeyRegister() { return r6; }

const Register BaselineLeaveFrameDescriptor::ParamsSizeRegister() {
  // TODO(v8:11421): Implement on this platform.
  UNREACHABLE();
}
const Register BaselineLeaveFrameDescriptor::WeightRegister() {
  // TODO(v8:11421): Implement on this platform.
  UNREACHABLE();
}

// static
const Register TypeConversionDescriptor::ArgumentRegister() { return r3; }

@@ -209,12 +218,24 @@ void CompareDescriptor::InitializePlatformSpecific(
  data->InitializePlatformSpecific(arraysize(registers), registers);
}

void Compare_BaselineDescriptor::InitializePlatformSpecific(
    CallInterfaceDescriptorData* data) {
  // TODO(v8:11421): Implement on this platform.
  InitializePlatformUnimplemented(data, kParameterCount);
}

void BinaryOpDescriptor::InitializePlatformSpecific(
    CallInterfaceDescriptorData* data) {
  Register registers[] = {r4, r3};
  data->InitializePlatformSpecific(arraysize(registers), registers);
}

void BinaryOp_BaselineDescriptor::InitializePlatformSpecific(
    CallInterfaceDescriptorData* data) {
  // TODO(v8:11421): Implement on this platform.
  InitializePlatformUnimplemented(data, kParameterCount);
}

void ApiCallbackDescriptor::InitializePlatformSpecific(
    CallInterfaceDescriptorData* data) {
  Register registers[] = {

src/codegen/s390/interface-descriptors-s390.cc
@@ -86,6 +86,15 @@ const Register ApiGetterDescriptor::CallbackRegister() { return r5; }
const Register GrowArrayElementsDescriptor::ObjectRegister() { return r2; }
const Register GrowArrayElementsDescriptor::KeyRegister() { return r5; }

const Register BaselineLeaveFrameDescriptor::ParamsSizeRegister() {
  // TODO(v8:11421): Implement on this platform.
  UNREACHABLE();
}
const Register BaselineLeaveFrameDescriptor::WeightRegister() {
  // TODO(v8:11421): Implement on this platform.
  UNREACHABLE();
}

// static
const Register TypeConversionDescriptor::ArgumentRegister() { return r2; }

@@ -209,12 +218,24 @@ void CompareDescriptor::InitializePlatformSpecific(
  data->InitializePlatformSpecific(arraysize(registers), registers);
}

void Compare_BaselineDescriptor::InitializePlatformSpecific(
    CallInterfaceDescriptorData* data) {
  // TODO(v8:11421): Implement on this platform.
  InitializePlatformUnimplemented(data, kParameterCount);
}

void BinaryOpDescriptor::InitializePlatformSpecific(
    CallInterfaceDescriptorData* data) {
  Register registers[] = {r3, r2};
  data->InitializePlatformSpecific(arraysize(registers), registers);
}

void BinaryOp_BaselineDescriptor::InitializePlatformSpecific(
    CallInterfaceDescriptorData* data) {
  // TODO(v8:11421): Implement on this platform.
  InitializePlatformUnimplemented(data, kParameterCount);
}

void ApiCallbackDescriptor::InitializePlatformSpecific(
    CallInterfaceDescriptorData* data) {
  Register registers[] = {

src/codegen/source-position-table.cc
@@ -5,6 +5,8 @@
#include "src/codegen/source-position-table.h"

#include "src/base/export-template.h"
#include "src/base/logging.h"
#include "src/common/assert-scope.h"
#include "src/heap/local-factory-inl.h"
#include "src/objects/objects-inl.h"
#include "src/objects/objects.h"
@@ -36,7 +38,10 @@ using ValueBits = base::BitField8<unsigned, 0, 7>;
void AddAndSetEntry(PositionTableEntry* value,
                    const PositionTableEntry& other) {
  value->code_offset += other.code_offset;
  DCHECK_IMPLIES(value->code_offset != kFunctionEntryBytecodeOffset,
                 value->code_offset >= 0);
  value->source_position += other.source_position;
  DCHECK_LE(0, value->source_position);
  value->is_statement = other.is_statement;
}

src/codegen/turbo-assembler.cc
@@ -118,7 +118,7 @@ bool TurboAssemblerBase::IsAddressableThroughRootRegister(
void TurboAssemblerBase::RecordCommentForOffHeapTrampoline(int builtin_index) {
  if (!FLAG_code_comments) return;
  std::ostringstream str;
  str << "-- Inlined Trampoline to " << Builtins::name(builtin_index) << " --";
  str << "[ Inlined Trampoline to " << Builtins::name(builtin_index);
  RecordComment(str.str().c_str());
}

src/codegen/turbo-assembler.h
@@ -117,9 +117,9 @@ class V8_EXPORT_PRIVATE TurboAssemblerBase : public Assembler {
  static constexpr int kStackPageSize = 4 * KB;
#endif

 protected:
  void RecordCommentForOffHeapTrampoline(int builtin_index);

 protected:
  Isolate* const isolate_ = nullptr;

  // This handle will be patched with the code object on installation.

src/codegen/x64/interface-descriptors-x64.cc
@@ -93,6 +93,11 @@ const Register ApiGetterDescriptor::CallbackRegister() { return rbx; }
const Register GrowArrayElementsDescriptor::ObjectRegister() { return rax; }
const Register GrowArrayElementsDescriptor::KeyRegister() { return rbx; }

const Register BaselineLeaveFrameDescriptor::ParamsSizeRegister() {
  return rbx;
}
const Register BaselineLeaveFrameDescriptor::WeightRegister() { return rcx; }

void TypeofDescriptor::InitializePlatformSpecific(
    CallInterfaceDescriptorData* data) {
  Register registers[] = {rbx};
@@ -216,12 +221,24 @@ void CompareDescriptor::InitializePlatformSpecific(
  data->InitializePlatformSpecific(arraysize(registers), registers);
}

void Compare_BaselineDescriptor::InitializePlatformSpecific(
    CallInterfaceDescriptorData* data) {
  Register registers[] = {rdx, rax, rbx};
  data->InitializePlatformSpecific(arraysize(registers), registers);
}

void BinaryOpDescriptor::InitializePlatformSpecific(
    CallInterfaceDescriptorData* data) {
  Register registers[] = {rdx, rax};
  data->InitializePlatformSpecific(arraysize(registers), registers);
}

void BinaryOp_BaselineDescriptor::InitializePlatformSpecific(
    CallInterfaceDescriptorData* data) {
  Register registers[] = {rdx, rax, rbx};
  data->InitializePlatformSpecific(arraysize(registers), registers);
}

void ApiCallbackDescriptor::InitializePlatformSpecific(
    CallInterfaceDescriptorData* data) {
  Register registers[] = {

src/codegen/x64/macro-assembler-x64.cc
@@ -204,6 +204,15 @@ void TurboAssembler::LoadTaggedPointerField(Register destination,
  }
}

void TurboAssembler::LoadTaggedSignedField(Register destination,
                                           Operand field_operand) {
  if (COMPRESS_POINTERS_BOOL) {
    DecompressTaggedSigned(destination, field_operand);
  } else {
    mov_tagged(destination, field_operand);
  }
}

void TurboAssembler::LoadAnyTaggedField(Register destination,
                                        Operand field_operand) {
  if (COMPRESS_POINTERS_BOOL) {
@@ -257,6 +266,16 @@ void TurboAssembler::StoreTaggedField(Operand dst_field_operand,
  }
}

void TurboAssembler::StoreTaggedSignedField(Operand dst_field_operand,
                                            Smi value) {
  if (SmiValuesAre32Bits()) {
    movl(Operand(dst_field_operand, kSmiShift / kBitsPerByte),
         Immediate(value.value()));
  } else {
    StoreTaggedField(dst_field_operand, Immediate(value));
  }
}
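
The fast path above relies on the 32-bit-Smi layout on x64: the payload lives
in the upper half of the 8-byte tagged slot, so a single movl to byte offset
kSmiShift / kBitsPerByte (= 4) updates the value, while the low tag half of
the previously stored Smi is already zero and stays valid. Sketched as
comments:

  // [field + 0] : 0x00000000     (tag half; zero for any Smi already stored)
  // [field + 4] : value.value()  (written by the single movl above)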

void TurboAssembler::DecompressTaggedSigned(Register destination,
                                            Operand field_operand) {
  RecordComment("[ DecompressTaggedSigned");
@@ -1350,6 +1369,9 @@ void TurboAssembler::Move(Register dst, Register src) {
  }
}

void TurboAssembler::Move(Register dst, Operand src) { movq(dst, src); }
void TurboAssembler::Move(Register dst, Immediate src) { movl(dst, src); }

void TurboAssembler::Move(XMMRegister dst, XMMRegister src) {
  if (dst != src) {
    Movaps(dst, src);
@@ -1604,6 +1626,7 @@ void TurboAssembler::Jump(Handle<Code> code_object, RelocInfo::Mode rmode,
    Address entry = d.InstructionStartOfBuiltin(builtin_index);
    Move(kScratchRegister, entry, RelocInfo::OFF_HEAP_TARGET);
    jmp(kScratchRegister);
    if (FLAG_code_comments) RecordComment("]");
    bind(&skip);
    return;
  }
@@ -1686,6 +1709,18 @@ void TurboAssembler::CallBuiltin(int builtin_index) {
  Address entry = d.InstructionStartOfBuiltin(builtin_index);
  Move(kScratchRegister, entry, RelocInfo::OFF_HEAP_TARGET);
  call(kScratchRegister);
  if (FLAG_code_comments) RecordComment("]");
}

void TurboAssembler::TailCallBuiltin(int builtin_index) {
  DCHECK(Builtins::IsBuiltinId(builtin_index));
  RecordCommentForOffHeapTrampoline(builtin_index);
  CHECK_NE(builtin_index, Builtins::kNoBuiltinId);
  EmbeddedData d = EmbeddedData::FromBlob();
  Address entry = d.InstructionStartOfBuiltin(builtin_index);
  Move(kScratchRegister, entry, RelocInfo::OFF_HEAP_TARGET);
  jmp(kScratchRegister);
  if (FLAG_code_comments) RecordComment("]");
}

void TurboAssembler::LoadCodeObjectEntry(Register destination,
@@ -3126,11 +3161,16 @@ void TurboAssembler::Prologue() {
void TurboAssembler::EnterFrame(StackFrame::Type type) {
  pushq(rbp);
  movq(rbp, rsp);
  Push(Immediate(StackFrame::TypeToMarker(type)));
  if (type != StackFrame::MANUAL) {
    Push(Immediate(StackFrame::TypeToMarker(type)));
  }
}

void TurboAssembler::LeaveFrame(StackFrame::Type type) {
  if (emit_debug_code()) {
  // TODO(v8:11429): Consider passing SPARKPLUG instead, and checking for
  // IsJSFrame or similar. Could then unify with manual frame leaves in the
  // interpreter too.
  if (emit_debug_code() && type != StackFrame::MANUAL) {
    cmpq(Operand(rbp, CommonFrameConstants::kContextOrFrameTypeOffset),
         Immediate(StackFrame::TypeToMarker(type)));
    Check(equal, AbortReason::kStackFrameTypesMustMatch);
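
EnterFrame(StackFrame::MANUAL) now emits only the frame link (pushq rbp /
movq rbp, rsp) and skips the type marker, and LeaveFrame skips the matching
marker check, so a caller such as the Sparkplug prologue can lay out its own
frame. A usage sketch (marker choice illustrative):

  __ EnterFrame(StackFrame::MANUAL);
  __ Push(Immediate(StackFrame::TypeToMarker(StackFrame::INTERNAL)));
  // ... frame body ...
  __ LeaveFrame(StackFrame::MANUAL);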

src/codegen/x64/macro-assembler-x64.h
@@ -14,6 +14,7 @@
#include "src/codegen/x64/assembler-x64.h"
#include "src/common/globals.h"
#include "src/objects/contexts.h"
#include "src/objects/tagged-index.h"

namespace v8 {
namespace internal {
@@ -326,6 +327,9 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
  void Push(Operand src);
  void Push(Immediate value);
  void Push(Smi smi);
  void Push(TaggedIndex index) {
    Push(Immediate(static_cast<uint32_t>(index.ptr())));
  }
  void Push(Handle<HeapObject> source);

  enum class PushArrayOrder { kNormal, kReverse };
@@ -445,6 +449,14 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
    movq(dst, constant);
  }

  void Move(Register dst, TaggedIndex source) {
    movl(dst, Immediate(static_cast<uint32_t>(source.ptr())));
  }

  void Move(Operand dst, TaggedIndex source) {
    movl(dst, Immediate(static_cast<uint32_t>(source.ptr())));
  }

  void Move(Register dst, ExternalReference ext);

  void Move(XMMRegister dst, uint32_t src);
@@ -457,6 +469,9 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
  void Move(Register target, Register source);
  void Move(XMMRegister target, XMMRegister source);

  void Move(Register target, Operand source);
  void Move(Register target, Immediate source);

  void Move(Register dst, Handle<HeapObject> source,
            RelocInfo::Mode rmode = RelocInfo::FULL_EMBEDDED_OBJECT);
  void Move(Operand dst, Handle<HeapObject> source,
@@ -512,6 +527,7 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
  Operand EntryFromBuiltinIndexAsOperand(Register builtin_index);
  void CallBuiltinByIndex(Register builtin_index) override;
  void CallBuiltin(int builtin_index);
  void TailCallBuiltin(int builtin_index);

  void LoadCodeObjectEntry(Register destination, Register code_object) override;
  void CallCodeObject(Register code_object) override;
@@ -744,6 +760,10 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
  // compression is enabled.
  void LoadTaggedPointerField(Register destination, Operand field_operand);

  // Loads a field containing a Smi and decompresses it if pointer compression
  // is enabled.
  void LoadTaggedSignedField(Register destination, Operand field_operand);

  // Loads a field containing any tagged value and decompresses it if necessary.
  void LoadAnyTaggedField(Register destination, Operand field_operand);

@@ -764,6 +784,7 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
  // location.
  void StoreTaggedField(Operand dst_field_operand, Immediate immediate);
  void StoreTaggedField(Operand dst_field_operand, Register value);
  void StoreTaggedSignedField(Operand dst_field_operand, Smi value);

  // The following macros work even when pointer compression is not enabled.
  void DecompressTaggedSigned(Register destination, Operand field_operand);
@ -454,6 +454,10 @@ constexpr int kNoSourcePosition = -1;
// bytecode offset.
constexpr int kFunctionEntryBytecodeOffset = -1;

// This constant is used to signal the function exit interrupt budget handling
// bytecode offset.
constexpr int kFunctionExitBytecodeOffset = -1;

// This constant is used to indicate missing deoptimization information.
constexpr int kNoDeoptimizationId = -1;

@ -1073,8 +1073,10 @@ void Debug::PrepareStep(StepAction step_action) {
    if (!EnsureBreakInfo(shared)) return;
    PrepareFunctionForDebugExecution(shared);

    Handle<DebugInfo> debug_info(shared->GetDebugInfo(), isolate_);
    // PrepareFunctionForDebugExecution can invalidate Sparkplug frames
    js_frame = JavaScriptFrame::cast(frames_it.Reframe());

    Handle<DebugInfo> debug_info(shared->GetDebugInfo(), isolate_);
    location = BreakLocation::FromFrame(debug_info, js_frame);

    // Any step at a return is a step-out, and a step-out at a suspend behaves
@ -1244,6 +1246,10 @@ void Debug::DeoptimizeFunction(Handle<SharedFunctionInfo> shared) {
  // inlining.
  isolate_->AbortConcurrentOptimization(BlockingBehavior::kBlock);

  if (shared->GetCode().kind() == CodeKind::SPARKPLUG) {
    Deoptimizer::DeoptimizeSparkplug(*shared);
  }

  bool found_something = false;
  Code::OptimizedCodeIterator iterator(isolate_);
  do {

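The Reframe() call above exists because PrepareFunctionForDebugExecution can deoptimize Sparkplug code under a live frame, leaving the iterator's cached frame wrapper with a stale type. A minimal standalone analogy (illustrative names, not the V8 API):

#include <cassert>
enum class FrameType { kInterpreted, kSparkplug };
struct FrameState { bool baseline_code_installed; };
FrameType ComputeType(const FrameState& state) {
  return state.baseline_code_installed ? FrameType::kSparkplug
                                       : FrameType::kInterpreted;
}
int main() {
  FrameState state{true};
  FrameType cached = ComputeType(state);  // SPARKPLUG while baseline code lives
  state.baseline_code_installed = false;  // debugger deopts the function
  cached = ComputeType(state);            // Reframe(): recompute from the stack
  assert(cached == FrameType::kInterpreted);
}
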
@ -96,6 +96,7 @@ std::vector<wasm_addr_t> WasmModuleDebug::GetCallStack(
      case StackFrame::JAVA_SCRIPT_BUILTIN_CONTINUATION_WITH_CATCH:
      case StackFrame::OPTIMIZED:
      case StackFrame::INTERPRETED:
      case StackFrame::SPARKPLUG:
      case StackFrame::BUILTIN:
      case StackFrame::WASM: {
        // A standard frame may include many summarized frames, due to inlining.
@ -154,6 +155,7 @@ std::vector<FrameSummary> WasmModuleDebug::FindWasmFrame(
      case StackFrame::JAVA_SCRIPT_BUILTIN_CONTINUATION_WITH_CATCH:
      case StackFrame::OPTIMIZED:
      case StackFrame::INTERPRETED:
      case StackFrame::SPARKPLUG:
      case StackFrame::BUILTIN:
      case StackFrame::WASM: {
        // A standard frame may include many summarized frames, due to inlining.

@ -382,6 +382,7 @@ void Deoptimizer::DeoptimizeAll(Isolate* isolate) {
  TraceDeoptAll(isolate);
  isolate->AbortConcurrentOptimization(BlockingBehavior::kBlock);
  DisallowGarbageCollection no_gc;
  DeoptimizeAllSparkplug(isolate);
  // For all contexts, mark all code, then deoptimize.
  Object context = isolate->heap()->native_contexts_list();
  while (!context.IsUndefined(isolate)) {
@ -420,6 +421,73 @@ void Deoptimizer::MarkAllCodeForContext(NativeContext native_context) {
  }
}

namespace {
class DeoptimizeSparkplugVisitor : public ThreadVisitor {
 public:
  explicit DeoptimizeSparkplugVisitor(SharedFunctionInfo shared)
      : shared_(shared) {}
  DeoptimizeSparkplugVisitor() : shared_(SharedFunctionInfo()) {}

  void VisitThread(Isolate* isolate, ThreadLocalTop* top) override {
    bool deopt_all = shared_ == SharedFunctionInfo();
    for (JavaScriptFrameIterator it(isolate, top); !it.done(); it.Advance()) {
      if (it.frame()->type() == StackFrame::SPARKPLUG) {
        SparkplugFrame* frame = SparkplugFrame::cast(it.frame());
        if (!deopt_all && frame->function().shared() != shared_) continue;
        frame->InterpretedFrame::PatchBytecodeOffset(
            frame->GetBytecodeOffset());
        Address* pc_addr = frame->pc_address();
        Address advance = BUILTIN_CODE(isolate, InterpreterEnterBytecodeAdvance)
                              ->InstructionStart();
        PointerAuthentication::ReplacePC(pc_addr, advance, kSystemPointerSize);
      }
    }
  }

 private:
  SharedFunctionInfo shared_;
  DISALLOW_GARBAGE_COLLECTION(no_gc_)
};
}  // namespace

void Deoptimizer::DeoptimizeSparkplug(SharedFunctionInfo shared) {
  DCHECK_EQ(shared.GetCode().kind(), CodeKind::SPARKPLUG);
  Isolate* isolate = shared.GetIsolate();
  DeoptimizeSparkplugVisitor visitor(shared);
  visitor.VisitThread(isolate, isolate->thread_local_top());
  isolate->thread_manager()->IterateArchivedThreads(&visitor);
  // TODO(v8:11429): Avoid this heap walk somehow.
  HeapObjectIterator iterator(isolate->heap());
  auto trampoline = BUILTIN_CODE(isolate, InterpreterEntryTrampoline);
  shared.flush_baseline_data();
  for (HeapObject obj = iterator.Next(); !obj.is_null();
       obj = iterator.Next()) {
    if (obj.IsJSFunction()) {
      JSFunction fun = JSFunction::cast(obj);
      if (fun.shared() == shared && fun.code().kind() == CodeKind::SPARKPLUG) {
        fun.set_code(*trampoline);
      }
    }
  }
}

void Deoptimizer::DeoptimizeAllSparkplug(Isolate* isolate) {
  DeoptimizeSparkplugVisitor visitor;
  visitor.VisitThread(isolate, isolate->thread_local_top());
  HeapObjectIterator iterator(isolate->heap());
  auto trampoline = BUILTIN_CODE(isolate, InterpreterEntryTrampoline);
  isolate->thread_manager()->IterateArchivedThreads(&visitor);
  for (HeapObject obj = iterator.Next(); !obj.is_null();
       obj = iterator.Next()) {
    if (obj.IsJSFunction()) {
      JSFunction fun = JSFunction::cast(obj);
      if (fun.shared().HasBaselineData()) {
        fun.set_code(*trampoline);
      }
    }
  }
}

void Deoptimizer::DeoptimizeFunction(JSFunction function, Code code) {
  Isolate* isolate = function.GetIsolate();
  RuntimeCallTimerScope runtimeTimer(isolate,

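The visitor above retires live baseline frames in two steps: it persists the current bytecode offset into the frame's interpreter slot, then rewrites the saved return PC so the frame resumes in Ignition. A standalone analogy of that patching step (types and names are illustrative, not the V8 API):

#include <cstdint>
#include <vector>
struct LiveFrame {
  bool is_sparkplug;
  int bytecode_offset_slot;      // interpreter register in the frame
  int current_bytecode_offset;   // derived from the frame's pc
  uintptr_t return_pc;           // where this frame resumes
};
void DeoptSparkplugFrames(std::vector<LiveFrame>& stack,
                          uintptr_t interpreter_advance_entry) {
  for (LiveFrame& frame : stack) {
    if (!frame.is_sparkplug) continue;
    // Persist the offset, then point the saved PC at the interpreter's
    // bytecode-advance entry so execution continues in Ignition.
    frame.bytecode_offset_slot = frame.current_bytecode_offset;
    frame.return_pc = interpreter_advance_entry;
  }
}
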
@ -66,6 +66,12 @@ class Deoptimizer : public Malloced {
  // instead of the function code (e.g. OSR code not installed on function).
  static void DeoptimizeFunction(JSFunction function, Code code = Code());

  // From Sparkplug to Ignition.
  // TODO(v8:11429): Consider moving this to the debugger, since it's only for
  // debug.
  static void DeoptimizeSparkplug(SharedFunctionInfo shared);
  static void DeoptimizeAllSparkplug(Isolate* isolate);

  // Deoptimize all code in the given isolate.
  V8_EXPORT_PRIVATE static void DeoptimizeAll(Isolate* isolate);

@ -817,6 +817,7 @@ void JSFunction::JSFunctionVerify(Isolate* isolate) {
  TorqueGeneratedClassVerifiers::JSFunctionVerify(*this, isolate);
  CHECK(code().IsCode());
  CHECK(map().is_callable());
  CHECK_IMPLIES(code().kind() == CodeKind::SPARKPLUG, has_feedback_vector());
  Handle<JSFunction> function(*this, isolate);
  LookupIterator it(isolate, function, isolate->factory()->prototype_string(),
                    LookupIterator::OWN_SKIP_INTERCEPTOR);

@ -27,6 +27,8 @@

#include "src/diagnostics/perf-jit.h"

#include "src/common/assert-scope.h"

// Only compile the {PerfJitLogger} on Linux.
#if V8_OS_LINUX

@ -211,7 +213,8 @@ void PerfJitLogger::LogRecordedBuffer(
      (abstract_code->kind() != CodeKind::INTERPRETED_FUNCTION &&
       abstract_code->kind() != CodeKind::TURBOFAN &&
       abstract_code->kind() != CodeKind::NATIVE_CONTEXT_INDEPENDENT &&
       abstract_code->kind() != CodeKind::TURBOPROP)) {
       abstract_code->kind() != CodeKind::TURBOPROP &&
       abstract_code->kind() != CodeKind::SPARKPLUG)) {
    return;
  }

@ -335,9 +338,17 @@ SourcePositionInfo GetSourcePositionInfo(Handle<Code> code,

void PerfJitLogger::LogWriteDebugInfo(Handle<Code> code,
                                      Handle<SharedFunctionInfo> shared) {
  DisallowGarbageCollection no_gc;
  // TODO(v8:11429,cbruni): add proper sparkplug source position iterator
  bool is_sparkplug = code->kind() == CodeKind::SPARKPLUG;
  ByteArray source_position_table = code->SourcePositionTable();
  if (is_sparkplug) {
    source_position_table =
        shared->GetBytecodeArray(shared->GetIsolate()).SourcePositionTable();
  }
  // Compute the entry count and get the name of the script.
  uint32_t entry_count = 0;
  for (SourcePositionTableIterator iterator(code->SourcePositionTable());
  for (SourcePositionTableIterator iterator(source_position_table);
       !iterator.done(); iterator.Advance()) {
    entry_count++;
  }
@ -358,7 +369,7 @@ void PerfJitLogger::LogWriteDebugInfo(Handle<Code> code,
  size += entry_count * sizeof(PerfJitDebugEntry);
  // Add the size of the name after each entry.

  for (SourcePositionTableIterator iterator(code->SourcePositionTable());
  for (SourcePositionTableIterator iterator(source_position_table);
       !iterator.done(); iterator.Advance()) {
    SourcePositionInfo info(
        GetSourcePositionInfo(code, shared, iterator.source_position()));
@ -371,7 +382,7 @@ void PerfJitLogger::LogWriteDebugInfo(Handle<Code> code,

  Address code_start = code->InstructionStart();

  for (SourcePositionTableIterator iterator(code->SourcePositionTable());
  for (SourcePositionTableIterator iterator(source_position_table);
       !iterator.done(); iterator.Advance()) {
    SourcePositionInfo info(
        GetSourcePositionInfo(code, shared, iterator.source_position()));

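Since Sparkplug code has no source position table of its own yet (see the TODO above), the perf-jit logger falls back to the function's bytecode table. A minimal sketch of that selection (the types are placeholders for illustration, not the V8 API):

struct PositionTable { /* (code offset, source position) entries */ };
struct CodeInfo {
  bool is_sparkplug;
  const PositionTable* own_positions;       // empty for baseline code today
  const PositionTable* bytecode_positions;  // owned by the bytecode array
};
const PositionTable* SelectSourcePositions(const CodeInfo& code) {
  // Baseline code borrows the bytecode's table until it grows its own
  // source position iterator.
  return code.is_sparkplug ? code.bytecode_positions : code.own_positions;
}
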
@ -218,6 +218,9 @@ inline OptimizedFrame::OptimizedFrame(StackFrameIteratorBase* iterator)
inline InterpretedFrame::InterpretedFrame(StackFrameIteratorBase* iterator)
    : JavaScriptFrame(iterator) {}

inline SparkplugFrame::SparkplugFrame(StackFrameIteratorBase* iterator)
    : InterpretedFrame(iterator) {}

inline BuiltinFrame::BuiltinFrame(StackFrameIteratorBase* iterator)
    : TypedFrameWithJSLinkage(iterator) {}

@ -287,6 +290,11 @@ inline CommonFrame* StackTraceFrameIterator::frame() const {
  return static_cast<CommonFrame*>(frame);
}

inline CommonFrame* StackTraceFrameIterator::Reframe() {
  iterator_.Reframe();
  return frame();
}

bool StackTraceFrameIterator::is_javascript() const {
  return frame()->is_java_script();
}

@ -122,6 +122,12 @@ void StackFrameIterator::Advance() {
  DCHECK(!done() || handler_ == nullptr);
}

StackFrame* StackFrameIterator::Reframe() {
  StackFrame::Type type = frame_->ComputeType(this, &frame_->state_);
  frame_ = SingletonFor(type, &frame_->state_);
  return frame();
}

void StackFrameIterator::Reset(ThreadLocalTop* top) {
  StackFrame::State state;
  StackFrame::Type type =
@ -219,10 +225,13 @@ bool IsInterpreterFramePc(Isolate* isolate, Address pc,
      isolate->builtins()->builtin(Builtins::kInterpreterEnterBytecodeAdvance);
  Code interpreter_bytecode_dispatch =
      isolate->builtins()->builtin(Builtins::kInterpreterEnterBytecodeDispatch);
  Code baseline_prologue =
      isolate->builtins()->builtin(Builtins::kBaselineOutOfLinePrologue);

  if (interpreter_entry_trampoline.contains(pc) ||
      interpreter_bytecode_advance.contains(pc) ||
      interpreter_bytecode_dispatch.contains(pc)) {
      interpreter_bytecode_dispatch.contains(pc) ||
      baseline_prologue.contains(pc)) {
    return true;
  } else if (FLAG_interpreted_frames_native_stack) {
    intptr_t marker = Memory<intptr_t>(
@ -581,6 +590,10 @@ StackFrame::Type StackFrame::ComputeType(const StackFrameIteratorBase* iterator,
      if (code_obj.is_interpreter_trampoline_builtin()) {
        return INTERPRETED;
      }
      if (code_obj.is_baseline_prologue_builtin() ||
          code_obj.is_baseline_leave_frame_builtin()) {
        return SPARKPLUG;
      }
      if (code_obj.is_turbofanned()) {
        // TODO(bmeurer): We treat frames for BUILTIN Code objects as
        // OptimizedFrame for now (all the builtins with JavaScript
@ -593,6 +606,8 @@ StackFrame::Type StackFrame::ComputeType(const StackFrameIteratorBase* iterator,
        case CodeKind::NATIVE_CONTEXT_INDEPENDENT:
        case CodeKind::TURBOPROP:
          return OPTIMIZED;
        case CodeKind::SPARKPLUG:
          return Type::SPARKPLUG;
        case CodeKind::JS_TO_WASM_FUNCTION:
          return JS_TO_WASM;
        case CodeKind::JS_TO_JS_FUNCTION:
@ -978,6 +993,7 @@ void CommonFrame::IterateCompiledFrame(RootVisitor* v) const {
      break;
    case OPTIMIZED:
    case INTERPRETED:
    case SPARKPLUG:
    case BUILTIN:
      // These frame types have a context, but they are actually stored
      // in the place on the stack that one finds the frame type.
@ -1173,7 +1189,8 @@ Script JavaScriptFrame::script() const {
int CommonFrameWithJSLinkage::LookupExceptionHandlerInTable(
    int* stack_depth, HandlerTable::CatchPrediction* prediction) {
  DCHECK(!LookupCode().has_handler_table());
  DCHECK(!LookupCode().is_optimized_code());
  DCHECK(!LookupCode().is_optimized_code() ||
         LookupCode().kind() == CodeKind::SPARKPLUG);
  return -1;
}

@ -1346,6 +1363,7 @@ FrameSummary::JavaScriptFrameSummary::JavaScriptFrameSummary(
      is_constructor_(is_constructor),
      parameters_(parameters, isolate) {
  DCHECK(abstract_code.IsBytecodeArray() ||
         Code::cast(abstract_code).kind() == CodeKind::SPARKPLUG ||
         !CodeKindIsOptimizedJSFunction(Code::cast(abstract_code).kind()));
}

@ -1732,6 +1750,14 @@ void InterpretedFrame::PatchBytecodeOffset(int new_offset) {
  SetExpression(index, Smi::FromInt(raw_offset));
}

int SparkplugFrame::GetBytecodeOffset() const {
  return LookupCode().GetBytecodeOffsetForSparkplugPC(this->pc());
}

intptr_t SparkplugFrame::GetPCForBytecodeOffset(int bytecode_offset) const {
  return LookupCode().GetSparkplugPCForBytecodeOffset(bytecode_offset);
}

BytecodeArray InterpretedFrame::GetBytecodeArray() const {
  const int index = InterpreterFrameConstants::kBytecodeArrayExpressionIndex;
  DCHECK_EQ(InterpreterFrameConstants::kBytecodeArrayFromFp,

@ -5,6 +5,7 @@
#ifndef V8_EXECUTION_FRAMES_H_
#define V8_EXECUTION_FRAMES_H_

#include "src/base/bounds.h"
#include "src/codegen/safepoint-table.h"
#include "src/common/globals.h"
#include "src/handles/handles.h"
@ -18,6 +19,7 @@
// - JavaScriptFrame (aka StandardFrame)
//   - InterpretedFrame
//   - OptimizedFrame
//   - SparkplugFrame
// - TypedFrameWithJSLinkage
//   - BuiltinFrame
//   - JavaScriptBuiltinContinuationFrame
@ -93,7 +95,6 @@ class StackHandler {
  V(ENTRY, EntryFrame)                                                    \
  V(CONSTRUCT_ENTRY, ConstructEntryFrame)                                 \
  V(EXIT, ExitFrame)                                                      \
  V(OPTIMIZED, OptimizedFrame)                                            \
  V(WASM, WasmFrame)                                                      \
  V(WASM_TO_JS, WasmToJsFrame)                                            \
  V(JS_TO_WASM, JsToWasmFrame)                                            \
@ -102,6 +103,8 @@ class StackHandler {
  V(WASM_EXIT, WasmExitFrame)                                             \
  V(WASM_COMPILE_LAZY, WasmCompileLazyFrame)                              \
  V(INTERPRETED, InterpretedFrame)                                        \
  V(SPARKPLUG, SparkplugFrame)                                            \
  V(OPTIMIZED, OptimizedFrame)                                            \
  V(STUB, StubFrame)                                                      \
  V(BUILTIN_CONTINUATION, BuiltinContinuationFrame)                       \
  V(JAVA_SCRIPT_BUILTIN_CONTINUATION, JavaScriptBuiltinContinuationFrame) \
@ -126,6 +129,11 @@ class StackFrame {
  };
#undef DECLARE_TYPE

  bool IsUnoptimizedJavaScriptFrame() const {
    STATIC_ASSERT(SPARKPLUG == INTERPRETED + 1);
    return base::IsInRange(type(), INTERPRETED, SPARKPLUG);
  }

  // Used to mark the outermost JS entry frame.
  //
  // The mark is an opaque value that should be pushed onto the stack directly,
@ -206,7 +214,12 @@ class StackFrame {
  bool is_construct_entry() const { return type() == CONSTRUCT_ENTRY; }
  bool is_exit() const { return type() == EXIT; }
  bool is_optimized() const { return type() == OPTIMIZED; }
  bool is_interpreted() const { return type() == INTERPRETED; }
  // TODO(v8:11429): Clean up these predicates, distinguishing interpreted from
  // sparkplug frames, and adding a new predicate that covers both.
  bool is_interpreted() const {
    return type() == INTERPRETED || type() == SPARKPLUG;
  }
  bool is_sparkplug() const { return type() == SPARKPLUG; }
  bool is_wasm() const { return this->type() == WASM; }
  bool is_wasm_compile_lazy() const { return type() == WASM_COMPILE_LAZY; }
  bool is_wasm_debug_break() const { return type() == WASM_DEBUG_BREAK; }
@ -225,8 +238,10 @@ class StackFrame {
  bool is_builtin_exit() const { return type() == BUILTIN_EXIT; }

  bool is_java_script() const {
    Type type = this->type();
    return (type == OPTIMIZED) || (type == INTERPRETED);
    STATIC_ASSERT(INTERPRETED + 1 == SPARKPLUG);
    STATIC_ASSERT(SPARKPLUG + 1 == OPTIMIZED);
    Type t = type();
    return t >= INTERPRETED && t <= OPTIMIZED;
  }
  bool is_wasm_to_js() const { return type() == WASM_TO_JS; }
  bool is_js_to_wasm() const { return type() == JS_TO_WASM; }
@ -829,7 +844,7 @@ class InterpretedFrame : public JavaScriptFrame {
      int* data, HandlerTable::CatchPrediction* prediction) override;

  // Returns the current offset into the bytecode stream.
  int GetBytecodeOffset() const;
  virtual int GetBytecodeOffset() const;

  // Updates the current offset into the bytecode stream, mainly used for stack
  // unwinding to continue execution at a different bytecode offset.
@ -865,6 +880,27 @@ class InterpretedFrame : public JavaScriptFrame {
  friend class StackFrameIteratorBase;
};

class SparkplugFrame : public InterpretedFrame {
 public:
  Type type() const override { return SPARKPLUG; }

  // Returns the current offset into the bytecode stream.
  int GetBytecodeOffset() const override;

  intptr_t GetPCForBytecodeOffset(int lookup_offset) const;

  static SparkplugFrame* cast(StackFrame* frame) {
    DCHECK(frame->is_sparkplug());
    return static_cast<SparkplugFrame*>(frame);
  }

 protected:
  inline explicit SparkplugFrame(StackFrameIteratorBase* iterator);

 private:
  friend class StackFrameIteratorBase;
};

// Builtin frames are built for builtins with JavaScript linkage, such as
// various standard library functions (i.e. Math.asin, Math.floor, etc.).
class BuiltinFrame final : public TypedFrameWithJSLinkage {
@ -1168,6 +1204,7 @@ class StackFrameIterator : public StackFrameIteratorBase {
    return frame_;
  }
  V8_EXPORT_PRIVATE void Advance();
  StackFrame* Reframe();

 private:
  // Go back to the first frame.
@ -1204,6 +1241,7 @@ class V8_EXPORT_PRIVATE StackTraceFrameIterator {
  int FrameFunctionCount() const;

  inline CommonFrame* frame() const;
  inline CommonFrame* Reframe();

  inline bool is_javascript() const;
  inline bool is_wasm() const;

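The reordered frame-type list above is what makes the new predicates cheap: with INTERPRETED, SPARKPLUG and OPTIMIZED adjacent enum values, is_java_script() and IsUnoptimizedJavaScriptFrame() compile down to a single range check, and the STATIC_ASSERTs guard the ordering. A standalone illustration of the pattern:

#include <cassert>
enum FrameType { ENTRY, INTERPRETED, SPARKPLUG, OPTIMIZED, EXIT };
// The predicates below are only valid while these stay adjacent.
static_assert(INTERPRETED + 1 == SPARKPLUG, "enum order");
static_assert(SPARKPLUG + 1 == OPTIMIZED, "enum order");
bool IsJavaScript(FrameType t) { return t >= INTERPRETED && t <= OPTIMIZED; }
bool IsUnoptimizedJavaScript(FrameType t) {
  return t >= INTERPRETED && t <= SPARKPLUG;  // one range check, no case list
}
int main() {
  assert(IsJavaScript(SPARKPLUG) && !IsUnoptimizedJavaScript(OPTIMIZED));
}
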
@ -1046,6 +1046,7 @@ Handle<Object> CaptureStackTrace(Isolate* isolate, Handle<Object> caller,
      case StackFrame::JAVA_SCRIPT_BUILTIN_CONTINUATION_WITH_CATCH:
      case StackFrame::OPTIMIZED:
      case StackFrame::INTERPRETED:
      case StackFrame::SPARKPLUG:
      case StackFrame::BUILTIN:
      case StackFrame::WASM: {
        // A standard frame may include many summarized frames (due to
@ -1856,7 +1857,8 @@ Object Isolate::UnwindAndFindHandler() {
                            code.constant_pool(), return_sp, frame->fp());
      }

      case StackFrame::INTERPRETED: {
      case StackFrame::INTERPRETED:
      case StackFrame::SPARKPLUG: {
        // For interpreted frame we perform a range lookup in the handler table.
        if (!catchable_by_js) break;
        InterpretedFrame* js_frame = static_cast<InterpretedFrame*>(frame);
@ -1881,6 +1883,21 @@ Object Isolate::UnwindAndFindHandler() {
        // the correct context for the handler from the interpreter register.
        Context context =
            Context::cast(js_frame->ReadInterpreterRegister(context_reg));
        DCHECK(context.IsContext());

        if (frame->type() == StackFrame::SPARKPLUG) {
          Code code = frame->LookupCode();
          intptr_t pc_offset =
              static_cast<SparkplugFrame*>(frame)->GetPCForBytecodeOffset(
                  offset);
          // Write the context directly into the context register, so that we
          // don't need to have a context read + write in the baseline code.
          js_frame->WriteInterpreterRegister(
              interpreter::Register::current_context().index(), context);
          return FoundHandler(Context(), code.InstructionStart(), pc_offset,
                              code.constant_pool(), return_sp, frame->fp());
        }

        js_frame->PatchBytecodeOffset(static_cast<int>(offset));

        Code code =
@ -2009,6 +2026,7 @@ Isolate::CatchType Isolate::PredictExceptionCatcher() {
      // For JavaScript frames we perform a lookup in the handler table.
      case StackFrame::OPTIMIZED:
      case StackFrame::INTERPRETED:
      case StackFrame::SPARKPLUG:
      case StackFrame::BUILTIN: {
        JavaScriptFrame* js_frame = JavaScriptFrame::cast(frame);
        Isolate::CatchType prediction = ToCatchType(PredictException(js_frame));

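For a SPARKPLUG frame the unwinder cannot simply patch a bytecode offset and resume in the interpreter; it translates the handler's bytecode offset into a machine PC inside the baseline code via the code object's bytecode offset table. A minimal standalone model of that lookup (the flattened table layout is an assumption for illustration):

#include <utility>
#include <vector>
// Hypothetical flattened offset table: (bytecode_offset, pc_offset) pairs,
// sorted by bytecode offset, as a baseline compiler might record them.
using OffsetTable = std::vector<std::pair<int, int>>;
int PcForBytecodeOffset(const OffsetTable& table, int handler_offset) {
  int pc = 0;
  for (const auto& entry : table) {
    if (entry.first > handler_offset) break;
    pc = entry.second;  // last mapping at or before the handler's offset
  }
  return pc;
}
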
@ -1326,6 +1326,7 @@ class V8_EXPORT_PRIVATE Isolate final : private HiddenFactory {
  }

  OptimizingCompileDispatcher* optimizing_compile_dispatcher() {
    DCHECK_NOT_NULL(optimizing_compile_dispatcher_);
    return optimizing_compile_dispatcher_;
  }
  // Flushes all pending concurrent optimization jobs from the optimizing

@ -164,7 +164,7 @@ void RuntimeProfiler::AttemptOnStackReplacement(InterpretedFrame* frame,
    PrintF(scope.file(), "]\n");
  }

  DCHECK_EQ(StackFrame::INTERPRETED, frame->type());
  DCHECK(frame->IsUnoptimizedJavaScriptFrame());
  int level = frame->GetBytecodeArray().osr_loop_nesting_level();
  frame->GetBytecodeArray().set_osr_loop_nesting_level(std::min(
      {level + loop_nesting_levels, AbstractCode::kMaxLoopNestingMarker}));
@ -344,7 +344,7 @@ void RuntimeProfiler::MarkCandidatesForOptimization(JavaScriptFrame* frame) {

void RuntimeProfiler::MarkCandidatesForOptimizationFromBytecode() {
  JavaScriptFrameIterator it(isolate_);
  DCHECK(it.frame()->is_interpreted());
  DCHECK(it.frame()->IsUnoptimizedJavaScriptFrame());
  MarkCandidatesForOptimization(it.frame());
}

@ -435,6 +435,9 @@ DEFINE_NEG_IMPLICATION(jitless, track_field_types)
DEFINE_NEG_IMPLICATION(jitless, track_heap_object_fields)
// Regexps are interpreted.
DEFINE_IMPLICATION(jitless, regexp_interpret_all)
// No Sparkplug compilation.
DEFINE_NEG_IMPLICATION(jitless, sparkplug)
DEFINE_NEG_IMPLICATION(jitless, always_sparkplug)
// asm.js validation is disabled since it triggers wasm code generation.
DEFINE_NEG_IMPLICATION(jitless, validate_asm)
// --jitless also implies --no-expose-wasm, see InitializeOncePerProcessImpl.
@ -509,6 +512,7 @@ DEFINE_INT(scale_factor_for_feedback_allocation, 4,
DEFINE_BOOL(feedback_allocation_on_bytecode_size, false,
            "Instead of a fixed budget for lazy feedback vector allocation, "
            "scale it based on the bytecode size.")
DEFINE_IMPLICATION(sparkplug, feedback_allocation_on_bytecode_size)
DEFINE_BOOL(lazy_feedback_allocation, true, "Allocate feedback vectors lazily")

// Flags for Ignition.
@ -571,6 +575,13 @@ DEFINE_UINT_READONLY(max_minimorphic_map_checks, 4,
DEFINE_INT(ticks_scale_factor_for_top_tier, 10,
           "scale factor for profiler ticks when tiering up from midtier")

// Flags for Sparkplug
DEFINE_BOOL(sparkplug, false, "enable experimental sparkplug baseline compiler")
DEFINE_BOOL(always_sparkplug, false, "directly tier up to sparkplug")
DEFINE_BOOL(sparkplug_inline_smi, true, "inline fast paths for smi ops")
DEFINE_NEG_IMPLICATION(sparkplug, write_protect_code_memory)
DEFINE_IMPLICATION(always_sparkplug, sparkplug)

// Flags for concurrent recompilation.
DEFINE_BOOL(concurrent_recompilation, true,
            "optimizing hot functions asynchronously on a separate thread")

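Taken together these macros encode a small implication graph: --always_sparkplug turns on --sparkplug, --jitless switches both off, and --sparkplug in turn disables write-protected code memory and enables bytecode-size-scaled feedback allocation. A simplified standalone model of how such implications resolve (not the actual FlagList machinery, which iterates implications to a fixed point):

struct Flags {
  bool jitless = false;
  bool sparkplug = false;
  bool always_sparkplug = false;
  bool write_protect_code_memory = true;
  bool feedback_allocation_on_bytecode_size = false;
};
void ResolveImplications(Flags& f) {
  if (f.always_sparkplug) f.sparkplug = true;  // DEFINE_IMPLICATION
  if (f.jitless) {                             // DEFINE_NEG_IMPLICATION
    f.sparkplug = false;
    f.always_sparkplug = false;
  }
  if (f.sparkplug) {
    f.write_protect_code_memory = false;
    f.feedback_allocation_on_bytecode_size = true;
  }
}
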
@ -3392,10 +3392,12 @@ Handle<JSFunction> Factory::JSFunctionBuilder::Build() {

  Handle<JSFunction> result = BuildRaw(code);

  if (have_cached_code) {
  if (have_cached_code || code->kind() == CodeKind::SPARKPLUG) {
    IsCompiledScope is_compiled_scope(sfi_->is_compiled_scope(isolate_));
    JSFunction::EnsureFeedbackVector(result, &is_compiled_scope);
    if (FLAG_trace_turbo_nci) CompilationCacheCode::TraceHit(sfi_, code);
    if (FLAG_trace_turbo_nci && have_cached_code) {
      CompilationCacheCode::TraceHit(sfi_, code);
    }
  }

  Compiler::PostInstantiation(result);

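This mirrors the JSFunctionVerify CHECK_IMPLIES earlier in the commit: a function whose installed code is Sparkplug must always carry a feedback vector, so the builder allocates one eagerly at instantiation. A standalone sketch of the invariant (illustrative, not the Factory API):

#include <cassert>
struct JSFunctionModel {
  bool is_sparkplug_code = false;
  bool has_feedback_vector = false;
};
void Instantiate(JSFunctionModel& fn, bool have_cached_code) {
  if (have_cached_code || fn.is_sparkplug_code) {
    fn.has_feedback_vector = true;  // EnsureFeedbackVector
  }
  // Verifier invariant: baseline code implies a feedback vector.
  assert(!fn.is_sparkplug_code || fn.has_feedback_vector);
}
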
@ -830,6 +830,14 @@ class V8_EXPORT_PRIVATE Factory : public FactoryBase<Factory> {
      return *this;
    }

    CodeBuilder& set_bytecode_offset_table(Handle<ByteArray> table) {
      DCHECK(!table.is_null());
      // TODO(v8:11429): Rename this and clean up calls to SourcePositionTable
      // under Sparkplug.
      source_position_table_ = table;
      return *this;
    }

    CodeBuilder& set_deoptimization_data(
        Handle<DeoptimizationData> deopt_data) {
      DCHECK(!deopt_data.is_null());
@ -877,6 +885,7 @@ class V8_EXPORT_PRIVATE Factory : public FactoryBase<Factory> {
    int32_t builtin_index_ = Builtins::kNoBuiltinId;
    uint32_t inlined_bytecode_size_ = 0;
    int32_t kind_specific_flags_ = 0;
    // Contains bytecode offset table for sparkplug
    Handle<ByteArray> source_position_table_;
    Handle<DeoptimizationData> deoptimization_data_ =
        DeoptimizationData::Empty(isolate_);

@ -1872,9 +1872,7 @@ void MarkCompactCollector::ProcessEphemeronMarking() {
void MarkCompactCollector::ProcessTopOptimizedFrame(ObjectVisitor* visitor) {
  for (StackFrameIterator it(isolate(), isolate()->thread_local_top());
       !it.done(); it.Advance()) {
    if (it.frame()->type() == StackFrame::INTERPRETED) {
      return;
    }
    if (it.frame()->IsUnoptimizedJavaScriptFrame()) return;
    if (it.frame()->type() == StackFrame::OPTIMIZED) {
      Code code = it.frame()->LookupCode();
      if (!code.CanDeoptAt(it.frame()->pc())) {

@ -2755,12 +2755,15 @@ void AccessorAssembler::LoadIC(const LoadICParameters* p) {

  TVARIABLE(MaybeObject, var_handler);
  Label if_handler(this, &var_handler), non_inlined(this, Label::kDeferred),
      try_polymorphic(this), miss(this, Label::kDeferred);
      try_polymorphic(this), miss(this, Label::kDeferred),
      no_feedback(this, Label::kDeferred);

  TNode<Map> lookup_start_object_map =
      LoadReceiverMap(p->receiver_and_lookup_start_object());
  GotoIf(IsDeprecatedMap(lookup_start_object_map), &miss);

  GotoIf(IsUndefined(p->vector()), &no_feedback);

  // Check monomorphic case.
  TNode<MaybeObject> feedback =
      TryMonomorphicCase(p->slot(), CAST(p->vector()), lookup_start_object_map,
@ -2788,6 +2791,16 @@ void AccessorAssembler::LoadIC(const LoadICParameters* p) {
                       &if_handler, &miss, &direct_exit);
  }

  BIND(&no_feedback);
  {
    Comment("LoadIC_nofeedback");
    // Call into the stub that implements the non-inlined parts of LoadIC.
    direct_exit.ReturnCallStub(
        Builtins::CallableFor(isolate(), Builtins::kLoadIC_NoFeedback),
        p->context(), p->receiver(), p->name(),
        SmiConstant(FeedbackSlotKind::kLoadProperty));
  }

  BIND(&miss);
  direct_exit.ReturnCallRuntime(Runtime::kLoadIC_Miss, p->context(),
                                p->receiver_and_lookup_start_object(),
@ -3426,7 +3439,10 @@ void AccessorAssembler::StoreIC(const StoreICParameters* p) {
}

void AccessorAssembler::StoreGlobalIC(const StoreICParameters* pp) {
  Label if_lexical_var(this), if_heapobject(this);
  Label no_feedback(this, Label::kDeferred), if_lexical_var(this),
      if_heapobject(this);
  GotoIf(IsUndefined(pp->vector()), &no_feedback);

  TNode<MaybeObject> maybe_weak_ref =
      LoadFeedbackVectorSlot(CAST(pp->vector()), pp->slot());
  Branch(TaggedIsSmi(maybe_weak_ref), &if_lexical_var, &if_heapobject);
@ -3481,6 +3497,12 @@ void AccessorAssembler::StoreGlobalIC(const StoreICParameters* pp) {
    StoreContextElement(script_context, slot_index, pp->value());
    Return(pp->value());
  }

  BIND(&no_feedback);
  {
    TailCallRuntime(Runtime::kStoreGlobalICNoFeedback_Miss, pp->context(),
                    pp->value(), pp->name());
  }
}

void AccessorAssembler::StoreGlobalIC_PropertyCellCase(
@ -3833,6 +3855,18 @@ void AccessorAssembler::GenerateLoadICTrampoline() {
  TailCallBuiltin(Builtins::kLoadIC, context, receiver, name, slot, vector);
}

void AccessorAssembler::GenerateLoadICBaseline() {
  using Descriptor = LoadBaselineDescriptor;

  auto receiver = Parameter<Object>(Descriptor::kReceiver);
  auto name = Parameter<Object>(Descriptor::kName);
  auto slot = Parameter<TaggedIndex>(Descriptor::kSlot);
  TNode<FeedbackVector> vector = LoadFeedbackVectorFromBaseline();
  TNode<Context> context = LoadContextFromBaseline();

  TailCallBuiltin(Builtins::kLoadIC, context, receiver, name, slot, vector);
}

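GenerateLoadICBaseline is the template for every *Baseline IC entry point added below: unlike the trampoline variants, which receive the context as a parameter, the baseline variants recover both the feedback vector and the context from the caller's Sparkplug frame and then tail-call the regular IC. A standalone caricature of that dispatch shape (the types are placeholders, not CSA):

struct BaselineFrame {  // what the Sparkplug prologue left on the stack
  void* context;
  void* feedback_vector;
};
struct ICArgs { void* receiver; void* name; int slot; };
using ICBuiltin = void (*)(void* context, ICArgs args, void* vector);
// The baseline entry only gathers frame state and forwards (tail-calls).
void LoadICBaseline(const BaselineFrame& frame, ICArgs args, ICBuiltin ic) {
  ic(frame.context, args, frame.feedback_vector);
}
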
void AccessorAssembler::GenerateLoadICTrampoline_Megamorphic() {
  using Descriptor = LoadDescriptor;

@ -3861,6 +3895,20 @@ void AccessorAssembler::GenerateLoadSuperIC() {
  LoadSuperIC(&p);
}

void AccessorAssembler::GenerateLoadSuperICBaseline() {
  using Descriptor = LoadWithReceiverBaselineDescriptor;

  auto receiver = Parameter<Object>(Descriptor::kReceiver);
  auto lookup_start_object = Parameter<Object>(Descriptor::kLookupStartObject);
  auto name = Parameter<Object>(Descriptor::kName);
  auto slot = Parameter<TaggedIndex>(Descriptor::kSlot);
  TNode<FeedbackVector> vector = LoadFeedbackVectorFromBaseline();
  TNode<Context> context = LoadContextFromBaseline();

  TailCallBuiltin(Builtins::kLoadSuperIC, context, receiver,
                  lookup_start_object, name, slot, vector);
}

void AccessorAssembler::GenerateLoadGlobalIC_NoFeedback() {
  using Descriptor = LoadGlobalNoFeedbackDescriptor;

@ -3903,6 +3951,79 @@ void AccessorAssembler::GenerateLoadGlobalICTrampoline(TypeofMode typeof_mode) {
  TailCallStub(callable, context, name, slot, vector);
}

void AccessorAssembler::GenerateLoadGlobalICBaseline(TypeofMode typeof_mode) {
  using Descriptor = LoadGlobalBaselineDescriptor;

  auto name = Parameter<Object>(Descriptor::kName);
  auto slot = Parameter<TaggedIndex>(Descriptor::kSlot);
  TNode<FeedbackVector> vector = LoadFeedbackVectorFromBaseline();
  TNode<Context> context = LoadContextFromBaseline();

  Callable callable =
      CodeFactory::LoadGlobalICInOptimizedCode(isolate(), typeof_mode);
  TailCallStub(callable, context, name, slot, vector);
}

void AccessorAssembler::GenerateLookupContextBaseline(TypeofMode typeof_mode) {
  using Descriptor = LookupBaselineDescriptor;
  auto depth = Parameter<TaggedIndex>(Descriptor::kDepth);
  TNode<Context> context = LoadContextFromBaseline();

  Label slowpath(this, Label::kDeferred);

  // Check for context extensions to allow the fast path.
  TNode<Context> slot_context = GotoIfHasContextExtensionUpToDepth(
      context, Unsigned(TruncateWordToInt32(TaggedIndexToIntPtr(depth))),
      &slowpath);

  // Fast path does a normal load context.
  {
    auto slot = Parameter<TaggedIndex>(Descriptor::kSlot);
    Return(LoadContextElement(slot_context, TaggedIndexToIntPtr(slot)));
  }

  // Slow path when we have to call out to the runtime.
  BIND(&slowpath);
  {
    auto name = Parameter<Object>(Descriptor::kName);
    Runtime::FunctionId function_id = typeof_mode == INSIDE_TYPEOF
                                          ? Runtime::kLoadLookupSlotInsideTypeof
                                          : Runtime::kLoadLookupSlot;
    TailCallRuntime(function_id, context, name);
  }
}

void AccessorAssembler::GenerateLookupGlobalICBaseline(TypeofMode typeof_mode) {
  using Descriptor = LookupBaselineDescriptor;

  auto name = Parameter<Object>(Descriptor::kName);
  auto depth = Parameter<TaggedIndex>(Descriptor::kDepth);
  auto slot = Parameter<TaggedIndex>(Descriptor::kSlot);
  TNode<Context> context = LoadContextFromBaseline();

  Label slowpath(this, Label::kDeferred);

  // Check for context extensions to allow the fast path
  GotoIfHasContextExtensionUpToDepth(
      context, Unsigned(TruncateWordToInt32(TaggedIndexToIntPtr(depth))),
      &slowpath);

  // Fast path does a normal load global
  {
    Callable callable =
        CodeFactory::LoadGlobalICInOptimizedCode(isolate(), typeof_mode);
    TNode<FeedbackVector> vector = LoadFeedbackVectorFromBaseline();
    TailCallStub(callable, context, name, slot, vector);
  }

  // Slow path when we have to call out to the runtime
  BIND(&slowpath);
  Runtime::FunctionId function_id = typeof_mode == INSIDE_TYPEOF
                                        ? Runtime::kLoadLookupSlotInsideTypeof
                                        : Runtime::kLoadLookupSlot;
  TailCallRuntime(function_id, context, name);
}

void AccessorAssembler::GenerateKeyedLoadIC() {
  using Descriptor = LoadWithVectorDescriptor;

@ -3942,6 +4063,19 @@ void AccessorAssembler::GenerateKeyedLoadICTrampoline() {
                  vector);
}

void AccessorAssembler::GenerateKeyedLoadICBaseline() {
  using Descriptor = LoadBaselineDescriptor;

  auto receiver = Parameter<Object>(Descriptor::kReceiver);
  auto name = Parameter<Object>(Descriptor::kName);
  auto slot = Parameter<TaggedIndex>(Descriptor::kSlot);
  TNode<FeedbackVector> vector = LoadFeedbackVectorFromBaseline();
  TNode<Context> context = LoadContextFromBaseline();

  TailCallBuiltin(Builtins::kKeyedLoadIC, context, receiver, name, slot,
                  vector);
}

void AccessorAssembler::GenerateKeyedLoadICTrampoline_Megamorphic() {
  using Descriptor = LoadDescriptor;

@ -3993,6 +4127,18 @@ void AccessorAssembler::GenerateStoreGlobalICTrampoline() {
  TailCallBuiltin(Builtins::kStoreGlobalIC, context, name, value, slot, vector);
}

void AccessorAssembler::GenerateStoreGlobalICBaseline() {
  using Descriptor = StoreGlobalBaselineDescriptor;

  auto name = Parameter<Object>(Descriptor::kName);
  auto value = Parameter<Object>(Descriptor::kValue);
  auto slot = Parameter<TaggedIndex>(Descriptor::kSlot);
  TNode<FeedbackVector> vector = LoadFeedbackVectorFromBaseline();
  TNode<Context> context = LoadContextFromBaseline();

  TailCallBuiltin(Builtins::kStoreGlobalIC, context, name, value, slot, vector);
}

void AccessorAssembler::GenerateStoreIC() {
  using Descriptor = StoreWithVectorDescriptor;

@ -4021,6 +4167,20 @@ void AccessorAssembler::GenerateStoreICTrampoline() {
                  vector);
}

void AccessorAssembler::GenerateStoreICBaseline() {
  using Descriptor = StoreBaselineDescriptor;

  auto receiver = Parameter<Object>(Descriptor::kReceiver);
  auto name = Parameter<Object>(Descriptor::kName);
  auto value = Parameter<Object>(Descriptor::kValue);
  auto slot = Parameter<TaggedIndex>(Descriptor::kSlot);
  TNode<FeedbackVector> vector = LoadFeedbackVectorFromBaseline();
  TNode<Context> context = LoadContextFromBaseline();

  TailCallBuiltin(Builtins::kStoreIC, context, receiver, name, value, slot,
                  vector);
}

void AccessorAssembler::GenerateKeyedStoreIC() {
  using Descriptor = StoreWithVectorDescriptor;

@ -4049,6 +4209,20 @@ void AccessorAssembler::GenerateKeyedStoreICTrampoline() {
                  vector);
}

void AccessorAssembler::GenerateKeyedStoreICBaseline() {
  using Descriptor = StoreBaselineDescriptor;

  auto receiver = Parameter<Object>(Descriptor::kReceiver);
  auto name = Parameter<Object>(Descriptor::kName);
  auto value = Parameter<Object>(Descriptor::kValue);
  auto slot = Parameter<TaggedIndex>(Descriptor::kSlot);
  TNode<FeedbackVector> vector = LoadFeedbackVectorFromBaseline();
  TNode<Context> context = LoadContextFromBaseline();

  TailCallBuiltin(Builtins::kKeyedStoreIC, context, receiver, name, value, slot,
                  vector);
}

void AccessorAssembler::GenerateStoreInArrayLiteralIC() {
  using Descriptor = StoreWithVectorDescriptor;

@ -4063,6 +4237,21 @@ void AccessorAssembler::GenerateStoreInArrayLiteralIC() {
  StoreInArrayLiteralIC(&p);
}

void AccessorAssembler::GenerateStoreInArrayLiteralICBaseline() {
  using Descriptor = StoreBaselineDescriptor;

  auto array = Parameter<Object>(Descriptor::kReceiver);
  auto index = Parameter<Object>(Descriptor::kName);
  auto value = Parameter<Object>(Descriptor::kValue);
  auto slot = Parameter<TaggedIndex>(Descriptor::kSlot);

  TNode<FeedbackVector> vector = LoadFeedbackVectorFromBaseline();
  TNode<Context> context = LoadContextFromBaseline();

  TailCallBuiltin(Builtins::kStoreInArrayLiteralIC, context, array, index,
                  value, slot, vector);
}

void AccessorAssembler::GenerateCloneObjectIC_Slow() {
  using Descriptor = CloneObjectWithVectorDescriptor;
  auto source = Parameter<Object>(Descriptor::kSource);
@ -4116,6 +4305,20 @@ void AccessorAssembler::GenerateCloneObjectIC_Slow() {
  Return(result);
}

void AccessorAssembler::GenerateCloneObjectICBaseline() {
  using Descriptor = CloneObjectBaselineDescriptor;

  auto source = Parameter<Object>(Descriptor::kSource);
  auto flags = Parameter<Smi>(Descriptor::kFlags);
  auto slot = Parameter<TaggedIndex>(Descriptor::kSlot);

  TNode<FeedbackVector> vector = LoadFeedbackVectorFromBaseline();
  TNode<Context> context = LoadContextFromBaseline();

  TailCallBuiltin(Builtins::kCloneObjectIC, context, source, flags, slot,
                  vector);
}

void AccessorAssembler::GenerateCloneObjectIC() {
  using Descriptor = CloneObjectWithVectorDescriptor;
  auto source = Parameter<Object>(Descriptor::kSource);
@ -4275,6 +4478,18 @@ void AccessorAssembler::GenerateKeyedHasIC() {
  KeyedLoadIC(&p, LoadAccessMode::kHas);
}

void AccessorAssembler::GenerateKeyedHasICBaseline() {
  using Descriptor = LoadBaselineDescriptor;

  auto receiver = Parameter<Object>(Descriptor::kReceiver);
  auto name = Parameter<Object>(Descriptor::kName);
  auto slot = Parameter<TaggedIndex>(Descriptor::kSlot);
  TNode<FeedbackVector> vector = LoadFeedbackVectorFromBaseline();
  TNode<Context> context = LoadContextFromBaseline();

  TailCallBuiltin(Builtins::kKeyedHasIC, context, receiver, name, slot, vector);
}

void AccessorAssembler::GenerateKeyedHasIC_Megamorphic() {
  using Descriptor = LoadWithVectorDescriptor;

@ -31,30 +31,42 @@ class V8_EXPORT_PRIVATE AccessorAssembler : public CodeStubAssembler {
  void GenerateLoadIC_NoFeedback();
  void GenerateLoadGlobalIC_NoFeedback();
  void GenerateLoadICTrampoline();
  void GenerateLoadICBaseline();
  void GenerateLoadICTrampoline_Megamorphic();
  void GenerateLoadSuperIC();
  void GenerateLoadSuperICBaseline();
  void GenerateKeyedLoadIC();
  void GenerateKeyedLoadIC_Megamorphic();
  void GenerateKeyedLoadIC_PolymorphicName();
  void GenerateKeyedLoadICTrampoline();
  void GenerateKeyedLoadICBaseline();
  void GenerateKeyedLoadICTrampoline_Megamorphic();
  void GenerateStoreIC();
  void GenerateStoreICTrampoline();
  void GenerateStoreICBaseline();
  void GenerateStoreGlobalIC();
  void GenerateStoreGlobalICTrampoline();
  void GenerateStoreGlobalICBaseline();
  void GenerateCloneObjectIC();
  void GenerateCloneObjectICBaseline();
  void GenerateCloneObjectIC_Slow();
  void GenerateKeyedHasIC();
  void GenerateKeyedHasICBaseline();
  void GenerateKeyedHasIC_Megamorphic();
  void GenerateKeyedHasIC_PolymorphicName();

  void GenerateLoadGlobalIC(TypeofMode typeof_mode);
  void GenerateLoadGlobalICTrampoline(TypeofMode typeof_mode);
  void GenerateLoadGlobalICBaseline(TypeofMode typeof_mode);
  void GenerateLookupGlobalICBaseline(TypeofMode typeof_mode);
  void GenerateLookupContextBaseline(TypeofMode typeof_mode);

  void GenerateKeyedStoreIC();
  void GenerateKeyedStoreICTrampoline();
  void GenerateKeyedStoreICBaseline();

  void GenerateStoreInArrayLiteralIC();
  void GenerateStoreInArrayLiteralICBaseline();

  void TryProbeStubCache(StubCache* stub_cache,
                         TNode<Object> lookup_start_object, TNode<Name> name,

@ -10,9 +10,9 @@ namespace v8 {
namespace internal {

TNode<Object> BinaryOpAssembler::Generate_AddWithFeedback(
    TNode<Context> context, TNode<Object> lhs, TNode<Object> rhs,
    TNode<UintPtrT> slot_id, TNode<HeapObject> maybe_feedback_vector,
    bool rhs_known_smi) {
    const LazyNode<Context>& context, TNode<Object> lhs, TNode<Object> rhs,
    TNode<UintPtrT> slot_id, const LazyNode<HeapObject>& maybe_feedback_vector,
    bool guaranteed_feedback, bool rhs_known_smi) {
  // Shared entry for floating point addition.
  Label do_fadd(this), if_lhsisnotnumber(this, Label::kDeferred),
      check_rhsisoddball(this, Label::kDeferred),
@ -69,8 +69,8 @@ TNode<Object> BinaryOpAssembler::Generate_AddWithFeedback(
      // Not overflowed.
      {
        var_type_feedback = SmiConstant(BinaryOperationFeedback::kSignedSmall);
        UpdateFeedback(var_type_feedback.value(), maybe_feedback_vector,
                       slot_id);
        MaybeUpdateFeedback(var_type_feedback.value(), maybe_feedback_vector(),
                            slot_id, guaranteed_feedback);
        var_result = smi_result;
        Goto(&end);
      }
@ -118,7 +118,8 @@ TNode<Object> BinaryOpAssembler::Generate_AddWithFeedback(
  BIND(&do_fadd);
  {
    var_type_feedback = SmiConstant(BinaryOperationFeedback::kNumber);
    UpdateFeedback(var_type_feedback.value(), maybe_feedback_vector, slot_id);
    MaybeUpdateFeedback(var_type_feedback.value(), maybe_feedback_vector(),
                        slot_id, guaranteed_feedback);
    TNode<Float64T> value =
        Float64Add(var_fadd_lhs.value(), var_fadd_rhs.value());
    TNode<HeapNumber> result = AllocateHeapNumberWithValue(value);
@ -169,10 +170,10 @@ TNode<Object> BinaryOpAssembler::Generate_AddWithFeedback(
             &call_with_any_feedback);

      var_type_feedback = SmiConstant(BinaryOperationFeedback::kString);
      UpdateFeedback(var_type_feedback.value(), maybe_feedback_vector,
                     slot_id);
      MaybeUpdateFeedback(var_type_feedback.value(), maybe_feedback_vector(),
                          slot_id, guaranteed_feedback);
      var_result =
          CallBuiltin(Builtins::kStringAdd_CheckNone, context, lhs, rhs);
          CallBuiltin(Builtins::kStringAdd_CheckNone, context(), lhs, rhs);

      Goto(&end);
    }
@ -194,20 +195,22 @@ TNode<Object> BinaryOpAssembler::Generate_AddWithFeedback(
    {
      // Both {lhs} and {rhs} are of BigInt type.
      Label bigint_too_big(this);
      var_result = CallBuiltin(Builtins::kBigIntAddNoThrow, context, lhs, rhs);
      var_result = CallBuiltin(Builtins::kBigIntAddNoThrow, context(), lhs, rhs);
      // Check for sentinel that signals BigIntTooBig exception.
      GotoIf(TaggedIsSmi(var_result.value()), &bigint_too_big);

      var_type_feedback = SmiConstant(BinaryOperationFeedback::kBigInt);
      UpdateFeedback(var_type_feedback.value(), maybe_feedback_vector, slot_id);
      MaybeUpdateFeedback(var_type_feedback.value(), maybe_feedback_vector(),
                          slot_id, guaranteed_feedback);
      Goto(&end);

      BIND(&bigint_too_big);
      {
        // Update feedback to prevent deopt loop.
        UpdateFeedback(SmiConstant(BinaryOperationFeedback::kAny),
                       maybe_feedback_vector, slot_id);
        ThrowRangeError(context, MessageTemplate::kBigIntTooBig);
        MaybeUpdateFeedback(SmiConstant(BinaryOperationFeedback::kAny),
                            maybe_feedback_vector(), slot_id,
                            guaranteed_feedback);
        ThrowRangeError(context(), MessageTemplate::kBigIntTooBig);
      }
    }

@ -225,8 +228,9 @@ TNode<Object> BinaryOpAssembler::Generate_AddWithFeedback(

  BIND(&call_add_stub);
  {
    UpdateFeedback(var_type_feedback.value(), maybe_feedback_vector, slot_id);
    var_result = CallBuiltin(Builtins::kAdd, context, lhs, rhs);
    MaybeUpdateFeedback(var_type_feedback.value(), maybe_feedback_vector(),
                        slot_id, guaranteed_feedback);
    var_result = CallBuiltin(Builtins::kAdd, context(), lhs, rhs);
    Goto(&end);
  }

@ -235,10 +239,10 @@ TNode<Object> BinaryOpAssembler::Generate_AddWithFeedback(
}

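Two refactorings run through the rest of this file. First, context and maybe_feedback_vector become LazyNode<T> thunks (in CSA a LazyNode<T> is essentially a std::function<TNode<T>()>), so baseline callers only pay for materializing them on paths that actually use them. Second, UpdateFeedback becomes MaybeUpdateFeedback with a guaranteed_feedback flag; the exact guard it emits is not shown here, so the sketch below is a simplified standalone model of the lazy-thunk idea only:

#include <functional>
template <typename T>
using Lazy = std::function<T()>;  // the shape of CSA's LazyNode<T>
int AddWithFeedback(int lhs, int rhs, const Lazy<int*>& feedback_slot,
                    bool guaranteed_feedback) {
  int result = lhs + rhs;
  // Simplified: only touch feedback when the caller guarantees a vector;
  // the thunk is evaluated (materialized) only on this path.
  if (guaranteed_feedback) *feedback_slot() = 1;
  return result;
}
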
TNode<Object> BinaryOpAssembler::Generate_BinaryOperationWithFeedback(
    TNode<Context> context, TNode<Object> lhs, TNode<Object> rhs,
    TNode<UintPtrT> slot_id, TNode<HeapObject> maybe_feedback_vector,
    const LazyNode<Context>& context, TNode<Object> lhs, TNode<Object> rhs,
    TNode<UintPtrT> slot_id, const LazyNode<HeapObject>& maybe_feedback_vector,
    const SmiOperation& smiOperation, const FloatOperation& floatOperation,
    Operation op, bool rhs_known_smi) {
    Operation op, bool guaranteed_feedback, bool rhs_known_smi) {
  Label do_float_operation(this), end(this), call_stub(this),
      check_rhsisoddball(this, Label::kDeferred), call_with_any_feedback(this),
      if_lhsisnotnumber(this, Label::kDeferred),
@ -285,7 +289,8 @@ TNode<Object> BinaryOpAssembler::Generate_BinaryOperationWithFeedback(
      {
        Comment("perform smi operation");
        var_result = smiOperation(lhs_smi, CAST(rhs), &var_type_feedback);
        UpdateFeedback(var_type_feedback.value(), maybe_feedback_vector, slot_id);
        MaybeUpdateFeedback(var_type_feedback.value(), maybe_feedback_vector(),
                            slot_id, guaranteed_feedback);
        Goto(&end);
      }
    }
@ -328,7 +333,8 @@ TNode<Object> BinaryOpAssembler::Generate_BinaryOperationWithFeedback(
  BIND(&do_float_operation);
  {
    var_type_feedback = SmiConstant(BinaryOperationFeedback::kNumber);
    UpdateFeedback(var_type_feedback.value(), maybe_feedback_vector, slot_id);
    MaybeUpdateFeedback(var_type_feedback.value(), maybe_feedback_vector(),
                        slot_id, guaranteed_feedback);
    TNode<Float64T> lhs_value = var_float_lhs.value();
    TNode<Float64T> rhs_value = var_float_rhs.value();
    TNode<Float64T> value = floatOperation(lhs_value, rhs_value);
@ -392,11 +398,12 @@ TNode<Object> BinaryOpAssembler::Generate_BinaryOperationWithFeedback(
  BIND(&if_both_bigint);
  {
    var_type_feedback = SmiConstant(BinaryOperationFeedback::kBigInt);
    UpdateFeedback(var_type_feedback.value(), maybe_feedback_vector, slot_id);
    MaybeUpdateFeedback(var_type_feedback.value(), maybe_feedback_vector(),
                        slot_id, guaranteed_feedback);
    if (op == Operation::kSubtract) {
      Label bigint_too_big(this);
      var_result =
          CallBuiltin(Builtins::kBigIntSubtractNoThrow, context, lhs, rhs);
          CallBuiltin(Builtins::kBigIntSubtractNoThrow, context(), lhs, rhs);

      // Check for sentinel that signals BigIntTooBig exception.
      GotoIf(TaggedIsSmi(var_result.value()), &bigint_too_big);
@ -405,12 +412,13 @@ TNode<Object> BinaryOpAssembler::Generate_BinaryOperationWithFeedback(
      BIND(&bigint_too_big);
      {
        // Update feedback to prevent deopt loop.
        UpdateFeedback(SmiConstant(BinaryOperationFeedback::kAny),
                       maybe_feedback_vector, slot_id);
        ThrowRangeError(context, MessageTemplate::kBigIntTooBig);
        MaybeUpdateFeedback(SmiConstant(BinaryOperationFeedback::kAny),
                            maybe_feedback_vector(), slot_id,
                            guaranteed_feedback);
        ThrowRangeError(context(), MessageTemplate::kBigIntTooBig);
      }
    } else {
      var_result = CallRuntime(Runtime::kBigIntBinaryOp, context, lhs, rhs,
      var_result = CallRuntime(Runtime::kBigIntBinaryOp, context(), lhs, rhs,
                               SmiConstant(op));
      Goto(&end);
    }
@ -424,20 +432,21 @@ TNode<Object> BinaryOpAssembler::Generate_BinaryOperationWithFeedback(

  BIND(&call_stub);
  {
    UpdateFeedback(var_type_feedback.value(), maybe_feedback_vector, slot_id);
    MaybeUpdateFeedback(var_type_feedback.value(), maybe_feedback_vector(),
                        slot_id, guaranteed_feedback);
    TNode<Object> result;
    switch (op) {
      case Operation::kSubtract:
        result = CallBuiltin(Builtins::kSubtract, context, lhs, rhs);
        result = CallBuiltin(Builtins::kSubtract, context(), lhs, rhs);
        break;
      case Operation::kMultiply:
        result = CallBuiltin(Builtins::kMultiply, context, lhs, rhs);
        result = CallBuiltin(Builtins::kMultiply, context(), lhs, rhs);
        break;
      case Operation::kDivide:
        result = CallBuiltin(Builtins::kDivide, context, lhs, rhs);
        result = CallBuiltin(Builtins::kDivide, context(), lhs, rhs);
        break;
      case Operation::kModulus:
        result = CallBuiltin(Builtins::kModulus, context, lhs, rhs);
        result = CallBuiltin(Builtins::kModulus, context(), lhs, rhs);
        break;
      default:
        UNREACHABLE();
@ -451,9 +460,9 @@ TNode<Object> BinaryOpAssembler::Generate_BinaryOperationWithFeedback(
}

TNode<Object> BinaryOpAssembler::Generate_SubtractWithFeedback(
    TNode<Context> context, TNode<Object> lhs, TNode<Object> rhs,
    TNode<UintPtrT> slot_id, TNode<HeapObject> maybe_feedback_vector,
    bool rhs_known_smi) {
    const LazyNode<Context>& context, TNode<Object> lhs, TNode<Object> rhs,
    TNode<UintPtrT> slot_id, const LazyNode<HeapObject>& maybe_feedback_vector,
    bool guaranteed_feedback, bool rhs_known_smi) {
  auto smiFunction = [=](TNode<Smi> lhs, TNode<Smi> rhs,
                         TVariable<Smi>* var_type_feedback) {
    Label end(this);
@ -483,13 +492,13 @@ TNode<Object> BinaryOpAssembler::Generate_SubtractWithFeedback(
  };
  return Generate_BinaryOperationWithFeedback(
      context, lhs, rhs, slot_id, maybe_feedback_vector, smiFunction,
      floatFunction, Operation::kSubtract, rhs_known_smi);
      floatFunction, Operation::kSubtract, guaranteed_feedback, rhs_known_smi);
}

TNode<Object> BinaryOpAssembler::Generate_MultiplyWithFeedback(
    TNode<Context> context, TNode<Object> lhs, TNode<Object> rhs,
    TNode<UintPtrT> slot_id, TNode<HeapObject> maybe_feedback_vector,
    bool rhs_known_smi) {
    const LazyNode<Context>& context, TNode<Object> lhs, TNode<Object> rhs,
    TNode<UintPtrT> slot_id, const LazyNode<HeapObject>& maybe_feedback_vector,
    bool guaranteed_feedback, bool rhs_known_smi) {
  auto smiFunction = [=](TNode<Smi> lhs, TNode<Smi> rhs,
                         TVariable<Smi>* var_type_feedback) {
    TNode<Number> result = SmiMul(lhs, rhs);
@ -503,12 +512,13 @@ TNode<Object> BinaryOpAssembler::Generate_MultiplyWithFeedback(
  };
  return Generate_BinaryOperationWithFeedback(
      context, lhs, rhs, slot_id, maybe_feedback_vector, smiFunction,
      floatFunction, Operation::kMultiply, rhs_known_smi);
      floatFunction, Operation::kMultiply, guaranteed_feedback, rhs_known_smi);
}

TNode<Object> BinaryOpAssembler::Generate_DivideWithFeedback(
    TNode<Context> context, TNode<Object> dividend, TNode<Object> divisor,
    TNode<UintPtrT> slot_id, TNode<HeapObject> maybe_feedback_vector,
    const LazyNode<Context>& context, TNode<Object> dividend,
    TNode<Object> divisor, TNode<UintPtrT> slot_id,
    const LazyNode<HeapObject>& maybe_feedback_vector, bool guaranteed_feedback,
    bool rhs_known_smi) {
  auto smiFunction = [=](TNode<Smi> lhs, TNode<Smi> rhs,
                         TVariable<Smi>* var_type_feedback) {
@ -539,12 +549,13 @@ TNode<Object> BinaryOpAssembler::Generate_DivideWithFeedback(
  };
  return Generate_BinaryOperationWithFeedback(
      context, dividend, divisor, slot_id, maybe_feedback_vector, smiFunction,
      floatFunction, Operation::kDivide, rhs_known_smi);
      floatFunction, Operation::kDivide, guaranteed_feedback, rhs_known_smi);
}

TNode<Object> BinaryOpAssembler::Generate_ModulusWithFeedback(
    TNode<Context> context, TNode<Object> dividend, TNode<Object> divisor,
    TNode<UintPtrT> slot_id, TNode<HeapObject> maybe_feedback_vector,
    const LazyNode<Context>& context, TNode<Object> dividend,
    TNode<Object> divisor, TNode<UintPtrT> slot_id,
    const LazyNode<HeapObject>& maybe_feedback_vector, bool guaranteed_feedback,
    bool rhs_known_smi) {
  auto smiFunction = [=](TNode<Smi> lhs, TNode<Smi> rhs,
                         TVariable<Smi>* var_type_feedback) {
@ -559,22 +570,24 @@ TNode<Object> BinaryOpAssembler::Generate_ModulusWithFeedback(
  };
  return Generate_BinaryOperationWithFeedback(
      context, dividend, divisor, slot_id, maybe_feedback_vector, smiFunction,
      floatFunction, Operation::kModulus, rhs_known_smi);
      floatFunction, Operation::kModulus, guaranteed_feedback, rhs_known_smi);
}

TNode<Object> BinaryOpAssembler::Generate_ExponentiateWithFeedback(
    TNode<Context> context, TNode<Object> base, TNode<Object> exponent,
    TNode<UintPtrT> slot_id, TNode<HeapObject> maybe_feedback_vector,
    const LazyNode<Context>& context, TNode<Object> base,
    TNode<Object> exponent, TNode<UintPtrT> slot_id,
    const LazyNode<HeapObject>& maybe_feedback_vector, bool guaranteed_feedback,
    bool rhs_known_smi) {
  // We currently don't optimize exponentiation based on feedback.
  TNode<Smi> dummy_feedback = SmiConstant(BinaryOperationFeedback::kAny);
  UpdateFeedback(dummy_feedback, maybe_feedback_vector, slot_id);
  return CallBuiltin(Builtins::kExponentiate, context, base, exponent);
  MaybeUpdateFeedback(dummy_feedback, maybe_feedback_vector(), slot_id,
                      guaranteed_feedback);
  return CallBuiltin(Builtins::kExponentiate, context(), base, exponent);
}

TNode<Object> BinaryOpAssembler::Generate_BitwiseBinaryOpWithOptionalFeedback(
    Operation bitwise_op, TNode<Object> left, TNode<Object> right,
    TNode<Context> context, TVariable<Smi>* feedback) {
    const LazyNode<Context>& context, TVariable<Smi>* feedback) {
  TVARIABLE(Object, result);
  TVARIABLE(Smi, var_left_feedback);
  TVARIABLE(Smi, var_right_feedback);
@ -592,14 +605,14 @@ TNode<Object> BinaryOpAssembler::Generate_BitwiseBinaryOpWithOptionalFeedback(
  Label if_left_bigint(this), do_bigint_op(this);

  TaggedToWord32OrBigIntWithFeedback(
      context, left, &if_left_number, &var_left_word32, &if_left_bigint,
      context(), left, &if_left_number, &var_left_word32, &if_left_bigint,
      &var_left_bigint, feedback ? &var_left_feedback : nullptr);

  Label right_is_bigint(this);
  BIND(&if_left_number);
  {
    TaggedToWord32OrBigIntWithFeedback(
        context, right, &do_number_op, &var_right_word32, &right_is_bigint,
        context(), right, &do_number_op, &var_right_word32, &right_is_bigint,
        &var_right_bigint, feedback ? &var_right_feedback : nullptr);
  }

@ -631,7 +644,7 @@ TNode<Object> BinaryOpAssembler::Generate_BitwiseBinaryOpWithOptionalFeedback(
  // BigInt cases.
  BIND(&if_left_bigint);
  {
    TaggedToNumericWithFeedback(context, right, &var_right_maybe_bigint,
    TaggedToNumericWithFeedback(context(), right, &var_right_maybe_bigint,
                                &var_right_feedback);
    var_left_maybe_bigint = var_left_bigint.value();
|
||||
Goto(&do_bigint_op);
|
||||
@ -643,7 +656,7 @@ TNode<Object> BinaryOpAssembler::Generate_BitwiseBinaryOpWithOptionalFeedback(
|
||||
*feedback = SmiOr(var_left_feedback.value(), var_right_feedback.value());
|
||||
}
|
||||
result = CallRuntime(
|
||||
Runtime::kBigIntBinaryOp, context, var_left_maybe_bigint.value(),
|
||||
Runtime::kBigIntBinaryOp, context(), var_left_maybe_bigint.value(),
|
||||
var_right_maybe_bigint.value(), SmiConstant(bitwise_op));
|
||||
Goto(&done);
|
||||
}
|
||||
|
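The recurring change above is the switch from eagerly-passed TNode<Context> parameters to const LazyNode<Context>& (and likewise for the feedback vector), so that the node is only materialized on paths that actually consume it. A minimal standalone sketch of the lazy-node idea, using std::function in place of V8's LazyNode template (the Node type and function names here are illustrative, not the real CSA types):

#include <functional>
#include <iostream>

// Stand-in for a compiler IR node handle; in V8 this would be TNode<T>.
struct Node { const char* desc; };

// LazyNode<T> in V8 is roughly a callable returning TNode<T>: a thunk that
// emits the node on first use instead of at the call boundary.
using LazyNode = std::function<Node()>;

// A consumer that only needs the context on its slow path.
Node FastPathOrSlowPath(bool fast, const LazyNode& context) {
  if (fast) {
    return Node{"fast result (context never materialized)"};
  }
  // Only here do we pay for emitting the context load.
  Node ctx = context();
  std::cout << "materialized: " << ctx.desc << "\n";
  return Node{"slow result"};
}

int main() {
  LazyNode lazy_context = [] {
    std::cout << "emitting context load\n";
    return Node{"context"};
  };
  FastPathOrSlowPath(/*fast=*/true, lazy_context);   // no context load emitted
  FastPathOrSlowPath(/*fast=*/false, lazy_context);  // load emitted on demand
}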
@@ -21,106 +21,113 @@ class BinaryOpAssembler : public CodeStubAssembler {
: CodeStubAssembler(state) {}

TNode<Object> Generate_AddWithFeedback(
TNode<Context> context, TNode<Object> left, TNode<Object> right,
TNode<UintPtrT> slot, TNode<HeapObject> maybe_feedback_vector,
bool rhs_known_smi);
const LazyNode<Context>& context, TNode<Object> left, TNode<Object> right,
TNode<UintPtrT> slot, const LazyNode<HeapObject>& maybe_feedback_vector,
bool guaranteed_feedback, bool rhs_known_smi);

TNode<Object> Generate_SubtractWithFeedback(
TNode<Context> context, TNode<Object> left, TNode<Object> right,
TNode<UintPtrT> slot, TNode<HeapObject> maybe_feedback_vector,
bool rhs_known_smi);
const LazyNode<Context>& context, TNode<Object> left, TNode<Object> right,
TNode<UintPtrT> slot, const LazyNode<HeapObject>& maybe_feedback_vector,
bool guaranteed_feedback, bool rhs_known_smi);

TNode<Object> Generate_MultiplyWithFeedback(
TNode<Context> context, TNode<Object> left, TNode<Object> right,
TNode<UintPtrT> slot, TNode<HeapObject> maybe_feedback_vector,
bool rhs_known_smi);
const LazyNode<Context>& context, TNode<Object> left, TNode<Object> right,
TNode<UintPtrT> slot, const LazyNode<HeapObject>& maybe_feedback_vector,
bool guaranteed_feedback, bool rhs_known_smi);

TNode<Object> Generate_DivideWithFeedback(
TNode<Context> context, TNode<Object> dividend, TNode<Object> divisor,
TNode<UintPtrT> slot, TNode<HeapObject> maybe_feedback_vector,
bool rhs_known_smi);
const LazyNode<Context>& context, TNode<Object> dividend,
TNode<Object> divisor, TNode<UintPtrT> slot,
const LazyNode<HeapObject>& maybe_feedback_vector,
bool guaranteed_feedback, bool rhs_known_smi);

TNode<Object> Generate_ModulusWithFeedback(
TNode<Context> context, TNode<Object> dividend, TNode<Object> divisor,
TNode<UintPtrT> slot, TNode<HeapObject> maybe_feedback_vector,
bool rhs_known_smi);
const LazyNode<Context>& context, TNode<Object> dividend,
TNode<Object> divisor, TNode<UintPtrT> slot,
const LazyNode<HeapObject>& maybe_feedback_vector,
bool guaranteed_feedback, bool rhs_known_smi);

TNode<Object> Generate_ExponentiateWithFeedback(
TNode<Context> context, TNode<Object> base, TNode<Object> exponent,
TNode<UintPtrT> slot, TNode<HeapObject> maybe_feedback_vector,
bool rhs_known_smi);
const LazyNode<Context>& context, TNode<Object> base,
TNode<Object> exponent, TNode<UintPtrT> slot,
const LazyNode<HeapObject>& maybe_feedback_vector,
bool guaranteed_feedback, bool rhs_known_smi);

TNode<Object> Generate_BitwiseOrWithFeedback(
TNode<Context> context, TNode<Object> left, TNode<Object> right,
TNode<UintPtrT> slot, TNode<HeapObject> maybe_feedback_vector,
bool /* unused */) {
const LazyNode<Context>& context, TNode<Object> left, TNode<Object> right,
TNode<UintPtrT> slot, const LazyNode<HeapObject>& maybe_feedback_vector,
bool guaranteed_feedback, bool /* unused */) {
TVARIABLE(Smi, feedback);
TNode<Object> result = Generate_BitwiseBinaryOpWithFeedback(
Operation::kBitwiseOr, left, right, context, &feedback);
UpdateFeedback(feedback.value(), maybe_feedback_vector, slot);
MaybeUpdateFeedback(feedback.value(), maybe_feedback_vector(), slot,
guaranteed_feedback);
return result;
}

TNode<Object> Generate_BitwiseXorWithFeedback(
TNode<Context> context, TNode<Object> left, TNode<Object> right,
TNode<UintPtrT> slot, TNode<HeapObject> maybe_feedback_vector,
bool /* unused */) {
const LazyNode<Context>& context, TNode<Object> left, TNode<Object> right,
TNode<UintPtrT> slot, const LazyNode<HeapObject>& maybe_feedback_vector,
bool guaranteed_feedback, bool /* unused */) {
TVARIABLE(Smi, feedback);
TNode<Object> result = Generate_BitwiseBinaryOpWithFeedback(
Operation::kBitwiseXor, left, right, context, &feedback);
UpdateFeedback(feedback.value(), maybe_feedback_vector, slot);
MaybeUpdateFeedback(feedback.value(), maybe_feedback_vector(), slot,
guaranteed_feedback);
return result;
}

TNode<Object> Generate_BitwiseAndWithFeedback(
TNode<Context> context, TNode<Object> left, TNode<Object> right,
TNode<UintPtrT> slot, TNode<HeapObject> maybe_feedback_vector,
bool /* unused */) {
const LazyNode<Context>& context, TNode<Object> left, TNode<Object> right,
TNode<UintPtrT> slot, const LazyNode<HeapObject>& maybe_feedback_vector,
bool guaranteed_feedback, bool /* unused */) {
TVARIABLE(Smi, feedback);
TNode<Object> result = Generate_BitwiseBinaryOpWithFeedback(
Operation::kBitwiseAnd, left, right, context, &feedback);
UpdateFeedback(feedback.value(), maybe_feedback_vector, slot);
MaybeUpdateFeedback(feedback.value(), maybe_feedback_vector(), slot,
guaranteed_feedback);
return result;
}

TNode<Object> Generate_ShiftLeftWithFeedback(
TNode<Context> context, TNode<Object> left, TNode<Object> right,
TNode<UintPtrT> slot, TNode<HeapObject> maybe_feedback_vector,
bool /* unused */) {
const LazyNode<Context>& context, TNode<Object> left, TNode<Object> right,
TNode<UintPtrT> slot, const LazyNode<HeapObject>& maybe_feedback_vector,
bool guaranteed_feedback, bool /* unused */) {
TVARIABLE(Smi, feedback);
TNode<Object> result = Generate_BitwiseBinaryOpWithFeedback(
Operation::kShiftLeft, left, right, context, &feedback);
UpdateFeedback(feedback.value(), maybe_feedback_vector, slot);
MaybeUpdateFeedback(feedback.value(), maybe_feedback_vector(), slot,
guaranteed_feedback);
return result;
}

TNode<Object> Generate_ShiftRightWithFeedback(
TNode<Context> context, TNode<Object> left, TNode<Object> right,
TNode<UintPtrT> slot, TNode<HeapObject> maybe_feedback_vector,
bool /* unused */) {
const LazyNode<Context>& context, TNode<Object> left, TNode<Object> right,
TNode<UintPtrT> slot, const LazyNode<HeapObject>& maybe_feedback_vector,
bool guaranteed_feedback, bool /* unused */) {
TVARIABLE(Smi, feedback);
TNode<Object> result = Generate_BitwiseBinaryOpWithFeedback(
Operation::kShiftRight, left, right, context, &feedback);
UpdateFeedback(feedback.value(), maybe_feedback_vector, slot);
MaybeUpdateFeedback(feedback.value(), maybe_feedback_vector(), slot,
guaranteed_feedback);
return result;
}

TNode<Object> Generate_ShiftRightLogicalWithFeedback(
TNode<Context> context, TNode<Object> left, TNode<Object> right,
TNode<UintPtrT> slot, TNode<HeapObject> maybe_feedback_vector,
bool /* unused */) {
const LazyNode<Context>& context, TNode<Object> left, TNode<Object> right,
TNode<UintPtrT> slot, const LazyNode<HeapObject>& maybe_feedback_vector,
bool guaranteed_feedback, bool /* unused */) {
TVARIABLE(Smi, feedback);
TNode<Object> result = Generate_BitwiseBinaryOpWithFeedback(
Operation::kShiftRightLogical, left, right, context, &feedback);
UpdateFeedback(feedback.value(), maybe_feedback_vector, slot);
MaybeUpdateFeedback(feedback.value(), maybe_feedback_vector(), slot,
guaranteed_feedback);
return result;
}

TNode<Object> Generate_BitwiseBinaryOpWithFeedback(Operation bitwise_op,
TNode<Object> left,
TNode<Object> right,
TNode<Context> context,
TVariable<Smi>* feedback) {
TNode<Object> Generate_BitwiseBinaryOpWithFeedback(
Operation bitwise_op, TNode<Object> left, TNode<Object> right,
const LazyNode<Context>& context, TVariable<Smi>* feedback) {
return Generate_BitwiseBinaryOpWithOptionalFeedback(bitwise_op, left, right,
context, feedback);
}
@@ -129,8 +136,8 @@ class BinaryOpAssembler : public CodeStubAssembler {
TNode<Object> left,
TNode<Object> right,
TNode<Context> context) {
return Generate_BitwiseBinaryOpWithOptionalFeedback(bitwise_op, left, right,
context, nullptr);
return Generate_BitwiseBinaryOpWithOptionalFeedback(
bitwise_op, left, right, [&] { return context; }, nullptr);
}

private:
@@ -140,14 +147,14 @@ class BinaryOpAssembler : public CodeStubAssembler {
std::function<TNode<Float64T>(TNode<Float64T>, TNode<Float64T>)>;

TNode<Object> Generate_BinaryOperationWithFeedback(
TNode<Context> context, TNode<Object> left, TNode<Object> right,
TNode<UintPtrT> slot, TNode<HeapObject> maybe_feedback_vector,
const LazyNode<Context>& context, TNode<Object> left, TNode<Object> right,
TNode<UintPtrT> slot, const LazyNode<HeapObject>& maybe_feedback_vector,
const SmiOperation& smiOperation, const FloatOperation& floatOperation,
Operation op, bool rhs_known_smi);
Operation op, bool guaranteed_feedback, bool rhs_known_smi);

TNode<Object> Generate_BitwiseBinaryOpWithOptionalFeedback(
Operation bitwise_op, TNode<Object> left, TNode<Object> right,
TNode<Context> context, TVariable<Smi>* feedback);
const LazyNode<Context>& context, TVariable<Smi>* feedback);
};

} // namespace internal
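The new bool guaranteed_feedback threads from every call site down to the feedback write. The diff does not show MaybeUpdateFeedback's body, but the call sites imply a shape like the following hedged sketch: when the caller can guarantee a feedback vector exists, the update is unconditional; otherwise it is guarded. All types and names below are illustrative stand-ins, not V8's real CSA API:

#include <cassert>
#include <iostream>
#include <optional>

// Hypothetical stand-ins: in V8 these are CSA nodes and a FeedbackVector.
using Feedback = int;
using MaybeVector = std::optional<int*>;  // nullopt ~ undefined vector

// Assumed shape of MaybeUpdateFeedback, inferred from the call sites: skip
// the "is the vector present?" check when guaranteed_feedback is true.
void MaybeUpdateFeedback(Feedback feedback, MaybeVector vector,
                         bool guaranteed_feedback) {
  if (guaranteed_feedback) {
    assert(vector.has_value());
    **vector |= feedback;  // unconditional fast path
    return;
  }
  if (vector.has_value()) **vector |= feedback;  // guarded slow path
}

int main() {
  int slot = 0;
  MaybeUpdateFeedback(0b10, MaybeVector{&slot}, /*guaranteed_feedback=*/true);
  MaybeUpdateFeedback(0b01, std::nullopt, /*guaranteed_feedback=*/false);
  std::cout << slot << "\n";  // prints 2: the second update was skipped
}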
@@ -16,9 +16,11 @@ class UnaryOpAssemblerImpl final : public CodeStubAssembler {
explicit UnaryOpAssemblerImpl(compiler::CodeAssemblerState* state)
: CodeStubAssembler(state) {}

// TODO(v8:11429): Change `bool guaranteed_feedback` to an enum.
TNode<Object> BitwiseNot(TNode<Context> context, TNode<Object> value,
TNode<UintPtrT> slot,
TNode<HeapObject> maybe_feedback_vector) {
TNode<HeapObject> maybe_feedback_vector,
bool guaranteed_feedback) {
// TODO(jgruber): Make this implementation more consistent with other unary
// ops (i.e. have them all use UnaryOpWithFeedback or some other common
// mechanism).
@@ -37,14 +39,14 @@ class UnaryOpAssemblerImpl final : public CodeStubAssembler {
TNode<Smi> result_type = SelectSmiConstant(
TaggedIsSmi(var_result.value()), BinaryOperationFeedback::kSignedSmall,
BinaryOperationFeedback::kNumber);
UpdateFeedback(SmiOr(result_type, var_feedback.value()),
maybe_feedback_vector, slot);
MaybeUpdateFeedback(SmiOr(result_type, var_feedback.value()),
maybe_feedback_vector, slot, guaranteed_feedback);
Goto(&out);

// BigInt case.
BIND(&if_bigint);
UpdateFeedback(SmiConstant(BinaryOperationFeedback::kBigInt),
maybe_feedback_vector, slot);
MaybeUpdateFeedback(SmiConstant(BinaryOperationFeedback::kBigInt),
maybe_feedback_vector, slot, guaranteed_feedback);
var_result =
CallRuntime(Runtime::kBigIntUnaryOp, context, var_bigint.value(),
SmiConstant(Operation::kBitwiseNot));
@@ -56,21 +58,24 @@ class UnaryOpAssemblerImpl final : public CodeStubAssembler {

TNode<Object> Decrement(TNode<Context> context, TNode<Object> value,
TNode<UintPtrT> slot,
TNode<HeapObject> maybe_feedback_vector) {
return IncrementOrDecrement<Operation::kDecrement>(context, value, slot,
maybe_feedback_vector);
TNode<HeapObject> maybe_feedback_vector,
bool guaranteed_feedback) {
return IncrementOrDecrement<Operation::kDecrement>(
context, value, slot, maybe_feedback_vector, guaranteed_feedback);
}

TNode<Object> Increment(TNode<Context> context, TNode<Object> value,
TNode<UintPtrT> slot,
TNode<HeapObject> maybe_feedback_vector) {
return IncrementOrDecrement<Operation::kIncrement>(context, value, slot,
maybe_feedback_vector);
TNode<HeapObject> maybe_feedback_vector,
bool guaranteed_feedback) {
return IncrementOrDecrement<Operation::kIncrement>(
context, value, slot, maybe_feedback_vector, guaranteed_feedback);
}

TNode<Object> Negate(TNode<Context> context, TNode<Object> value,
TNode<UintPtrT> slot,
TNode<HeapObject> maybe_feedback_vector) {
TNode<HeapObject> maybe_feedback_vector,
bool guaranteed_feedback) {
SmiOperation smi_op = [=](TNode<Smi> smi_value,
TVariable<Smi>* var_feedback, Label* do_float_op,
TVariable<Float64T>* var_float) {
@@ -108,7 +113,8 @@ class UnaryOpAssemblerImpl final : public CodeStubAssembler {
SmiConstant(Operation::kNegate)));
};
return UnaryOpWithFeedback(context, value, slot, maybe_feedback_vector,
smi_op, float_op, bigint_op);
smi_op, float_op, bigint_op,
guaranteed_feedback);
}

private:
@@ -125,7 +131,8 @@ class UnaryOpAssemblerImpl final : public CodeStubAssembler {
TNode<HeapObject> maybe_feedback_vector,
const SmiOperation& smi_op,
const FloatOperation& float_op,
const BigIntOperation& bigint_op) {
const BigIntOperation& bigint_op,
bool guaranteed_feedback) {
TVARIABLE(Object, var_value, value);
TVARIABLE(Object, var_result);
TVARIABLE(Float64T, var_float_value);
@@ -207,14 +214,16 @@ class UnaryOpAssemblerImpl final : public CodeStubAssembler {
}

BIND(&end);
UpdateFeedback(var_feedback.value(), maybe_feedback_vector, slot);
MaybeUpdateFeedback(var_feedback.value(), maybe_feedback_vector, slot,
guaranteed_feedback);
return var_result.value();
}

template <Operation kOperation>
TNode<Object> IncrementOrDecrement(TNode<Context> context,
TNode<Object> value, TNode<UintPtrT> slot,
TNode<HeapObject> maybe_feedback_vector) {
TNode<HeapObject> maybe_feedback_vector,
bool guaranteed_feedback) {
STATIC_ASSERT(kOperation == Operation::kIncrement ||
kOperation == Operation::kDecrement);
static constexpr int kAddValue =
@@ -245,7 +254,8 @@ class UnaryOpAssemblerImpl final : public CodeStubAssembler {
SmiConstant(kOperation)));
};
return UnaryOpWithFeedback(context, value, slot, maybe_feedback_vector,
smi_op, float_op, bigint_op);
smi_op, float_op, bigint_op,
guaranteed_feedback);
}
};

@@ -253,30 +263,34 @@ class UnaryOpAssemblerImpl final : public CodeStubAssembler {

TNode<Object> UnaryOpAssembler::Generate_BitwiseNotWithFeedback(
TNode<Context> context, TNode<Object> value, TNode<UintPtrT> slot,
TNode<HeapObject> maybe_feedback_vector) {
TNode<HeapObject> maybe_feedback_vector, bool guaranteed_feedback) {
UnaryOpAssemblerImpl a(state_);
return a.BitwiseNot(context, value, slot, maybe_feedback_vector);
return a.BitwiseNot(context, value, slot, maybe_feedback_vector,
guaranteed_feedback);
}

TNode<Object> UnaryOpAssembler::Generate_DecrementWithFeedback(
TNode<Context> context, TNode<Object> value, TNode<UintPtrT> slot,
TNode<HeapObject> maybe_feedback_vector) {
TNode<HeapObject> maybe_feedback_vector, bool guaranteed_feedback) {
UnaryOpAssemblerImpl a(state_);
return a.Decrement(context, value, slot, maybe_feedback_vector);
return a.Decrement(context, value, slot, maybe_feedback_vector,
guaranteed_feedback);
}

TNode<Object> UnaryOpAssembler::Generate_IncrementWithFeedback(
TNode<Context> context, TNode<Object> value, TNode<UintPtrT> slot,
TNode<HeapObject> maybe_feedback_vector) {
TNode<HeapObject> maybe_feedback_vector, bool guaranteed_feedback) {
UnaryOpAssemblerImpl a(state_);
return a.Increment(context, value, slot, maybe_feedback_vector);
return a.Increment(context, value, slot, maybe_feedback_vector,
guaranteed_feedback);
}

TNode<Object> UnaryOpAssembler::Generate_NegateWithFeedback(
TNode<Context> context, TNode<Object> value, TNode<UintPtrT> slot,
TNode<HeapObject> maybe_feedback_vector) {
TNode<HeapObject> maybe_feedback_vector, bool guaranteed_feedback) {
UnaryOpAssemblerImpl a(state_);
return a.Negate(context, value, slot, maybe_feedback_vector);
return a.Negate(context, value, slot, maybe_feedback_vector,
guaranteed_feedback);
}

} // namespace internal
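IncrementOrDecrement above selects the delta at compile time via its template parameter, so the Inc and Dec bytecode handlers share one body with no runtime branch. A minimal sketch of that pattern, lifted out of the CSA context into plain C++:

#include <iostream>

enum class Operation { kIncrement, kDecrement };

// Same shape as UnaryOpAssemblerImpl::IncrementOrDecrement: the operation is
// a template parameter, so kAddValue folds to a compile-time constant and
// both handlers instantiate the same function body.
template <Operation kOperation>
int IncrementOrDecrement(int value) {
  static_assert(kOperation == Operation::kIncrement ||
                kOperation == Operation::kDecrement);
  static constexpr int kAddValue =
      (kOperation == Operation::kIncrement) ? 1 : -1;
  return value + kAddValue;
}

int main() {
  std::cout << IncrementOrDecrement<Operation::kIncrement>(41) << "\n";  // 42
  std::cout << IncrementOrDecrement<Operation::kDecrement>(43) << "\n";  // 42
}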
@@ -21,19 +21,19 @@ class UnaryOpAssembler final {

TNode<Object> Generate_BitwiseNotWithFeedback(
TNode<Context> context, TNode<Object> value, TNode<UintPtrT> slot,
TNode<HeapObject> maybe_feedback_vector);
TNode<HeapObject> maybe_feedback_vector, bool guaranteed_feedback);

TNode<Object> Generate_DecrementWithFeedback(
TNode<Context> context, TNode<Object> value, TNode<UintPtrT> slot,
TNode<HeapObject> maybe_feedback_vector);
TNode<HeapObject> maybe_feedback_vector, bool guaranteed_feedback);

TNode<Object> Generate_IncrementWithFeedback(
TNode<Context> context, TNode<Object> value, TNode<UintPtrT> slot,
TNode<HeapObject> maybe_feedback_vector);
TNode<HeapObject> maybe_feedback_vector, bool guaranteed_feedback);

TNode<Object> Generate_NegateWithFeedback(
TNode<Context> context, TNode<Object> value, TNode<UintPtrT> slot,
TNode<HeapObject> maybe_feedback_vector);
TNode<HeapObject> maybe_feedback_vector, bool guaranteed_feedback);

private:
compiler::CodeAssemblerState* const state_;
@@ -196,6 +196,20 @@ Register BytecodeArrayAccessor::GetRegisterOperand(int operand_index) const {
current_operand_scale());
}

std::pair<Register, Register> BytecodeArrayAccessor::GetRegisterPairOperand(
int operand_index) const {
Register first = GetRegisterOperand(operand_index);
Register second(first.index() + 1);
return std::make_pair(first, second);
}

RegisterList BytecodeArrayAccessor::GetRegisterListOperand(
int operand_index) const {
Register first = GetRegisterOperand(operand_index);
uint32_t count = GetRegisterCountOperand(operand_index + 1);
return RegisterList(first.index(), count);
}

int BytecodeArrayAccessor::GetRegisterOperandRange(int operand_index) const {
DCHECK_LE(operand_index, Bytecodes::NumberOfOperands(current_bytecode()));
const OperandType* operand_types =
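The new GetRegisterPairOperand relies on an encoding convention: a register-pair operand stores only the first register, and the second is defined to be the next consecutive index. A self-contained sketch of that convention, with a simplified Register type:

#include <iostream>
#include <utility>

// Simplified Register: just an index into the interpreter register file.
struct Register {
  explicit Register(int index) : index_(index) {}
  int index() const { return index_; }
 private:
  int index_;
};

// Mirrors BytecodeArrayAccessor::GetRegisterPairOperand: decode the first
// register from the operand, derive the second as index + 1.
std::pair<Register, Register> GetRegisterPair(Register first) {
  Register second(first.index() + 1);
  return std::make_pair(first, second);
}

int main() {
  auto pair = GetRegisterPair(Register(5));
  std::cout << pair.first.index() << ", " << pair.second.index() << "\n";  // 5, 6
}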
@@ -103,6 +103,7 @@ class V8_EXPORT_PRIVATE BytecodeArrayAccessor {
UpdateOperandScale();
}
void SetOffset(int offset);
void Reset() { SetOffset(0); }

void ApplyDebugBreak();

@@ -131,6 +132,8 @@ class V8_EXPORT_PRIVATE BytecodeArrayAccessor {
Register GetParameter(int parameter_index) const;
uint32_t GetRegisterCountOperand(int operand_index) const;
Register GetRegisterOperand(int operand_index) const;
std::pair<Register, Register> GetRegisterPairOperand(int operand_index) const;
RegisterList GetRegisterListOperand(int operand_index) const;
int GetRegisterOperandRange(int operand_index) const;
Runtime::FunctionId GetRuntimeIdOperand(int operand_index) const;
Runtime::FunctionId GetIntrinsicIdOperand(int operand_index) const;
@@ -32,6 +32,10 @@ static const int kCallerPCOffsetRegisterIndex =
(InterpreterFrameConstants::kRegisterFileFromFp -
InterpreterFrameConstants::kCallerPCOffset) /
kSystemPointerSize;
static const int kArgumentCountRegisterIndex =
(InterpreterFrameConstants::kRegisterFileFromFp -
InterpreterFrameConstants::kArgCOffset) /
kSystemPointerSize;

Register Register::FromParameterIndex(int index, int parameter_count) {
DCHECK_GE(index, 0);
@@ -83,6 +87,11 @@ Register Register::virtual_accumulator() {
return Register(kCallerPCOffsetRegisterIndex);
}

// static
Register Register::argument_count() {
return Register(kArgumentCountRegisterIndex);
}

OperandSize Register::SizeOfOperand() const {
int32_t operand = ToOperand();
if (operand >= kMinInt8 && operand <= kMaxInt8) {
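kArgumentCountRegisterIndex converts two frame-pointer-relative byte offsets into a register-file index: both quantities are measured in bytes from the frame pointer, so their difference divided by the pointer size is a distance in register slots. A worked instance of that arithmetic; the offset values below are assumptions for illustration only, not V8's real frame layout:

#include <iostream>

// Hypothetical frame-layout constants; the real ones live in
// InterpreterFrameConstants and vary by architecture.
constexpr int kSystemPointerSize = 8;
constexpr int kRegisterFileFromFp = -3 * kSystemPointerSize;  // assumed: -24
constexpr int kArgCOffset = 2 * kSystemPointerSize;           // assumed: 16

// Same shape as kArgumentCountRegisterIndex in bytecode-register.cc.
constexpr int kArgumentCountRegisterIndex =
    (kRegisterFileFromFp - kArgCOffset) / kSystemPointerSize;

int main() {
  // With the assumed offsets: (-24 - 16) / 8 == -5, i.e. the argument-count
  // slot sits five register slots before the start of the register file.
  std::cout << kArgumentCountRegisterIndex << "\n";
}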
@@ -48,6 +48,9 @@ class V8_EXPORT_PRIVATE Register final {
static Register bytecode_offset();
bool is_bytecode_offset() const;

// Returns the register for the argument count.
static Register argument_count();

// Returns a register that can be used to represent the accumulator
// within code in the interpreter, but should never be emitted in
// bytecode.
@@ -110,6 +113,10 @@ class RegisterList {
DCHECK_LT(new_count, register_count_);
return RegisterList(first_reg_index_, new_count);
}
const RegisterList PopLeft() {
DCHECK_GE(register_count_, 0);
return RegisterList(first_reg_index_ + 1, register_count_ - 1);
}

const Register operator[](size_t i) const {
DCHECK_LT(static_cast<int>(i), register_count_);
@@ -131,6 +138,7 @@ class RegisterList {
friend class BytecodeDecoder;
friend class InterpreterTester;
friend class BytecodeUtils;
friend class BytecodeArrayAccessor;

RegisterList(int first_reg_index, int register_count)
: first_reg_index_(first_reg_index), register_count_(register_count) {}
@@ -202,41 +202,6 @@ TNode<Context> InterpreterAssembler::GetContextAtDepth(TNode<Context> context,
return cur_context.value();
}

void InterpreterAssembler::GotoIfHasContextExtensionUpToDepth(
TNode<Context> context, TNode<Uint32T> depth, Label* target) {
TVARIABLE(Context, cur_context, context);
TVARIABLE(Uint32T, cur_depth, depth);

Label context_search(this, {&cur_depth, &cur_context});
Label no_extension(this);

// Loop until the depth is 0.
Goto(&context_search);
BIND(&context_search);
{
// Check if context has an extension slot.
TNode<BoolT> has_extension =
LoadScopeInfoHasExtensionField(LoadScopeInfo(cur_context.value()));
GotoIfNot(has_extension, &no_extension);

// Jump to the target if the extension slot is not an undefined value.
TNode<Object> extension_slot =
LoadContextElement(cur_context.value(), Context::EXTENSION_INDEX);
Branch(TaggedNotEqual(extension_slot, UndefinedConstant()), target,
&no_extension);

BIND(&no_extension);
{
cur_depth = Unsigned(Int32Sub(cur_depth.value(), Int32Constant(1)));
cur_context = CAST(
LoadContextElement(cur_context.value(), Context::PREVIOUS_INDEX));

GotoIf(Word32NotEqual(cur_depth.value(), Int32Constant(0)),
&context_search);
}
}
}

TNode<IntPtrT> InterpreterAssembler::RegisterLocation(
TNode<IntPtrT> reg_index) {
return Signed(WordPoisonOnSpeculation(
@@ -1323,7 +1288,8 @@ void InterpreterAssembler::MaybeDropFrames(TNode<Context> context) {

void InterpreterAssembler::TraceBytecode(Runtime::FunctionId function_id) {
CallRuntime(function_id, GetContext(), BytecodeArrayTaggedPointer(),
SmiTag(BytecodeOffset()), GetAccumulatorUnchecked());
SmiTag(BytecodeOffset()), GetAccumulatorUnchecked(),
FalseConstant());
}

void InterpreterAssembler::TraceBytecodeDispatch(TNode<WordT> target_bytecode) {
@@ -1558,7 +1524,8 @@ void InterpreterAssembler::ToNumberOrNumeric(Object::Conversion mode) {
TNode<UintPtrT> slot_index = BytecodeOperandIdx(0);
TNode<HeapObject> maybe_feedback_vector = LoadFeedbackVector();

UpdateFeedback(var_type_feedback.value(), maybe_feedback_vector, slot_index);
MaybeUpdateFeedback(var_type_feedback.value(), maybe_feedback_vector,
slot_index, false);

SetAccumulator(var_result.value());
Dispatch();
@@ -82,11 +82,6 @@ class V8_EXPORT_PRIVATE InterpreterAssembler : public CodeStubAssembler {
TNode<Context> GetContextAtDepth(TNode<Context> context,
TNode<Uint32T> depth);

// Goto the given |target| if the context chain starting at |context| has any
// extensions up to the given |depth|.
void GotoIfHasContextExtensionUpToDepth(TNode<Context> context,
TNode<Uint32T> depth, Label* target);

// A RegListNodePair provides an abstraction over lists of registers.
class RegListNodePair {
public:
@@ -222,18 +222,9 @@ IGNITION_HANDLER(StaGlobal, InterpreterAssembler) {
TNode<TaggedIndex> slot = BytecodeOperandIdxTaggedIndex(1);
TNode<HeapObject> maybe_vector = LoadFeedbackVector();

Label no_feedback(this, Label::kDeferred), end(this);
GotoIf(IsUndefined(maybe_vector), &no_feedback);

CallBuiltin(Builtins::kStoreGlobalIC, context, name, value, slot,
maybe_vector);
Goto(&end);

Bind(&no_feedback);
CallRuntime(Runtime::kStoreGlobalICNoFeedback_Miss, context, value, name);
Goto(&end);

Bind(&end);
Dispatch();
}

@@ -353,11 +344,11 @@ class InterpreterLookupContextSlotAssembler : public InterpreterAssembler {
Label slowpath(this, Label::kDeferred);

// Check for context extensions to allow the fast path.
GotoIfHasContextExtensionUpToDepth(context, depth, &slowpath);
TNode<Context> slot_context =
GotoIfHasContextExtensionUpToDepth(context, depth, &slowpath);

// Fast path does a normal load context.
{
TNode<Context> slot_context = GetContextAtDepth(context, depth);
TNode<Object> result = LoadContextElement(slot_context, slot_index);
SetAccumulator(result);
Dispatch();
@@ -853,9 +844,9 @@ class InterpreterBinaryOpAssembler : public InterpreterAssembler {
: InterpreterAssembler(state, bytecode, operand_scale) {}

using BinaryOpGenerator = TNode<Object> (BinaryOpAssembler::*)(
TNode<Context> context, TNode<Object> left, TNode<Object> right,
TNode<UintPtrT> slot, TNode<HeapObject> maybe_feedback_vector,
bool rhs_known_smi);
const LazyNode<Context>& context, TNode<Object> left, TNode<Object> right,
TNode<UintPtrT> slot, const LazyNode<HeapObject>& maybe_feedback_vector,
bool guaranteed_feedback, bool rhs_known_smi);

void BinaryOpWithFeedback(BinaryOpGenerator generator) {
TNode<Object> lhs = LoadRegisterAtOperandIndex(0);
@@ -865,8 +856,9 @@ class InterpreterBinaryOpAssembler : public InterpreterAssembler {
TNode<HeapObject> maybe_feedback_vector = LoadFeedbackVector();

BinaryOpAssembler binop_asm(state());
TNode<Object> result = (binop_asm.*generator)(context, lhs, rhs, slot_index,
maybe_feedback_vector, false);
TNode<Object> result = (binop_asm.*generator)(
[=] { return context; }, lhs, rhs, slot_index,
[=] { return maybe_feedback_vector; }, false, false);
SetAccumulator(result);
Dispatch();
}
@@ -879,8 +871,9 @@ class InterpreterBinaryOpAssembler : public InterpreterAssembler {
TNode<HeapObject> maybe_feedback_vector = LoadFeedbackVector();

BinaryOpAssembler binop_asm(state());
TNode<Object> result = (binop_asm.*generator)(context, lhs, rhs, slot_index,
maybe_feedback_vector, true);
TNode<Object> result = (binop_asm.*generator)(
[=] { return context; }, lhs, rhs, slot_index,
[=] { return maybe_feedback_vector; }, false, true);
SetAccumulator(result);
Dispatch();
}
@@ -989,9 +982,10 @@ class InterpreterBitwiseBinaryOpAssembler : public InterpreterAssembler {

BinaryOpAssembler binop_asm(state());
TNode<Object> result = binop_asm.Generate_BitwiseBinaryOpWithFeedback(
bitwise_op, left, right, context, &feedback);
bitwise_op, left, right, [=] { return context; }, &feedback);

UpdateFeedback(feedback.value(), maybe_feedback_vector, slot_index);
MaybeUpdateFeedback(feedback.value(), maybe_feedback_vector, slot_index,
false);
SetAccumulator(result);
Dispatch();
}
@@ -1017,14 +1011,14 @@ class InterpreterBitwiseBinaryOpAssembler : public InterpreterAssembler {
TNode<Smi> result_type = SelectSmiConstant(
TaggedIsSmi(result), BinaryOperationFeedback::kSignedSmall,
BinaryOperationFeedback::kNumber);
UpdateFeedback(SmiOr(result_type, var_left_feedback.value()),
maybe_feedback_vector, slot_index);
MaybeUpdateFeedback(SmiOr(result_type, var_left_feedback.value()),
maybe_feedback_vector, slot_index, false);
SetAccumulator(result);
Dispatch();

BIND(&if_bigint_mix);
UpdateFeedback(var_left_feedback.value(), maybe_feedback_vector,
slot_index);
MaybeUpdateFeedback(var_left_feedback.value(), maybe_feedback_vector,
slot_index, false);
ThrowTypeError(context, MessageTemplate::kBigIntMixedTypes);
}
};
@@ -1112,7 +1106,7 @@ IGNITION_HANDLER(BitwiseNot, InterpreterAssembler) {

UnaryOpAssembler unary_op_asm(state());
TNode<Object> result = unary_op_asm.Generate_BitwiseNotWithFeedback(
context, value, slot_index, maybe_feedback_vector);
context, value, slot_index, maybe_feedback_vector, false);

SetAccumulator(result);
Dispatch();
@@ -1156,7 +1150,7 @@ IGNITION_HANDLER(Negate, InterpreterAssembler) {

UnaryOpAssembler unary_op_asm(state());
TNode<Object> result = unary_op_asm.Generate_NegateWithFeedback(
context, value, slot_index, maybe_feedback_vector);
context, value, slot_index, maybe_feedback_vector, false);

SetAccumulator(result);
Dispatch();
@@ -1217,7 +1211,7 @@ IGNITION_HANDLER(Inc, InterpreterAssembler) {

UnaryOpAssembler unary_op_asm(state());
TNode<Object> result = unary_op_asm.Generate_IncrementWithFeedback(
context, value, slot_index, maybe_feedback_vector);
context, value, slot_index, maybe_feedback_vector, false);

SetAccumulator(result);
Dispatch();
@@ -1234,7 +1228,7 @@ IGNITION_HANDLER(Dec, InterpreterAssembler) {

UnaryOpAssembler unary_op_asm(state());
TNode<Object> result = unary_op_asm.Generate_DecrementWithFeedback(
context, value, slot_index, maybe_feedback_vector);
context, value, slot_index, maybe_feedback_vector, false);

SetAccumulator(result);
Dispatch();
@@ -1623,8 +1617,8 @@ class InterpreterCompareOpAssembler : public InterpreterAssembler {

TNode<UintPtrT> slot_index = BytecodeOperandIdx(1);
TNode<HeapObject> maybe_feedback_vector = LoadFeedbackVector();
UpdateFeedback(var_type_feedback.value(), maybe_feedback_vector,
slot_index);
MaybeUpdateFeedback(var_type_feedback.value(), maybe_feedback_vector,
slot_index, false);
SetAccumulator(result);
Dispatch();
}
@@ -2852,7 +2846,7 @@ IGNITION_HANDLER(ForInPrepare, InterpreterAssembler) {
TNode<FixedArray> cache_array;
TNode<Smi> cache_length;
ForInPrepare(enumerator, vector_index, maybe_feedback_vector, &cache_array,
&cache_length);
&cache_length, false);

StoreRegisterTripleAtOperandIndex(cache_type, cache_array, cache_length, 0);
Dispatch();
@@ -2887,7 +2881,7 @@ IGNITION_HANDLER(ForInNext, InterpreterAssembler) {
{
TNode<Object> result =
ForInNextSlow(GetContext(), vector_index, receiver, key, cache_type,
maybe_feedback_vector);
maybe_feedback_vector, false);
SetAccumulator(result);
Dispatch();
}
@@ -969,6 +969,10 @@ class RuntimeCallTimer final {
V(BoundFunctionNameGetter) \
V(CodeGenerationFromStringsCallbacks) \
V(CompileBackgroundCompileTask) \
V(CompileBaseline) \
V(CompileBaselineVisit) \
V(CompileBaselinePrepareHandlerOffsets) \
V(CompileBaselinePreVisit) \
V(CompileCollectSourcePositions) \
V(CompileDeserialize) \
V(CompileEnqueueOnDispatcher) \
@@ -1221,15 +1221,18 @@ void Logger::LogSourceCodeInformation(Handle<AbstractCode> code,
<< reinterpret_cast<void*>(code->InstructionStart()) << Logger::kNext
<< script.id() << Logger::kNext << shared->StartPosition()
<< Logger::kNext << shared->EndPosition() << Logger::kNext;

SourcePositionTableIterator iterator(code->source_position_table());
// TODO(v8:11429): Clean up sparkplug-related code in source position
// iteration.
bool hasInlined = false;
for (; !iterator.done(); iterator.Advance()) {
SourcePosition pos = iterator.source_position();
msg << "C" << iterator.code_offset() << "O" << pos.ScriptOffset();
if (pos.isInlined()) {
msg << "I" << pos.InliningId();
hasInlined = true;
if (code->kind() != CodeKind::SPARKPLUG) {
SourcePositionTableIterator iterator(code->source_position_table());
for (; !iterator.done(); iterator.Advance()) {
SourcePosition pos = iterator.source_position();
msg << "C" << iterator.code_offset() << "O" << pos.ScriptOffset();
if (pos.isInlined()) {
msg << "I" << pos.InliningId();
hasInlined = true;
}
}
}
msg << Logger::kNext;
@@ -2102,6 +2105,7 @@ void ExistingCodeLogger::LogCodeObject(Object object) {
switch (abstract_code->kind()) {
case CodeKind::INTERPRETED_FUNCTION:
case CodeKind::TURBOFAN:
case CodeKind::SPARKPLUG:
case CodeKind::NATIVE_CONTEXT_INDEPENDENT:
case CodeKind::TURBOPROP:
return; // We log this later using LogCompiledFunctions.
@@ -2173,12 +2177,21 @@ void ExistingCodeLogger::LogCompiledFunctions() {
// During iteration, there can be heap allocation due to
// GetScriptLineNumber call.
for (auto& pair : compiled_funcs) {
SharedFunctionInfo::EnsureSourcePositionsAvailable(isolate_, pair.first);
if (pair.first->function_data(kAcquireLoad).IsInterpreterData()) {
Handle<SharedFunctionInfo> shared = pair.first;
SharedFunctionInfo::EnsureSourcePositionsAvailable(isolate_, shared);
if (shared->HasInterpreterData()) {
LogExistingFunction(
pair.first,
shared,
Handle<AbstractCode>(
AbstractCode::cast(pair.first->InterpreterTrampoline()),
AbstractCode::cast(shared->InterpreterTrampoline()), isolate_),
CodeEventListener::INTERPRETED_FUNCTION_TAG);
}
if (shared->HasBaselineData()) {
// TODO(v8:11429): Add a tag for baseline code. Or use CodeKind?
LogExistingFunction(
shared,
Handle<AbstractCode>(
AbstractCode::cast(shared->baseline_data().baseline_code()),
isolate_),
CodeEventListener::INTERPRETED_FUNCTION_TAG);
}
@@ -317,6 +317,21 @@ builtin NewRestArgumentsElements(
frame, formalParameterCount, Convert<intptr>(argumentCount));
}

builtin FastNewSloppyArguments(implicit context: Context)(f: JSFunction):
JSSloppyArgumentsObject {
return EmitFastNewSloppyArguments(f);
}

builtin FastNewStrictArguments(implicit context: Context)(f: JSFunction):
JSStrictArgumentsObject {
return EmitFastNewStrictArguments(f);
}

builtin FastNewRestArguments(implicit context: Context)(f: JSFunction):
JSArray {
return EmitFastNewRestArguments(f);
}

macro
AccessSloppyArgumentsCommon(
receiver: JSObject, keyObject: Object): &Object labels Bailout {
@@ -5,12 +5,12 @@
#ifndef V8_OBJECTS_CODE_INL_H_
#define V8_OBJECTS_CODE_INL_H_

#include "src/objects/code.h"

#include "src/base/memory.h"
#include "src/codegen/code-desc.h"
#include "src/common/assert-scope.h"
#include "src/execution/isolate.h"
#include "src/interpreter/bytecode-register.h"
#include "src/objects/code.h"
#include "src/objects/dictionary.h"
#include "src/objects/instance-type-inl.h"
#include "src/objects/map-inl.h"
@@ -329,6 +329,67 @@ CodeKind Code::kind() const {
return KindField::decode(ReadField<uint32_t>(kFlagsOffset));
}

namespace detail {

// TODO(v8:11429): Extract out of header, to generic helper, and merge with
// TranslationArray de/encoding.
inline int ReadUint(ByteArray array, int* index) {
int byte = 0;
int value = 0;
int shift = 0;
do {
byte = array.get((*index)++);
value += (byte & ((1 << 7) - 1)) << shift;
shift += 7;
} while (byte & (1 << 7));
return value;
}

} // namespace detail
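detail::ReadUint decodes a base-128 varint: each byte carries 7 payload bits, bit 7 set means another byte follows, and groups are assembled little-endian. A standalone, runnable copy of the same loop with a worked example:

#include <cstdint>
#include <iostream>
#include <vector>

// Same logic as detail::ReadUint above, over a plain byte vector.
int ReadUint(const std::vector<uint8_t>& array, int* index) {
  int byte = 0;
  int value = 0;
  int shift = 0;
  do {
    byte = array[(*index)++];
    value += (byte & ((1 << 7) - 1)) << shift;  // low 7 bits are payload
    shift += 7;
  } while (byte & (1 << 7));  // high bit set: continue
  return value;
}

int main() {
  // 0x85 = 1000'0101: payload 5, continuation bit set.
  // 0x02 = 0000'0010: payload 2, last byte.
  // Decoded value: 5 + (2 << 7) = 261.
  std::vector<uint8_t> data = {0x85, 0x02};
  int index = 0;
  std::cout << ReadUint(data, &index) << "\n";  // prints 261
}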
int Code::GetBytecodeOffsetForSparkplugPC(Address sparkplug_pc) {
DisallowGarbageCollection no_gc;
if (is_baseline_prologue_builtin()) return kFunctionEntryBytecodeOffset;
if (is_baseline_leave_frame_builtin()) return kFunctionExitBytecodeOffset;
CHECK_EQ(kind(), CodeKind::SPARKPLUG);
ByteArray data = ByteArray::cast(source_position_table());
Address lookup_pc = 0;
Address pc = sparkplug_pc - InstructionStart();
int index = 0;
int offset = 0;
while (pc > lookup_pc) {
lookup_pc += detail::ReadUint(data, &index);
offset += detail::ReadUint(data, &index);
}
CHECK_EQ(pc, lookup_pc);
return offset;
}

uintptr_t Code::GetSparkplugPCForBytecodeOffset(int bytecode_offset,
bool precise) {
DisallowGarbageCollection no_gc;
CHECK_EQ(kind(), CodeKind::SPARKPLUG);
ByteArray data = ByteArray::cast(source_position_table());
intptr_t pc = 0;
int index = 0;
int offset = 0;
// TODO(v8:11429,cbruni): clean up
// Return the offset for the last bytecode that matches
while (offset < bytecode_offset && index < data.length()) {
int delta_pc = detail::ReadUint(data, &index);
int delta_offset = detail::ReadUint(data, &index);
if (!precise && (bytecode_offset < offset + delta_offset)) break;
pc += delta_pc;
offset += delta_offset;
}
if (precise) {
CHECK_EQ(offset, bytecode_offset);
} else {
CHECK_LE(offset, bytecode_offset);
}
return pc;
}
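Both lookups above walk one stream of (delta_pc, delta_offset) varint pairs, accumulating deltas until the target is reached; the table is a monotone map between machine-code pc and bytecode offset. A sketch of the forward lookup over plain ints, with the varint decoding factored out:

#include <iostream>
#include <vector>

// Each entry advances the machine-code pc and the bytecode offset by a
// delta. Mirrors the loop in GetSparkplugPCForBytecodeOffset.
struct Entry { int delta_pc; int delta_offset; };

int PCForBytecodeOffset(const std::vector<Entry>& table, int bytecode_offset,
                        bool precise) {
  int pc = 0;
  int offset = 0;
  for (const Entry& e : table) {
    if (offset >= bytecode_offset) break;
    // Imprecise lookup: stop before overshooting the requested offset.
    if (!precise && bytecode_offset < offset + e.delta_offset) break;
    pc += e.delta_pc;
    offset += e.delta_offset;
  }
  return pc;
}

int main() {
  std::vector<Entry> table = {{10, 2}, {6, 3}, {12, 4}};
  std::cout << PCForBytecodeOffset(table, 5, /*precise=*/true) << "\n";   // 16
  std::cout << PCForBytecodeOffset(table, 6, /*precise=*/false) << "\n";  // 16
}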
void Code::initialize_flags(CodeKind kind, bool is_turbofanned, int stack_slots,
bool is_off_heap_trampoline) {
CHECK(0 <= stack_slots && stack_slots < StackSlotsField::kMax);
@@ -352,6 +413,14 @@ inline bool Code::is_interpreter_trampoline_builtin() const {
index == Builtins::kInterpreterEnterBytecodeDispatch);
}

inline bool Code::is_baseline_leave_frame_builtin() const {
return builtin_index() == Builtins::kBaselineLeaveFrame;
}

inline bool Code::is_baseline_prologue_builtin() const {
return builtin_index() == Builtins::kBaselineOutOfLinePrologue;
}

inline bool Code::checks_optimization_marker() const {
bool checks_marker =
(builtin_index() == Builtins::kCompileLazy ||
@@ -458,13 +527,14 @@ int Code::stack_slots() const {
}

bool Code::marked_for_deoptimization() const {
DCHECK(CodeKindCanDeoptimize(kind()));
// TODO(v8:11429): Re-evaluate if sparkplug code can really deopt.
DCHECK(CodeKindCanDeoptimize(kind()) || kind() == CodeKind::SPARKPLUG);
int32_t flags = code_data_container(kAcquireLoad).kind_specific_flags();
return MarkedForDeoptimizationField::decode(flags);
}

void Code::set_marked_for_deoptimization(bool flag) {
DCHECK(CodeKindCanDeoptimize(kind()));
DCHECK(CodeKindCanDeoptimize(kind()) || kind() == CodeKind::SPARKPLUG);
DCHECK_IMPLIES(flag, AllowDeoptimization::IsAllowed(GetIsolate()));
CodeDataContainer container = code_data_container(kAcquireLoad);
int32_t previous = container.kind_specific_flags();
@@ -22,6 +22,8 @@ const char* CodeKindToMarker(CodeKind kind) {
switch (kind) {
case CodeKind::INTERPRETED_FUNCTION:
return "~";
case CodeKind::SPARKPLUG:
return "^";
case CodeKind::NATIVE_CONTEXT_INDEPENDENT:
return "-";
case CodeKind::TURBOPROP:
@@ -27,6 +27,7 @@ namespace internal {
V(C_WASM_ENTRY) \
V(INTERPRETED_FUNCTION) \
V(NATIVE_CONTEXT_INDEPENDENT) \
V(SPARKPLUG) \
V(TURBOPROP) \
V(TURBOFAN)

@@ -65,7 +66,7 @@ inline constexpr bool CodeKindIsOptimizedJSFunction(CodeKind kind) {
}

inline constexpr bool CodeKindIsJSFunction(CodeKind kind) {
return kind == CodeKind::INTERPRETED_FUNCTION ||
return CodeKindIsInterpretedJSFunction(kind) ||
CodeKindIsOptimizedJSFunction(kind);
}

@@ -86,11 +87,12 @@ inline constexpr bool CodeKindCanOSR(CodeKind kind) {

inline constexpr bool CodeKindIsOptimizedAndCanTierUp(CodeKind kind) {
return kind == CodeKind::NATIVE_CONTEXT_INDEPENDENT ||
kind == CodeKind::SPARKPLUG ||
(!FLAG_turboprop_as_toptier && kind == CodeKind::TURBOPROP);
}

inline constexpr bool CodeKindCanTierUp(CodeKind kind) {
return kind == CodeKind::INTERPRETED_FUNCTION ||
return CodeKindIsInterpretedJSFunction(kind) ||
CodeKindIsOptimizedAndCanTierUp(kind);
}

@@ -149,7 +151,8 @@ DEFINE_OPERATORS_FOR_FLAGS(CodeKinds)

static constexpr CodeKinds kJSFunctionCodeKindsMask{
CodeKindFlag::INTERPRETED_FUNCTION | CodeKindFlag::TURBOFAN |
CodeKindFlag::NATIVE_CONTEXT_INDEPENDENT | CodeKindFlag::TURBOPROP};
CodeKindFlag::NATIVE_CONTEXT_INDEPENDENT | CodeKindFlag::TURBOPROP |
CodeKindFlag::SPARKPLUG};
static constexpr CodeKinds kOptimizedJSFunctionCodeKindsMask{
CodeKindFlag::TURBOFAN | CodeKindFlag::NATIVE_CONTEXT_INDEPENDENT |
CodeKindFlag::TURBOPROP};
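The change above folds SPARKPLUG into kJSFunctionCodeKindsMask: each code kind contributes one bit to a flags type, and kind-set queries become mask tests. An illustrative re-creation of the pattern; the bit positions here are arbitrary, not V8's actual encoding:

#include <cstdint>
#include <iostream>

// One bit per code kind, so sets of kinds are plain bitmasks.
enum class CodeKind : uint32_t {
  C_WASM_ENTRY = 1u << 0,
  INTERPRETED_FUNCTION = 1u << 1,
  NATIVE_CONTEXT_INDEPENDENT = 1u << 2,
  SPARKPLUG = 1u << 3,
  TURBOPROP = 1u << 4,
  TURBOFAN = 1u << 5,
};

// As of this change, SPARKPLUG is part of the JS-function mask.
constexpr uint32_t kJSFunctionCodeKindsMask =
    static_cast<uint32_t>(CodeKind::INTERPRETED_FUNCTION) |
    static_cast<uint32_t>(CodeKind::TURBOFAN) |
    static_cast<uint32_t>(CodeKind::NATIVE_CONTEXT_INDEPENDENT) |
    static_cast<uint32_t>(CodeKind::TURBOPROP) |
    static_cast<uint32_t>(CodeKind::SPARKPLUG);

constexpr bool IsJSFunctionKind(CodeKind kind) {
  return (static_cast<uint32_t>(kind) & kJSFunctionCodeKindsMask) != 0;
}

static_assert(IsJSFunctionKind(CodeKind::SPARKPLUG));
static_assert(!IsJSFunctionKind(CodeKind::C_WASM_ENTRY));

int main() {
  std::cout << std::boolalpha << IsJSFunctionKind(CodeKind::SPARKPLUG) << "\n";
}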
@@ -497,10 +497,14 @@ void Code::Disassemble(const char* name, std::ostream& os, Isolate* isolate,
if ((name != nullptr) && (name[0] != '\0')) {
os << "name = " << name << "\n";
}
if (CodeKindIsOptimizedJSFunction(kind())) {
if (CodeKindIsOptimizedJSFunction(kind()) && kind() != CodeKind::SPARKPLUG) {
os << "stack_slots = " << stack_slots() << "\n";
}
os << "compiler = " << (is_turbofanned() ? "turbofan" : "unknown") << "\n";
os << "compiler = "
<< (is_turbofanned()
? "turbofan"
: kind() == CodeKind::SPARKPLUG ? "baseline" : "unknown")
<< "\n";
os << "address = " << reinterpret_cast<void*>(ptr()) << "\n\n";

if (is_off_heap_trampoline()) {
@@ -531,32 +535,34 @@ void Code::Disassemble(const char* name, std::ostream& os, Isolate* isolate,
}
os << "\n";

{
SourcePositionTableIterator it(
SourcePositionTable(), SourcePositionTableIterator::kJavaScriptOnly);
if (!it.done()) {
os << "Source positions:\n pc offset position\n";
for (; !it.done(); it.Advance()) {
os << std::setw(10) << std::hex << it.code_offset() << std::dec
<< std::setw(10) << it.source_position().ScriptOffset()
<< (it.is_statement() ? " statement" : "") << "\n";
if (kind() != CodeKind::SPARKPLUG) {
{
SourcePositionTableIterator it(
SourcePositionTable(), SourcePositionTableIterator::kJavaScriptOnly);
if (!it.done()) {
os << "Source positions:\n pc offset position\n";
for (; !it.done(); it.Advance()) {
os << std::setw(10) << std::hex << it.code_offset() << std::dec
<< std::setw(10) << it.source_position().ScriptOffset()
<< (it.is_statement() ? " statement" : "") << "\n";
}
os << "\n";
}
os << "\n";
}
}

{
SourcePositionTableIterator it(SourcePositionTable(),
SourcePositionTableIterator::kExternalOnly);
if (!it.done()) {
os << "External Source positions:\n pc offset fileid line\n";
for (; !it.done(); it.Advance()) {
DCHECK(it.source_position().IsExternal());
os << std::setw(10) << std::hex << it.code_offset() << std::dec
<< std::setw(10) << it.source_position().ExternalFileId()
<< std::setw(10) << it.source_position().ExternalLine() << "\n";
{
SourcePositionTableIterator it(
SourcePositionTable(), SourcePositionTableIterator::kExternalOnly);
if (!it.done()) {
os << "External Source positions:\n pc offset fileid line\n";
for (; !it.done(); it.Advance()) {
DCHECK(it.source_position().IsExternal());
os << std::setw(10) << std::hex << it.code_offset() << std::dec
<< std::setw(10) << it.source_position().ExternalFileId()
<< std::setw(10) << it.source_position().ExternalLine() << "\n";
}
os << "\n";
}
os << "\n";
}
}

@@ -613,10 +619,6 @@ void Code::Disassemble(const char* name, std::ostream& os, Isolate* isolate,
eh_frame_disassembler.DisassembleToStream(os);
os << "\n";
}

if (has_code_comments()) {
PrintCodeCommentsSection(os, code_comments(), code_comments_size());
}
}
#endif // ENABLE_DISASSEMBLER

@@ -626,6 +628,8 @@ void BytecodeArray::Disassemble(std::ostream& os) {
os << "Parameter count " << parameter_count() << "\n";
os << "Register count " << register_count() << "\n";
os << "Frame size " << frame_size() << "\n";
os << "OSR nesting level: " << osr_loop_nesting_level() << "\n";
os << "Bytecode Age: " << bytecode_age() << "\n";

Address base_address = GetFirstBytecodeAddress();
SourcePositionTableIterator source_positions(SourcePositionTable());
@@ -249,6 +249,10 @@ class Code : public HeapObject {
// Testers for interpreter builtins.
inline bool is_interpreter_trampoline_builtin() const;

// Testers for baseline builtins.
inline bool is_baseline_prologue_builtin() const;
inline bool is_baseline_leave_frame_builtin() const;

// Tells whether the code checks the optimization marker in the function's
// feedback vector.
inline bool checks_optimization_marker() const;
@@ -375,6 +379,10 @@ class Code : public HeapObject {
static inline void CopyRelocInfoToByteArray(ByteArray dest,
const CodeDesc& desc);

inline uintptr_t GetSparkplugPCForBytecodeOffset(int bytecode_offset,
bool precise = true);
inline int GetBytecodeOffsetForSparkplugPC(Address sparkplug_pc);

// Flushes the instruction cache for the executable instructions of this code
// object. Make sure to call this while the code is still writable.
void FlushICache() const;
@@ -74,7 +74,7 @@ void JSFunction::MarkForOptimization(ConcurrencyMode mode) {
}

DCHECK(!is_compiled() || ActiveTierIsIgnition() || ActiveTierIsNCI() ||
ActiveTierIsMidtierTurboprop());
ActiveTierIsMidtierTurboprop() || ActiveTierIsSparkplug());
DCHECK(!ActiveTierIsTurbofan());
DCHECK(shared().IsInterpreted());
DCHECK(shared().allows_lazy_compilation() ||
@@ -52,6 +52,13 @@ CodeKinds JSFunction::GetAvailableCodeKinds() const {
}
}

if ((result & CodeKindFlag::SPARKPLUG) == 0) {
// The SharedFunctionInfo could have attached baseline code.
if (shared().HasBaselineData()) {
result |= CodeKindFlag::SPARKPLUG;
}
}

// Check the optimized code cache.
if (has_feedback_vector() && feedback_vector().has_optimized_code() &&
!feedback_vector().optimized_code().marked_for_deoptimization()) {
@@ -91,6 +98,9 @@ bool HighestTierOf(CodeKinds kinds, CodeKind* highest_tier) {
} else if ((kinds & CodeKindFlag::TURBOPROP) != 0) {
*highest_tier = CodeKind::TURBOPROP;
return true;
} else if ((kinds & CodeKindFlag::SPARKPLUG) != 0) {
*highest_tier = CodeKind::SPARKPLUG;
return true;
} else if ((kinds & CodeKindFlag::NATIVE_CONTEXT_INDEPENDENT) != 0) {
*highest_tier = CodeKind::NATIVE_CONTEXT_INDEPENDENT;
return true;
@@ -121,6 +131,7 @@ CodeKind JSFunction::GetActiveTier() const {
DCHECK(shared().is_compiled());
HighestTierOf(GetAvailableCodeKinds(), &highest_tier);
DCHECK(highest_tier == CodeKind::TURBOFAN ||
highest_tier == CodeKind::SPARKPLUG ||
highest_tier == CodeKind::TURBOPROP ||
highest_tier == CodeKind::NATIVE_CONTEXT_INDEPENDENT ||
highest_tier == CodeKind::INTERPRETED_FUNCTION);
@@ -137,6 +148,16 @@ bool JSFunction::ActiveTierIsNCI() const {
return GetActiveTier() == CodeKind::NATIVE_CONTEXT_INDEPENDENT;
}

bool JSFunction::ActiveTierIsSparkplug() const {
CodeKind highest_tier;
if (!HighestTierOf(GetAvailableCodeKinds(), &highest_tier)) return false;
return highest_tier == CodeKind::SPARKPLUG;
}

bool JSFunction::ActiveTierIsIgnitionOrSparkplug() const {
return ActiveTierIsIgnition() || ActiveTierIsSparkplug();
}

bool JSFunction::ActiveTierIsToptierTurboprop() const {
if (!FLAG_turboprop_as_toptier) return false;
if (!shared().HasBytecodeArray()) return false;
@@ -153,7 +174,7 @@ CodeKind JSFunction::NextTier() const {
if (V8_UNLIKELY(FLAG_turboprop) && ActiveTierIsMidtierTurboprop()) {
return CodeKind::TURBOFAN;
} else if (V8_UNLIKELY(FLAG_turboprop)) {
DCHECK(ActiveTierIsIgnition());
DCHECK(ActiveTierIsIgnitionOrSparkplug());
return CodeKind::TURBOPROP;
}
return CodeKind::TURBOFAN;
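HighestTierOf resolves a function's active tier by scanning the available-kinds mask from the highest tier down and returning the first hit; the new else-if slots Sparkplug between Turboprop and NCI. A condensed sketch of that priority chain, with arbitrary bit assignments standing in for CodeKindFlag:

#include <cstdint>
#include <iostream>
#include <optional>

// Bit flags standing in for CodeKindFlag; positions are illustrative.
enum Flag : uint32_t {
  kInterpreted = 1u << 0,
  kNCI = 1u << 1,
  kSparkplug = 1u << 2,
  kTurboprop = 1u << 3,
  kTurbofan = 1u << 4,
};

// Mirrors HighestTierOf: first present kind wins, scanning from the top.
// The ordering (Turbofan > Turboprop > Sparkplug > NCI > Ignition) matches
// the chain of else-ifs in the diff above.
std::optional<Flag> HighestTierOf(uint32_t kinds) {
  if (kinds & kTurbofan) return kTurbofan;
  if (kinds & kTurboprop) return kTurboprop;
  if (kinds & kSparkplug) return kSparkplug;
  if (kinds & kNCI) return kNCI;
  if (kinds & kInterpreted) return kInterpreted;
  return std::nullopt;
}

int main() {
  // A function with both bytecode and baseline code: Sparkplug wins over
  // Ignition, but would be superseded by any optimizing tier.
  std::cout << (HighestTierOf(kInterpreted | kSparkplug) == kSparkplug)
            << "\n";  // prints 1
}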
@@ -117,6 +117,8 @@ class JSFunction : public JSFunctionOrBoundFunction {
V8_EXPORT_PRIVATE bool ActiveTierIsIgnition() const;
bool ActiveTierIsTurbofan() const;
bool ActiveTierIsNCI() const;
bool ActiveTierIsSparkplug() const;
bool ActiveTierIsIgnitionOrSparkplug() const;
bool ActiveTierIsMidtierTurboprop() const;
bool ActiveTierIsToptierTurboprop() const;
@@ -124,6 +124,7 @@ namespace internal {
V(_, ASM_WASM_DATA_TYPE, AsmWasmData, asm_wasm_data) \
V(_, ASYNC_GENERATOR_REQUEST_TYPE, AsyncGeneratorRequest, \
async_generator_request) \
V(_, BASELINE_DATA_TYPE, BaselineData, baseline_data) \
V(_, BREAK_POINT_TYPE, BreakPoint, break_point) \
V(_, BREAK_POINT_INFO_TYPE, BreakPointInfo, break_point_info) \
V(_, CACHED_TEMPLATE_OBJECT_TYPE, CachedTemplateObject, \
@ -89,6 +89,8 @@ TQ_OBJECT_CONSTRUCTORS_IMPL(UncompiledData)
|
||||
TQ_OBJECT_CONSTRUCTORS_IMPL(UncompiledDataWithoutPreparseData)
|
||||
TQ_OBJECT_CONSTRUCTORS_IMPL(UncompiledDataWithPreparseData)
|
||||
|
||||
TQ_OBJECT_CONSTRUCTORS_IMPL(BaselineData)
|
||||
|
||||
OBJECT_CONSTRUCTORS_IMPL(InterpreterData, Struct)
|
||||
|
||||
CAST_ACCESSOR(InterpreterData)
|
||||
@ -163,6 +165,8 @@ bool SharedFunctionInfo::needs_script_context() const {

template <typename LocalIsolate>
AbstractCode SharedFunctionInfo::abstract_code(LocalIsolate* isolate) {
  // TODO(v8:11429): Decide whether this should return bytecode or baseline
  // code, when the latter is present.
  if (HasBytecodeArray()) {
    return AbstractCode::cast(GetBytecodeArray(isolate));
  } else {
@ -482,7 +486,8 @@ FunctionTemplateInfo SharedFunctionInfo::get_api_func_data() const {

bool SharedFunctionInfo::HasBytecodeArray() const {
  Object data = function_data(kAcquireLoad);
  return data.IsBytecodeArray() || data.IsInterpreterData();
  return data.IsBytecodeArray() || data.IsInterpreterData() ||
         data.IsBaselineData();
}

template <typename LocalIsolate>
@ -496,7 +501,11 @@ BytecodeArray SharedFunctionInfo::GetBytecodeArray(
    return GetDebugInfo().OriginalBytecodeArray();
  }

  Object data = function_data(kAcquireLoad);
  return GetActiveBytecodeArray();
}

BytecodeArray BaselineData::GetActiveBytecodeArray() const {
  Object data = this->data();
  if (data.IsBytecodeArray()) {
    return BytecodeArray::cast(data);
  } else {
@ -505,10 +514,22 @@ BytecodeArray SharedFunctionInfo::GetBytecodeArray(
  }
}

void BaselineData::SetActiveBytecodeArray(BytecodeArray bytecode) {
  Object data = this->data();
  if (data.IsBytecodeArray()) {
    set_data(bytecode);
  } else {
    DCHECK(data.IsInterpreterData());
    InterpreterData::cast(data).set_bytecode_array(bytecode);
  }
}

BytecodeArray SharedFunctionInfo::GetActiveBytecodeArray() const {
  Object data = function_data(kAcquireLoad);
  if (data.IsBytecodeArray()) {
    return BytecodeArray::cast(data);
  } else if (data.IsBaselineData()) {
    return baseline_data().GetActiveBytecodeArray();
  } else {
    DCHECK(data.IsInterpreterData());
    return InterpreterData::cast(data).bytecode_array();
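The pair of accessors above hides one level of indirection: BaselineData holds the baseline Code plus either the BytecodeArray itself or an InterpreterData that in turn owns the bytecode. A rough standalone model of that union and the get/set logic, with std::variant standing in for V8's tagged fields (all types here are stand-ins, not V8's API):

```cpp
#include <memory>
#include <utility>
#include <variant>

struct BytecodeArray {};  // stand-in for the real heap object
struct InterpreterData {  // owns bytecode plus an interpreter trampoline
  std::shared_ptr<BytecodeArray> bytecode;
};

struct BaselineData {
  // Mirrors the Torque field `data: BytecodeArray|InterpreterData` further
  // below; the baseline_code field is omitted from this sketch.
  std::variant<std::shared_ptr<BytecodeArray>, InterpreterData> data;

  std::shared_ptr<BytecodeArray> GetActiveBytecodeArray() const {
    if (auto* direct = std::get_if<std::shared_ptr<BytecodeArray>>(&data)) {
      return *direct;  // bytecode stored directly
    }
    return std::get<InterpreterData>(data).bytecode;  // unwrap one level
  }

  void SetActiveBytecodeArray(std::shared_ptr<BytecodeArray> bytecode) {
    if (std::holds_alternative<std::shared_ptr<BytecodeArray>>(data)) {
      data = std::move(bytecode);  // replace the array in place
    } else {
      std::get<InterpreterData>(data).bytecode = std::move(bytecode);
    }
  }
};
```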
@ -519,6 +540,8 @@ void SharedFunctionInfo::SetActiveBytecodeArray(BytecodeArray bytecode) {
  Object data = function_data(kAcquireLoad);
  if (data.IsBytecodeArray()) {
    set_function_data(bytecode, kReleaseStore);
  } else if (data.IsBaselineData()) {
    baseline_data().SetActiveBytecodeArray(bytecode);
  } else {
    DCHECK(data.IsInterpreterData());
    interpreter_data().set_bytecode_array(bytecode);
@ -558,20 +581,43 @@ Code SharedFunctionInfo::InterpreterTrampoline() const {
}

bool SharedFunctionInfo::HasInterpreterData() const {
  return function_data(kAcquireLoad).IsInterpreterData();
  Object data = function_data(kAcquireLoad);
  if (data.IsBaselineData()) data = BaselineData::cast(data).data();
  return data.IsInterpreterData();
}

InterpreterData SharedFunctionInfo::interpreter_data() const {
  DCHECK(HasInterpreterData());
  return InterpreterData::cast(function_data(kAcquireLoad));
  Object data = function_data(kAcquireLoad);
  if (data.IsBaselineData()) data = BaselineData::cast(data).data();
  return InterpreterData::cast(data);
}

void SharedFunctionInfo::set_interpreter_data(
    InterpreterData interpreter_data) {
  DCHECK(FLAG_interpreted_frames_native_stack);
  DCHECK(!HasBaselineData());
  set_function_data(interpreter_data, kReleaseStore);
}

bool SharedFunctionInfo::HasBaselineData() const {
  return function_data(kAcquireLoad).IsBaselineData();
}

BaselineData SharedFunctionInfo::baseline_data() const {
  DCHECK(HasBaselineData());
  return BaselineData::cast(function_data(kAcquireLoad));
}

void SharedFunctionInfo::set_baseline_data(BaselineData baseline_data) {
  set_function_data(baseline_data, kReleaseStore);
}

void SharedFunctionInfo::flush_baseline_data() {
  DCHECK(HasBaselineData());
  set_function_data(baseline_data().data(), kReleaseStore);
}

bool SharedFunctionInfo::HasAsmWasmData() const {
  return function_data(kAcquireLoad).IsAsmWasmData();
}
@ -780,8 +826,9 @@ bool SharedFunctionInfo::IsSubjectToDebugging() const {
}

bool SharedFunctionInfo::CanDiscardCompiled() const {
  bool can_decompile = (HasBytecodeArray() || HasAsmWasmData() ||
                        HasUncompiledDataWithPreparseData());
  bool can_decompile =
      (HasBytecodeArray() || HasAsmWasmData() ||
       HasUncompiledDataWithPreparseData() || HasBaselineData());
  return can_decompile;
}

@ -82,6 +82,10 @@ Code SharedFunctionInfo::GetCode() const {
    // Having a bytecode array means we are a compiled, interpreted function.
    DCHECK(HasBytecodeArray());
    return isolate->builtins()->builtin(Builtins::kInterpreterEntryTrampoline);
  } else if (data.IsBaselineData()) {
    // Having BaselineData means we are a compiled, baseline function.
    DCHECK(HasBaselineData());
    return baseline_data().baseline_code();
  } else if (data.IsAsmWasmData()) {
    // Having AsmWasmData means we are an asm.js/wasm function.
    DCHECK(HasAsmWasmData());

@ -154,6 +154,14 @@ class InterpreterData : public Struct {
  OBJECT_CONSTRUCTORS(InterpreterData, Struct);
};

class BaselineData : public TorqueGeneratedBaselineData<BaselineData, Struct> {
 public:
  inline BytecodeArray GetActiveBytecodeArray() const;
  inline void SetActiveBytecodeArray(BytecodeArray bytecode);

  TQ_OBJECT_CONSTRUCTORS(BaselineData)
};

// SharedFunctionInfo describes the JSFunction information that can be
// shared by multiple instances of the function.
class SharedFunctionInfo
@ -300,6 +308,10 @@ class SharedFunctionInfo
  inline bool HasInterpreterData() const;
  inline InterpreterData interpreter_data() const;
  inline void set_interpreter_data(InterpreterData interpreter_data);
  inline bool HasBaselineData() const;
  inline BaselineData baseline_data() const;
  inline void set_baseline_data(BaselineData baseline_data);
  inline void flush_baseline_data();
  inline BytecodeArray GetActiveBytecodeArray() const;
  inline void SetActiveBytecodeArray(BytecodeArray bytecode);
  inline bool HasAsmWasmData() const;

@ -14,6 +14,13 @@ extern class InterpreterData extends Struct {
  interpreter_trampoline: Code;
}

@generateCppClass
@generatePrint
extern class BaselineData extends Struct {
  baseline_code: Code;
  data: BytecodeArray|InterpreterData;
}

type FunctionKind extends uint8 constexpr 'FunctionKind';
type FunctionSyntaxKind extends uint8 constexpr 'FunctionSyntaxKind';
type BailoutReason extends uint8 constexpr 'BailoutReason';

@ -11,6 +11,7 @@
#include "src/deoptimizer/deoptimizer.h"
#include "src/handles/handles-inl.h"
#include "src/objects/code-inl.h"
#include "src/objects/code.h"
#include "src/objects/objects-inl.h"
#include "src/objects/script-inl.h"
#include "src/objects/shared-function-info-inl.h"
@ -115,21 +116,37 @@ void ProfilerListener::CodeCreateEvent(LogEventsAndTags tag,

  is_shared_cross_origin = script->origin_options().IsSharedCrossOrigin();

  // TODO(v8:11429,cbruni): improve iteration for sparkplug code
  bool is_sparkplug = abstract_code->kind() == CodeKind::SPARKPLUG;
  Handle<ByteArray> source_position_table(
      abstract_code->source_position_table(), isolate_);
  if (is_sparkplug) {
    source_position_table = handle(
        shared->GetBytecodeArray(isolate_).SourcePositionTable(), isolate_);
  }
  // Add each position to the source position table and store inlining stacks
  // for inline positions. We store almost the same information in the
  // profiler as is stored on the code object, except that we transform source
  // positions to line numbers here, because we only care about attributing
  // ticks to a given line.
  for (SourcePositionTableIterator it(
           handle(abstract_code->source_position_table(), isolate_));
       !it.done(); it.Advance()) {
  for (SourcePositionTableIterator it(source_position_table); !it.done();
       it.Advance()) {
    int position = it.source_position().ScriptOffset();
    int inlining_id = it.source_position().InliningId();
    int code_offset = it.code_offset();
    if (is_sparkplug) {
      // Use the bytecode offset to calculate pc offset for sparkplug code.
      // TODO(v8:11429,cbruni): Speed this up.
      code_offset = static_cast<int>(
          abstract_code->GetCode().GetSparkplugPCForBytecodeOffset(
              code_offset, false));
    }

    if (inlining_id == SourcePosition::kNotInlined) {
      int line_number = script->GetLineNumber(position) + 1;
      line_table->SetPosition(it.code_offset(), line_number, inlining_id);
      line_table->SetPosition(code_offset, line_number, inlining_id);
    } else {
      DCHECK(!is_sparkplug);
      DCHECK(abstract_code->IsCode());
      Handle<Code> code = handle(abstract_code->GetCode(), isolate_);
      std::vector<SourcePositionInfo> stack =
@ -140,7 +157,7 @@ void ProfilerListener::CodeCreateEvent(LogEventsAndTags tag,
      // then the script of the inlined frames may be different to the script
      // of |shared|.
      int line_number = stack.front().line + 1;
      line_table->SetPosition(it.code_offset(), line_number, inlining_id);
      line_table->SetPosition(code_offset, line_number, inlining_id);

      std::vector<CodeEntryAndLineNumber> inline_stack;
      for (SourcePositionInfo& pos_info : stack) {
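For Sparkplug code the profiler iterates the bytecode's source position table, so each bytecode offset must first be translated into a machine-code offset before ticks can be attributed (the GetSparkplugPCForBytecodeOffset call above). A simplified model of that translation, using a plain sorted table in place of V8's internal encoding (names and layout are illustrative):

```cpp
#include <algorithm>
#include <cstdint>
#include <iterator>
#include <vector>

// One entry per bytecode: the machine-code offset where its code begins.
struct OffsetMapping {
  int bytecode_offset;
  uint32_t pc_offset;
};

// Given a table sorted by bytecode_offset, return the PC offset of the last
// entry at or before |bytecode_offset| (0 if the query precedes the table).
uint32_t PCForBytecodeOffset(const std::vector<OffsetMapping>& table,
                             int bytecode_offset) {
  auto it = std::upper_bound(
      table.begin(), table.end(), bytecode_offset,
      [](int offset, const OffsetMapping& m) {
        return offset < m.bytecode_offset;
      });
  return it == table.begin() ? 0 : std::prev(it)->pc_offset;
}
```

The "TODO(v8:11429,cbruni): Speed this up" above hints that the real lookup is not yet this cheap; a sorted table like this one makes each query logarithmic.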
@ -3,19 +3,25 @@
// found in the LICENSE file.

#include "src/asmjs/asm-js.h"
#include "src/baseline/baseline.h"
#include "src/codegen/compilation-cache.h"
#include "src/codegen/compiler.h"
#include "src/codegen/optimized-compilation-info.h"
#include "src/common/assert-scope.h"
#include "src/common/globals.h"
#include "src/common/message-template.h"
#include "src/compiler-dispatcher/optimizing-compile-dispatcher.h"
#include "src/compiler/pipeline.h"
#include "src/deoptimizer/deoptimizer.h"
#include "src/execution/arguments-inl.h"
#include "src/execution/frames-inl.h"
#include "src/execution/isolate-inl.h"
#include "src/execution/v8threads.h"
#include "src/execution/vm-state-inl.h"
#include "src/heap/parked-scope.h"
#include "src/objects/js-array-buffer-inl.h"
#include "src/objects/js-array-inl.h"
#include "src/objects/shared-function-info.h"
#include "src/runtime/runtime-utils.h"

namespace v8 {
@ -103,6 +109,23 @@ RUNTIME_FUNCTION(Runtime_CompileLazy) {
  return function->code();
}

// TODO(v8:11429): Consider renaming PrepareForBaseline.
RUNTIME_FUNCTION(Runtime_PrepareForBaseline) {
  HandleScope scope(isolate);
  DCHECK_EQ(1, args.length());
  CONVERT_ARG_HANDLE_CHECKED(JSFunction, function, 0);
  Handle<SharedFunctionInfo> sfi(function->shared(), isolate);
  DCHECK(sfi->HasBaselineData());
  IsCompiledScope is_compiled_scope(*sfi, isolate);
  DCHECK(!function->HasAvailableOptimizedCode());
  DCHECK(!function->HasOptimizationMarker());
  DCHECK(!function->has_feedback_vector());
  JSFunction::EnsureFeedbackVector(function, &is_compiled_scope);
  Code baseline_code = sfi->baseline_data().baseline_code();
  function->set_code(baseline_code);
  return baseline_code;
}

RUNTIME_FUNCTION(Runtime_TryInstallNCICode) {
  HandleScope scope(isolate);
  DCHECK_EQ(1, args.length());
@ -285,9 +308,12 @@ BytecodeOffset DetermineEntryAndDisarmOSRForInterpreter(
  // the bytecode.
  Handle<BytecodeArray> bytecode(iframe->GetBytecodeArray(), iframe->isolate());

  DCHECK(frame->LookupCode().is_interpreter_trampoline_builtin());
  DCHECK(frame->function().shared().HasBytecodeArray());
  DCHECK_IMPLIES(frame->type() == StackFrame::INTERPRETED,
                 frame->LookupCode().is_interpreter_trampoline_builtin());
  DCHECK_IMPLIES(frame->type() == StackFrame::SPARKPLUG,
                 frame->LookupCode().kind() == CodeKind::SPARKPLUG);
  DCHECK(frame->is_interpreted());
  DCHECK(frame->function().shared().HasBytecodeArray());

  // Reset the OSR loop nesting depth to disarm back edges.
  bytecode->set_osr_loop_nesting_level(0);
@ -404,6 +430,40 @@ RUNTIME_FUNCTION(Runtime_CompileForOnStackReplacement) {
  return Object();
}

RUNTIME_FUNCTION(Runtime_CompileBaseline) {
  HandleScope scope(isolate);
  DCHECK_EQ(1, args.length());
  CONVERT_ARG_HANDLE_CHECKED(JSFunction, function, 0);

  Handle<SharedFunctionInfo> shared(function->shared(isolate), isolate);
  IsCompiledScope is_compiled_scope = shared->is_compiled_scope(isolate);

  StackLimitCheck check(isolate);
  if (check.JsHasOverflowed(kStackSpaceRequiredForCompilation * KB)) {
    return isolate->StackOverflow();
  }
  if (!shared->IsUserJavaScript()) {
    return *function;
  }
  if (!is_compiled_scope.is_compiled()) {
    if (!Compiler::Compile(function, Compiler::KEEP_EXCEPTION,
                           &is_compiled_scope)) {
      return ReadOnlyRoots(isolate).exception();
    }
  }

  // TODO(v8:11429): Add a Compiler::Compile* method for this.
  JSFunction::EnsureFeedbackVector(function, &is_compiled_scope);

  if (!shared->HasBaselineData()) {
    Handle<Code> code = CompileWithBaseline(isolate, shared);
    function->set_code(*code);
  } else {
    function->set_code(shared->baseline_data().baseline_code(isolate));
  }
  return *function;
}

static Object CompileGlobalEval(Isolate* isolate,
                                Handle<i::Object> source_object,
                                Handle<SharedFunctionInfo> outer_info,
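Runtime_CompileBaseline above layers three steps: bail out on stack overflow or non-user code, lazily compile to bytecode if needed, then either run the baseline compiler or reuse baseline code already cached on the SharedFunctionInfo. The control flow in isolation (a sketch with stand-in types and assumed helpers, not V8's API):

```cpp
#include <memory>

struct Code {};
struct SharedInfo {
  bool is_compiled = false;
  std::shared_ptr<Code> baseline_code;  // cached after first compilation
};
struct Function {
  SharedInfo* shared = nullptr;
  std::shared_ptr<Code> code;  // currently installed code
};

// Assumed helpers standing in for Compiler::Compile / CompileWithBaseline.
std::shared_ptr<Code> CompileBytecode(SharedInfo&) {
  return std::make_shared<Code>();
}
std::shared_ptr<Code> CompileBaseline(SharedInfo&) {
  return std::make_shared<Code>();
}

void RuntimeCompileBaseline(Function& f) {
  SharedInfo& sfi = *f.shared;
  if (!sfi.is_compiled) {  // lazy-compile to bytecode first
    CompileBytecode(sfi);
    sfi.is_compiled = true;
  }
  if (!sfi.baseline_code) {  // compile once, cache on the shared info
    sfi.baseline_code = CompileBaseline(sfi);
  }
  f.code = sfi.baseline_code;  // install cached or fresh baseline code
}
```

Caching on the shared info is what lets Runtime_PrepareForBaseline above install already-built baseline code into another closure of the same function without recompiling.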
@ -7,6 +7,7 @@
|
||||
#include "src/api/api.h"
|
||||
#include "src/ast/ast-traversal-visitor.h"
|
||||
#include "src/ast/prettyprinter.h"
|
||||
#include "src/baseline/baseline.h"
|
||||
#include "src/builtins/builtins.h"
|
||||
#include "src/common/message-template.h"
|
||||
#include "src/debug/debug.h"
|
||||
@ -335,6 +336,13 @@ RUNTIME_FUNCTION(Runtime_BytecodeBudgetInterruptFromBytecode) {
|
||||
IsCompiledScope is_compiled_scope(
|
||||
function->shared().is_compiled_scope(isolate));
|
||||
JSFunction::EnsureFeedbackVector(function, &is_compiled_scope);
|
||||
if (FLAG_sparkplug && !function->shared().HasBaselineData() &&
|
||||
!function->shared().HasBreakInfo()) {
|
||||
// TODO(v8:11429): Expose via Compiler, do set_code there.
|
||||
Handle<SharedFunctionInfo> shared(function->shared(), isolate);
|
||||
Handle<Code> code = CompileWithBaseline(isolate, shared);
|
||||
function->set_code(*code);
|
||||
}
|
||||
// Also initialize the invocation count here. This is only really needed for
|
||||
// OSR. When we OSR functions with lazy feedback allocation we want to have
|
||||
// a non zero invocation count so we can inline functions.
|
||||
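The budget interrupt above is the organic tier-up path: when a function's bytecode budget runs out, the runtime ensures a feedback vector and, under --sparkplug, compiles baseline code on the spot unless the debugger holds break info. A toy model of that trigger (the counter value and all names are invented for illustration):

```cpp
#include <functional>

struct ToyFunction {
  int budget = 1000;             // illustrative initial budget
  bool has_baseline_code = false;
  bool has_break_info = false;   // debugger active: stay on bytecode
};

using BaselineCompiler = std::function<void(ToyFunction&)>;

// Called when the budget underflows, like
// Runtime_BytecodeBudgetInterruptFromBytecode above.
void OnBudgetInterrupt(ToyFunction& f, const BaselineCompiler& compile) {
  if (!f.has_baseline_code && !f.has_break_info) {
    compile(f);  // tier up from Ignition to Sparkplug once
    f.has_baseline_code = true;
  }
  f.budget = 1000;  // re-arm the counter
}

// The interpreter decrements the budget as bytecodes execute.
void OnBytecodeExecuted(ToyFunction& f, int cost,
                        const BaselineCompiler& compile) {
  f.budget -= cost;
  if (f.budget <= 0) OnBudgetInterrupt(f, compile);
}
```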
@ -99,16 +99,19 @@ void PrintRegisters(Isolate* isolate, std::ostream& os, bool is_input,

}  // namespace

// TODO(v8:11429): Consider either renaming to not just be "Interpreter", or
// copying for Sparkplug.
RUNTIME_FUNCTION(Runtime_InterpreterTraceBytecodeEntry) {
  if (!FLAG_trace_ignition) {
    return ReadOnlyRoots(isolate).undefined_value();
  }

  SealHandleScope shs(isolate);
  DCHECK_EQ(3, args.length());
  DCHECK_EQ(4, args.length());
  CONVERT_ARG_HANDLE_CHECKED(BytecodeArray, bytecode_array, 0);
  CONVERT_SMI_ARG_CHECKED(bytecode_offset, 1);
  CONVERT_ARG_HANDLE_CHECKED(Object, accumulator, 2);
  CONVERT_ARG_HANDLE_CHECKED(Object, is_sparkplug, 3);

  int offset = bytecode_offset - BytecodeArray::kHeaderSize + kHeapObjectTag;
  interpreter::BytecodeArrayIterator bytecode_iterator(bytecode_array);
@ -120,8 +123,13 @@ RUNTIME_FUNCTION(Runtime_InterpreterTraceBytecodeEntry) {
    const uint8_t* base_address = reinterpret_cast<const uint8_t*>(
        bytecode_array->GetFirstBytecodeAddress());
    const uint8_t* bytecode_address = base_address + offset;
    os << " -> " << static_cast<const void*>(bytecode_address) << " @ "
       << std::setw(4) << offset << " : ";
    if (is_sparkplug->BooleanValue(isolate)) {
      os << "S-> ";
    } else {
      os << " -> ";
    }
    os << static_cast<const void*>(bytecode_address) << " @ " << std::setw(4)
       << offset << " : ";
    interpreter::BytecodeDecoder::Decode(os, bytecode_address,
                                         bytecode_array->parameter_count());
    os << std::endl;
@ -139,7 +147,7 @@ RUNTIME_FUNCTION(Runtime_InterpreterTraceBytecodeExit) {
  }

  SealHandleScope shs(isolate);
  DCHECK_EQ(3, args.length());
  DCHECK_EQ(4, args.length());
  CONVERT_ARG_HANDLE_CHECKED(BytecodeArray, bytecode_array, 0);
  CONVERT_SMI_ARG_CHECKED(bytecode_offset, 1);
  CONVERT_ARG_HANDLE_CHECKED(Object, accumulator, 2);

@ -190,6 +190,10 @@ RUNTIME_FUNCTION(Runtime_DeoptimizeFunction) {

  if (function->HasAttachedOptimizedCode()) {
    Deoptimizer::DeoptimizeFunction(*function);
  } else if (function->code().kind() == CodeKind::SPARKPLUG) {
    // TODO(v8:11429): This should either be in Deoptimizer::DeoptimizeFunction,
    // or not be considered deoptimization at all.
    Deoptimizer::DeoptimizeSparkplug(function->shared());
  }

  return ReadOnlyRoots(isolate).undefined_value();
@ -208,6 +212,8 @@ RUNTIME_FUNCTION(Runtime_DeoptimizeNow) {

  if (function->HasAttachedOptimizedCode()) {
    Deoptimizer::DeoptimizeFunction(*function);
  } else if (function->code().kind() == CodeKind::SPARKPLUG) {
    Deoptimizer::DeoptimizeSparkplug(function->shared());
  }

  return ReadOnlyRoots(isolate).undefined_value();
@ -502,7 +508,7 @@ RUNTIME_FUNCTION(Runtime_OptimizeOsr) {
  function->MarkForOptimization(ConcurrencyMode::kNotConcurrent);

  // Make the profiler arm all back edges in unoptimized code.
  if (it.frame()->type() == StackFrame::INTERPRETED) {
  if (it.frame()->IsUnoptimizedJavaScriptFrame()) {
    isolate->runtime_profiler()->AttemptOnStackReplacement(
        InterpretedFrame::cast(it.frame()),
        AbstractCode::kMaxLoopNestingMarker);
@ -595,6 +601,10 @@ RUNTIME_FUNCTION(Runtime_GetOptimizationStatus) {
      status |= static_cast<int>(OptimizationStatus::kTurboFanned);
    }
  }
  // TODO(v8:11429): Clean up code kind predicates to include Sparkplug.
  if (function->code().kind() == CodeKind::SPARKPLUG) {
    status |= static_cast<int>(OptimizationStatus::kSparkplug);
  }
  if (function->ActiveTierIsIgnition()) {
    status |= static_cast<int>(OptimizationStatus::kInterpreted);
  }

@ -105,8 +105,10 @@ namespace internal {
#define FOR_EACH_INTRINSIC_COMPILER(F, I) \
  F(CompileForOnStackReplacement, 0, 1)   \
  F(CompileLazy, 1, 1)                    \
  F(CompileBaseline, 1, 1)                \
  F(CompileOptimized_Concurrent, 1, 1)    \
  F(CompileOptimized_NotConcurrent, 1, 1) \
  F(PrepareForBaseline, 1, 1)             \
  F(HealOptimizedCodeSlot, 1, 1)          \
  F(FunctionFirstExecution, 1, 1)         \
  F(InstantiateAsmJs, 4, 1)               \
@ -152,8 +154,8 @@ namespace internal {

#ifdef V8_TRACE_IGNITION
#define FOR_EACH_INTRINSIC_INTERPRETER_TRACE(F, I) \
  F(InterpreterTraceBytecodeEntry, 3, 1)           \
  F(InterpreterTraceBytecodeExit, 3, 1)
  F(InterpreterTraceBytecodeEntry, 4, 1)           \
  F(InterpreterTraceBytecodeExit, 4, 1)
#else
#define FOR_EACH_INTRINSIC_INTERPRETER_TRACE(F, I)
#endif
@ -848,6 +850,7 @@ enum class OptimizationStatus {
  kTopmostFrameIsTurboFanned = 1 << 11,
  kLiteMode = 1 << 12,
  kMarkedForDeoptimization = 1 << 13,
  kSparkplug = 1 << 14,
};

}  // namespace internal
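%GetOptimizationStatus returns these enum values OR-ed into one bitmask, so a test can now check the new kSparkplug bit alongside the existing ones. A small decoding example (bit values copied from the enum above; the helper itself is illustrative):

```cpp
#include <cstdint>

// Bits from OptimizationStatus above (only those visible in this hunk).
constexpr uint32_t kLiteMode = 1u << 12;
constexpr uint32_t kMarkedForDeoptimization = 1u << 13;
constexpr uint32_t kSparkplug = 1u << 14;

// A test that received |status| from %GetOptimizationStatus could probe it:
bool IsSparkplug(uint32_t status) { return (status & kSparkplug) != 0; }
```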
@ -121,6 +121,12 @@ void Serializer::SerializeObject(Handle<HeapObject> obj) {
  // indirection and serialize the actual string directly.
  if (obj->IsThinString(isolate())) {
    obj = handle(ThinString::cast(*obj).actual(isolate()), isolate());
  } else if (obj->IsBaselineData()) {
    // For now just serialize the BytecodeArray instead of baseline data.
    // TODO(v8:11429,pthier): Handle BaselineData in cases we want to serialize
    // Baseline code.
    obj = handle(Handle<BaselineData>::cast(obj)->GetActiveBytecodeArray(),
                 isolate());
  }
  SerializeObjectImpl(obj);
}
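The serializer handles BaselineData the same way it already handles ThinString: strip the wrapper and serialize the payload, since serializing baseline code itself is left as a TODO above. The substitution pattern in isolation (stand-in types, not V8's serializer API):

```cpp
#include <memory>
#include <variant>

struct Bytecode {};
struct BaselineWrapper {
  std::shared_ptr<Bytecode> active_bytecode;
};
using HeapObj = std::variant<std::shared_ptr<Bytecode>, BaselineWrapper>;

void SerializeImpl(const std::shared_ptr<Bytecode>&) { /* write bytes */ }

// Before serializing, reduce wrapper objects to their payload, as
// Serializer::SerializeObject above does for ThinString and BaselineData.
void SerializeObject(const HeapObj& obj) {
  if (auto* wrapper = std::get_if<BaselineWrapper>(&obj)) {
    SerializeImpl(wrapper->active_bytecode);  // payload, not wrapper
    return;
  }
  SerializeImpl(std::get<std::shared_ptr<Bytecode>>(obj));
}
```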
@ -638,7 +644,7 @@ void Serializer::ObjectSerializer::Serialize() {
  RecursionScope recursion(serializer_);

  // Defer objects as "pending" if they cannot be serialized now, or if we
  // exceed a certain recursion depth. Some objects cannot be deferred
  // exceed a certain recursion depth. Some objects cannot be deferred.
  if ((recursion.ExceedsMaximum() && CanBeDeferred(*object_)) ||
      serializer_->MustBeDeferred(*object_)) {
    DCHECK(CanBeDeferred(*object_));