[sparkplug] Extract assembler to baseline-assembler*

Bug: v8:11429
Change-Id: I98b65613dc05f593644af45388b1f2c2a7df34a1
Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/2712567
Auto-Submit: Toon Verwaest <verwaest@chromium.org>
Commit-Queue: Toon Verwaest <verwaest@chromium.org>
Reviewed-by: Leszek Swirski <leszeks@chromium.org>
Cr-Commit-Position: refs/heads/master@{#72923}
Toon Verwaest <verwaest@chromium.org>, 2021-02-22 18:22:43 +01:00, committed by Commit Bot
parent 8d11b06f91
commit 2cb031ecfd
10 changed files with 1307 additions and 1181 deletions

BUILD.gn

@@ -2558,6 +2558,8 @@ v8_source_set("v8_base_without_compiler") {
"src/ast/source-range-ast-visitor.h",
"src/ast/variables.cc",
"src/ast/variables.h",
"src/baseline/baseline-assembler-inl.h",
"src/baseline/baseline-assembler.h",
"src/baseline/baseline-compiler.cc",
"src/baseline/baseline-compiler.h",
"src/baseline/baseline.cc",
@@ -3724,6 +3726,7 @@ v8_source_set("v8_base_without_compiler") {
if (v8_current_cpu == "x86") {
sources += [ ### gcmole(arch:ia32) ###
"src/baseline/ia32/baseline-assembler-ia32-inl.h",
"src/baseline/ia32/baseline-compiler-ia32-inl.h",
"src/codegen/ia32/assembler-ia32-inl.h",
"src/codegen/ia32/assembler-ia32.cc",
@@ -3751,6 +3754,7 @@ v8_source_set("v8_base_without_compiler") {
]
} else if (v8_current_cpu == "x64") {
sources += [ ### gcmole(arch:x64) ###
"src/baseline/x64/baseline-assembler-x64-inl.h",
"src/baseline/x64/baseline-compiler-x64-inl.h",
"src/codegen/x64/assembler-x64-inl.h",
"src/codegen/x64/assembler-x64.cc",
@@ -3802,6 +3806,7 @@ v8_source_set("v8_base_without_compiler") {
}
} else if (v8_current_cpu == "arm") {
sources += [ ### gcmole(arch:arm) ###
"src/baseline/arm/baseline-assembler-arm-inl.h",
"src/baseline/arm/baseline-compiler-arm-inl.h",
"src/codegen/arm/assembler-arm-inl.h",
"src/codegen/arm/assembler-arm.cc",
@@ -3834,6 +3839,7 @@ v8_source_set("v8_base_without_compiler") {
]
} else if (v8_current_cpu == "arm64") {
sources += [ ### gcmole(arch:arm64) ###
"src/baseline/arm64/baseline-assembler-arm64-inl.h",
"src/baseline/arm64/baseline-compiler-arm64-inl.h",
"src/codegen/arm64/assembler-arm64-inl.h",
"src/codegen/arm64/assembler-arm64.cc",
@@ -3894,6 +3900,7 @@ v8_source_set("v8_base_without_compiler") {
}
} else if (v8_current_cpu == "mips" || v8_current_cpu == "mipsel") {
sources += [ ### gcmole(arch:mipsel) ###
"src/baseline/mips/baseline-assembler-mips-inl.h",
"src/baseline/mips/baseline-compiler-mips-inl.h",
"src/codegen/mips/assembler-mips-inl.h",
"src/codegen/mips/assembler-mips.cc",
@@ -3923,6 +3930,7 @@ v8_source_set("v8_base_without_compiler") {
]
} else if (v8_current_cpu == "mips64" || v8_current_cpu == "mips64el") {
sources += [ ### gcmole(arch:mips64el) ###
"src/baseline/mips64/baseline-assembler-mips64-inl.h",
"src/baseline/mips64/baseline-compiler-mips64-inl.h",
"src/codegen/mips64/assembler-mips64-inl.h",
"src/codegen/mips64/assembler-mips64.cc",
@@ -3952,6 +3960,7 @@ v8_source_set("v8_base_without_compiler") {
]
} else if (v8_current_cpu == "ppc") {
sources += [ ### gcmole(arch:ppc) ###
"src/baseline/ppc/baseline-assembler-ppc-inl.h",
"src/baseline/ppc/baseline-compiler-ppc-inl.h",
"src/codegen/ppc/assembler-ppc-inl.h",
"src/codegen/ppc/assembler-ppc.cc",
@@ -3984,6 +3993,7 @@ v8_source_set("v8_base_without_compiler") {
]
} else if (v8_current_cpu == "ppc64") {
sources += [ ### gcmole(arch:ppc64) ###
"src/baseline/ppc/baseline-assembler-ppc-inl.h",
"src/baseline/ppc/baseline-compiler-ppc-inl.h",
"src/codegen/ppc/assembler-ppc-inl.h",
"src/codegen/ppc/assembler-ppc.cc",
@@ -4016,6 +4026,7 @@ v8_source_set("v8_base_without_compiler") {
]
} else if (v8_current_cpu == "s390" || v8_current_cpu == "s390x") {
sources += [ ### gcmole(arch:s390) ###
"src/baseline/s390/baseline-assembler-s390-inl.h",
"src/baseline/s390/baseline-compiler-s390-inl.h",
"src/codegen/s390/assembler-s390-inl.h",
"src/codegen/s390/assembler-s390.cc",

src/baseline/arm64/baseline-assembler-arm64-inl.h

@@ -0,0 +1,539 @@
// Copyright 2021 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifndef V8_BASELINE_ARM64_BASELINE_ASSEMBLER_ARM64_INL_H_
#define V8_BASELINE_ARM64_BASELINE_ASSEMBLER_ARM64_INL_H_
#include "src/baseline/baseline-assembler.h"
#include "src/codegen/arm64/macro-assembler-arm64-inl.h"
#include "src/codegen/interface-descriptors.h"
namespace v8 {
namespace internal {
namespace baseline {
class BaselineAssembler::ScratchRegisterScope {
public:
explicit ScratchRegisterScope(BaselineAssembler* assembler)
: assembler_(assembler),
prev_scope_(assembler->scratch_register_scope_),
wrapped_scope_(assembler->masm()) {
if (!assembler_->scratch_register_scope_) {
// If we haven't opened a scratch scope yet, add a couple of extra
// registers to this first one.
wrapped_scope_.Include(x14, x15);
}
assembler_->scratch_register_scope_ = this;
}
~ScratchRegisterScope() { assembler_->scratch_register_scope_ = prev_scope_; }
Register AcquireScratch() { return wrapped_scope_.AcquireX(); }
private:
BaselineAssembler* assembler_;
ScratchRegisterScope* prev_scope_;
UseScratchRegisterScope wrapped_scope_;
};
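// Usage sketch (hypothetical caller): scopes chain through prev_scope_, so a
// nested scope releases its registers on destruction and acquired scratches
// never leak past the scope that handed them out:
//
//   ScratchRegisterScope temps(this);       // outermost scope: adds x14, x15
//   Register a = temps.AcquireScratch();
//   {
//     ScratchRegisterScope inner(this);     // chains to temps
//     Register b = inner.AcquireScratch();  // does not alias a
//   }                                       // inner scope released here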
// TODO(v8:11461): Unify condition names in the MacroAssembler.
enum class Condition : uint8_t {
kEqual = eq,
kNotEqual = ne,
kLessThan = lt,
kGreaterThan = gt,
kLessThanEqual = le,
kGreaterThanEqual = ge,
kUnsignedLessThan = lo,
kUnsignedGreaterThan = hi,
kUnsignedLessThanEqual = ls,
kUnsignedGreaterThanEqual = hs,
kOverflow = vs,
kNoOverflow = vc,
kZero = eq,
kNotZero = ne,
};
inline internal::Condition AsMasmCondition(Condition cond) {
return static_cast<internal::Condition>(cond);
}
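// The cast is sound because every enumerator above is defined directly from
// the corresponding arm64 condition code; a hypothetical sanity check:
//
//   static_assert(static_cast<internal::Condition>(Condition::kEqual) == eq,
//                 "baseline conditions alias the arm64 condition codes");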
namespace detail {
#ifdef DEBUG
inline bool Clobbers(Register target, MemOperand op) {
return op.base() == target || op.regoffset() == target;
}
#endif
} // namespace detail
#define __ masm_->
MemOperand BaselineAssembler::RegisterFrameOperand(
interpreter::Register interpreter_register) {
return MemOperand(fp, interpreter_register.ToOperand() * kSystemPointerSize);
}
MemOperand BaselineAssembler::FeedbackVectorOperand() {
return MemOperand(fp, BaselineFrameConstants::kFeedbackVectorFromFp);
}
void BaselineAssembler::Bind(Label* label) {
// All baseline compiler binds on arm64 are assumed to be jump targets.
__ BindJumpTarget(label);
}
void BaselineAssembler::Jump(Label* target, Label::Distance distance) {
__ B(target);
}
void BaselineAssembler::JumpIf(Condition cc, Label* target, Label::Distance) {
__ B(AsMasmCondition(cc), target);
}
void BaselineAssembler::JumpIfRoot(Register value, RootIndex index,
Label* target, Label::Distance) {
__ JumpIfRoot(value, index, target);
}
void BaselineAssembler::JumpIfNotRoot(Register value, RootIndex index,
Label* target, Label::Distance) {
__ JumpIfNotRoot(value, index, target);
}
void BaselineAssembler::JumpIfSmi(Register value, Label* target,
Label::Distance) {
__ JumpIfSmi(value, target);
}
void BaselineAssembler::JumpIfNotSmi(Register value, Label* target,
Label::Distance) {
__ JumpIfNotSmi(value, target);
}
void BaselineAssembler::CallBuiltin(Builtins::Name builtin) {
ScratchRegisterScope temps(this);
Register temp = temps.AcquireScratch();
__ LoadEntryFromBuiltinIndex(builtin, temp);
__ Call(temp);
}
void BaselineAssembler::TailCallBuiltin(Builtins::Name builtin) {
// x17 is used to allow using "Call" (i.e. `bti c`) rather than "Jump" (i.e.
// `bti j`) landing pads for the tail-called code.
Register temp = x17;
// Make sure we don't use this register as a temporary.
UseScratchRegisterScope temps(masm());
temps.Exclude(temp);
__ LoadEntryFromBuiltinIndex(builtin, temp);
__ Jump(temp);
}
void BaselineAssembler::Test(Register value, int mask) {
__ Tst(value, Immediate(mask));
}
void BaselineAssembler::CmpObjectType(Register object,
InstanceType instance_type,
Register map) {
ScratchRegisterScope temps(this);
Register type = temps.AcquireScratch();
__ CompareObjectType(object, map, type, instance_type);
}
void BaselineAssembler::CmpInstanceType(Register value,
InstanceType instance_type) {
ScratchRegisterScope temps(this);
Register type = temps.AcquireScratch();
__ CompareInstanceType(value, type, instance_type);
}
void BaselineAssembler::Cmp(Register value, Smi smi) { __ Cmp(value, smi); }
void BaselineAssembler::ComparePointer(Register value, MemOperand operand) {
ScratchRegisterScope temps(this);
Register tmp = temps.AcquireScratch();
__ Ldr(tmp, operand);
__ Cmp(value, tmp);
}
void BaselineAssembler::SmiCompare(Register lhs, Register rhs) {
__ AssertSmi(lhs);
__ AssertSmi(rhs);
__ CmpTagged(lhs, rhs);
}
void BaselineAssembler::CompareTagged(Register value, MemOperand operand) {
ScratchRegisterScope temps(this);
Register tmp = temps.AcquireScratch();
__ Ldr(tmp, operand);
__ CmpTagged(value, tmp);
}
void BaselineAssembler::CompareTagged(MemOperand operand, Register value) {
ScratchRegisterScope temps(this);
Register tmp = temps.AcquireScratch();
__ Ldr(tmp, operand);
__ CmpTagged(tmp, value);
}
void BaselineAssembler::CompareByte(Register value, int32_t byte) {
__ Cmp(value, Immediate(byte));
}
void BaselineAssembler::Move(interpreter::Register output, Register source) {
Move(RegisterFrameOperand(output), source);
}
void BaselineAssembler::Move(Register output, TaggedIndex value) {
__ Mov(output, Immediate(value.ptr()));
}
void BaselineAssembler::Move(MemOperand output, Register source) {
__ Str(source, output);
}
void BaselineAssembler::Move(Register output, ExternalReference reference) {
__ Mov(output, Operand(reference));
}
void BaselineAssembler::Move(Register output, Handle<HeapObject> value) {
__ Mov(output, Operand(value));
}
void BaselineAssembler::Move(Register output, int32_t value) {
__ Mov(output, Immediate(value));
}
void BaselineAssembler::MoveMaybeSmi(Register output, Register source) {
__ Mov(output, source);
}
void BaselineAssembler::MoveSmi(Register output, Register source) {
__ Mov(output, source);
}
namespace detail {
template <typename Arg>
inline Register ToRegister(BaselineAssembler* basm,
BaselineAssembler::ScratchRegisterScope* scope,
Arg arg) {
Register reg = scope->AcquireScratch();
basm->Move(reg, arg);
return reg;
}
inline Register ToRegister(BaselineAssembler* basm,
BaselineAssembler::ScratchRegisterScope* scope,
Register reg) {
return reg;
}
template <typename... Args>
struct CountPushHelper;
template <>
struct CountPushHelper<> {
static int Count() { return 0; }
};
template <typename Arg, typename... Args>
struct CountPushHelper<Arg, Args...> {
static int Count(Arg arg, Args... args) {
return 1 + CountPushHelper<Args...>::Count(args...);
}
};
template <typename... Args>
struct CountPushHelper<interpreter::RegisterList, Args...> {
static int Count(interpreter::RegisterList list, Args... args) {
return list.register_count() + CountPushHelper<Args...>::Count(args...);
}
};
template <typename... Args>
struct PushAllHelper;
template <typename... Args>
inline void PushAll(BaselineAssembler* basm, Args... args) {
PushAllHelper<Args...>::Push(basm, args...);
}
template <typename... Args>
inline void PushAllReverse(BaselineAssembler* basm, Args... args) {
PushAllHelper<Args...>::PushReverse(basm, args...);
}
template <>
struct PushAllHelper<> {
static void Push(BaselineAssembler* basm) {}
static void PushReverse(BaselineAssembler* basm) {}
};
template <typename Arg>
struct PushAllHelper<Arg> {
static void Push(BaselineAssembler* basm, Arg) { FATAL("Unaligned push"); }
static void PushReverse(BaselineAssembler* basm, Arg arg) {
// Push the padding register to round up the number of values pushed.
return PushAllReverse(basm, arg, padreg);
}
};
template <typename Arg1, typename Arg2, typename... Args>
struct PushAllHelper<Arg1, Arg2, Args...> {
static void Push(BaselineAssembler* basm, Arg1 arg1, Arg2 arg2,
Args... args) {
{
BaselineAssembler::ScratchRegisterScope scope(basm);
basm->masm()->Push(ToRegister(basm, &scope, arg1),
ToRegister(basm, &scope, arg2));
}
PushAll(basm, args...);
}
static void PushReverse(BaselineAssembler* basm, Arg1 arg1, Arg2 arg2,
Args... args) {
PushAllReverse(basm, args...);
{
BaselineAssembler::ScratchRegisterScope scope(basm);
basm->masm()->Push(ToRegister(basm, &scope, arg2),
ToRegister(basm, &scope, arg1));
}
}
};
// Currently RegisterLists are always the last argument, so we don't
// specialize for the case where they're not. We do still specialize for the
// aligned and unaligned cases.
template <typename Arg>
struct PushAllHelper<Arg, interpreter::RegisterList> {
static void Push(BaselineAssembler* basm, Arg arg,
interpreter::RegisterList list) {
DCHECK_EQ(list.register_count() % 2, 1);
PushAll(basm, arg, list[0], list.PopLeft());
}
static void PushReverse(BaselineAssembler* basm, Arg arg,
interpreter::RegisterList list) {
if (list.register_count() == 0) {
PushAllReverse(basm, arg);
} else {
PushAllReverse(basm, arg, list[0], list.PopLeft());
}
}
};
template <>
struct PushAllHelper<interpreter::RegisterList> {
static void Push(BaselineAssembler* basm, interpreter::RegisterList list) {
DCHECK_EQ(list.register_count() % 2, 0);
for (int reg_index = 0; reg_index < list.register_count(); reg_index += 2) {
PushAll(basm, list[reg_index], list[reg_index + 1]);
}
}
static void PushReverse(BaselineAssembler* basm,
interpreter::RegisterList list) {
int reg_index = list.register_count() - 1;
if (reg_index % 2 == 0) {
// Push the padding register to round up the number of values pushed.
PushAllReverse(basm, list[reg_index], padreg);
reg_index--;
}
for (; reg_index >= 1; reg_index -= 2) {
PushAllReverse(basm, list[reg_index - 1], list[reg_index]);
}
}
};
template <typename... T>
struct PopAllHelper;
template <>
struct PopAllHelper<> {
static void Pop(BaselineAssembler* basm) {}
};
template <>
struct PopAllHelper<Register> {
static void Pop(BaselineAssembler* basm, Register reg) {
basm->masm()->Pop(reg, padreg);
}
};
template <typename... T>
struct PopAllHelper<Register, Register, T...> {
static void Pop(BaselineAssembler* basm, Register reg1, Register reg2,
T... tail) {
basm->masm()->Pop(reg1, reg2);
PopAllHelper<T...>::Pop(basm, tail...);
}
};
} // namespace detail
template <typename... T>
int BaselineAssembler::Push(T... vals) {
// We have to count the pushes first, to decide whether to add padding before
// the first push.
int push_count = detail::CountPushHelper<T...>::Count(vals...);
if (push_count % 2 == 0) {
detail::PushAll(this, vals...);
} else {
detail::PushAll(this, padreg, vals...);
}
return push_count;
}
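// Worked sketch (illustrative registers): arm64 keeps sp 16-byte aligned, so
// an odd count is padded in front before anything is pushed. For example,
// Push(r1, r2, r3) counts three values and expands to:
//
//   PushAll(this, padreg, r1, r2, r3);
//     -> masm()->Push(padreg, r1);  // the padding leads the first pair
//     -> masm()->Push(r2, r3);
//
// A trailing interpreter::RegisterList contributes register_count() values to
// the count before the parity decision is made.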
template <typename... T>
void BaselineAssembler::PushReverse(T... vals) {
detail::PushAllReverse(this, vals...);
}
template <typename... T>
void BaselineAssembler::Pop(T... registers) {
detail::PopAllHelper<T...>::Pop(this, registers...);
}
void BaselineAssembler::LoadTaggedPointerField(Register output, Register source,
int offset) {
__ LoadTaggedPointerField(output, FieldMemOperand(source, offset));
}
void BaselineAssembler::LoadTaggedSignedField(Register output, Register source,
int offset) {
__ LoadTaggedSignedField(output, FieldMemOperand(source, offset));
}
void BaselineAssembler::LoadTaggedAnyField(Register output, Register source,
int offset) {
__ LoadAnyTaggedField(output, FieldMemOperand(source, offset));
}
void BaselineAssembler::LoadByteField(Register output, Register source,
int offset) {
__ Ldrb(output, FieldMemOperand(source, offset));
}
void BaselineAssembler::StoreTaggedSignedField(Register target, int offset,
Smi value) {
ScratchRegisterScope temps(this);
Register tmp = temps.AcquireScratch();
__ Mov(tmp, Operand(value));
__ StoreTaggedField(tmp, FieldMemOperand(target, offset));
}
void BaselineAssembler::StoreTaggedFieldWithWriteBarrier(Register target,
int offset,
Register value) {
__ StoreTaggedField(value, FieldMemOperand(target, offset));
__ RecordWriteField(target, offset, value, kLRHasNotBeenSaved,
kDontSaveFPRegs);
}
void BaselineAssembler::StoreTaggedFieldNoWriteBarrier(Register target,
int offset,
Register value) {
__ StoreTaggedField(value, FieldMemOperand(target, offset));
}
void BaselineAssembler::AddToInterruptBudget(int32_t weight) {
ScratchRegisterScope scratch_scope(this);
Register feedback_cell = scratch_scope.AcquireScratch();
LoadFunction(feedback_cell);
LoadTaggedPointerField(feedback_cell, feedback_cell,
JSFunction::kFeedbackCellOffset);
Register interrupt_budget = scratch_scope.AcquireScratch().W();
__ Ldr(interrupt_budget,
FieldMemOperand(feedback_cell, FeedbackCell::kInterruptBudgetOffset));
// Remember to set flags as part of the add!
__ Adds(interrupt_budget, interrupt_budget, weight);
__ Str(interrupt_budget,
FieldMemOperand(feedback_cell, FeedbackCell::kInterruptBudgetOffset));
}
void BaselineAssembler::AddToInterruptBudget(Register weight) {
ScratchRegisterScope scratch_scope(this);
Register feedback_cell = scratch_scope.AcquireScratch();
LoadFunction(feedback_cell);
LoadTaggedPointerField(feedback_cell, feedback_cell,
JSFunction::kFeedbackCellOffset);
Register interrupt_budget = scratch_scope.AcquireScratch().W();
__ Ldr(interrupt_budget,
FieldMemOperand(feedback_cell, FeedbackCell::kInterruptBudgetOffset));
// Remember to set flags as part of the add!
__ Adds(interrupt_budget, interrupt_budget, weight.W());
__ Str(interrupt_budget,
FieldMemOperand(feedback_cell, FeedbackCell::kInterruptBudgetOffset));
}
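// Both overloads use Adds rather than Add so that a caller can branch on the
// sign of the new budget without a separate compare, as EmitReturn below
// relies on:
//
//   basm.AddToInterruptBudget(weight);
//   basm.JumpIf(Condition::kGreaterThanEqual, &skip_interrupt_label);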
void BaselineAssembler::AddSmi(Register lhs, Smi rhs) {
if (SmiValuesAre31Bits()) {
__ Add(lhs.W(), lhs.W(), Immediate(rhs));
} else {
DCHECK(lhs.IsX());
__ Add(lhs, lhs, Immediate(rhs));
}
}
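// Width sketch (hypothetical encoding walk-through): with 31-bit smis the
// tagged value occupies the low 32 bits, so the W-form add suffices; for
// Smi::FromInt(1) the immediate is the raw tagged value 1 << 1 == 2:
//
//   add w0, w0, #0x2
//
// With 32-bit smis the payload sits in the upper half of the X register,
// which is why the full-width add (and the IsX() check) is needed.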
void BaselineAssembler::Switch(Register reg, int case_value_base,
Label** labels, int num_labels) {
Label fallthrough;
if (case_value_base > 0) {
__ Sub(reg, reg, Immediate(case_value_base));
}
// Mostly copied from code-generator-arm64.cc
ScratchRegisterScope scope(this);
Register temp = scope.AcquireScratch();
Label table;
__ Cmp(reg, num_labels);
JumpIf(Condition::kUnsignedGreaterThanEqual, &fallthrough);
__ Adr(temp, &table);
int entry_size_log2 = 2;
#ifdef V8_ENABLE_CONTROL_FLOW_INTEGRITY
++entry_size_log2; // Account for BTI.
#endif
__ Add(temp, temp, Operand(reg, UXTW, entry_size_log2));
__ Br(temp);
{
TurboAssembler::BlockPoolsScope block_pools(masm_, num_labels * kInstrSize);
__ Bind(&table);
for (int i = 0; i < num_labels; ++i) {
__ JumpTarget();
__ B(labels[i]);
}
__ JumpTarget();
__ Bind(&fallthrough);
}
}
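// Entry-size sketch: each table entry is one B instruction (4 bytes, shift
// 2); with V8_ENABLE_CONTROL_FLOW_INTEGRITY a BTI landing pad precedes each
// branch, doubling the entry to 8 bytes (shift 3). The dispatch above is
// effectively:
//
//   pc = &table + (reg << entry_size_log2)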
#undef __
#define __ basm.
void BaselineAssembler::EmitReturn(MacroAssembler* masm) {
BaselineAssembler basm(masm);
Register weight = BaselineLeaveFrameDescriptor::WeightRegister();
Register params_size = BaselineLeaveFrameDescriptor::ParamsSizeRegister();
__ RecordComment("[ Update Interrupt Budget");
__ AddToInterruptBudget(weight);
// Use compare flags set by add
Label skip_interrupt_label;
__ JumpIf(Condition::kGreaterThanEqual, &skip_interrupt_label);
{
__ masm()->SmiTag(params_size);
__ masm()->Push(params_size, kInterpreterAccumulatorRegister);
__ LoadContext(kContextRegister);
__ LoadFunction(kJSFunctionRegister);
__ masm()->PushArgument(kJSFunctionRegister);
__ CallRuntime(Runtime::kBytecodeBudgetInterruptFromBytecode, 1);
__ masm()->Pop(kInterpreterAccumulatorRegister, params_size);
__ masm()->SmiUntag(params_size);
}
__ RecordComment("]");
__ Bind(&skip_interrupt_label);
BaselineAssembler::ScratchRegisterScope temps(&basm);
Register actual_params_size = temps.AcquireScratch();
// Compute the size of the actual parameters + receiver (in bytes).
__ Move(actual_params_size,
MemOperand(fp, StandardFrameConstants::kArgCOffset));
// If actual is bigger than formal, then we should use it to free up the stack
// arguments.
Label corrected_args_count;
__ masm()->Cmp(params_size, actual_params_size);
__ JumpIf(Condition::kGreaterThanEqual, &corrected_args_count);
__ masm()->Mov(params_size, actual_params_size);
__ Bind(&corrected_args_count);
// Leave the frame (also dropping the register file).
__ masm()->LeaveFrame(StackFrame::BASELINE);
// Drop receiver + arguments.
__ masm()->Add(params_size, params_size, 1); // Include the receiver.
__ masm()->DropArguments(params_size);
__ masm()->Ret();
}
#undef __
} // namespace baseline
} // namespace internal
} // namespace v8
#endif // V8_BASELINE_ARM64_BASELINE_ASSEMBLER_ARM64_INL_H_

src/baseline/arm64/baseline-compiler-arm64-inl.h

@@ -11,470 +11,6 @@ namespace v8 {
namespace internal {
namespace baseline {
class BaselineAssembler::ScratchRegisterScope {
public:
explicit ScratchRegisterScope(BaselineAssembler* assembler)
: assembler_(assembler),
prev_scope_(assembler->scratch_register_scope_),
wrapped_scope_(assembler->masm()) {
if (!assembler_->scratch_register_scope_) {
// If we haven't opened a scratch scope yet, add a couple of extra
// registers to this first one.
wrapped_scope_.Include(x14, x15);
}
assembler_->scratch_register_scope_ = this;
}
~ScratchRegisterScope() { assembler_->scratch_register_scope_ = prev_scope_; }
Register AcquireScratch() { return wrapped_scope_.AcquireX(); }
private:
BaselineAssembler* assembler_;
ScratchRegisterScope* prev_scope_;
UseScratchRegisterScope wrapped_scope_;
};
// TODO(v8:11461): Unify condition names in the MacroAssembler.
enum class Condition : uint8_t {
kEqual = eq,
kNotEqual = ne,
kLessThan = lt,
kGreaterThan = gt,
kLessThanEqual = le,
kGreaterThanEqual = ge,
kUnsignedLessThan = lo,
kUnsignedGreaterThan = hi,
kUnsignedLessThanEqual = ls,
kUnsignedGreaterThanEqual = hs,
kOverflow = vs,
kNoOverflow = vc,
kZero = eq,
kNotZero = ne,
};
internal::Condition AsMasmCondition(Condition cond) {
return static_cast<internal::Condition>(cond);
}
namespace detail {
#ifdef DEBUG
bool Clobbers(Register target, MemOperand op) {
return op.base() == target || op.regoffset() == target;
}
#endif
} // namespace detail
#define __ masm_->
MemOperand BaselineAssembler::RegisterFrameOperand(
interpreter::Register interpreter_register) {
return MemOperand(fp, interpreter_register.ToOperand() * kSystemPointerSize);
}
MemOperand BaselineAssembler::FeedbackVectorOperand() {
return MemOperand(fp, BaselineFrameConstants::kFeedbackVectorFromFp);
}
void BaselineAssembler::Bind(Label* label) {
// All baseline compiler binds on arm64 are assumed to be jump targets.
__ BindJumpTarget(label);
}
void BaselineAssembler::Jump(Label* target, Label::Distance distance) {
__ B(target);
}
void BaselineAssembler::JumpIf(Condition cc, Label* target, Label::Distance) {
__ B(AsMasmCondition(cc), target);
}
void BaselineAssembler::JumpIfRoot(Register value, RootIndex index,
Label* target, Label::Distance) {
__ JumpIfRoot(value, index, target);
}
void BaselineAssembler::JumpIfNotRoot(Register value, RootIndex index,
Label* target, Label::Distance) {
__ JumpIfNotRoot(value, index, target);
}
void BaselineAssembler::JumpIfSmi(Register value, Label* target,
Label::Distance) {
__ JumpIfSmi(value, target);
}
void BaselineAssembler::JumpIfNotSmi(Register value, Label* target,
Label::Distance) {
__ JumpIfNotSmi(value, target);
}
void BaselineAssembler::CallBuiltin(Builtins::Name builtin) {
ScratchRegisterScope temps(this);
Register temp = temps.AcquireScratch();
__ LoadEntryFromBuiltinIndex(builtin, temp);
__ Call(temp);
}
void BaselineAssembler::TailCallBuiltin(Builtins::Name builtin) {
// x17 is used to allow using "Call" (i.e. `bti c`) rather than "Jump" (i.e.
// `bti j`) landing pads for the tail-called code.
Register temp = x17;
// Make sure we don't use this register as a temporary.
UseScratchRegisterScope temps(masm());
temps.Exclude(temp);
__ LoadEntryFromBuiltinIndex(builtin, temp);
__ Jump(temp);
}
void BaselineAssembler::Test(Register value, int mask) {
__ Tst(value, Immediate(mask));
}
void BaselineAssembler::CmpObjectType(Register object,
InstanceType instance_type,
Register map) {
ScratchRegisterScope temps(this);
Register type = temps.AcquireScratch();
__ CompareObjectType(object, map, type, instance_type);
}
void BaselineAssembler::CmpInstanceType(Register value,
InstanceType instance_type) {
ScratchRegisterScope temps(this);
Register type = temps.AcquireScratch();
__ CompareInstanceType(value, type, instance_type);
}
void BaselineAssembler::Cmp(Register value, Smi smi) { __ Cmp(value, smi); }
void BaselineAssembler::ComparePointer(Register value, MemOperand operand) {
ScratchRegisterScope temps(this);
Register tmp = temps.AcquireScratch();
__ Ldr(tmp, operand);
__ Cmp(value, tmp);
}
void BaselineAssembler::SmiCompare(Register lhs, Register rhs) {
__ AssertSmi(lhs);
__ AssertSmi(rhs);
__ CmpTagged(lhs, rhs);
}
void BaselineAssembler::CompareTagged(Register value, MemOperand operand) {
ScratchRegisterScope temps(this);
Register tmp = temps.AcquireScratch();
__ Ldr(tmp, operand);
__ CmpTagged(value, tmp);
}
void BaselineAssembler::CompareTagged(MemOperand operand, Register value) {
ScratchRegisterScope temps(this);
Register tmp = temps.AcquireScratch();
__ Ldr(tmp, operand);
__ CmpTagged(tmp, value);
}
void BaselineAssembler::CompareByte(Register value, int32_t byte) {
__ Cmp(value, Immediate(byte));
}
void BaselineAssembler::Move(interpreter::Register output, Register source) {
Move(RegisterFrameOperand(output), source);
}
void BaselineAssembler::Move(Register output, TaggedIndex value) {
__ Mov(output, Immediate(value.ptr()));
}
void BaselineAssembler::Move(MemOperand output, Register source) {
__ Str(source, output);
}
void BaselineAssembler::Move(Register output, ExternalReference reference) {
__ Mov(output, Operand(reference));
}
void BaselineAssembler::Move(Register output, Handle<HeapObject> value) {
__ Mov(output, Operand(value));
}
void BaselineAssembler::Move(Register output, int32_t value) {
__ Mov(output, Immediate(value));
}
void BaselineAssembler::MoveMaybeSmi(Register output, Register source) {
__ Mov(output, source);
}
void BaselineAssembler::MoveSmi(Register output, Register source) {
__ Mov(output, source);
}
namespace detail {
template <typename Arg>
Register ToRegister(BaselineAssembler* basm,
BaselineAssembler::ScratchRegisterScope* scope, Arg arg) {
Register reg = scope->AcquireScratch();
basm->Move(reg, arg);
return reg;
}
Register ToRegister(BaselineAssembler* basm,
BaselineAssembler::ScratchRegisterScope* scope,
Register reg) {
return reg;
}
template <typename... Args>
struct CountPushHelper;
template <>
struct CountPushHelper<> {
static int Count() { return 0; }
};
template <typename Arg, typename... Args>
struct CountPushHelper<Arg, Args...> {
static int Count(Arg arg, Args... args) {
return 1 + CountPushHelper<Args...>::Count(args...);
}
};
template <typename... Args>
struct CountPushHelper<interpreter::RegisterList, Args...> {
static int Count(interpreter::RegisterList list, Args... args) {
return list.register_count() + CountPushHelper<Args...>::Count(args...);
}
};
template <typename... Args>
struct PushAllHelper;
template <typename... Args>
void PushAll(BaselineAssembler* basm, Args... args) {
PushAllHelper<Args...>::Push(basm, args...);
}
template <typename... Args>
void PushAllReverse(BaselineAssembler* basm, Args... args) {
PushAllHelper<Args...>::PushReverse(basm, args...);
}
template <>
struct PushAllHelper<> {
static void Push(BaselineAssembler* basm) {}
static void PushReverse(BaselineAssembler* basm) {}
};
template <typename Arg>
struct PushAllHelper<Arg> {
static void Push(BaselineAssembler* basm, Arg) { FATAL("Unaligned push"); }
static void PushReverse(BaselineAssembler* basm, Arg arg) {
// Push the padding register to round up the number of values pushed.
return PushAllReverse(basm, arg, padreg);
}
};
template <typename Arg1, typename Arg2, typename... Args>
struct PushAllHelper<Arg1, Arg2, Args...> {
static void Push(BaselineAssembler* basm, Arg1 arg1, Arg2 arg2,
Args... args) {
{
BaselineAssembler::ScratchRegisterScope scope(basm);
basm->masm()->Push(ToRegister(basm, &scope, arg1),
ToRegister(basm, &scope, arg2));
}
PushAll(basm, args...);
}
static void PushReverse(BaselineAssembler* basm, Arg1 arg1, Arg2 arg2,
Args... args) {
PushAllReverse(basm, args...);
{
BaselineAssembler::ScratchRegisterScope scope(basm);
basm->masm()->Push(ToRegister(basm, &scope, arg2),
ToRegister(basm, &scope, arg1));
}
}
};
// Currently RegisterLists are always the last argument, so we don't
// specialize for the case where they're not. We do still specialize for the
// aligned and unaligned cases.
template <typename Arg>
struct PushAllHelper<Arg, interpreter::RegisterList> {
static void Push(BaselineAssembler* basm, Arg arg,
interpreter::RegisterList list) {
DCHECK_EQ(list.register_count() % 2, 1);
PushAll(basm, arg, list[0], list.PopLeft());
}
static void PushReverse(BaselineAssembler* basm, Arg arg,
interpreter::RegisterList list) {
if (list.register_count() == 0) {
PushAllReverse(basm, arg);
} else {
PushAllReverse(basm, arg, list[0], list.PopLeft());
}
}
};
template <>
struct PushAllHelper<interpreter::RegisterList> {
static void Push(BaselineAssembler* basm, interpreter::RegisterList list) {
DCHECK_EQ(list.register_count() % 2, 0);
for (int reg_index = 0; reg_index < list.register_count(); reg_index += 2) {
PushAll(basm, list[reg_index], list[reg_index + 1]);
}
}
static void PushReverse(BaselineAssembler* basm,
interpreter::RegisterList list) {
int reg_index = list.register_count() - 1;
if (reg_index % 2 == 0) {
// Push the padding register to round up the number of values pushed.
PushAllReverse(basm, list[reg_index], padreg);
reg_index--;
}
for (; reg_index >= 1; reg_index -= 2) {
PushAllReverse(basm, list[reg_index - 1], list[reg_index]);
}
}
};
template <typename... T>
struct PopAllHelper;
template <>
struct PopAllHelper<> {
static void Pop(BaselineAssembler* basm) {}
};
template <>
struct PopAllHelper<Register> {
static void Pop(BaselineAssembler* basm, Register reg) {
basm->masm()->Pop(reg, padreg);
}
};
template <typename... T>
struct PopAllHelper<Register, Register, T...> {
static void Pop(BaselineAssembler* basm, Register reg1, Register reg2,
T... tail) {
basm->masm()->Pop(reg1, reg2);
PopAllHelper<T...>::Pop(basm, tail...);
}
};
} // namespace detail
template <typename... T>
int BaselineAssembler::Push(T... vals) {
// We have to count the pushes first, to decide whether to add padding before
// the first push.
int push_count = detail::CountPushHelper<T...>::Count(vals...);
if (push_count % 2 == 0) {
detail::PushAll(this, vals...);
} else {
detail::PushAll(this, padreg, vals...);
}
return push_count;
}
template <typename... T>
void BaselineAssembler::PushReverse(T... vals) {
detail::PushAllReverse(this, vals...);
}
template <typename... T>
void BaselineAssembler::Pop(T... registers) {
detail::PopAllHelper<T...>::Pop(this, registers...);
}
void BaselineAssembler::LoadTaggedPointerField(Register output, Register source,
int offset) {
__ LoadTaggedPointerField(output, FieldMemOperand(source, offset));
}
void BaselineAssembler::LoadTaggedSignedField(Register output, Register source,
int offset) {
__ LoadTaggedSignedField(output, FieldMemOperand(source, offset));
}
void BaselineAssembler::LoadTaggedAnyField(Register output, Register source,
int offset) {
__ LoadAnyTaggedField(output, FieldMemOperand(source, offset));
}
void BaselineAssembler::LoadByteField(Register output, Register source,
int offset) {
__ Ldrb(output, FieldMemOperand(source, offset));
}
void BaselineAssembler::StoreTaggedSignedField(Register target, int offset,
Smi value) {
ScratchRegisterScope temps(this);
Register tmp = temps.AcquireScratch();
__ Mov(tmp, Operand(value));
__ StoreTaggedField(tmp, FieldMemOperand(target, offset));
}
void BaselineAssembler::StoreTaggedFieldWithWriteBarrier(Register target,
int offset,
Register value) {
__ StoreTaggedField(value, FieldMemOperand(target, offset));
__ RecordWriteField(target, offset, value, kLRHasNotBeenSaved,
kDontSaveFPRegs);
}
void BaselineAssembler::StoreTaggedFieldNoWriteBarrier(Register target,
int offset,
Register value) {
__ StoreTaggedField(value, FieldMemOperand(target, offset));
}
void BaselineAssembler::AddToInterruptBudget(int32_t weight) {
ScratchRegisterScope scratch_scope(this);
Register feedback_cell = scratch_scope.AcquireScratch();
LoadFunction(feedback_cell);
LoadTaggedPointerField(feedback_cell, feedback_cell,
JSFunction::kFeedbackCellOffset);
Register interrupt_budget = scratch_scope.AcquireScratch().W();
__ Ldr(interrupt_budget,
FieldMemOperand(feedback_cell, FeedbackCell::kInterruptBudgetOffset));
// Remember to set flags as part of the add!
__ Adds(interrupt_budget, interrupt_budget, weight);
__ Str(interrupt_budget,
FieldMemOperand(feedback_cell, FeedbackCell::kInterruptBudgetOffset));
}
void BaselineAssembler::AddToInterruptBudget(Register weight) {
ScratchRegisterScope scratch_scope(this);
Register feedback_cell = scratch_scope.AcquireScratch();
LoadFunction(feedback_cell);
LoadTaggedPointerField(feedback_cell, feedback_cell,
JSFunction::kFeedbackCellOffset);
Register interrupt_budget = scratch_scope.AcquireScratch().W();
__ Ldr(interrupt_budget,
FieldMemOperand(feedback_cell, FeedbackCell::kInterruptBudgetOffset));
// Remember to set flags as part of the add!
__ Adds(interrupt_budget, interrupt_budget, weight.W());
__ Str(interrupt_budget,
FieldMemOperand(feedback_cell, FeedbackCell::kInterruptBudgetOffset));
}
void BaselineAssembler::AddSmi(Register lhs, Smi rhs) {
if (SmiValuesAre31Bits()) {
__ Add(lhs.W(), lhs.W(), Immediate(rhs));
} else {
DCHECK(lhs.IsX());
__ Add(lhs, lhs, Immediate(rhs));
}
}
void BaselineAssembler::Switch(Register reg, int case_value_base,
Label** labels, int num_labels) {
Label fallthrough;
if (case_value_base > 0) {
__ Sub(reg, reg, Immediate(case_value_base));
}
// Mostly copied from code-generator-arm64.cc
ScratchRegisterScope scope(this);
Register temp = scope.AcquireScratch();
Label table;
__ Cmp(reg, num_labels);
JumpIf(Condition::kUnsignedGreaterThanEqual, &fallthrough);
__ Adr(temp, &table);
int entry_size_log2 = 2;
#ifdef V8_ENABLE_CONTROL_FLOW_INTEGRITY
++entry_size_log2; // Account for BTI.
#endif
__ Add(temp, temp, Operand(reg, UXTW, entry_size_log2));
__ Br(temp);
{
TurboAssembler::BlockPoolsScope block_pools(masm_, num_labels * kInstrSize);
__ Bind(&table);
for (int i = 0; i < num_labels; ++i) {
__ JumpTarget();
__ B(labels[i]);
}
__ JumpTarget();
__ Bind(&fallthrough);
}
}
#undef __
#define __ basm_.
void BaselineCompiler::Prologue() {
@@ -572,61 +108,6 @@ void BaselineCompiler::VerifyFrameSize() {
#undef __
#define __ basm.
void BaselineAssembler::EmitReturn(MacroAssembler* masm) {
BaselineAssembler basm(masm);
Register weight = BaselineLeaveFrameDescriptor::WeightRegister();
Register params_size = BaselineLeaveFrameDescriptor::ParamsSizeRegister();
__ RecordComment("[ Update Interrupt Budget");
__ AddToInterruptBudget(weight);
// Use compare flags set by add
Label skip_interrupt_label;
__ JumpIf(Condition::kGreaterThanEqual, &skip_interrupt_label);
{
__ masm()->SmiTag(params_size);
__ masm()->Push(params_size, kInterpreterAccumulatorRegister);
__ LoadContext(kContextRegister);
__ LoadFunction(kJSFunctionRegister);
__ masm()->PushArgument(kJSFunctionRegister);
__ CallRuntime(Runtime::kBytecodeBudgetInterruptFromBytecode, 1);
__ masm()->Pop(kInterpreterAccumulatorRegister, params_size);
__ masm()->SmiUntag(params_size);
}
__ RecordComment("]");
__ Bind(&skip_interrupt_label);
BaselineAssembler::ScratchRegisterScope temps(&basm);
Register actual_params_size = temps.AcquireScratch();
// Compute the size of the actual parameters + receiver (in bytes).
__ Move(actual_params_size,
MemOperand(fp, StandardFrameConstants::kArgCOffset));
// If actual is bigger than formal, then we should use it to free up the stack
// arguments.
Label corrected_args_count;
__ masm()->Cmp(params_size, actual_params_size);
__ JumpIf(Condition::kGreaterThanEqual, &corrected_args_count);
__ masm()->Mov(params_size, actual_params_size);
__ Bind(&corrected_args_count);
// Leave the frame (also dropping the register file).
__ masm()->LeaveFrame(StackFrame::BASELINE);
// Drop receiver + arguments.
__ masm()->Add(params_size, params_size, 1); // Include the receiver.
__ masm()->DropArguments(params_size);
__ masm()->Ret();
}
#undef __
} // namespace baseline
} // namespace internal
} // namespace v8

src/baseline/baseline-assembler-inl.h

@@ -0,0 +1,134 @@
// Copyright 2021 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifndef V8_BASELINE_BASELINE_ASSEMBLER_INL_H_
#define V8_BASELINE_BASELINE_ASSEMBLER_INL_H_
// TODO(v8:11421): Remove #if once baseline compiler is ported to other
// architectures.
#if V8_TARGET_ARCH_X64 || V8_TARGET_ARCH_ARM64
#include <type_traits>
#include <unordered_map>
#include "src/baseline/baseline-assembler.h"
#include "src/interpreter/bytecode-register.h"
#include "src/objects/feedback-cell.h"
#include "src/objects/js-function.h"
#include "src/objects/map.h"
#if V8_TARGET_ARCH_X64
#include "src/baseline/x64/baseline-assembler-x64-inl.h"
#elif V8_TARGET_ARCH_ARM64
#include "src/baseline/arm64/baseline-assembler-arm64-inl.h"
#else
#error Unsupported target architecture.
#endif
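// Porting sketch: the BUILD.gn change above already lists per-architecture
// headers for arm, mips, ppc and s390, so a hypothetical arm port would add
//
//   #elif V8_TARGET_ARCH_ARM
//   #include "src/baseline/arm/baseline-assembler-arm-inl.h"
//
// to this chain and extend the V8_TARGET_ARCH_X64 || V8_TARGET_ARCH_ARM64
// guard above.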
namespace v8 {
namespace internal {
namespace baseline {
#define __ masm_->
void BaselineAssembler::GetCode(Isolate* isolate, CodeDesc* desc) {
__ GetCode(isolate, desc);
}
int BaselineAssembler::pc_offset() const { return __ pc_offset(); }
bool BaselineAssembler::emit_debug_code() const { return __ emit_debug_code(); }
void BaselineAssembler::CodeEntry() const { __ CodeEntry(); }
void BaselineAssembler::ExceptionHandler() const { __ ExceptionHandler(); }
void BaselineAssembler::RecordComment(const char* string) {
__ RecordComment(string);
}
void BaselineAssembler::Trap() { __ Trap(); }
void BaselineAssembler::DebugBreak() { __ DebugBreak(); }
void BaselineAssembler::CallRuntime(Runtime::FunctionId function, int nargs) {
__ CallRuntime(function, nargs);
}
MemOperand BaselineAssembler::ContextOperand() {
return RegisterFrameOperand(interpreter::Register::current_context());
}
MemOperand BaselineAssembler::FunctionOperand() {
return RegisterFrameOperand(interpreter::Register::function_closure());
}
void BaselineAssembler::LoadMap(Register output, Register value) {
__ LoadMap(output, value);
}
void BaselineAssembler::LoadRoot(Register output, RootIndex index) {
__ LoadRoot(output, index);
}
void BaselineAssembler::LoadNativeContextSlot(Register output, uint32_t index) {
__ LoadNativeContextSlot(index, output);
}
void BaselineAssembler::Move(Register output, interpreter::Register source) {
return __ Move(output, RegisterFrameOperand(source));
}
void BaselineAssembler::Move(Register output, RootIndex source) {
return __ LoadRoot(output, source);
}
void BaselineAssembler::Move(Register output, Register source) {
__ Move(output, source);
}
void BaselineAssembler::Move(Register output, MemOperand operand) {
__ Move(output, operand);
}
void BaselineAssembler::Move(Register output, Smi value) {
__ Move(output, value);
}
void BaselineAssembler::SmiUntag(Register reg) { __ SmiUntag(reg); }
void BaselineAssembler::SmiUntag(Register output, Register value) {
__ SmiUntag(output, value);
}
void BaselineAssembler::LoadFixedArrayElement(Register output, Register array,
int32_t index) {
LoadTaggedAnyField(output, array,
FixedArray::kHeaderSize + index * kTaggedSize);
}
void BaselineAssembler::LoadPrototype(Register prototype, Register object) {
__ LoadMap(prototype, object);
LoadTaggedPointerField(prototype, prototype, Map::kPrototypeOffset);
}
void BaselineAssembler::LoadContext(Register output) {
LoadRegister(output, interpreter::Register::current_context());
}
void BaselineAssembler::LoadFunction(Register output) {
LoadRegister(output, interpreter::Register::function_closure());
}
void BaselineAssembler::StoreContext(Register context) {
StoreRegister(interpreter::Register::current_context(), context);
}
void BaselineAssembler::LoadRegister(Register output,
interpreter::Register source) {
Move(output, source);
}
void BaselineAssembler::StoreRegister(interpreter::Register output,
Register value) {
Move(output, value);
}
SaveAccumulatorScope::SaveAccumulatorScope(BaselineAssembler* assembler)
: assembler_(assembler) {
assembler_->Push(kInterpreterAccumulatorRegister);
}
SaveAccumulatorScope::~SaveAccumulatorScope() {
assembler_->Pop(kInterpreterAccumulatorRegister);
}
#undef __
} // namespace baseline
} // namespace internal
} // namespace v8
#endif // V8_TARGET_ARCH_X64 || V8_TARGET_ARCH_ARM64
#endif // V8_BASELINE_BASELINE_ASSEMBLER_INL_H_
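
SaveAccumulatorScope, defined near the end of this header, is a small RAII helper; a minimal sketch of the intended pattern, assuming the V8-internal context above (the visitor and runtime-function names are purely illustrative):

void VisitSomethingClobbering(BaselineAssembler* basm) {
  SaveAccumulatorScope save(basm);  // pushes kInterpreterAccumulatorRegister
  basm->CallRuntime(Runtime::kSomeFunction, 0);  // may clobber the accumulator
}  // the destructor pops the accumulator back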

src/baseline/baseline-assembler.h

@@ -0,0 +1,187 @@
// Copyright 2021 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifndef V8_BASELINE_BASELINE_ASSEMBLER_H_
#define V8_BASELINE_BASELINE_ASSEMBLER_H_
// TODO(v8:11421): Remove #if once baseline compiler is ported to other
// architectures.
#if V8_TARGET_ARCH_X64 || V8_TARGET_ARCH_ARM64
#include "src/codegen/macro-assembler.h"
namespace v8 {
namespace internal {
namespace baseline {
enum class Condition : uint8_t;
class BaselineAssembler {
public:
class ScratchRegisterScope;
explicit BaselineAssembler(MacroAssembler* masm) : masm_(masm) {}
inline static MemOperand RegisterFrameOperand(
interpreter::Register interpreter_register);
inline MemOperand ContextOperand();
inline MemOperand FunctionOperand();
inline MemOperand FeedbackVectorOperand();
inline void GetCode(Isolate* isolate, CodeDesc* desc);
inline int pc_offset() const;
inline bool emit_debug_code() const;
inline void CodeEntry() const;
inline void ExceptionHandler() const;
inline void RecordComment(const char* string);
inline void Trap();
inline void DebugBreak();
inline void Bind(Label* label);
inline void JumpIf(Condition cc, Label* target,
Label::Distance distance = Label::kFar);
inline void Jump(Label* target, Label::Distance distance = Label::kFar);
inline void JumpIfRoot(Register value, RootIndex index, Label* target,
Label::Distance distance = Label::kFar);
inline void JumpIfNotRoot(Register value, RootIndex index, Label* target,
Label::Distance distance = Label::kFar);
inline void JumpIfSmi(Register value, Label* target,
Label::Distance distance = Label::kFar);
inline void JumpIfNotSmi(Register value, Label* target,
Label::Distance distance = Label::kFar);
inline void Test(Register value, int mask);
inline void CmpObjectType(Register object, InstanceType instance_type,
Register map);
inline void CmpInstanceType(Register value, InstanceType instance_type);
inline void Cmp(Register value, Smi smi);
inline void ComparePointer(Register value, MemOperand operand);
inline Condition CheckSmi(Register value);
inline void SmiCompare(Register lhs, Register rhs);
inline void CompareTagged(Register value, MemOperand operand);
inline void CompareTagged(MemOperand operand, Register value);
inline void CompareByte(Register value, int32_t byte);
inline void LoadMap(Register output, Register value);
inline void LoadRoot(Register output, RootIndex index);
inline void LoadNativeContextSlot(Register output, uint32_t index);
inline void Move(Register output, Register source);
inline void Move(Register output, MemOperand operand);
inline void Move(Register output, Smi value);
inline void Move(Register output, TaggedIndex value);
inline void Move(Register output, interpreter::Register source);
inline void Move(interpreter::Register output, Register source);
inline void Move(Register output, RootIndex source);
inline void Move(MemOperand output, Register source);
inline void Move(Register output, ExternalReference reference);
inline void Move(Register output, Handle<HeapObject> value);
inline void Move(Register output, int32_t immediate);
inline void MoveMaybeSmi(Register output, Register source);
inline void MoveSmi(Register output, Register source);
// Push the given values, in the given order. If the stack needs alignment
// (looking at you Arm64), the stack is padded from the front (i.e. before the
// first value is pushed).
//
// This supports pushing a RegisterList as the last value -- the list is
// iterated and each interpreter Register is pushed.
//
// The total number of values pushed is returned. Note that this might be
// different from sizeof(T...), specifically if there was a RegisterList.
template <typename... T>
inline int Push(T... vals);
// Like Push(vals...), but pushes in reverse order, to support our reversed
// order argument JS calling convention. Doesn't return the number of
// arguments pushed though.
//
// Note that padding is still inserted before the first pushed value (i.e. the
// last value).
template <typename... T>
inline void PushReverse(T... vals);
// Pop values off the stack into the given registers.
//
// Note that this inserts into registers in the given order, i.e. in reverse
// order if the registers were pushed. This means that to spill registers,
// push and pop have to be in reverse order, e.g.
//
// Push(r1, r2, ..., rN);
// ClobberRegisters();
// Pop(rN, ..., r2, r1);
//
// On stack-alignment architectures, any padding is popped off after the last
// register. This matches the behaviour of Push, which means that the above
// code still works even if the number of registers doesn't match stack
// alignment.
template <typename... T>
inline void Pop(T... registers);
inline void CallBuiltin(Builtins::Name builtin);
inline void TailCallBuiltin(Builtins::Name builtin);
inline void CallRuntime(Runtime::FunctionId function, int nargs);
inline void LoadTaggedPointerField(Register output, Register source,
int offset);
inline void LoadTaggedSignedField(Register output, Register source,
int offset);
inline void LoadTaggedAnyField(Register output, Register source, int offset);
inline void LoadByteField(Register output, Register source, int offset);
inline void StoreTaggedSignedField(Register target, int offset, Smi value);
inline void StoreTaggedFieldWithWriteBarrier(Register target, int offset,
Register value);
inline void StoreTaggedFieldNoWriteBarrier(Register target, int offset,
Register value);
inline void LoadFixedArrayElement(Register output, Register array,
int32_t index);
inline void LoadPrototype(Register prototype, Register object);
// Loads the feedback cell from the function, and sets flags on add so that
// we can compare afterward.
inline void AddToInterruptBudget(int32_t weight);
inline void AddToInterruptBudget(Register weight);
inline void AddSmi(Register lhs, Smi rhs);
inline void SmiUntag(Register value);
inline void SmiUntag(Register output, Register value);
inline void Switch(Register reg, int case_value_base, Label** labels,
int num_labels);
// Register operands.
inline void LoadRegister(Register output, interpreter::Register source);
inline void StoreRegister(interpreter::Register output, Register value);
// Frame values
inline void LoadFunction(Register output);
inline void LoadContext(Register output);
inline void StoreContext(Register context);
inline static void EmitReturn(MacroAssembler* masm);
MacroAssembler* masm() { return masm_; }
private:
MacroAssembler* masm_;
ScratchRegisterScope* scratch_register_scope_ = nullptr;
};
class SaveAccumulatorScope final {
public:
inline explicit SaveAccumulatorScope(BaselineAssembler* assembler);
inline ~SaveAccumulatorScope();
private:
BaselineAssembler* assembler_;
};
} // namespace baseline
} // namespace internal
} // namespace v8
#endif // V8_TARGET_ARCH_X64 || V8_TARGET_ARCH_ARM64
#endif // V8_BASELINE_BASELINE_ASSEMBLER_H_
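
As the comments on Push and Pop in the class above spell out, spilled registers must be popped in the reverse of the order they were pushed so that any alignment padding comes off last; a minimal sketch with illustrative register names:

basm.Push(r1, r2, r3);  // odd count: on arm64 padreg is pushed in front
// ... code that clobbers r1..r3 ...
basm.Pop(r3, r2, r1);   // pops (r3, r2), then (r1, padreg)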

src/baseline/baseline-compiler.cc

@@ -11,6 +11,7 @@
#include <type_traits>
#include <unordered_map>
#include "src/baseline/baseline-assembler-inl.h"
#include "src/builtins/builtins-constructor.h"
#include "src/builtins/builtins-descriptors.h"
#include "src/builtins/builtins.h"
@@ -24,7 +25,6 @@
#include "src/interpreter/bytecode-array-accessor.h"
#include "src/interpreter/bytecode-array-iterator.h"
#include "src/interpreter/bytecode-flags.h"
#include "src/interpreter/bytecode-register.h"
#include "src/objects/code.h"
#include "src/objects/heap-object.h"
#include "src/objects/instance-type.h"
@@ -220,91 +220,6 @@ void MoveArgumentsForDescriptor(BaselineAssembler* masm,
} // namespace detail
#define __ masm_->
void BaselineAssembler::GetCode(Isolate* isolate, CodeDesc* desc) {
__ GetCode(isolate, desc);
}
int BaselineAssembler::pc_offset() const { return __ pc_offset(); }
bool BaselineAssembler::emit_debug_code() const { return __ emit_debug_code(); }
void BaselineAssembler::CodeEntry() const { __ CodeEntry(); }
void BaselineAssembler::ExceptionHandler() const { __ ExceptionHandler(); }
void BaselineAssembler::RecordComment(const char* string) {
__ RecordComment(string);
}
void BaselineAssembler::Trap() { __ Trap(); }
void BaselineAssembler::DebugBreak() { __ DebugBreak(); }
void BaselineAssembler::CallRuntime(Runtime::FunctionId function, int nargs) {
__ CallRuntime(function, nargs);
}
MemOperand BaselineAssembler::ContextOperand() {
return RegisterFrameOperand(interpreter::Register::current_context());
}
MemOperand BaselineAssembler::FunctionOperand() {
return RegisterFrameOperand(interpreter::Register::function_closure());
}
void BaselineAssembler::LoadMap(Register output, Register value) {
__ LoadMap(output, value);
}
void BaselineAssembler::LoadRoot(Register output, RootIndex index) {
__ LoadRoot(output, index);
}
void BaselineAssembler::LoadNativeContextSlot(Register output, uint32_t index) {
__ LoadNativeContextSlot(index, output);
}
void BaselineAssembler::Move(Register output, interpreter::Register source) {
return __ Move(output, RegisterFrameOperand(source));
}
void BaselineAssembler::Move(Register output, RootIndex source) {
return __ LoadRoot(output, source);
}
void BaselineAssembler::Move(Register output, Register source) {
__ Move(output, source);
}
void BaselineAssembler::Move(Register output, MemOperand operand) {
__ Move(output, operand);
}
void BaselineAssembler::Move(Register output, Smi value) {
__ Move(output, value);
}
void BaselineAssembler::SmiUntag(Register reg) { __ SmiUntag(reg); }
void BaselineAssembler::SmiUntag(Register output, Register value) {
__ SmiUntag(output, value);
}
void BaselineAssembler::LoadFixedArrayElement(Register output, Register array,
int32_t index) {
LoadTaggedAnyField(output, array,
FixedArray::kHeaderSize + index * kTaggedSize);
}
void BaselineAssembler::LoadPrototype(Register prototype, Register object) {
__ LoadMap(prototype, object);
LoadTaggedPointerField(prototype, prototype, Map::kPrototypeOffset);
}
void BaselineAssembler::LoadContext(Register output) {
LoadRegister(output, interpreter::Register::current_context());
}
void BaselineAssembler::LoadFunction(Register output) {
LoadRegister(output, interpreter::Register::function_closure());
}
void BaselineAssembler::StoreContext(Register context) {
StoreRegister(interpreter::Register::current_context(), context);
}
void BaselineAssembler::LoadRegister(Register output,
interpreter::Register source) {
Move(output, source);
}
void BaselineAssembler::StoreRegister(interpreter::Register output,
Register value) {
Move(output, value);
}
#undef __
BaselineCompiler::BaselineCompiler(
Isolate* isolate, Handle<SharedFunctionInfo> shared_function_info,

src/baseline/baseline-compiler.h

@@ -13,7 +13,7 @@
#include "src/base/logging.h"
#include "src/base/threaded-list.h"
#include "src/codegen/macro-assembler.h"
#include "src/baseline/baseline-assembler.h"
#include "src/handles/handles.h"
#include "src/interpreter/bytecode-array-iterator.h"
#include "src/interpreter/bytecode-register.h"
@@ -30,8 +30,6 @@ class BytecodeArray;
namespace baseline {
enum class Condition : uint8_t;
class BytecodeOffsetTableBuilder {
public:
void AddPosition(size_t pc_offset, size_t bytecode_offset) {
@@ -61,165 +59,6 @@ class BytecodeOffsetTableBuilder {
std::vector<byte> bytes_;
};
class BaselineAssembler {
public:
class ScratchRegisterScope;
explicit BaselineAssembler(MacroAssembler* masm) : masm_(masm) {}
static MemOperand RegisterFrameOperand(
interpreter::Register interpreter_register);
MemOperand ContextOperand();
MemOperand FunctionOperand();
MemOperand FeedbackVectorOperand();
void GetCode(Isolate* isolate, CodeDesc* desc);
int pc_offset() const;
bool emit_debug_code() const;
void CodeEntry() const;
void ExceptionHandler() const;
void RecordComment(const char* string);
void Trap();
void DebugBreak();
void Bind(Label* label);
void JumpIf(Condition cc, Label* target,
Label::Distance distance = Label::kFar);
void Jump(Label* target, Label::Distance distance = Label::kFar);
void JumpIfRoot(Register value, RootIndex index, Label* target,
Label::Distance distance = Label::kFar);
void JumpIfNotRoot(Register value, RootIndex index, Label* target,
Label::Distance distance = Label::kFar);
void JumpIfSmi(Register value, Label* target,
Label::Distance distance = Label::kFar);
void JumpIfNotSmi(Register value, Label* target,
Label::Distance distance = Label::kFar);
void Test(Register value, int mask);
void CmpObjectType(Register object, InstanceType instance_type, Register map);
void CmpInstanceType(Register value, InstanceType instance_type);
void Cmp(Register value, Smi smi);
void ComparePointer(Register value, MemOperand operand);
Condition CheckSmi(Register value);
void SmiCompare(Register lhs, Register rhs);
void CompareTagged(Register value, MemOperand operand);
void CompareTagged(MemOperand operand, Register value);
void CompareByte(Register value, int32_t byte);
void LoadMap(Register output, Register value);
void LoadRoot(Register output, RootIndex index);
void LoadNativeContextSlot(Register output, uint32_t index);
void Move(Register output, Register source);
void Move(Register output, MemOperand operand);
void Move(Register output, Smi value);
void Move(Register output, TaggedIndex value);
void Move(Register output, interpreter::Register source);
void Move(interpreter::Register output, Register source);
void Move(Register output, RootIndex source);
void Move(MemOperand output, Register source);
void Move(Register output, ExternalReference reference);
void Move(Register output, Handle<HeapObject> value);
void Move(Register output, int32_t immediate);
void MoveMaybeSmi(Register output, Register source);
void MoveSmi(Register output, Register source);
// Push the given values, in the given order. If the stack needs alignment
// (looking at you Arm64), the stack is padded from the front (i.e. before the
// first value is pushed).
//
// This supports pushing a RegisterList as the last value -- the list is
// iterated and each interpreter Register is pushed.
//
// The total number of values pushed is returned. Note that this might be
// different from sizeof(T...), specifically if there was a RegisterList.
template <typename... T>
int Push(T... vals);
// Like Push(vals...), but pushes in reverse order, to support our reversed
// order argument JS calling convention. Doesn't return the number of
// arguments pushed though.
//
// Note that padding is still inserted before the first pushed value (i.e. the
// last value).
template <typename... T>
void PushReverse(T... vals);
// Pop values off the stack into the given registers.
//
// Note that this inserts into registers in the given order, i.e. in reverse
// order if the registers were pushed. This means that to spill registers,
// push and pop have to be in reverse order, e.g.
//
// Push(r1, r2, ..., rN);
// ClobberRegisters();
// Pop(rN, ..., r2, r1);
//
// On stack-alignment architectures, any padding is popped off after the last
// register. This matches the behaviour of Push, which means that the above
// code still works even if the number of registers doesn't match stack
// alignment.
template <typename... T>
void Pop(T... registers);
void CallBuiltin(Builtins::Name builtin);
void TailCallBuiltin(Builtins::Name builtin);
void CallRuntime(Runtime::FunctionId function, int nargs);
void LoadTaggedPointerField(Register output, Register source, int offset);
void LoadTaggedSignedField(Register output, Register source, int offset);
void LoadTaggedAnyField(Register output, Register source, int offset);
void LoadByteField(Register output, Register source, int offset);
void StoreTaggedSignedField(Register target, int offset, Smi value);
void StoreTaggedFieldWithWriteBarrier(Register target, int offset,
Register value);
void StoreTaggedFieldNoWriteBarrier(Register target, int offset,
Register value);
void LoadFixedArrayElement(Register output, Register array, int32_t index);
void LoadPrototype(Register prototype, Register object);
// Loads the feedback cell from the function, and sets flags on add so that
// we can compare afterward.
void AddToInterruptBudget(int32_t weight);
void AddToInterruptBudget(Register weight);
void AddSmi(Register lhs, Smi rhs);
void SmiUntag(Register value);
void SmiUntag(Register output, Register value);
void Switch(Register reg, int case_value_base, Label** labels,
int num_labels);
// Register operands.
void LoadRegister(Register output, interpreter::Register source);
void StoreRegister(interpreter::Register output, Register value);
// Frame values
void LoadFunction(Register output);
void LoadContext(Register output);
void StoreContext(Register context);
static void EmitReturn(MacroAssembler* masm);
MacroAssembler* masm() { return masm_; }
private:
MacroAssembler* masm_;
ScratchRegisterScope* scratch_register_scope_ = nullptr;
};
class SaveAccumulatorScope final {
public:
explicit SaveAccumulatorScope(BaselineAssembler* assembler)
: assembler_(assembler) {
assembler_->Push(kInterpreterAccumulatorRegister);
}
~SaveAccumulatorScope() { assembler_->Pop(kInterpreterAccumulatorRegister); }
private:
BaselineAssembler* assembler_;
};
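// Usage sketch: RAII save/restore of the accumulator around clobbering code.
//
//   {
//     SaveAccumulatorScope save_accumulator(&basm);
//     // ... may clobber kInterpreterAccumulatorRegister ...
//   }  // accumulator popped back here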
class BaselineCompiler {
public:
explicit BaselineCompiler(Isolate* isolate,


@@ -8,6 +8,7 @@
// architectures.
#if V8_TARGET_ARCH_X64 || V8_TARGET_ARCH_ARM64
#include "src/baseline/baseline-assembler-inl.h"
#include "src/baseline/baseline-compiler.h"
#include "src/heap/factory-inl.h"
#include "src/logging/counters.h"


@@ -0,0 +1,433 @@
// Copyright 2021 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifndef V8_BASELINE_X64_BASELINE_ASSEMBLER_X64_INL_H_
#define V8_BASELINE_X64_BASELINE_ASSEMBLER_X64_INL_H_
#include "src/baseline/baseline-assembler.h"
#include "src/codegen/interface-descriptors.h"
#include "src/codegen/x64/register-x64.h"
namespace v8 {
namespace internal {
namespace baseline {
namespace detail {
// Avoid using kScratchRegister (== r10), since the macro-assembler doesn't
// allocate through this scope and its own uses of r10 would conflict.
static constexpr Register kScratchRegisters[] = {r8, r9, r11, r12, r14, r15};
static constexpr int kNumScratchRegisters = arraysize(kScratchRegisters);
} // namespace detail
// TODO(v8:11429): Move BaselineAssembler to baseline-assembler-<arch>-inl.h
class BaselineAssembler::ScratchRegisterScope {
public:
explicit ScratchRegisterScope(BaselineAssembler* assembler)
: assembler_(assembler),
prev_scope_(assembler->scratch_register_scope_),
registers_used_(prev_scope_ == nullptr ? 0
: prev_scope_->registers_used_) {
assembler_->scratch_register_scope_ = this;
}
~ScratchRegisterScope() { assembler_->scratch_register_scope_ = prev_scope_; }
Register AcquireScratch() {
DCHECK_LT(registers_used_, detail::kNumScratchRegisters);
return detail::kScratchRegisters[registers_used_++];
}
private:
BaselineAssembler* assembler_;
ScratchRegisterScope* prev_scope_;
int registers_used_;
};
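// Usage sketch: scopes chain through scratch_register_scope_, so a nested
// scope keeps allocating where its parent stopped (illustrative only):
//
//   BaselineAssembler::ScratchRegisterScope outer(&basm);
//   Register a = outer.AcquireScratch();  // r8
//   {
//     BaselineAssembler::ScratchRegisterScope inner(&basm);
//     Register b = inner.AcquireScratch();  // r9, not r8
//   }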
// TODO(v8:11461): Unify condition names in the MacroAssembler.
enum class Condition : uint8_t {
kEqual = equal,
kNotEqual = not_equal,
kLessThan = less,
kGreaterThan = greater,
kLessThanEqual = less_equal,
kGreaterThanEqual = greater_equal,
kUnsignedLessThan = below,
kUnsignedGreaterThan = above,
kUnsignedLessThanEqual = below_equal,
kUnsignedGreaterThanEqual = above_equal,
kOverflow = overflow,
kNoOverflow = no_overflow,
kZero = zero,
kNotZero = not_zero,
};
inline internal::Condition AsMasmCondition(Condition cond) {
return static_cast<internal::Condition>(cond);
}
namespace detail {
#define __ masm_->
#ifdef DEBUG
inline bool Clobbers(Register target, MemOperand op) {
return op.AddressUsesRegister(target);
}
#endif
} // namespace detail
MemOperand BaselineAssembler::RegisterFrameOperand(
interpreter::Register interpreter_register) {
return MemOperand(rbp, interpreter_register.ToOperand() * kSystemPointerSize);
}
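// Note: ToOperand() is a signed slot count relative to rbp (negative for
// locals), so scaling by kSystemPointerSize yields the slot's frame offset.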
MemOperand BaselineAssembler::FeedbackVectorOperand() {
return MemOperand(rbp, BaselineFrameConstants::kFeedbackVectorFromFp);
}
void BaselineAssembler::Bind(Label* label) { __ bind(label); }
void BaselineAssembler::Jump(Label* target, Label::Distance distance) {
__ jmp(target, distance);
}
void BaselineAssembler::JumpIf(Condition cc, Label* target,
Label::Distance distance) {
__ j(AsMasmCondition(cc), target, distance);
}
void BaselineAssembler::JumpIfRoot(Register value, RootIndex index,
Label* target, Label::Distance distance) {
__ JumpIfRoot(value, index, target, distance);
}
void BaselineAssembler::JumpIfNotRoot(Register value, RootIndex index,
Label* target, Label::Distance distance) {
__ JumpIfNotRoot(value, index, target, distance);
}
void BaselineAssembler::JumpIfSmi(Register value, Label* target,
Label::Distance distance) {
__ JumpIfSmi(value, target, distance);
}
void BaselineAssembler::JumpIfNotSmi(Register value, Label* target,
Label::Distance distance) {
__ JumpIfNotSmi(value, target, distance);
}
void BaselineAssembler::CallBuiltin(Builtins::Name builtin) {
__ RecordCommentForOffHeapTrampoline(builtin);
__ Call(__ EntryFromBuiltinIndexAsOperand(builtin));
if (FLAG_code_comments) __ RecordComment("]");
}
void BaselineAssembler::TailCallBuiltin(Builtins::Name builtin) {
__ RecordCommentForOffHeapTrampoline(builtin);
__ Jump(__ EntryFromBuiltinIndexAsOperand(builtin));
if (FLAG_code_comments) __ RecordComment("]");
}
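// In both helpers above, RecordCommentForOffHeapTrampoline opens a "[ ..."
// code comment, so the explicit "]" closes it when FLAG_code_comments is on.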
void BaselineAssembler::Test(Register value, int mask) {
if ((mask & 0xff) == mask) {
__ testb(value, Immediate(mask));
} else {
__ testl(value, Immediate(mask));
}
}
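// For example, Test(reg, 0x80) fits in a byte and emits testb, while
// Test(reg, 0x100) does not and falls back to testl.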
void BaselineAssembler::CmpObjectType(Register object,
InstanceType instance_type,
Register map) {
__ CmpObjectType(object, instance_type, map);
}
void BaselineAssembler::CmpInstanceType(Register value,
InstanceType instance_type) {
__ CmpInstanceType(value, instance_type);
}
void BaselineAssembler::Cmp(Register value, Smi smi) { __ Cmp(value, smi); }
void BaselineAssembler::ComparePointer(Register value, MemOperand operand) {
__ cmpq(value, operand);
}
void BaselineAssembler::SmiCompare(Register lhs, Register rhs) {
__ SmiCompare(lhs, rhs);
}
// cmp_tagged
void BaselineAssembler::CompareTagged(Register value, MemOperand operand) {
__ cmp_tagged(value, operand);
}
void BaselineAssembler::CompareTagged(MemOperand operand, Register value) {
__ cmp_tagged(operand, value);
}
void BaselineAssembler::CompareByte(Register value, int32_t byte) {
__ cmpb(value, Immediate(byte));
}
void BaselineAssembler::Move(interpreter::Register output, Register source) {
return __ movq(RegisterFrameOperand(output), source);
}
void BaselineAssembler::Move(Register output, TaggedIndex value) {
__ Move(output, value);
}
void BaselineAssembler::Move(MemOperand output, Register source) {
__ movq(output, source);
}
void BaselineAssembler::Move(Register output, ExternalReference reference) {
__ Move(output, reference);
}
void BaselineAssembler::Move(Register output, Handle<HeapObject> value) {
__ Move(output, value);
}
void BaselineAssembler::Move(Register output, int32_t value) {
__ Move(output, Immediate(value));
}
void BaselineAssembler::MoveMaybeSmi(Register output, Register source) {
__ mov_tagged(output, source);
}
void BaselineAssembler::MoveSmi(Register output, Register source) {
__ mov_tagged(output, source);
}
namespace detail {
inline void PushSingle(MacroAssembler* masm, RootIndex source) {
masm->PushRoot(source);
}
inline void PushSingle(MacroAssembler* masm, Register reg) { masm->Push(reg); }
inline void PushSingle(MacroAssembler* masm, TaggedIndex value) {
masm->Push(value);
}
inline void PushSingle(MacroAssembler* masm, Smi value) { masm->Push(value); }
inline void PushSingle(MacroAssembler* masm, Handle<HeapObject> object) {
masm->Push(object);
}
inline void PushSingle(MacroAssembler* masm, int32_t immediate) {
masm->Push(Immediate(immediate));
}
inline void PushSingle(MacroAssembler* masm, MemOperand operand) {
masm->Push(operand);
}
inline void PushSingle(MacroAssembler* masm, interpreter::Register source) {
return PushSingle(masm, BaselineAssembler::RegisterFrameOperand(source));
}
template <typename Arg>
struct PushHelper {
static int Push(BaselineAssembler* basm, Arg arg) {
PushSingle(basm->masm(), arg);
return 1;
}
static int PushReverse(BaselineAssembler* basm, Arg arg) {
return Push(basm, arg);
}
};
template <>
struct PushHelper<interpreter::RegisterList> {
static int Push(BaselineAssembler* basm, interpreter::RegisterList list) {
for (int reg_index = 0; reg_index < list.register_count(); ++reg_index) {
PushSingle(basm->masm(), list[reg_index]);
}
return list.register_count();
}
static int PushReverse(BaselineAssembler* basm,
interpreter::RegisterList list) {
for (int reg_index = list.register_count() - 1; reg_index >= 0;
--reg_index) {
PushSingle(basm->masm(), list[reg_index]);
}
return list.register_count();
}
};
template <typename... Args>
struct PushAllHelper;
template <>
struct PushAllHelper<> {
static int Push(BaselineAssembler* masm) { return 0; }
static int PushReverse(BaselineAssembler* masm) { return 0; }
};
template <typename Arg, typename... Args>
struct PushAllHelper<Arg, Args...> {
static int Push(BaselineAssembler* masm, Arg arg, Args... args) {
int nargs = PushHelper<Arg>::Push(masm, arg);
return nargs + PushAllHelper<Args...>::Push(masm, args...);
}
static int PushReverse(BaselineAssembler* masm, Arg arg, Args... args) {
int nargs = PushAllHelper<Args...>::PushReverse(masm, args...);
return nargs + PushHelper<Arg>::PushReverse(masm, arg);
}
};
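// Expansion sketch: Push(a, b, list) peels one argument per recursion step,
// becoming PushHelper<A>::Push(a) + PushHelper<B>::Push(b) +
// PushHelper<RegisterList>::Push(list), so the result sums to
// 2 + list.register_count(). PushReverse recurses before pushing the head,
// which emits the same pushes in reverse order.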
} // namespace detail
template <typename... T>
int BaselineAssembler::Push(T... vals) {
return detail::PushAllHelper<T...>::Push(this, vals...);
}
template <typename... T>
void BaselineAssembler::PushReverse(T... vals) {
detail::PushAllHelper<T...>::PushReverse(this, vals...);
}
template <typename... T>
void BaselineAssembler::Pop(T... registers) {
ITERATE_PACK(__ Pop(registers));
}
void BaselineAssembler::LoadTaggedPointerField(Register output, Register source,
int offset) {
__ LoadTaggedPointerField(output, FieldOperand(source, offset));
}
void BaselineAssembler::LoadTaggedSignedField(Register output, Register source,
int offset) {
__ LoadTaggedSignedField(output, FieldOperand(source, offset));
}
void BaselineAssembler::LoadTaggedAnyField(Register output, Register source,
int offset) {
__ LoadAnyTaggedField(output, FieldOperand(source, offset));
}
void BaselineAssembler::LoadByteField(Register output, Register source,
int offset) {
__ movb(output, FieldOperand(source, offset));
}
void BaselineAssembler::StoreTaggedSignedField(Register target, int offset,
Smi value) {
__ StoreTaggedSignedField(FieldOperand(target, offset), value);
}
void BaselineAssembler::StoreTaggedFieldWithWriteBarrier(Register target,
int offset,
Register value) {
BaselineAssembler::ScratchRegisterScope scratch_scope(this);
Register scratch = scratch_scope.AcquireScratch();
DCHECK_NE(target, scratch);
DCHECK_NE(value, scratch);
__ StoreTaggedField(FieldOperand(target, offset), value);
__ RecordWriteField(target, offset, value, scratch, kDontSaveFPRegs);
}
void BaselineAssembler::StoreTaggedFieldNoWriteBarrier(Register target,
int offset,
Register value) {
__ StoreTaggedField(FieldOperand(target, offset), value);
}
void BaselineAssembler::AddToInterruptBudget(int32_t weight) {
ScratchRegisterScope scratch_scope(this);
Register feedback_cell = scratch_scope.AcquireScratch();
LoadFunction(feedback_cell);
LoadTaggedPointerField(feedback_cell, feedback_cell,
JSFunction::kFeedbackCellOffset);
__ addl(FieldOperand(feedback_cell, FeedbackCell::kInterruptBudgetOffset),
Immediate(weight));
}
void BaselineAssembler::AddToInterruptBudget(Register weight) {
ScratchRegisterScope scratch_scope(this);
Register feedback_cell = scratch_scope.AcquireScratch();
LoadFunction(feedback_cell);
LoadTaggedPointerField(feedback_cell, feedback_cell,
JSFunction::kFeedbackCellOffset);
__ addl(FieldOperand(feedback_cell, FeedbackCell::kInterruptBudgetOffset),
weight);
}
void BaselineAssembler::AddSmi(Register lhs, Smi rhs) {
if (rhs.value() == 0) return;
if (SmiValuesAre31Bits()) {
__ addl(lhs, Immediate(rhs));
} else {
ScratchRegisterScope scratch_scope(this);
Register rhs_reg = scratch_scope.AcquireScratch();
__ Move(rhs_reg, rhs);
__ addq(lhs, rhs_reg);
}
}
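// Note: with 31-bit smis the tagged value fits in a 32-bit immediate, so a
// single addl suffices; with 32-bit smi values the tagged representation
// needs the full word, so it is materialized in a scratch register first and
// added with addq.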
void BaselineAssembler::Switch(Register reg, int case_value_base,
Label** labels, int num_labels) {
ScratchRegisterScope scope(this);
Register table = scope.AcquireScratch();
Label fallthrough, jump_table;
if (case_value_base > 0) {
__ subq(reg, Immediate(case_value_base));
}
__ cmpq(reg, Immediate(num_labels));
__ j(above_equal, &fallthrough);
__ leaq(table, MemOperand(&jump_table));
__ jmp(MemOperand(table, reg, times_8, 0));
// Emit the jump table inline, under the assumption that it's not too big.
__ Align(kSystemPointerSize);
__ bind(&jump_table);
for (int i = 0; i < num_labels; ++i) {
__ dq(labels[i]);
}
__ bind(&fallthrough);
}
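// Emitted layout sketch for Switch (pseudo-assembly, illustrative):
//
//   subq reg, case_value_base      ; only if case_value_base > 0
//   cmpq reg, num_labels
//   jae  fallthrough
//   leaq table, [rip + jump_table]
//   jmp  [table + reg * 8]
//   .align kSystemPointerSize
// jump_table:
//   dq label_0 ... dq label_{num_labels-1}
// fallthrough: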
#undef __
#define __ basm.
void BaselineAssembler::EmitReturn(MacroAssembler* masm) {
BaselineAssembler basm(masm);
Register weight = BaselineLeaveFrameDescriptor::WeightRegister();
Register params_size = BaselineLeaveFrameDescriptor::ParamsSizeRegister();
__ RecordComment("[ Update Interrupt Budget");
__ AddToInterruptBudget(weight);
// Use compare flags set by AddToInterruptBudget
Label skip_interrupt_label;
__ JumpIf(Condition::kGreaterThanEqual, &skip_interrupt_label);
{
__ masm()->SmiTag(params_size);
__ Push(params_size, kInterpreterAccumulatorRegister);
__ LoadContext(kContextRegister);
__ Push(MemOperand(rbp, InterpreterFrameConstants::kFunctionOffset));
__ CallRuntime(Runtime::kBytecodeBudgetInterruptFromBytecode, 1);
__ Pop(kInterpreterAccumulatorRegister, params_size);
__ masm()->SmiUntag(params_size);
}
__ RecordComment("]");
__ Bind(&skip_interrupt_label);
BaselineAssembler::ScratchRegisterScope scope(&basm);
Register scratch = scope.AcquireScratch();
Register actual_params_size = scratch;
// Compute the size of the actual parameters + receiver (in bytes).
__ masm()->movq(actual_params_size,
MemOperand(rbp, StandardFrameConstants::kArgCOffset));
// If actual is bigger than formal, then we should use it to free up the stack
// arguments.
Label corrected_args_count;
__ masm()->cmpq(params_size, actual_params_size);
__ JumpIf(Condition::kGreaterThanEqual, &corrected_args_count, Label::kNear);
__ masm()->movq(params_size, actual_params_size);
__ Bind(&corrected_args_count);
// Leave the frame (also dropping the register file).
__ masm()->LeaveFrame(StackFrame::BASELINE);
// Drop receiver + arguments.
Register return_pc = scratch;
__ masm()->PopReturnAddressTo(return_pc);
__ masm()->leaq(rsp, MemOperand(rsp, params_size, times_system_pointer_size,
kSystemPointerSize));
__ masm()->PushReturnAddressFrom(return_pc);
__ masm()->Ret();
}
#undef __
} // namespace baseline
} // namespace internal
} // namespace v8
#endif // V8_BASELINE_X64_BASELINE_ASSEMBLER_X64_INL_H_


@@ -8,366 +8,11 @@
#include "src/base/macros.h"
#include "src/baseline/baseline-compiler.h"
#include "src/codegen/interface-descriptors.h"
#include "src/codegen/x64/register-x64.h"
#include "src/objects/code-inl.h"
namespace v8 {
namespace internal {
namespace baseline {
namespace detail {
// Avoid using kScratchRegister (== r10), since the macro-assembler doesn't
// allocate through this scope and its own uses of r10 would conflict.
static constexpr Register kScratchRegisters[] = {r8, r9, r11, r12, r14, r15};
static constexpr int kNumScratchRegisters = arraysize(kScratchRegisters);
} // namespace detail
// TODO(v8:11429): Move BaselineAssembler to baseline-assembler-<arch>-inl.h
class BaselineAssembler::ScratchRegisterScope {
public:
explicit ScratchRegisterScope(BaselineAssembler* assembler)
: assembler_(assembler),
prev_scope_(assembler->scratch_register_scope_),
registers_used_(prev_scope_ == nullptr ? 0
: prev_scope_->registers_used_) {
assembler_->scratch_register_scope_ = this;
}
~ScratchRegisterScope() { assembler_->scratch_register_scope_ = prev_scope_; }
Register AcquireScratch() {
DCHECK_LT(registers_used_, detail::kNumScratchRegisters);
return detail::kScratchRegisters[registers_used_++];
}
private:
BaselineAssembler* assembler_;
ScratchRegisterScope* prev_scope_;
int registers_used_;
};
// TODO(v8:11461): Unify condition names in the MacroAssembler.
enum class Condition : uint8_t {
kEqual = equal,
kNotEqual = not_equal,
kLessThan = less,
kGreaterThan = greater,
kLessThanEqual = less_equal,
kGreaterThanEqual = greater_equal,
kUnsignedLessThan = below,
kUnsignedGreaterThan = above,
kUnsignedLessThanEqual = below_equal,
kUnsignedGreaterThanEqual = above_equal,
kOverflow = overflow,
kNoOverflow = no_overflow,
kZero = zero,
kNotZero = not_zero,
};
internal::Condition AsMasmCondition(Condition cond) {
return static_cast<internal::Condition>(cond);
}
namespace detail {
#define __ masm_->
#ifdef DEBUG
bool Clobbers(Register target, MemOperand op) {
return op.AddressUsesRegister(target);
}
#endif
} // namespace detail
MemOperand BaselineAssembler::RegisterFrameOperand(
interpreter::Register interpreter_register) {
return MemOperand(rbp, interpreter_register.ToOperand() * kSystemPointerSize);
}
MemOperand BaselineAssembler::FeedbackVectorOperand() {
return MemOperand(rbp, BaselineFrameConstants::kFeedbackVectorFromFp);
}
void BaselineAssembler::Bind(Label* label) { __ bind(label); }
void BaselineAssembler::Jump(Label* target, Label::Distance distance) {
__ jmp(target, distance);
}
void BaselineAssembler::JumpIf(Condition cc, Label* target,
Label::Distance distance) {
__ j(AsMasmCondition(cc), target, distance);
}
void BaselineAssembler::JumpIfRoot(Register value, RootIndex index,
Label* target, Label::Distance distance) {
__ JumpIfRoot(value, index, target, distance);
}
void BaselineAssembler::JumpIfNotRoot(Register value, RootIndex index,
Label* target, Label::Distance distance) {
__ JumpIfNotRoot(value, index, target, distance);
}
void BaselineAssembler::JumpIfSmi(Register value, Label* target,
Label::Distance distance) {
__ JumpIfSmi(value, target, distance);
}
void BaselineAssembler::JumpIfNotSmi(Register value, Label* target,
Label::Distance distance) {
__ JumpIfNotSmi(value, target, distance);
}
void BaselineAssembler::CallBuiltin(Builtins::Name builtin) {
__ RecordCommentForOffHeapTrampoline(builtin);
__ Call(__ EntryFromBuiltinIndexAsOperand(builtin));
if (FLAG_code_comments) __ RecordComment("]");
}
void BaselineAssembler::TailCallBuiltin(Builtins::Name builtin) {
__ RecordCommentForOffHeapTrampoline(builtin);
__ Jump(__ EntryFromBuiltinIndexAsOperand(builtin));
if (FLAG_code_comments) __ RecordComment("]");
}
void BaselineAssembler::Test(Register value, int mask) {
if ((mask & 0xff) == mask) {
__ testb(value, Immediate(mask));
} else {
__ testl(value, Immediate(mask));
}
}
void BaselineAssembler::CmpObjectType(Register object,
InstanceType instance_type,
Register map) {
__ CmpObjectType(object, instance_type, map);
}
void BaselineAssembler::CmpInstanceType(Register value,
InstanceType instance_type) {
__ CmpInstanceType(value, instance_type);
}
void BaselineAssembler::Cmp(Register value, Smi smi) { __ Cmp(value, smi); }
void BaselineAssembler::ComparePointer(Register value, MemOperand operand) {
__ cmpq(value, operand);
}
void BaselineAssembler::SmiCompare(Register lhs, Register rhs) {
__ SmiCompare(lhs, rhs);
}
// cmp_tagged
void BaselineAssembler::CompareTagged(Register value, MemOperand operand) {
__ cmp_tagged(value, operand);
}
void BaselineAssembler::CompareTagged(MemOperand operand, Register value) {
__ cmp_tagged(operand, value);
}
void BaselineAssembler::CompareByte(Register value, int32_t byte) {
__ cmpb(value, Immediate(byte));
}
void BaselineAssembler::Move(interpreter::Register output, Register source) {
return __ movq(RegisterFrameOperand(output), source);
}
void BaselineAssembler::Move(Register output, TaggedIndex value) {
__ Move(output, value);
}
void BaselineAssembler::Move(MemOperand output, Register source) {
__ movq(output, source);
}
void BaselineAssembler::Move(Register output, ExternalReference reference) {
__ Move(output, reference);
}
void BaselineAssembler::Move(Register output, Handle<HeapObject> value) {
__ Move(output, value);
}
void BaselineAssembler::Move(Register output, int32_t value) {
__ Move(output, Immediate(value));
}
void BaselineAssembler::MoveMaybeSmi(Register output, Register source) {
__ mov_tagged(output, source);
}
void BaselineAssembler::MoveSmi(Register output, Register source) {
__ mov_tagged(output, source);
}
namespace detail {
void PushSingle(MacroAssembler* masm, RootIndex source) {
masm->PushRoot(source);
}
void PushSingle(MacroAssembler* masm, Register reg) { masm->Push(reg); }
void PushSingle(MacroAssembler* masm, TaggedIndex value) { masm->Push(value); }
void PushSingle(MacroAssembler* masm, Smi value) { masm->Push(value); }
void PushSingle(MacroAssembler* masm, Handle<HeapObject> object) {
masm->Push(object);
}
void PushSingle(MacroAssembler* masm, int32_t immediate) {
masm->Push(Immediate(immediate));
}
void PushSingle(MacroAssembler* masm, MemOperand operand) {
masm->Push(operand);
}
void PushSingle(MacroAssembler* masm, interpreter::Register source) {
return PushSingle(masm, BaselineAssembler::RegisterFrameOperand(source));
}
template <typename Arg>
struct PushHelper {
static int Push(BaselineAssembler* basm, Arg arg) {
PushSingle(basm->masm(), arg);
return 1;
}
static int PushReverse(BaselineAssembler* basm, Arg arg) {
return Push(basm, arg);
}
};
template <>
struct PushHelper<interpreter::RegisterList> {
static int Push(BaselineAssembler* basm, interpreter::RegisterList list) {
for (int reg_index = 0; reg_index < list.register_count(); ++reg_index) {
PushSingle(basm->masm(), list[reg_index]);
}
return list.register_count();
}
static int PushReverse(BaselineAssembler* basm,
interpreter::RegisterList list) {
for (int reg_index = list.register_count() - 1; reg_index >= 0;
--reg_index) {
PushSingle(basm->masm(), list[reg_index]);
}
return list.register_count();
}
};
template <typename... Args>
struct PushAllHelper;
template <>
struct PushAllHelper<> {
static int Push(BaselineAssembler* masm) { return 0; }
static int PushReverse(BaselineAssembler* masm) { return 0; }
};
template <typename Arg, typename... Args>
struct PushAllHelper<Arg, Args...> {
static int Push(BaselineAssembler* masm, Arg arg, Args... args) {
int nargs = PushHelper<Arg>::Push(masm, arg);
return nargs + PushAllHelper<Args...>::Push(masm, args...);
}
static int PushReverse(BaselineAssembler* masm, Arg arg, Args... args) {
int nargs = PushAllHelper<Args...>::PushReverse(masm, args...);
return nargs + PushHelper<Arg>::PushReverse(masm, arg);
}
};
} // namespace detail
template <typename... T>
int BaselineAssembler::Push(T... vals) {
return detail::PushAllHelper<T...>::Push(this, vals...);
}
template <typename... T>
void BaselineAssembler::PushReverse(T... vals) {
detail::PushAllHelper<T...>::PushReverse(this, vals...);
}
template <typename... T>
void BaselineAssembler::Pop(T... registers) {
ITERATE_PACK(__ Pop(registers));
}
void BaselineAssembler::LoadTaggedPointerField(Register output, Register source,
int offset) {
__ LoadTaggedPointerField(output, FieldOperand(source, offset));
}
void BaselineAssembler::LoadTaggedSignedField(Register output, Register source,
int offset) {
__ LoadTaggedSignedField(output, FieldOperand(source, offset));
}
void BaselineAssembler::LoadTaggedAnyField(Register output, Register source,
int offset) {
__ LoadAnyTaggedField(output, FieldOperand(source, offset));
}
void BaselineAssembler::LoadByteField(Register output, Register source,
int offset) {
__ movb(output, FieldOperand(source, offset));
}
void BaselineAssembler::StoreTaggedSignedField(Register target, int offset,
Smi value) {
__ StoreTaggedSignedField(FieldOperand(target, offset), value);
}
void BaselineAssembler::StoreTaggedFieldWithWriteBarrier(Register target,
int offset,
Register value) {
BaselineAssembler::ScratchRegisterScope scratch_scope(this);
Register scratch = scratch_scope.AcquireScratch();
DCHECK_NE(target, scratch);
DCHECK_NE(value, scratch);
__ StoreTaggedField(FieldOperand(target, offset), value);
__ RecordWriteField(target, offset, value, scratch, kDontSaveFPRegs);
}
void BaselineAssembler::StoreTaggedFieldNoWriteBarrier(Register target,
int offset,
Register value) {
__ StoreTaggedField(FieldOperand(target, offset), value);
}
void BaselineAssembler::AddToInterruptBudget(int32_t weight) {
ScratchRegisterScope scratch_scope(this);
Register feedback_cell = scratch_scope.AcquireScratch();
LoadFunction(feedback_cell);
LoadTaggedPointerField(feedback_cell, feedback_cell,
JSFunction::kFeedbackCellOffset);
__ addl(FieldOperand(feedback_cell, FeedbackCell::kInterruptBudgetOffset),
Immediate(weight));
}
void BaselineAssembler::AddToInterruptBudget(Register weight) {
ScratchRegisterScope scratch_scope(this);
Register feedback_cell = scratch_scope.AcquireScratch();
LoadFunction(feedback_cell);
LoadTaggedPointerField(feedback_cell, feedback_cell,
JSFunction::kFeedbackCellOffset);
__ addl(FieldOperand(feedback_cell, FeedbackCell::kInterruptBudgetOffset),
weight);
}
void BaselineAssembler::AddSmi(Register lhs, Smi rhs) {
if (rhs.value() == 0) return;
if (SmiValuesAre31Bits()) {
__ addl(lhs, Immediate(rhs));
} else {
ScratchRegisterScope scratch_scope(this);
Register rhs_reg = scratch_scope.AcquireScratch();
__ Move(rhs_reg, rhs);
__ addq(lhs, rhs_reg);
}
}
void BaselineAssembler::Switch(Register reg, int case_value_base,
Label** labels, int num_labels) {
ScratchRegisterScope scope(this);
Register table = scope.AcquireScratch();
Label fallthrough, jump_table;
if (case_value_base > 0) {
__ subq(reg, Immediate(case_value_base));
}
__ cmpq(reg, Immediate(num_labels));
__ j(above_equal, &fallthrough);
__ leaq(table, MemOperand(&jump_table));
__ jmp(MemOperand(table, reg, times_8, 0));
// Emit the jump table inline, under the assumption that it's not too big.
__ Align(kSystemPointerSize);
__ bind(&jump_table);
for (int i = 0; i < num_labels; ++i) {
__ dq(labels[i]);
}
__ bind(&fallthrough);
}
#undef __
#define __ basm_.
void BaselineCompiler::Prologue() {
@@ -439,65 +84,6 @@ void BaselineCompiler::VerifyFrameSize() {
#undef __
#define __ basm.
void BaselineAssembler::EmitReturn(MacroAssembler* masm) {
BaselineAssembler basm(masm);
Register weight = BaselineLeaveFrameDescriptor::WeightRegister();
Register params_size = BaselineLeaveFrameDescriptor::ParamsSizeRegister();
__ RecordComment("[ Update Interrupt Budget");
__ AddToInterruptBudget(weight);
// Use compare flags set by AddToInterruptBudget
Label skip_interrupt_label;
__ JumpIf(Condition::kGreaterThanEqual, &skip_interrupt_label);
{
__ masm()->SmiTag(params_size);
__ Push(params_size, kInterpreterAccumulatorRegister);
__ LoadContext(kContextRegister);
__ Push(MemOperand(rbp, InterpreterFrameConstants::kFunctionOffset));
__ CallRuntime(Runtime::kBytecodeBudgetInterruptFromBytecode, 1);
__ Pop(kInterpreterAccumulatorRegister, params_size);
__ masm()->SmiUntag(params_size);
}
__ RecordComment("]");
__ Bind(&skip_interrupt_label);
BaselineAssembler::ScratchRegisterScope scope(&basm);
Register scratch = scope.AcquireScratch();
Register actual_params_size = scratch;
// Compute the size of the actual parameters + receiver (in bytes).
__ masm()->movq(actual_params_size,
MemOperand(rbp, StandardFrameConstants::kArgCOffset));
// If actual is bigger than formal, then we should use it to free up the stack
// arguments.
Label corrected_args_count;
__ masm()->cmpq(params_size, actual_params_size);
__ JumpIf(Condition::kGreaterThanEqual, &corrected_args_count, Label::kNear);
__ masm()->movq(params_size, actual_params_size);
__ Bind(&corrected_args_count);
// Leave the frame (also dropping the register file).
__ masm()->LeaveFrame(StackFrame::BASELINE);
// Drop receiver + arguments.
Register return_pc = scratch;
__ masm()->PopReturnAddressTo(return_pc);
__ masm()->leaq(rsp, MemOperand(rsp, params_size, times_system_pointer_size,
kSystemPointerSize));
__ masm()->PushReturnAddressFrom(return_pc);
__ masm()->Ret();
}
#undef __
} // namespace baseline
} // namespace internal
} // namespace v8