[ia32][sparkplug] Sparkplug IA32 port

Change-Id: Idece4925aa0ffa99bc34db39d20b24a41d59f84f
Bug: v8:11421
Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/2715064
Reviewed-by: Igor Sheludko <ishell@chromium.org>
Reviewed-by: Leszek Swirski <leszeks@chromium.org>
Commit-Queue: Victor Gomes <victorgomes@chromium.org>
Cr-Commit-Position: refs/heads/master@{#73265}

Parent: fe5f67e9b5
Commit: fb6d4ba104
@@ -7,7 +7,7 @@

 // TODO(v8:11421): Remove #if once baseline compiler is ported to other
 // architectures.
-#if V8_TARGET_ARCH_X64 || V8_TARGET_ARCH_ARM64
+#if V8_TARGET_ARCH_IA32 || V8_TARGET_ARCH_X64 || V8_TARGET_ARCH_ARM64

 #include <type_traits>
 #include <unordered_map>
@@ -22,6 +22,8 @@
 #include "src/baseline/x64/baseline-assembler-x64-inl.h"
 #elif V8_TARGET_ARCH_ARM64
 #include "src/baseline/arm64/baseline-assembler-arm64-inl.h"
+#elif V8_TARGET_ARCH_IA32
+#include "src/baseline/ia32/baseline-assembler-ia32-inl.h"
 #else
 #error Unsupported target architecture.
 #endif
@@ -8,9 +8,10 @@

 // TODO(v8:11421): Remove #if once baseline compiler is ported to other
 // architectures.
-#if V8_TARGET_ARCH_X64 || V8_TARGET_ARCH_ARM64
+#if V8_TARGET_ARCH_IA32 || V8_TARGET_ARCH_X64 || V8_TARGET_ARCH_ARM64

 #include "src/codegen/macro-assembler.h"
+#include "src/objects/tagged-index.h"

 namespace v8 {
 namespace internal {
@@ -4,7 +4,7 @@

 // TODO(v8:11421): Remove #if once baseline compiler is ported to other
 // architectures.
-#if V8_TARGET_ARCH_X64 || V8_TARGET_ARCH_ARM64
+#if V8_TARGET_ARCH_IA32 || V8_TARGET_ARCH_X64 || V8_TARGET_ARCH_ARM64

 #include "src/baseline/baseline-compiler.h"

@@ -34,6 +34,8 @@
 #include "src/baseline/x64/baseline-compiler-x64-inl.h"
 #elif V8_TARGET_ARCH_ARM64
 #include "src/baseline/arm64/baseline-compiler-arm64-inl.h"
+#elif V8_TARGET_ARCH_IA32
+#include "src/baseline/ia32/baseline-compiler-ia32-inl.h"
 #else
 #error Unsupported target architecture.
 #endif
@@ -7,7 +7,7 @@

 // TODO(v8:11421): Remove #if once baseline compiler is ported to other
 // architectures.
-#if V8_TARGET_ARCH_X64 || V8_TARGET_ARCH_ARM64
+#if V8_TARGET_ARCH_IA32 || V8_TARGET_ARCH_X64 || V8_TARGET_ARCH_ARM64

 #include <unordered_map>

@@ -6,7 +6,7 @@

 // TODO(v8:11421): Remove #if once baseline compiler is ported to other
 // architectures.
-#if V8_TARGET_ARCH_X64 || V8_TARGET_ARCH_ARM64
+#if V8_TARGET_ARCH_IA32 || V8_TARGET_ARCH_X64 || V8_TARGET_ARCH_ARM64

 #include "src/baseline/baseline-assembler-inl.h"
 #include "src/baseline/baseline-compiler.h"
src/baseline/ia32/baseline-assembler-ia32-inl.h (new file, 440 lines)
@@ -0,0 +1,440 @@
// Copyright 2021 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#ifndef V8_BASELINE_IA32_BASELINE_ASSEMBLER_IA32_INL_H_
#define V8_BASELINE_IA32_BASELINE_ASSEMBLER_IA32_INL_H_

#include "src/baseline/baseline-assembler.h"
#include "src/codegen/ia32/register-ia32.h"
#include "src/codegen/interface-descriptors.h"

namespace v8 {
namespace internal {
namespace baseline {

namespace detail {

static constexpr Register kScratchRegisters[] = {ecx, edx, esi, edi};
static constexpr int kNumScratchRegisters = arraysize(kScratchRegisters);

}  // namespace detail

class BaselineAssembler::ScratchRegisterScope {
 public:
  explicit ScratchRegisterScope(BaselineAssembler* assembler)
      : assembler_(assembler),
        prev_scope_(assembler->scratch_register_scope_),
        registers_used_(prev_scope_ == nullptr ? 0
                                               : prev_scope_->registers_used_) {
    assembler_->scratch_register_scope_ = this;
  }
  ~ScratchRegisterScope() { assembler_->scratch_register_scope_ = prev_scope_; }

  Register AcquireScratch() {
    DCHECK_LT(registers_used_, detail::kNumScratchRegisters);
    return detail::kScratchRegisters[registers_used_++];
  }

 private:
  BaselineAssembler* assembler_;
  ScratchRegisterScope* prev_scope_;
  int registers_used_;
};

// TODO(v8:11461): Unify condition names in the MacroAssembler.
enum class Condition : uint8_t {
  kEqual = equal,
  kNotEqual = not_equal,

  kLessThan = less,
  kGreaterThan = greater,
  kLessThanEqual = less_equal,
  kGreaterThanEqual = greater_equal,

  kUnsignedLessThan = below,
  kUnsignedGreaterThan = above,
  kUnsignedLessThanEqual = below_equal,
  kUnsignedGreaterThanEqual = above_equal,

  kOverflow = overflow,
  kNoOverflow = no_overflow,

  kZero = zero,
  kNotZero = not_zero,
};

inline internal::Condition AsMasmCondition(Condition cond) {
  return static_cast<internal::Condition>(cond);
}

namespace detail {

#define __ masm_->

#ifdef DEBUG
inline bool Clobbers(Register target, MemOperand op) {
  return op.is_reg(target);
}
#endif

}  // namespace detail

MemOperand BaselineAssembler::RegisterFrameOperand(
    interpreter::Register interpreter_register) {
  return MemOperand(ebp, interpreter_register.ToOperand() * kSystemPointerSize);
}
MemOperand BaselineAssembler::FeedbackVectorOperand() {
  return MemOperand(ebp, BaselineFrameConstants::kFeedbackVectorFromFp);
}

void BaselineAssembler::Bind(Label* label) { __ bind(label); }

void BaselineAssembler::Jump(Label* target, Label::Distance distance) {
  __ jmp(target, distance);
}
void BaselineAssembler::JumpIf(Condition cc, Label* target,
                               Label::Distance distance) {
  __ j(AsMasmCondition(cc), target, distance);
}
void BaselineAssembler::JumpIfRoot(Register value, RootIndex index,
                                   Label* target, Label::Distance distance) {
  __ JumpIfRoot(value, index, target, distance);
}
void BaselineAssembler::JumpIfNotRoot(Register value, RootIndex index,
                                      Label* target, Label::Distance distance) {
  __ JumpIfNotRoot(value, index, target, distance);
}
void BaselineAssembler::JumpIfSmi(Register value, Label* target,
                                  Label::Distance distance) {
  __ JumpIfSmi(value, target, distance);
}
void BaselineAssembler::JumpIfNotSmi(Register value, Label* target,
                                     Label::Distance distance) {
  __ JumpIfNotSmi(value, target, distance);
}

void BaselineAssembler::CallBuiltin(Builtins::Name builtin) {
  __ RecordCommentForOffHeapTrampoline(builtin);
  __ Call(__ EntryFromBuiltinIndexAsOperand(builtin));
  if (FLAG_code_comments) __ RecordComment("]");
}

void BaselineAssembler::TailCallBuiltin(Builtins::Name builtin) {
  __ RecordCommentForOffHeapTrampoline(builtin);
  __ jmp(__ EntryFromBuiltinIndexAsOperand(builtin));
  if (FLAG_code_comments) __ RecordComment("]");
}

void BaselineAssembler::Test(Register value, int mask) {
  if ((mask & 0xff) == mask) {
    __ test_b(value, Immediate(mask));
  } else {
    __ test(value, Immediate(mask));
  }
}

void BaselineAssembler::CmpObjectType(Register object,
                                      InstanceType instance_type,
                                      Register map) {
  __ AssertNotSmi(object);
  __ CmpObjectType(object, instance_type, map);
}
void BaselineAssembler::CmpInstanceType(Register map,
                                        InstanceType instance_type) {
  if (emit_debug_code()) {
    __ movd(xmm0, eax);
    __ AssertNotSmi(map);
    __ CmpObjectType(map, MAP_TYPE, eax);
    __ Assert(equal, AbortReason::kUnexpectedValue);
    __ movd(eax, xmm0);
  }
  __ CmpInstanceType(map, instance_type);
}
void BaselineAssembler::Cmp(Register value, Smi smi) {
  if (smi.value() == 0) {
    __ test(value, value);
  } else {
    __ cmp(value, Immediate(smi));
  }
}
void BaselineAssembler::ComparePointer(Register value, MemOperand operand) {
  __ cmp(value, operand);
}
void BaselineAssembler::SmiCompare(Register lhs, Register rhs) {
  __ AssertSmi(lhs);
  __ AssertSmi(rhs);
  __ cmp(lhs, rhs);
}
void BaselineAssembler::CompareTagged(Register value, MemOperand operand) {
  __ cmp(value, operand);
}
void BaselineAssembler::CompareTagged(MemOperand operand, Register value) {
  __ cmp(operand, value);
}
void BaselineAssembler::CompareByte(Register value, int32_t byte) {
  __ cmpb(value, Immediate(byte));
}
void BaselineAssembler::Move(interpreter::Register output, Register source) {
  return __ mov(RegisterFrameOperand(output), source);
}
void BaselineAssembler::Move(Register output, TaggedIndex value) {
  __ Move(output, Immediate(value.ptr()));
}
void BaselineAssembler::Move(MemOperand output, Register source) {
  __ mov(output, source);
}
void BaselineAssembler::Move(Register output, ExternalReference reference) {
  __ Move(output, Immediate(reference));
}
void BaselineAssembler::Move(Register output, Handle<HeapObject> value) {
  __ Move(output, value);
}
void BaselineAssembler::Move(Register output, int32_t value) {
  __ Move(output, Immediate(value));
}
void BaselineAssembler::MoveMaybeSmi(Register output, Register source) {
  __ mov(output, source);
}
void BaselineAssembler::MoveSmi(Register output, Register source) {
  __ mov(output, source);
}

namespace detail {
inline void PushSingle(MacroAssembler* masm, RootIndex source) {
  masm->PushRoot(source);
}
inline void PushSingle(MacroAssembler* masm, Register reg) { masm->Push(reg); }
inline void PushSingle(MacroAssembler* masm, TaggedIndex value) {
  masm->Push(Immediate(value.ptr()));
}
inline void PushSingle(MacroAssembler* masm, Smi value) { masm->Push(value); }
inline void PushSingle(MacroAssembler* masm, Handle<HeapObject> object) {
  masm->Push(object);
}
inline void PushSingle(MacroAssembler* masm, int32_t immediate) {
  masm->Push(Immediate(immediate));
}
inline void PushSingle(MacroAssembler* masm, MemOperand operand) {
  masm->Push(operand);
}
inline void PushSingle(MacroAssembler* masm, interpreter::Register source) {
  return PushSingle(masm, BaselineAssembler::RegisterFrameOperand(source));
}

template <typename Arg>
struct PushHelper {
  static int Push(BaselineAssembler* basm, Arg arg) {
    PushSingle(basm->masm(), arg);
    return 1;
  }
  static int PushReverse(BaselineAssembler* basm, Arg arg) {
    return Push(basm, arg);
  }
};

template <>
struct PushHelper<interpreter::RegisterList> {
  static int Push(BaselineAssembler* basm, interpreter::RegisterList list) {
    for (int reg_index = 0; reg_index < list.register_count(); ++reg_index) {
      PushSingle(basm->masm(), list[reg_index]);
    }
    return list.register_count();
  }
  static int PushReverse(BaselineAssembler* basm,
                         interpreter::RegisterList list) {
    for (int reg_index = list.register_count() - 1; reg_index >= 0;
         --reg_index) {
      PushSingle(basm->masm(), list[reg_index]);
    }
    return list.register_count();
  }
};

template <typename... Args>
struct PushAllHelper;
template <>
struct PushAllHelper<> {
  static int Push(BaselineAssembler* masm) { return 0; }
  static int PushReverse(BaselineAssembler* masm) { return 0; }
};
template <typename Arg, typename... Args>
struct PushAllHelper<Arg, Args...> {
  static int Push(BaselineAssembler* masm, Arg arg, Args... args) {
    int nargs = PushHelper<Arg>::Push(masm, arg);
    return nargs + PushAllHelper<Args...>::Push(masm, args...);
  }
  static int PushReverse(BaselineAssembler* masm, Arg arg, Args... args) {
    int nargs = PushAllHelper<Args...>::PushReverse(masm, args...);
    return nargs + PushHelper<Arg>::PushReverse(masm, arg);
  }
};

}  // namespace detail

template <typename... T>
int BaselineAssembler::Push(T... vals) {
  return detail::PushAllHelper<T...>::Push(this, vals...);
}

template <typename... T>
void BaselineAssembler::PushReverse(T... vals) {
  detail::PushAllHelper<T...>::PushReverse(this, vals...);
}

template <typename... T>
void BaselineAssembler::Pop(T... registers) {
  ITERATE_PACK(__ Pop(registers));
}

void BaselineAssembler::LoadTaggedPointerField(Register output, Register source,
                                               int offset) {
  __ mov(output, FieldOperand(source, offset));
}
void BaselineAssembler::LoadTaggedSignedField(Register output, Register source,
                                              int offset) {
  __ mov(output, FieldOperand(source, offset));
}
void BaselineAssembler::LoadTaggedAnyField(Register output, Register source,
                                           int offset) {
  __ mov(output, FieldOperand(source, offset));
}
void BaselineAssembler::LoadByteField(Register output, Register source,
                                      int offset) {
  __ mov_b(output, FieldOperand(source, offset));
}
void BaselineAssembler::StoreTaggedSignedField(Register target, int offset,
                                               Smi value) {
  __ mov(FieldOperand(target, offset), Immediate(value));
}
void BaselineAssembler::StoreTaggedFieldWithWriteBarrier(Register target,
                                                         int offset,
                                                         Register value) {
  BaselineAssembler::ScratchRegisterScope scratch_scope(this);
  Register scratch = scratch_scope.AcquireScratch();
  DCHECK(!AreAliased(scratch, target, value));
  __ mov(FieldOperand(target, offset), value);
  __ RecordWriteField(target, offset, value, scratch, kDontSaveFPRegs);
}
void BaselineAssembler::StoreTaggedFieldNoWriteBarrier(Register target,
                                                       int offset,
                                                       Register value) {
  DCHECK(!AreAliased(target, value));
  __ mov(FieldOperand(target, offset), value);
}

void BaselineAssembler::AddToInterruptBudget(int32_t weight) {
  ScratchRegisterScope scratch_scope(this);
  Register feedback_cell = scratch_scope.AcquireScratch();
  LoadFunction(feedback_cell);
  LoadTaggedPointerField(feedback_cell, feedback_cell,
                         JSFunction::kFeedbackCellOffset);
  __ add(FieldOperand(feedback_cell, FeedbackCell::kInterruptBudgetOffset),
         Immediate(weight));
}

void BaselineAssembler::AddToInterruptBudget(Register weight) {
  ScratchRegisterScope scratch_scope(this);
  Register feedback_cell = scratch_scope.AcquireScratch();
  DCHECK(!AreAliased(feedback_cell, weight));
  LoadFunction(feedback_cell);
  LoadTaggedPointerField(feedback_cell, feedback_cell,
                         JSFunction::kFeedbackCellOffset);
  __ add(FieldOperand(feedback_cell, FeedbackCell::kInterruptBudgetOffset),
         weight);
}

void BaselineAssembler::AddSmi(Register lhs, Smi rhs) {
  if (rhs.value() == 0) return;
  __ add(lhs, Immediate(rhs));
}

void BaselineAssembler::Switch(Register reg, int case_value_base,
                               Label** labels, int num_labels) {
  ScratchRegisterScope scope(this);
  Register table = scope.AcquireScratch();
  DCHECK(!AreAliased(reg, table));
  Label fallthrough, jump_table;
  if (case_value_base > 0) {
    __ sub(reg, Immediate(case_value_base));
  }
  __ cmp(reg, Immediate(num_labels));
  __ j(above_equal, &fallthrough);
  __ lea(table, MemOperand(&jump_table));
  __ jmp(Operand(table, reg, times_system_pointer_size, 0));
  // Emit the jump table inline, under the assumption that it's not too big.
  __ Align(kSystemPointerSize);
  __ bind(&jump_table);
  for (int i = 0; i < num_labels; ++i) {
    __ dd(labels[i]);
  }
  __ bind(&fallthrough);
}

#undef __
#define __ basm.

void BaselineAssembler::EmitReturn(MacroAssembler* masm) {
  BaselineAssembler basm(masm);

  Register weight = BaselineLeaveFrameDescriptor::WeightRegister();
  Register params_size = BaselineLeaveFrameDescriptor::ParamsSizeRegister();

  __ RecordComment("[ Update Interrupt Budget");
  __ AddToInterruptBudget(weight);

  // Use compare flags set by AddToInterruptBudget
  Label skip_interrupt_label;
  __ JumpIf(Condition::kGreaterThanEqual, &skip_interrupt_label);
  {
    __ masm()->SmiTag(params_size);
    __ Push(params_size, kInterpreterAccumulatorRegister);

    __ LoadContext(kContextRegister);
    __ Push(MemOperand(ebp, InterpreterFrameConstants::kFunctionOffset));
    __ CallRuntime(Runtime::kBytecodeBudgetInterruptFromBytecode, 1);

    __ Pop(kInterpreterAccumulatorRegister, params_size);
    __ masm()->SmiUntag(params_size);
  }
  __ RecordComment("]");

  __ Bind(&skip_interrupt_label);

  BaselineAssembler::ScratchRegisterScope scope(&basm);
  Register scratch = scope.AcquireScratch();
  DCHECK(!AreAliased(weight, params_size, scratch));

  Register actual_params_size = scratch;
  // Compute the size of the actual parameters + receiver (in bytes).
  __ masm()->mov(actual_params_size,
                 MemOperand(ebp, StandardFrameConstants::kArgCOffset));

  // If actual is bigger than formal, then we should use it to free up the stack
  // arguments.
  Label corrected_args_count;
  __ masm()->cmp(params_size, actual_params_size);
  __ JumpIf(Condition::kGreaterThanEqual, &corrected_args_count, Label::kNear);
  __ masm()->mov(params_size, actual_params_size);
  __ Bind(&corrected_args_count);

  // Leave the frame (also dropping the register file).
  __ masm()->LeaveFrame(StackFrame::BASELINE);

  // Drop receiver + arguments.
  Register return_pc = scratch;
  __ masm()->PopReturnAddressTo(return_pc);
  __ masm()->lea(esp, MemOperand(esp, params_size, times_system_pointer_size,
                                 kSystemPointerSize));
  __ masm()->PushReturnAddressFrom(return_pc);
  __ masm()->Ret();
}

#undef __

}  // namespace baseline
}  // namespace internal
}  // namespace v8

#endif  // V8_BASELINE_IA32_BASELINE_ASSEMBLER_IA32_INL_H_
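A note on the scratch-register scheme above: ScratchRegisterScope hands out ecx, edx, esi and edi in order, and a nested scope continues from its parent's cursor instead of starting over. A minimal usage sketch follows (illustrative only, not part of this commit; VisitSomething is a hypothetical bytecode visitor in the baseline compiler, which names its assembler field basm_):

  void BaselineCompiler::VisitSomething() {
    // Chains onto whatever scope is already active in basm_.
    BaselineAssembler::ScratchRegisterScope scope(&basm_);
    Register tmp = scope.AcquireScratch();  // ecx if no outer scope is active
    basm_.Move(tmp, 42);                    // BaselineAssembler::Move(Register, int32_t)
    // Destructor restores the outer scope, so the register is reusable later.
  }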
src/baseline/ia32/baseline-compiler-ia32-inl.h (new file, 93 lines)
@@ -0,0 +1,93 @@
// Copyright 2021 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#ifndef V8_BASELINE_IA32_BASELINE_COMPILER_IA32_INL_H_
#define V8_BASELINE_IA32_BASELINE_COMPILER_IA32_INL_H_

#include "src/base/macros.h"
#include "src/baseline/baseline-compiler.h"
#include "src/codegen/interface-descriptors.h"

namespace v8 {
namespace internal {
namespace baseline {

#define __ basm_.

void BaselineCompiler::Prologue() {
  __ Move(ecx, bytecode_);
  DCHECK_EQ(kJSFunctionRegister, kJavaScriptCallTargetRegister);
  CallBuiltin(Builtins::kBaselineOutOfLinePrologue, kContextRegister,
              kJSFunctionRegister, kJavaScriptCallArgCountRegister, ecx,
              kJavaScriptCallNewTargetRegister);

  PrologueFillFrame();
}

void BaselineCompiler::PrologueFillFrame() {
  __ RecordComment("[ Fill frame");
  // Inlined register frame fill
  interpreter::Register new_target_or_generator_register =
      bytecode_->incoming_new_target_or_generator_register();
  __ LoadRoot(kInterpreterAccumulatorRegister, RootIndex::kUndefinedValue);
  int register_count = bytecode_->register_count();
  // Magic value
  const int kLoopUnrollSize = 8;
  const int new_target_index = new_target_or_generator_register.index();
  const bool has_new_target = new_target_index != kMaxInt;
  if (has_new_target) {
    DCHECK_LE(new_target_index, register_count);
    for (int i = 0; i < new_target_index; i++) {
      __ Push(kInterpreterAccumulatorRegister);
    }
    // Push new_target_or_generator.
    __ Push(kJavaScriptCallNewTargetRegister);
    register_count -= new_target_index + 1;
  }
  if (register_count < 2 * kLoopUnrollSize) {
    // If the frame is small enough, just unroll the frame fill completely.
    for (int i = 0; i < register_count; ++i) {
      __ Push(kInterpreterAccumulatorRegister);
    }
  } else {
    // Extract the first few registers to round to the unroll size.
    int first_registers = register_count % kLoopUnrollSize;
    for (int i = 0; i < first_registers; ++i) {
      __ Push(kInterpreterAccumulatorRegister);
    }
    BaselineAssembler::ScratchRegisterScope scope(&basm_);
    Register scratch = scope.AcquireScratch();
    __ Move(scratch, register_count / kLoopUnrollSize);
    // We enter the loop unconditionally, so make sure we need to loop at least
    // once.
    DCHECK_GT(register_count / kLoopUnrollSize, 0);
    Label loop;
    __ Bind(&loop);
    for (int i = 0; i < kLoopUnrollSize; ++i) {
      __ Push(kInterpreterAccumulatorRegister);
    }
    __ masm()->dec(scratch);
    __ JumpIf(Condition::kGreaterThan, &loop);
  }
  __ RecordComment("]");
}

void BaselineCompiler::VerifyFrameSize() {
  __ masm()->movd(xmm0, eax);
  __ Move(eax, esp);
  __ masm()->add(eax,
                 Immediate(InterpreterFrameConstants::kFixedFrameSizeFromFp +
                           bytecode_->frame_size()));
  __ masm()->cmp(eax, ebp);
  __ masm()->Assert(equal, AbortReason::kUnexpectedStackPointer);
  __ masm()->movd(eax, xmm0);
}

#undef __

}  // namespace baseline
}  // namespace internal
}  // namespace v8

#endif  // V8_BASELINE_IA32_BASELINE_COMPILER_IA32_INL_H_
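A quick worked example of the frame-fill unrolling in PrologueFillFrame (numbers chosen for illustration, not taken from the commit): with register_count = 19 and kLoopUnrollSize = 8, the register_count < 2 * kLoopUnrollSize test fails, so first_registers = 19 % 8 = 3 pushes are emitted straight-line, scratch is loaded with 19 / 8 = 2, and the unrolled body of 8 pushes executes twice, for 3 + 16 = 19 undefined-filled register slots in total.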
@@ -22,7 +22,6 @@ static constexpr int kNumScratchRegisters = arraysize(kScratchRegisters);

 }  // namespace detail

-// TODO(v8:11429): Move BaselineAssembler to baseline-assembler-<arch>-inl.h
 class BaselineAssembler::ScratchRegisterScope {
  public:
  explicit ScratchRegisterScope(BaselineAssembler* assembler)
@@ -930,7 +930,7 @@ void Builtins::Generate_MemMove(MacroAssembler* masm) {

 // TODO(v8:11421): Remove #if once baseline compiler is ported to other
 // architectures.
-#if V8_TARGET_ARCH_X64 || V8_TARGET_ARCH_ARM64
+#if V8_TARGET_ARCH_IA32 || V8_TARGET_ARCH_X64 || V8_TARGET_ARCH_ARM64
 void Builtins::Generate_BaselineLeaveFrame(MacroAssembler* masm) {
   EmitReturnBaseline(masm);
 }
@@ -563,6 +563,25 @@ static void GetSharedFunctionInfoBytecode(MacroAssembler* masm,
   __ bind(&done);
 }

+static void GetSharedFunctionInfoBytecodeOrBaseline(MacroAssembler* masm,
+                                                    Register sfi_data,
+                                                    Register scratch1,
+                                                    Label* is_baseline) {
+  Label done;
+  __ LoadMap(scratch1, sfi_data);
+
+  __ CmpInstanceType(scratch1, BASELINE_DATA_TYPE);
+  __ j(equal, is_baseline);
+
+  __ CmpInstanceType(scratch1, INTERPRETER_DATA_TYPE);
+  __ j(not_equal, &done, Label::kNear);
+
+  __ mov(sfi_data,
+         FieldOperand(sfi_data, InterpreterData::kBytecodeArrayOffset));
+
+  __ bind(&done);
+}
+
 // static
 void Builtins::Generate_ResumeGeneratorTrampoline(MacroAssembler* masm) {
   // ----------- S t a t e -------------
@@ -645,13 +664,23 @@ void Builtins::Generate_ResumeGeneratorTrampoline(MacroAssembler* masm) {

   // Underlying function needs to have bytecode available.
   if (FLAG_debug_code) {
+    Label is_baseline, ok;
     __ mov(ecx, FieldOperand(edi, JSFunction::kSharedFunctionInfoOffset));
     __ mov(ecx, FieldOperand(ecx, SharedFunctionInfo::kFunctionDataOffset));
     __ Push(eax);
-    GetSharedFunctionInfoBytecode(masm, ecx, eax);
+    GetSharedFunctionInfoBytecodeOrBaseline(masm, ecx, eax, &is_baseline);
     __ Pop(eax);
+
     __ CmpObjectType(ecx, BYTECODE_ARRAY_TYPE, ecx);
     __ Assert(equal, AbortReason::kMissingBytecodeArray);
+    __ jmp(&ok);
+
+    __ bind(&is_baseline);
+    __ Pop(eax);
+    __ CmpObjectType(ecx, BASELINE_DATA_TYPE, ecx);
+    __ Assert(equal, AbortReason::kMissingBytecodeArray);
+
+    __ bind(&ok);
   }

   // Resume (Ignition/TurboFan) generator object.
@@ -919,6 +948,31 @@ static void AdvanceBytecodeOffsetOrReturn(MacroAssembler* masm,
   __ bind(&end);
 }

+// Read off the optimization state in the feedback vector and check if there
+// is optimized code or an optimization marker that needs to be processed.
+// Registers optimization_state and feedback_vector must be aliased.
+static void LoadOptimizationStateAndJumpIfNeedsProcessing(
+    MacroAssembler* masm, Register optimization_state,
+    XMMRegister saved_feedback_vector, Label* has_optimized_code_or_marker) {
+  Register feedback_vector = optimization_state;
+  __ RecordComment("[ Check optimization state");
+
+  // Store feedback_vector. We may need it if we need to load the optimized
+  // code slot entry.
+  __ movd(saved_feedback_vector, feedback_vector);
+  __ mov(optimization_state,
+         FieldOperand(feedback_vector, FeedbackVector::kFlagsOffset));
+
+  // Check if there is optimized code or an optimization marker that needs to
+  // be processed.
+  __ test(
+      optimization_state,
+      Immediate(FeedbackVector::kHasOptimizedCodeOrCompileOptimizedMarkerMask));
+  __ j(not_zero, has_optimized_code_or_marker);
+
+  __ RecordComment("]");
+}
+
 static void MaybeOptimizeCodeOrTailCallOptimizedCodeSlot(
     MacroAssembler* masm, Register optimization_state,
     XMMRegister saved_feedback_vector) {
@@ -964,10 +1018,13 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {

   // The bytecode array could have been flushed from the shared function info,
   // if so, call into CompileLazy.
-  Label compile_lazy;
   __ mov(ecx, FieldOperand(edi, JSFunction::kSharedFunctionInfoOffset));
   __ mov(ecx, FieldOperand(ecx, SharedFunctionInfo::kFunctionDataOffset));
-  GetSharedFunctionInfoBytecode(masm, ecx, eax);
+
+  Label is_baseline;
+  GetSharedFunctionInfoBytecodeOrBaseline(masm, ecx, eax, &is_baseline);
+
+  Label compile_lazy;
   __ CmpObjectType(ecx, BYTECODE_ARRAY_TYPE, eax);
   __ j(not_equal, &compile_lazy);

@@ -985,20 +1042,10 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {

   // Load the optimization state from the feedback vector and re-use the
   // register.
-  Register optimization_state = ecx;
-  // Store feedback_vector. We may need it if we need to load the optimze code
-  // slot entry.
-  __ movd(xmm1, feedback_vector);
-  __ mov(optimization_state,
-         FieldOperand(feedback_vector, FeedbackVector::kFlagsOffset));
-
-  // Check if there is optimized code or a optimization marker that needes to be
-  // processed.
   Label has_optimized_code_or_marker;
-  __ test(
-      optimization_state,
-      Immediate(FeedbackVector::kHasOptimizedCodeOrCompileOptimizedMarkerMask));
-  __ j(not_zero, &has_optimized_code_or_marker);
+  Register optimization_state = ecx;
+  LoadOptimizationStateAndJumpIfNeedsProcessing(masm, optimization_state, xmm1,
+                                                &has_optimized_code_or_marker);

   Label not_optimized;
   __ bind(&not_optimized);
@@ -1183,6 +1230,39 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
   __ movd(eax, xmm0);
   GenerateTailCallToReturnedCode(masm, Runtime::kCompileLazy);

+  __ bind(&is_baseline);
+  {
+    __ movd(xmm2, ecx);  // Save baseline data.
+    // Load the feedback vector from the closure.
+    __ mov(feedback_vector,
+           FieldOperand(closure, JSFunction::kFeedbackCellOffset));
+    __ mov(feedback_vector, FieldOperand(feedback_vector, Cell::kValueOffset));
+
+    Label install_baseline_code;
+    // Check if feedback vector is valid. If not, call prepare for baseline to
+    // allocate it.
+    __ LoadMap(eax, feedback_vector);
+    __ CmpInstanceType(eax, FEEDBACK_VECTOR_TYPE);
+    __ j(not_equal, &install_baseline_code);
+
+    // Check for an optimization marker.
+    LoadOptimizationStateAndJumpIfNeedsProcessing(
+        masm, optimization_state, xmm1, &has_optimized_code_or_marker);
+
+    // Load the baseline code into the closure.
+    __ movd(ecx, xmm2);
+    __ mov(ecx, FieldOperand(ecx, BaselineData::kBaselineCodeOffset));
+    static_assert(kJavaScriptCallCodeStartRegister == ecx, "ABI mismatch");
+    __ push(edx);  // Spill.
+    ReplaceClosureCodeWithOptimizedCode(masm, ecx, closure, eax, edx);
+    __ pop(edx);
+    __ movd(eax, xmm0);  // Recover argument count.
+    __ JumpCodeObject(ecx);
+
+    __ bind(&install_baseline_code);
+    GenerateTailCallToReturnedCode(masm, Runtime::kInstallBaselineCode);
+  }
+
   __ bind(&stack_overflow);
   __ CallRuntime(Runtime::kThrowStackOverflow);
   __ int3();  // Should not return.
@@ -1555,6 +1635,146 @@ void Builtins::Generate_InterpreterEnterBytecodeAdvance(MacroAssembler* masm) {
 void Builtins::Generate_InterpreterEnterBytecodeDispatch(MacroAssembler* masm) {
   Generate_InterpreterEnterBytecode(masm);
 }
+
+// static
+void Builtins::Generate_BaselineOutOfLinePrologue(MacroAssembler* masm) {
+  auto descriptor = Builtins::CallInterfaceDescriptorFor(
+      Builtins::kBaselineOutOfLinePrologue);
+  Register arg_count = descriptor.GetRegisterParameter(
+      BaselineOutOfLinePrologueDescriptor::kJavaScriptCallArgCount);
+  Register bytecode_array = descriptor.GetRegisterParameter(
+      BaselineOutOfLinePrologueDescriptor::kInterpreterBytecodeArray);
+
+  // Save argument count and bytecode array.
+  XMMRegister saved_arg_count = xmm0;
+  XMMRegister saved_bytecode_array = xmm1;
+  __ movd(saved_arg_count, arg_count);
+  __ movd(saved_bytecode_array, bytecode_array);
+
+  Register scratch = eax;
+
+  // Load the feedback vector from the closure.
+  Register feedback_vector = ecx;
+  Register closure = descriptor.GetRegisterParameter(
+      BaselineOutOfLinePrologueDescriptor::kClosure);
+  __ mov(feedback_vector,
+         FieldOperand(closure, JSFunction::kFeedbackCellOffset));
+  __ mov(feedback_vector, FieldOperand(feedback_vector, Cell::kValueOffset));
+  if (__ emit_debug_code()) {
+    __ CmpObjectType(feedback_vector, FEEDBACK_VECTOR_TYPE, scratch);
+    __ Assert(equal, AbortReason::kExpectedFeedbackVector);
+  }
+
+  // Load the optimization state from the feedback vector and re-use the
+  // register.
+  Label has_optimized_code_or_marker;
+  Register optimization_state = ecx;
+  XMMRegister saved_feedback_vector = xmm2;
+  LoadOptimizationStateAndJumpIfNeedsProcessing(masm, optimization_state,
+                                                saved_feedback_vector,
+                                                &has_optimized_code_or_marker);
+
+  // Load the feedback vector and increment the invocation count.
+  __ movd(feedback_vector, saved_feedback_vector);
+  __ inc(FieldOperand(feedback_vector, FeedbackVector::kInvocationCountOffset));
+
+  XMMRegister return_address = xmm4;
+  __ RecordComment("[ Frame Setup");
+  // Save the return address, so that we can push it to the end of the newly
+  // set-up frame once we're done setting it up.
+  __ PopReturnAddressTo(return_address, scratch);
+  FrameScope frame_scope(masm, StackFrame::MANUAL);
+  __ EnterFrame(StackFrame::BASELINE);
+
+  __ Push(descriptor.GetRegisterParameter(
+      BaselineOutOfLinePrologueDescriptor::kCalleeContext));  // Callee's
+                                                              // context.
+  Register callee_js_function = descriptor.GetRegisterParameter(
+      BaselineOutOfLinePrologueDescriptor::kClosure);
+  DCHECK_EQ(callee_js_function, kJavaScriptCallTargetRegister);
+  DCHECK_EQ(callee_js_function, kJSFunctionRegister);
+  __ Push(callee_js_function);        // Callee's JS function.
+  __ Push(saved_arg_count, scratch);  // Push actual argument count.
+
+  // We'll use the bytecode for both code age/OSR resetting, and pushing onto
+  // the frame, so load it into a register.
+  __ movd(bytecode_array, saved_bytecode_array);
+  // Reset code age and the OSR arming. The OSR field and BytecodeAgeOffset
+  // are 8-bit fields next to each other, so we could just optimize by writing
+  // a 16-bit. These static asserts guard our assumption is valid.
+  STATIC_ASSERT(BytecodeArray::kBytecodeAgeOffset ==
+                BytecodeArray::kOsrNestingLevelOffset + kCharSize);
+  STATIC_ASSERT(BytecodeArray::kNoAgeBytecodeAge == 0);
+  __ mov_w(FieldOperand(bytecode_array, BytecodeArray::kOsrNestingLevelOffset),
+           Immediate(0));
+  __ Push(bytecode_array);
+
+  // Baseline code frames store the feedback vector where interpreter would
+  // store the bytecode offset.
+  __ Push(saved_feedback_vector, scratch);
+  __ RecordComment("]");
+
+  __ RecordComment("[ Stack/interrupt check");
+  Label call_stack_guard;
+  {
+    // Stack check. This folds the checks for both the interrupt stack limit
+    // check and the real stack limit into one by just checking for the
+    // interrupt limit. The interrupt limit is either equal to the real stack
+    // limit or tighter. By ensuring we have space until that limit after
+    // building the frame we can quickly precheck both at once.
+    //
+    // TODO(v8:11429): Backport this folded check to the
+    // InterpreterEntryTrampoline.
+    Register frame_size = ecx;
+    __ movd(bytecode_array, saved_bytecode_array);
+    __ movzx_w(frame_size,
+               FieldOperand(bytecode_array, BytecodeArray::kFrameSizeOffset));
+    __ Move(scratch, esp);
+    DCHECK_NE(frame_size, kJavaScriptCallNewTargetRegister);
+    __ sub(scratch, frame_size);
+    __ CompareStackLimit(scratch, StackLimitKind::kInterruptStackLimit);
+    __ j(below, &call_stack_guard);
+    __ RecordComment("]");
+  }
+
+  // Push the return address back onto the stack for return.
+  __ PushReturnAddressFrom(return_address, scratch);
+  // Return to caller pushed pc, without any frame teardown.
+  __ Ret();
+
+  __ bind(&has_optimized_code_or_marker);
+  {
+    __ RecordComment("[ Optimized marker check");
+    // Drop the return address, rebalancing the return stack buffer by using
+    // JumpMode::kPushAndReturn. We can't leave the slot and overwrite it on
+    // return since we may do a runtime call along the way that requires the
+    // stack to only contain valid frames.
+    __ Drop(1);
+    __ movd(arg_count, saved_arg_count);  // Restore actual argument count.
+    MaybeOptimizeCodeOrTailCallOptimizedCodeSlot(masm, optimization_state,
+                                                 saved_feedback_vector);
+    __ Trap();
+    __ RecordComment("]");
+  }
+
+  __ bind(&call_stack_guard);
+  {
+    __ RecordComment("[ Stack/interrupt call");
+    {
+      // Push the baseline code return address now, as if it had been pushed by
+      // the call to this builtin.
+      __ PushReturnAddressFrom(return_address, scratch);
+      FrameScope frame_scope(masm, StackFrame::INTERNAL);
+      // Save incoming new target or generator
+      __ Push(kJavaScriptCallNewTargetRegister);
+      __ CallRuntime(Runtime::kStackGuard, 0);
+      __ Pop(kJavaScriptCallNewTargetRegister);
+    }
+
+    // Return to caller pushed pc, without any frame teardown.
+    __ Ret();
+    __ RecordComment("]");
+  }
+}
+
 namespace {
 void Generate_ContinueToBuiltinHelper(MacroAssembler* masm,
@@ -1642,6 +1862,10 @@ void Builtins::Generate_NotifyDeoptimized(MacroAssembler* masm) {
   __ ret(1 * kSystemPointerSize);  // Remove eax.
 }

+void Builtins::Generate_TailCallOptimizedCodeSlot(MacroAssembler* masm) {
+  TailCallOptimizedCodeSlot(masm, ecx);
+}
+
 // static
 void Builtins::Generate_FunctionPrototypeApply(MacroAssembler* masm) {
   // ----------- S t a t e -------------
|
||||
RelocInfo::CODE_TARGET);
|
||||
}
|
||||
|
||||
void Builtins::Generate_InterpreterOnStackReplacement(MacroAssembler* masm) {
|
||||
namespace {
|
||||
void OnStackReplacement(MacroAssembler* masm, bool is_interpreter) {
|
||||
{
|
||||
FrameScope scope(masm, StackFrame::INTERNAL);
|
||||
__ CallRuntime(Runtime::kCompileForOnStackReplacement);
|
||||
@ -2517,9 +2742,11 @@ void Builtins::Generate_InterpreterOnStackReplacement(MacroAssembler* masm) {
|
||||
|
||||
__ bind(&skip);
|
||||
|
||||
// Drop the handler frame that is be sitting on top of the actual
|
||||
// JavaScript frame. This is the case then OSR is triggered from bytecode.
|
||||
__ leave();
|
||||
if (is_interpreter) {
|
||||
// Drop the handler frame that is be sitting on top of the actual
|
||||
// JavaScript frame. This is the case then OSR is triggered from bytecode.
|
||||
__ leave();
|
||||
}
|
||||
|
||||
// Load deoptimization data from the code object.
|
||||
__ mov(ecx, Operand(eax, Code::kDeoptimizationDataOffset - kHeapObjectTag));
|
||||
@ -2539,6 +2766,15 @@ void Builtins::Generate_InterpreterOnStackReplacement(MacroAssembler* masm) {
|
||||
// And "return" to the OSR entry point of the function.
|
||||
__ ret(0);
|
||||
}
|
||||
} // namespace
|
||||
|
||||
void Builtins::Generate_InterpreterOnStackReplacement(MacroAssembler* masm) {
|
||||
return OnStackReplacement(masm, true);
|
||||
}
|
||||
|
||||
void Builtins::Generate_BaselineOnStackReplacement(MacroAssembler* masm) {
|
||||
return OnStackReplacement(masm, false);
|
||||
}
|
||||
|
||||
void Builtins::Generate_WasmCompileLazy(MacroAssembler* masm) {
|
||||
// The function index was put in edi by the jump table trampoline.
|
||||
|
@@ -235,6 +235,12 @@ class V8_EXPORT_PRIVATE Operand {
   explicit Operand(Register base, int32_t disp,
                    RelocInfo::Mode rmode = RelocInfo::NONE);

+  // [rip + disp/r]
+  explicit Operand(Label* label) {
+    set_modrm(0, ebp);
+    set_dispr(reinterpret_cast<intptr_t>(label), RelocInfo::INTERNAL_REFERENCE);
+  }
+
   // [base + index*scale + disp/r]
   explicit Operand(Register base, Register index, ScaleFactor scale,
                    int32_t disp, RelocInfo::Mode rmode = RelocInfo::NONE);
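This new Operand(Label*) constructor is what the inline jump table in BaselineAssembler::Switch (in the new ia32 baseline assembler above) relies on: __ lea(table, MemOperand(&jump_table)) materializes the table's address through an INTERNAL_REFERENCE relocation (MemOperand aliases Operand on this port), and __ jmp(Operand(table, reg, times_system_pointer_size, 0)) then indexes into the emitted dd() entries to reach the selected case label.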
@@ -90,13 +90,9 @@ const Register GrowArrayElementsDescriptor::ObjectRegister() { return eax; }
 const Register GrowArrayElementsDescriptor::KeyRegister() { return ecx; }

 const Register BaselineLeaveFrameDescriptor::ParamsSizeRegister() {
-  // TODO(v8:11421): Implement on this platform.
-  UNREACHABLE();
-}
-const Register BaselineLeaveFrameDescriptor::WeightRegister() {
-  // TODO(v8:11421): Implement on this platform.
-  UNREACHABLE();
+  return esi;
 }
+const Register BaselineLeaveFrameDescriptor::WeightRegister() { return edi; }

 // static
 const Register TypeConversionDescriptor::ArgumentRegister() { return eax; }
@@ -224,8 +220,8 @@ void CompareDescriptor::InitializePlatformSpecific(

 void Compare_BaselineDescriptor::InitializePlatformSpecific(
     CallInterfaceDescriptorData* data) {
-  // TODO(v8:11421): Implement on this platform.
-  InitializePlatformUnimplemented(data, kParameterCount);
+  Register registers[] = {edx, eax, ecx};
+  data->InitializePlatformSpecific(arraysize(registers), registers);
 }

 void BinaryOpDescriptor::InitializePlatformSpecific(
@@ -236,8 +232,8 @@ void BinaryOpDescriptor::InitializePlatformSpecific(

 void BinaryOp_BaselineDescriptor::InitializePlatformSpecific(
     CallInterfaceDescriptorData* data) {
-  // TODO(v8:11421): Implement on this platform.
-  InitializePlatformUnimplemented(data, kParameterCount);
+  Register registers[] = {edx, eax, ecx};
+  data->InitializePlatformSpecific(arraysize(registers), registers);
 }

 void ApiCallbackDescriptor::InitializePlatformSpecific(
@@ -1451,11 +1451,13 @@ void TurboAssembler::Prologue() {
 void TurboAssembler::EnterFrame(StackFrame::Type type) {
   push(ebp);
   mov(ebp, esp);
-  push(Immediate(StackFrame::TypeToMarker(type)));
+  if (!StackFrame::IsJavaScript(type)) {
+    Push(Immediate(StackFrame::TypeToMarker(type)));
+  }
 }

 void TurboAssembler::LeaveFrame(StackFrame::Type type) {
-  if (emit_debug_code()) {
+  if (emit_debug_code() && !StackFrame::IsJavaScript(type)) {
     cmp(Operand(ebp, CommonFrameConstants::kContextOrFrameTypeOffset),
         Immediate(StackFrame::TypeToMarker(type)));
     Check(equal, AbortReason::kStackFrameTypesMustMatch);
@@ -2071,6 +2073,8 @@ void TurboAssembler::Move(Operand dst, const Immediate& src) {
   }
 }

+void TurboAssembler::Move(Register dst, Operand src) { mov(dst, src); }
+
 void TurboAssembler::Move(Register dst, Handle<HeapObject> src) {
   if (root_array_available() && options().isolate_independent_code) {
     IndirectLoadConstant(dst, src);
@@ -2775,6 +2779,12 @@ void TurboAssembler::CallBuiltin(int builtin_index) {
   call(entry, RelocInfo::OFF_HEAP_TARGET);
 }

+Operand TurboAssembler::EntryFromBuiltinIndexAsOperand(
+    Builtins::Name builtin_index) {
+  return Operand(kRootRegister,
+                 IsolateData::builtin_entry_slot_offset(builtin_index));
+}
+
 void TurboAssembler::LoadCodeObjectEntry(Register destination,
                                          Register code_object) {
   // Code objects are called differently depending on whether we are generating
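A note on the EnterFrame/LeaveFrame change above: JavaScript-type frames no longer push (or, in debug builds, check) the frame-type marker slot. As far as this diff shows, the point is that EnterFrame(StackFrame::BASELINE) in Generate_BaselineOutOfLinePrologue then lays out a baseline frame with the same shape as an interpreted JavaScript frame; that reading assumes StackFrame::IsJavaScript(BASELINE) holds, which is not visible in this diff.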
@@ -125,6 +125,7 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
   void Move(Register dst, Smi src) { Move(dst, Immediate(src)); }
   void Move(Register dst, Handle<HeapObject> src);
   void Move(Register dst, Register src);
+  void Move(Register dst, Operand src);
   void Move(Operand dst, const Immediate& src);

   // Move an immediate into an XMM register.
@@ -133,7 +134,10 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
   void Move(XMMRegister dst, float src) { Move(dst, bit_cast<uint32_t>(src)); }
   void Move(XMMRegister dst, double src) { Move(dst, bit_cast<uint64_t>(src)); }

+  Operand EntryFromBuiltinIndexAsOperand(Builtins::Name builtin_index);
+
   void Call(Register reg) { call(reg); }
+  void Call(Operand op) { call(op); }
   void Call(Label* target) { call(target); }
   void Call(Handle<Code> code_object, RelocInfo::Mode rmode);

@@ -189,6 +193,10 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
   }

   void SmiUntag(Register reg) { sar(reg, kSmiTagSize); }
+  void SmiUntag(Register output, Register value) {
+    mov(output, value);
+    SmiUntag(output);
+  }

   // Removes current frame and its arguments from the stack preserving the
   // arguments and a return address pushed to the stack for the next call. Both
@@ -243,6 +251,13 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
   void PushReturnAddressFrom(Register src) { push(src); }
   void PopReturnAddressTo(Register dst) { pop(dst); }

+  void PushReturnAddressFrom(XMMRegister src, Register scratch) {
+    Push(src, scratch);
+  }
+  void PopReturnAddressTo(XMMRegister dst, Register scratch) {
+    Pop(dst, scratch);
+  }
+
   void Ret();

   // Root register utility functions.
@@ -712,6 +727,17 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
   void Push(Immediate value);
   void Push(Handle<HeapObject> handle) { push(Immediate(handle)); }
   void Push(Smi smi) { Push(Immediate(smi)); }
+  void Push(XMMRegister src, Register scratch) {
+    movd(scratch, src);
+    push(scratch);
+  }
+
+  void Pop(Register dst) { pop(dst); }
+  void Pop(Operand dst) { pop(dst); }
+  void Pop(XMMRegister dst, Register scratch) {
+    pop(scratch);
+    movd(dst, scratch);
+  }

   void SaveRegisters(RegList registers);
   void RestoreRegisters(RegList registers);
@@ -993,9 +1019,6 @@ class V8_EXPORT_PRIVATE MacroAssembler : public TurboAssembler {
   // from the stack, clobbering only the esp register.
   void Drop(int element_count);

-  void Pop(Register dst) { pop(dst); }
-  void Pop(Operand dst) { pop(dst); }
-
   // ---------------------------------------------------------------------------
   // In-place weak references.
   void LoadWeakValue(Register in_out, Label* target_if_cleared);
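The XMMRegister overloads added above (Push/Pop and PushReturnAddressFrom/PopReturnAddressTo taking an XMMRegister plus a scratch Register) exist because the ia32 port parks values in SSE registers rather than tying up one of the few general-purpose registers: Generate_BaselineOutOfLinePrologue keeps the incoming return address in xmm4 and the saved feedback vector in xmm2. Each overload simply bounces the value through the GPR scratch with movd before the actual push or pop, and Pop(Register)/Pop(Operand) move from MacroAssembler to TurboAssembler so the new helpers can use them.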
@@ -334,7 +334,14 @@ void StoreTransitionDescriptor::InitializePlatformSpecific(
 void BaselineOutOfLinePrologueDescriptor::InitializePlatformSpecific(
     CallInterfaceDescriptorData* data) {
   // TODO(v8:11421): Implement on other platforms.
-#if V8_TARGET_ARCH_X64 || V8_TARGET_ARCH_ARM64
+#if V8_TARGET_ARCH_IA32
+  // TODO(v8:11503): Use register names that can be defined in each
+  // architecture independently of the interpreter registers.
+  Register registers[] = {kContextRegister, kJSFunctionRegister,
+                          kJavaScriptCallArgCountRegister, ecx,
+                          kJavaScriptCallNewTargetRegister};
+  data->InitializePlatformSpecific(kParameterCount, registers);
+#elif V8_TARGET_ARCH_X64 || V8_TARGET_ARCH_ARM64
   Register registers[] = {
       kContextRegister, kJSFunctionRegister, kJavaScriptCallArgCountRegister,
       kInterpreterBytecodeArrayRegister, kJavaScriptCallNewTargetRegister};
@@ -347,7 +354,7 @@ void BaselineOutOfLinePrologueDescriptor::InitializePlatformSpecific(
 void BaselineLeaveFrameDescriptor::InitializePlatformSpecific(
     CallInterfaceDescriptorData* data) {
   // TODO(v8:11421): Implement on other platforms.
-#if V8_TARGET_ARCH_X64 || V8_TARGET_ARCH_ARM64
+#if V8_TARGET_ARCH_IA32 || V8_TARGET_ARCH_X64 || V8_TARGET_ARCH_ARM64
   Register registers[] = {ParamsSizeRegister(), WeightRegister()};
   data->InitializePlatformSpecific(kParameterCount, registers);
 #else
@@ -163,7 +163,7 @@ struct MaybeBoolFlag {
 #define ENABLE_CONTROL_FLOW_INTEGRITY_BOOL false
 #endif

-#if V8_TARGET_ARCH_X64 || V8_TARGET_ARCH_ARM64
+#if V8_TARGET_ARCH_IA32 || V8_TARGET_ARCH_X64 || V8_TARGET_ARCH_ARM64
 #define ENABLE_SPARKPLUG true
 #else
 // TODO(v8:11421): Enable Sparkplug for other architectures