[maglev][arm64] Prologue and run an empty function
- Adds code gen prologue
- Adds a few essential nodes in arm64
- Shares a few non-arch dependent nodes w/ x64
- Shares deferred code handling, deopting and vreg helpers w/ x64

Caveats:
- Deopts don't work, since CallForDeoptimization in arm64 uses a
  different mechanism (not yet implemented).
- ParallelMoveResolver and ExceptionHandlerTrampolineBuilder use a
  single-register push/pop mechanism, which is not arm64 friendly. We add
  padding for each push/pop at the moment.

Bug: v8:7700
Change-Id: I2896f3cd272fc47d7bd9059c8cc8948221b3b932
Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/4069708
Commit-Queue: Victor Gomes <victorgomes@chromium.org>
Reviewed-by: Darius Mercadier <dmercadier@chromium.org>
Reviewed-by: Leszek Swirski <leszeks@chromium.org>
Cr-Commit-Position: refs/heads/main@{#84607}
parent 13930b1eef
commit 4b8ccef6a8
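
The padded push/pop caveat above exists because AArch64 requires the stack pointer to stay 16-byte aligned whenever it is used to access memory, while a single register slot is only 8 bytes wide. The standalone sketch below is not part of the commit; it only illustrates the rounding arithmetic, and kSlotSize, BytesReservedForSlots and the sample counts are invented for the example.

#include <cassert>
#include <initializer_list>
#include <iostream>

// Hypothetical helper mirroring the idea behind Push(src, padreg) in the
// diff: every logical single-slot push is widened to a 16-byte pair so the
// stack pointer stays 16-byte aligned, as AArch64 requires for sp-based
// memory accesses.
constexpr int kSlotSize = 8;         // one register slot, in bytes
constexpr int kStackAlignment = 16;  // required sp alignment on arm64

int BytesReservedForSlots(int slot_count) {
  // Round an odd slot count up to an even one, like
  // `remaining_stack_slots += (remaining_stack_slots % 2)` in the prologue.
  int padded_slots = slot_count + (slot_count % 2);
  return padded_slots * kSlotSize;
}

int main() {
  for (int slots : {1, 2, 3, 8, 9}) {
    int bytes = BytesReservedForSlots(slots);
    assert(bytes % kStackAlignment == 0);
    std::cout << slots << " slot(s) -> reserve " << bytes << " bytes\n";
  }
  return 0;
}
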
@@ -2447,6 +2447,28 @@ bool TurboAssembler::IsNearCallOffset(int64_t offset) {
return is_int26(offset);
}

// Check if the code object is marked for deoptimization. If it is, then it
// jumps to the CompileLazyDeoptimizedCode builtin. In order to do this we need
// to:
// 1. read from memory the word that contains that bit, which can be found in
// the flags in the referenced {CodeDataContainer} object;
// 2. test kMarkedForDeoptimizationBit in those flags; and
// 3. if it is not zero then it jumps to the builtin.
void TurboAssembler::BailoutIfDeoptimized() {
UseScratchRegisterScope temps(this);
Register scratch = temps.AcquireX();
int offset = Code::kCodeDataContainerOffset - Code::kHeaderSize;
LoadTaggedPointerField(scratch,
MemOperand(kJavaScriptCallCodeStartRegister, offset));
Ldr(scratch.W(),
FieldMemOperand(scratch, CodeDataContainer::kKindSpecificFlagsOffset));
Label not_deoptimized;
Tbz(scratch.W(), Code::kMarkedForDeoptimizationBit, &not_deoptimized);
Jump(BUILTIN_CODE(isolate(), CompileLazyDeoptimizedCode),
RelocInfo::CODE_TARGET);
Bind(&not_deoptimized);
}

void TurboAssembler::CallForDeoptimization(
Builtin target, int deopt_id, Label* exit, DeoptimizeKind kind, Label* ret,
Label* jump_deoptimization_entry_label) {

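The comment above describes the bailout as: load the kind-specific flags word from the referenced CodeDataContainer, test kMarkedForDeoptimizationBit, and jump to CompileLazyDeoptimizedCode if it is set. The snippet below is a hedged, standalone model of that bit test, not V8 code; the bit index and the IsMarkedForDeoptimization helper are invented for illustration.

#include <cstdint>
#include <iostream>

// Illustrative only: the real bit index and flag layout live in the
// CodeDataContainer; this kMarkedForDeoptimizationBit is a stand-in.
constexpr uint32_t kMarkedForDeoptimizationBit = 0;

bool IsMarkedForDeoptimization(uint32_t kind_specific_flags) {
  // Equivalent of the Tbz test: inspect one bit of the loaded flags word.
  return (kind_specific_flags >> kMarkedForDeoptimizationBit) & 1u;
}

int main() {
  uint32_t flags = 0;
  std::cout << IsMarkedForDeoptimization(flags) << "\n";  // 0: keep running
  flags |= 1u << kMarkedForDeoptimizationBit;
  std::cout << IsMarkedForDeoptimization(flags) << "\n";  // 1: would jump to
                                                          // the lazy-deopt builtin
  return 0;
}
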
@@ -1003,6 +1003,7 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
// The return address on the stack is used by frame iteration.
void StoreReturnAddressAndCall(Register target);

void BailoutIfDeoptimized();
void CallForDeoptimization(Builtin target, int deopt_id, Label* exit,
DeoptimizeKind kind, Label* ret,
Label* jump_deoptimization_entry_label);

@@ -652,27 +652,7 @@ void CodeGenerator::AssembleCodeStartRegisterCheck() {
__ Assert(eq, AbortReason::kWrongFunctionCodeStart);
}

// Check if the code object is marked for deoptimization. If it is, then it
// jumps to the CompileLazyDeoptimizedCode builtin. In order to do this we need
// to:
// 1. read from memory the word that contains that bit, which can be found in
// the flags in the referenced {CodeDataContainer} object;
// 2. test kMarkedForDeoptimizationBit in those flags; and
// 3. if it is not zero then it jumps to the builtin.
void CodeGenerator::BailoutIfDeoptimized() {
UseScratchRegisterScope temps(tasm());
Register scratch = temps.AcquireX();
int offset = Code::kCodeDataContainerOffset - Code::kHeaderSize;
__ LoadTaggedPointerField(
scratch, MemOperand(kJavaScriptCallCodeStartRegister, offset));
__ Ldr(scratch.W(),
FieldMemOperand(scratch, CodeDataContainer::kKindSpecificFlagsOffset));
Label not_deoptimized;
__ Tbz(scratch.W(), Code::kMarkedForDeoptimizationBit, &not_deoptimized);
__ Jump(BUILTIN_CODE(isolate(), CompileLazyDeoptimizedCode),
RelocInfo::CODE_TARGET);
__ Bind(&not_deoptimized);
}
void CodeGenerator::BailoutIfDeoptimized() { __ BailoutIfDeoptimized(); }

// Assembles an instruction after register allocation, producing machine code.
CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(

@@ -18,9 +18,7 @@ constexpr Register kScratchRegister = x16;
constexpr DoubleRegister kScratchDoubleReg = d30;

inline MemOperand MaglevAssembler::StackSlotOperand(StackSlot slot) {
// TODO(v8:7700): Implement!
UNREACHABLE();
return MemOperand();
return MemOperand(fp, slot.index);
}

inline MemOperand MaglevAssembler::GetStackSlot(

@@ -40,31 +38,28 @@ inline MemOperand MaglevAssembler::ToMemOperand(const ValueLocation& location) {
}

inline void MaglevAssembler::Move(StackSlot dst, Register src) {
// TODO(v8:7700): Implement!
UNREACHABLE();
Str(src, StackSlotOperand(dst));
}
inline void MaglevAssembler::Move(StackSlot dst, DoubleRegister src) {
// TODO(v8:7700): Implement!
UNREACHABLE();
}
inline void MaglevAssembler::Move(Register dst, StackSlot src) {
// TODO(v8:7700): Implement!
UNREACHABLE();
Ldr(dst, StackSlotOperand(src));
}
inline void MaglevAssembler::Move(DoubleRegister dst, StackSlot src) {
// TODO(v8:7700): Implement!
UNREACHABLE();
}
inline void MaglevAssembler::Move(MemOperand dst, Register src) {
// TODO(v8:7700): Implement!
UNREACHABLE();
Str(src, dst);
}
inline void MaglevAssembler::Move(MemOperand dst, DoubleRegister src) {
// TODO(v8:7700): Implement!
UNREACHABLE();
}
inline void MaglevAssembler::Move(Register dst, MemOperand src) {
MacroAssembler::Move(dst, src);
Ldr(dst, src);
}
inline void MaglevAssembler::Move(DoubleRegister dst, MemOperand src) {
// TODO(v8:7700): Implement!

@@ -80,22 +75,27 @@ inline void MaglevAssembler::Move(Register dst, Smi src) {
inline void MaglevAssembler::Move(Register dst, Register src) {
MacroAssembler::Move(dst, src);
}
inline void MaglevAssembler::Move(Register dst, Immediate i) {
// TODO(v8:7700): Implement!
UNREACHABLE();
}
inline void MaglevAssembler::Move(Register dst, Immediate i) { Mov(dst, i); }
inline void MaglevAssembler::Move(DoubleRegister dst, double n) {
// TODO(v8:7700): Implement!
UNREACHABLE();
}
inline void MaglevAssembler::Move(Register dst, Handle<HeapObject> obj) {
// TODO(v8:7700): Implement!
UNREACHABLE();
Mov(dst, Operand(obj));
}

inline void MaglevAssembler::Jump(Label* target) { B(target); }

inline void MaglevAssembler::JumpIf(Condition cond, Label* target) {
b(target, cond);
}

// TODO(victorgomes): We should avoid doing a single push in arm64!
inline void MaglevAssembler::Push(Register src) { Push(src, padreg); }
inline void MaglevAssembler::Pop(Register dst) { Pop(padreg, dst); }

inline void MaglevAssembler::AssertStackSizeCorrect() {
// TODO(v8:7700): Implement!
UNREACHABLE();
}

inline void MaglevAssembler::MaterialiseValueNode(Register dst,

@@ -3,15 +3,144 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "src/maglev/maglev-assembler.h"
#include "src/maglev/arm64/maglev-assembler-arm64-inl.h"
#include "src/maglev/maglev-graph.h"

namespace v8 {
namespace internal {
namespace maglev {

#define __ masm->

void MaglevAssembler::Prologue(Graph* graph) {
// TODO(v8:7700): Implement!
UNREACHABLE();
if (v8_flags.maglev_ool_prologue) {
// TODO(v8:7700): Implement!
UNREACHABLE();
}

CallTarget();

BailoutIfDeoptimized();

// Tiering support.
// TODO(jgruber): Extract to a builtin.
{
UseScratchRegisterScope temps(this);
Register flags = temps.AcquireX();
// TODO(v8:7700): There are only 2 available scratch registers, we use x9,
// which is a local caller saved register instead here, since
// LoadFeedbackVectorFlagsAndJumpIfNeedsProcessing requests a scratch
// register as well.
Register feedback_vector = x9;

// Load the feedback vector.
LoadTaggedPointerField(
feedback_vector,
FieldMemOperand(kJSFunctionRegister, JSFunction::kFeedbackCellOffset));
LoadTaggedPointerField(
feedback_vector, FieldMemOperand(feedback_vector, Cell::kValueOffset));
AssertFeedbackVector(feedback_vector, flags);

DeferredCodeInfo* deferred_flags_need_processing = PushDeferredCode(
[](MaglevAssembler* masm, Register flags, Register feedback_vector) {
ASM_CODE_COMMENT_STRING(masm, "Optimized marker check");
// TODO(leszeks): This could definitely be a builtin that we
// tail-call.
__ OptimizeCodeOrTailCallOptimizedCodeSlot(flags, feedback_vector);
__ Trap();
},
flags, feedback_vector);

LoadFeedbackVectorFlagsAndJumpIfNeedsProcessing(
flags, feedback_vector, CodeKind::MAGLEV,
&deferred_flags_need_processing->deferred_code_label);
}

EnterFrame(StackFrame::MAGLEV);

// Save arguments in frame.
// TODO(leszeks): Consider eliding this frame if we don't make any calls
// that could clobber these registers.
// Push the context and the JSFunction.
Push(kContextRegister, kJSFunctionRegister);
// Push the actual argument count and a _possible_ stack slot.
Push(kJavaScriptCallArgCountRegister, xzr);
int remaining_stack_slots = code_gen_state()->stack_slots() - 1;

{
ASM_CODE_COMMENT_STRING(this, " Stack/interrupt check");
// Stack check. This folds the checks for both the interrupt stack limit
// check and the real stack limit into one by just checking for the
// interrupt limit. The interrupt limit is either equal to the real
// stack limit or tighter. By ensuring we have space until that limit
// after building the frame we can quickly precheck both at once.
UseScratchRegisterScope temps(this);
Register stack_slots_size = temps.AcquireX();
Register interrupt_stack_limit = temps.AcquireX();
Mov(stack_slots_size, fp);
// TODO(leszeks): Include a max call argument size here.
Sub(stack_slots_size, stack_slots_size,
Immediate(remaining_stack_slots * kSystemPointerSize));
LoadStackLimit(interrupt_stack_limit, StackLimitKind::kInterruptStackLimit);
Cmp(stack_slots_size, interrupt_stack_limit);

ZoneLabelRef deferred_call_stack_guard_return(this);
JumpToDeferredIf(
lo,
[](MaglevAssembler* masm, ZoneLabelRef done, int stack_slots) {
ASM_CODE_COMMENT_STRING(masm, "Stack/interrupt call");
// TODO(victorgomes): Push all aligned.
__ B(*done);
},
deferred_call_stack_guard_return, remaining_stack_slots);
bind(*deferred_call_stack_guard_return);
}

// Initialize stack slots.
if (graph->tagged_stack_slots() > 0) {
ASM_CODE_COMMENT_STRING(this, "Initializing stack slots");

// If tagged_stack_slots is divisible by 2, we overshoot and allocate one
// extra stack slot, otherwise we allocate exactly the right amount, since
// one stack slot has already been allocated.
int tagged_two_slots_count = graph->tagged_stack_slots() / 2;
remaining_stack_slots -= 2 * tagged_two_slots_count;

// Magic value. Experimentally, an unroll size of 8 doesn't seem any
// worse than fully unrolled pushes.
const int kLoopUnrollSize = 8;
if (tagged_two_slots_count < kLoopUnrollSize) {
for (int i = 0; i < tagged_two_slots_count; i++) {
Push(xzr, xzr);
}
} else {
UseScratchRegisterScope temps(this);
Register count = temps.AcquireX();
// Extract the first few slots to round to the unroll size.
int first_slots = tagged_two_slots_count % kLoopUnrollSize;
for (int i = 0; i < first_slots; ++i) {
Push(xzr, xzr);
}
Move(count, Immediate(tagged_two_slots_count / kLoopUnrollSize));
// We enter the loop unconditionally, so make sure we need to loop at
// least once.
DCHECK_GT(tagged_two_slots_count / kLoopUnrollSize, 0);
Label loop;
bind(&loop);
for (int i = 0; i < kLoopUnrollSize; ++i) {
Push(xzr, xzr);
}
sub(count, count, Immediate(1));
b(&loop, gt);
}
}
if (remaining_stack_slots > 0) {
// Round up.
remaining_stack_slots += (remaining_stack_slots % 2);
// Extend rsp by the size of the remaining untagged part of the frame,
// no need to initialise these.
sub(fp, fp, Immediate(remaining_stack_slots * kSystemPointerSize));
}
}

} // namespace maglev

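A rough, standalone model of the frame bookkeeping in the prologue hunk above, not part of the commit: FrameLayout, PlanFrame and the sample inputs are invented for illustration, and the real code emits Push(xzr, xzr) pairs and a final sp adjustment instead of returning counts.

#include <cassert>
#include <iostream>

// Tagged slots are zeroed in pairs of xzr pushes, partially unrolled;
// whatever is left over is claimed as untagged space rounded up to keep the
// 16-byte stack alignment.
struct FrameLayout {
  int pair_pushes;     // number of Push(xzr, xzr) emitted
  int untagged_bytes;  // extra sp adjustment, slots left uninitialized
};

FrameLayout PlanFrame(int stack_slots, int tagged_stack_slots) {
  constexpr int kLoopUnrollSize = 8;
  constexpr int kSystemPointerSize = 8;
  // One slot was already pushed together with the argument count (xzr pad).
  int remaining_stack_slots = stack_slots - 1;

  int tagged_two_slots_count = tagged_stack_slots / 2;
  remaining_stack_slots -= 2 * tagged_two_slots_count;

  int pair_pushes = 0;
  if (tagged_two_slots_count < kLoopUnrollSize) {
    pair_pushes = tagged_two_slots_count;  // fully unrolled
  } else {
    int first_slots = tagged_two_slots_count % kLoopUnrollSize;
    int loop_iterations = tagged_two_slots_count / kLoopUnrollSize;
    pair_pushes = first_slots + loop_iterations * kLoopUnrollSize;
  }

  // Round the untagged remainder up to an even slot count, then reserve it.
  int untagged_bytes = 0;
  if (remaining_stack_slots > 0) {
    remaining_stack_slots += remaining_stack_slots % 2;
    untagged_bytes = remaining_stack_slots * kSystemPointerSize;
  }
  return {pair_pushes, untagged_bytes};
}

int main() {
  FrameLayout layout = PlanFrame(/*stack_slots=*/11, /*tagged_stack_slots=*/7);
  std::cout << layout.pair_pushes << " pair pushes, " << layout.untagged_bytes
            << " untagged bytes\n";
  assert(layout.untagged_bytes % 16 == 0);
  return 0;
}
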
@@ -2,14 +2,22 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "src/codegen/arm64/assembler-arm64-inl.h"
#include "src/codegen/arm64/register-arm64.h"
#include "src/maglev/arm64/maglev-assembler-arm64-inl.h"
#include "src/maglev/maglev-graph-processor.h"
#include "src/maglev/maglev-graph.h"
#include "src/maglev/maglev-ir.h"
#include "src/maglev/maglev-vreg-allocator.h"
#include "src/objects/feedback-cell.h"
#include "src/objects/js-function.h"

namespace v8 {
namespace internal {
namespace maglev {

#define __ masm->

// TODO(v8:7700): Remove this logic when all nodes are implemented.
class MaglevUnimplementedIRNode {
public:

@@ -39,6 +47,11 @@ class MaglevUnimplementedIRNode {
has_unimplemented_node_ = true; \
}

// If we don't have a specialization, it means we have implemented the node.
template <typename NodeT>
void MaglevUnimplementedIRNode::Process(NodeT* node,
const ProcessingState& state) {}

bool MaglevGraphHasUnimplementedNode(Graph* graph) {
GraphProcessor<MaglevUnimplementedIRNode> processor;
processor.ProcessGraph(graph);

@@ -102,11 +115,6 @@ UNIMPLEMENTED_NODE(Float64LessThanOrEqual)
UNIMPLEMENTED_NODE(Float64GreaterThan)
UNIMPLEMENTED_NODE(Float64GreaterThanOrEqual)
UNIMPLEMENTED_NODE(Float64Ieee754Unary)
UNIMPLEMENTED_NODE(Constant)
UNIMPLEMENTED_NODE(Float64Constant)
UNIMPLEMENTED_NODE(Int32Constant)
UNIMPLEMENTED_NODE(RootConstant)
UNIMPLEMENTED_NODE(SmiConstant)
UNIMPLEMENTED_NODE(BuiltinStringFromCharCode)
UNIMPLEMENTED_NODE(BuiltinStringPrototypeCharCodeAt)
UNIMPLEMENTED_NODE(Call, receiver_mode_, target_type_, feedback_)

@@ -136,7 +144,6 @@ UNIMPLEMENTED_NODE(GeneratorRestoreRegister)
UNIMPLEMENTED_NODE(GetIterator)
UNIMPLEMENTED_NODE(GetSecondReturnedValue)
UNIMPLEMENTED_NODE(GetTemplateObject)
UNIMPLEMENTED_NODE(InitialValue)
UNIMPLEMENTED_NODE(LoadTaggedField)
UNIMPLEMENTED_NODE(LoadDoubleField)
UNIMPLEMENTED_NODE(LoadTaggedElement)

@@ -195,7 +202,6 @@ UNIMPLEMENTED_NODE(ToName)
UNIMPLEMENTED_NODE(ToNumberOrNumeric)
UNIMPLEMENTED_NODE(ToObject)
UNIMPLEMENTED_NODE(ToString)
UNIMPLEMENTED_NODE(ConstantGapMove)
UNIMPLEMENTED_NODE(GapMove)
UNIMPLEMENTED_NODE(AssertInt32, condition_, reason_)
UNIMPLEMENTED_NODE(CheckDynamicValue)

@@ -224,8 +230,6 @@ UNIMPLEMENTED_NODE(StoreSignedIntDataViewElement, type_)
UNIMPLEMENTED_NODE(StoreDoubleDataViewElement)
UNIMPLEMENTED_NODE(StoreTaggedFieldNoWriteBarrier)
UNIMPLEMENTED_NODE(StoreTaggedFieldWithWriteBarrier)
UNIMPLEMENTED_NODE(IncreaseInterruptBudget)
UNIMPLEMENTED_NODE(ReduceInterruptBudget)
UNIMPLEMENTED_NODE(ThrowReferenceErrorIfHole)
UNIMPLEMENTED_NODE(ThrowSuperNotCalledIfHole)
UNIMPLEMENTED_NODE(ThrowSuperAlreadyCalledIfNotHole)

@@ -238,14 +242,118 @@ UNIMPLEMENTED_NODE(BranchIfFloat64Compare, operation_)
UNIMPLEMENTED_NODE(BranchIfUndefinedOrNull)
UNIMPLEMENTED_NODE(BranchIfJSReceiver)
UNIMPLEMENTED_NODE(Switch)
UNIMPLEMENTED_NODE(Jump)
UNIMPLEMENTED_NODE(JumpLoop)
UNIMPLEMENTED_NODE(JumpToInlined)
UNIMPLEMENTED_NODE(JumpFromInlined)
UNIMPLEMENTED_NODE(Abort)
UNIMPLEMENTED_NODE(Return)
UNIMPLEMENTED_NODE(Deopt)

void IncreaseInterruptBudget::AllocateVreg(
MaglevVregAllocationState* vreg_state) {}
void IncreaseInterruptBudget::GenerateCode(MaglevAssembler* masm,
const ProcessingState& state) {
UseScratchRegisterScope temps(masm);
Register feedback_cell = temps.AcquireX();
Register budget = temps.AcquireX();
__ Ldr(feedback_cell,
MemOperand(fp, StandardFrameConstants::kFunctionOffset));
__ LoadTaggedPointerField(
feedback_cell,
FieldMemOperand(feedback_cell, JSFunction::kFeedbackCellOffset));
__ Ldr(budget,
FieldMemOperand(feedback_cell, FeedbackCell::kInterruptBudgetOffset));
__ Add(budget, budget, Immediate(amount()));
__ Str(budget,
FieldMemOperand(feedback_cell, FeedbackCell::kInterruptBudgetOffset));
}

void ReduceInterruptBudget::AllocateVreg(
MaglevVregAllocationState* vreg_state) {}
void ReduceInterruptBudget::GenerateCode(MaglevAssembler* masm,
const ProcessingState& state) {
{
UseScratchRegisterScope temps(masm);
Register feedback_cell = temps.AcquireX();
Register budget = temps.AcquireX();
__ Ldr(feedback_cell,
MemOperand(fp, StandardFrameConstants::kFunctionOffset));
__ LoadTaggedPointerField(
feedback_cell,
FieldMemOperand(feedback_cell, JSFunction::kFeedbackCellOffset));
__ Ldr(budget, FieldMemOperand(feedback_cell,
FeedbackCell::kInterruptBudgetOffset));
__ Sub(budget, budget, Immediate(amount()));
__ Str(budget, FieldMemOperand(feedback_cell,
FeedbackCell::kInterruptBudgetOffset));
}

ZoneLabelRef done(masm);
__ JumpToDeferredIf(
lt,
[](MaglevAssembler* masm, ZoneLabelRef done,
ReduceInterruptBudget* node) {
{
SaveRegisterStateForCall save_register_state(
masm, node->register_snapshot());
UseScratchRegisterScope temps(masm);
Register function = temps.AcquireX();
__ Move(kContextRegister, static_cast<Handle<HeapObject>>(
masm->native_context().object()));
__ Ldr(function,
MemOperand(fp, StandardFrameConstants::kFunctionOffset));
__ PushArgument(function);
__ CallRuntime(Runtime::kBytecodeBudgetInterruptWithStackCheck_Maglev,
1);
save_register_state.DefineSafepointWithLazyDeopt(
node->lazy_deopt_info());
}
__ B(*done);
},
done, this);
__ bind(*done);
}

// ---
// Control nodes
// ---
void Return::AllocateVreg(MaglevVregAllocationState* vreg_state) {
UseFixed(value_input(), kReturnRegister0);
}
void Return::GenerateCode(MaglevAssembler* masm, const ProcessingState& state) {
DCHECK_EQ(ToRegister(value_input()), kReturnRegister0);
// Read the formal number of parameters from the top level compilation unit
// (i.e. the outermost, non inlined function).
int formal_params_size =
masm->compilation_info()->toplevel_compilation_unit()->parameter_count();

// We're not going to continue execution, so we can use an arbitrary register
// here instead of relying on temporaries from the register allocator.
// We cannot use scratch registers, since they're used in LeaveFrame and
// DropArguments.
Register actual_params_size = x9;
Register params_size = x10;

// Compute the size of the actual parameters + receiver (in bytes).
// TODO(leszeks): Consider making this an input into Return to re-use the
// incoming argc's register (if it's still valid).
__ Ldr(actual_params_size,
MemOperand(fp, StandardFrameConstants::kArgCOffset));
__ Mov(params_size, Immediate(formal_params_size));

// If actual is bigger than formal, then we should use it to free up the stack
// arguments.
Label corrected_args_count;
__ CompareAndBranch(actual_params_size, params_size, ge,
&corrected_args_count);
__ Mov(params_size, actual_params_size);
__ bind(&corrected_args_count);

// Leave the frame.
__ LeaveFrame(StackFrame::MAGLEV);

// Drop receiver + arguments according to dynamic arguments size.
__ DropArguments(params_size, TurboAssembler::kCountIncludesReceiver);
__ Ret();
}

} // namespace maglev
} // namespace internal
} // namespace v8

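The Return code above keeps the larger of the formal and actual parameter counts before dropping arguments. Below is a minimal sketch of that decision, not part of the commit, ignoring the receiver bookkeeping handled by kCountIncludesReceiver; SlotsToDrop is an invented helper, not V8 code.

#include <algorithm>
#include <cassert>
#include <iostream>

// Mirrors the CompareAndBranch/Mov sequence: params_size starts as the
// formal count and is replaced by the actual count if that is bigger, so
// extra actual arguments are also popped off the stack.
int SlotsToDrop(int formal_params_size, int actual_params_size) {
  return std::max(formal_params_size, actual_params_size);
}

int main() {
  // f(a, b) called as f(1, 2, 3, 4): drop 4 slots, not 2.
  assert(SlotsToDrop(2, 4) == 4);
  // f(a, b, c) called as f(1): the frame still reserved 3 formal slots.
  assert(SlotsToDrop(3, 1) == 3);
  std::cout << "ok\n";
  return 0;
}
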
@@ -159,6 +159,15 @@ class MaglevAssembler : public MacroAssembler {
inline void Move(DoubleRegister dst, double n);
inline void Move(Register dst, Handle<HeapObject> obj);

inline void Jump(Label* target);
inline void JumpIf(Condition cond, Label* target);

// TODO(victorgomes): Import baseline Push(T...) methods.
inline void Push(Register src);
using MacroAssembler::Push;
inline void Pop(Register dst);
using MacroAssembler::Pop;

void Prologue(Graph* graph);

inline void AssertStackSizeCorrect();

@@ -229,6 +238,218 @@ class SaveRegisterStateForCall {
ZoneLabelRef::ZoneLabelRef(MaglevAssembler* masm)
: ZoneLabelRef(masm->compilation_info()->zone()) {}

// ---
// Deferred code handling.
// ---

namespace detail {

// Base case provides an error.
template <typename T, typename Enable = void>
struct CopyForDeferredHelper {
template <typename U>
struct No_Copy_Helper_Implemented_For_Type;
static void Copy(MaglevCompilationInfo* compilation_info,
No_Copy_Helper_Implemented_For_Type<T>);
};

// Helper for copies by value.
template <typename T, typename Enable = void>
struct CopyForDeferredByValue {
static T Copy(MaglevCompilationInfo* compilation_info, T node) {
return node;
}
};

// Node pointers are copied by value.
template <typename T>
struct CopyForDeferredHelper<
T*, typename std::enable_if<std::is_base_of<NodeBase, T>::value>::type>
: public CopyForDeferredByValue<T*> {};
// Arithmetic values and enums are copied by value.
template <typename T>
struct CopyForDeferredHelper<
T, typename std::enable_if<std::is_arithmetic<T>::value>::type>
: public CopyForDeferredByValue<T> {};
template <typename T>
struct CopyForDeferredHelper<
T, typename std::enable_if<std::is_enum<T>::value>::type>
: public CopyForDeferredByValue<T> {};
// MaglevCompilationInfos are copied by value.
template <>
struct CopyForDeferredHelper<MaglevCompilationInfo*>
: public CopyForDeferredByValue<MaglevCompilationInfo*> {};
// Machine registers are copied by value.
template <>
struct CopyForDeferredHelper<Register>
: public CopyForDeferredByValue<Register> {};
template <>
struct CopyForDeferredHelper<DoubleRegister>
: public CopyForDeferredByValue<DoubleRegister> {};
// Bytecode offsets are copied by value.
template <>
struct CopyForDeferredHelper<BytecodeOffset>
: public CopyForDeferredByValue<BytecodeOffset> {};
// EagerDeoptInfo pointers are copied by value.
template <>
struct CopyForDeferredHelper<EagerDeoptInfo*>
: public CopyForDeferredByValue<EagerDeoptInfo*> {};
// ZoneLabelRef is copied by value.
template <>
struct CopyForDeferredHelper<ZoneLabelRef>
: public CopyForDeferredByValue<ZoneLabelRef> {};
// Register snapshots are copied by value.
template <>
struct CopyForDeferredHelper<RegisterSnapshot>
: public CopyForDeferredByValue<RegisterSnapshot> {};
// Feedback slots are copied by value.
template <>
struct CopyForDeferredHelper<FeedbackSlot>
: public CopyForDeferredByValue<FeedbackSlot> {};

template <typename T>
T CopyForDeferred(MaglevCompilationInfo* compilation_info, T&& value) {
return CopyForDeferredHelper<T>::Copy(compilation_info,
std::forward<T>(value));
}

template <typename T>
T CopyForDeferred(MaglevCompilationInfo* compilation_info, T& value) {
return CopyForDeferredHelper<T>::Copy(compilation_info, value);
}

template <typename T>
T CopyForDeferred(MaglevCompilationInfo* compilation_info, const T& value) {
return CopyForDeferredHelper<T>::Copy(compilation_info, value);
}

template <typename Function>
struct FunctionArgumentsTupleHelper
: public FunctionArgumentsTupleHelper<decltype(&Function::operator())> {};

template <typename C, typename R, typename... A>
struct FunctionArgumentsTupleHelper<R (C::*)(A...) const> {
using FunctionPointer = R (*)(A...);
using Tuple = std::tuple<A...>;
static constexpr size_t kSize = sizeof...(A);
};

template <typename R, typename... A>
struct FunctionArgumentsTupleHelper<R (&)(A...)> {
using FunctionPointer = R (*)(A...);
using Tuple = std::tuple<A...>;
static constexpr size_t kSize = sizeof...(A);
};

template <typename T>
struct StripFirstTupleArg;

template <typename T1, typename... T>
struct StripFirstTupleArg<std::tuple<T1, T...>> {
using Stripped = std::tuple<T...>;
};

template <typename Function>
class DeferredCodeInfoImpl final : public DeferredCodeInfo {
public:
using FunctionPointer =
typename FunctionArgumentsTupleHelper<Function>::FunctionPointer;
using Tuple = typename StripFirstTupleArg<
typename FunctionArgumentsTupleHelper<Function>::Tuple>::Stripped;

template <typename... InArgs>
explicit DeferredCodeInfoImpl(MaglevCompilationInfo* compilation_info,
FunctionPointer function, InArgs&&... args)
: function(function),
args(CopyForDeferred(compilation_info, std::forward<InArgs>(args))...) {
}

DeferredCodeInfoImpl(DeferredCodeInfoImpl&&) = delete;
DeferredCodeInfoImpl(const DeferredCodeInfoImpl&) = delete;

void Generate(MaglevAssembler* masm) override {
std::apply(function,
std::tuple_cat(std::make_tuple(masm), std::move(args)));
}

private:
FunctionPointer function;
Tuple args;
};

} // namespace detail

template <typename Function, typename... Args>
inline DeferredCodeInfo* MaglevAssembler::PushDeferredCode(
Function&& deferred_code_gen, Args&&... args) {
using FunctionPointer =
typename detail::FunctionArgumentsTupleHelper<Function>::FunctionPointer;
static_assert(
std::is_invocable_v<FunctionPointer, MaglevAssembler*,
decltype(detail::CopyForDeferred(
std::declval<MaglevCompilationInfo*>(),
std::declval<Args>()))...>,
"Parameters of deferred_code_gen function should match arguments into "
"PushDeferredCode");

using DeferredCodeInfoT = detail::DeferredCodeInfoImpl<Function>;
DeferredCodeInfoT* deferred_code =
compilation_info()->zone()->New<DeferredCodeInfoT>(
compilation_info(), deferred_code_gen, std::forward<Args>(args)...);

code_gen_state()->PushDeferredCode(deferred_code);
return deferred_code;
}

// Note this doesn't take capturing lambdas by design, since state may
// change until `deferred_code_gen` is actually executed. Use either a
// non-capturing lambda, or a plain function pointer.
template <typename Function, typename... Args>
inline void MaglevAssembler::JumpToDeferredIf(Condition cond,
Function&& deferred_code_gen,
Args&&... args) {
DeferredCodeInfo* deferred_code = PushDeferredCode<Function, Args...>(
std::forward<Function>(deferred_code_gen), std::forward<Args>(args)...);
if (v8_flags.code_comments) {
RecordComment("-- Jump to deferred code");
}
JumpIf(cond, &deferred_code->deferred_code_label);
}

// ---
// Deopt
// ---

inline void MaglevAssembler::RegisterEagerDeopt(EagerDeoptInfo* deopt_info,
DeoptimizeReason reason) {
if (deopt_info->reason() != DeoptimizeReason::kUnknown) {
DCHECK_EQ(deopt_info->reason(), reason);
}
if (deopt_info->deopt_entry_label()->is_unused()) {
code_gen_state()->PushEagerDeopt(deopt_info);
deopt_info->set_reason(reason);
}
}

template <typename NodeT>
inline void MaglevAssembler::EmitEagerDeopt(NodeT* node,
DeoptimizeReason reason) {
static_assert(NodeT::kProperties.can_eager_deopt());
RegisterEagerDeopt(node->eager_deopt_info(), reason);
RecordComment("-- Jump to eager deopt");
Jump(node->eager_deopt_info()->deopt_entry_label());
}

template <typename NodeT>
inline void MaglevAssembler::EmitEagerDeoptIf(Condition cond,
DeoptimizeReason reason,
NodeT* node) {
static_assert(NodeT::kProperties.can_eager_deopt());
RegisterEagerDeopt(node->eager_deopt_info(), reason);
RecordComment("-- Jump to eager deopt");
JumpIf(cond, node->eager_deopt_info()->deopt_entry_label());
}

} // namespace maglev
} // namespace internal
} // namespace v8

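The DeferredCodeInfoImpl machinery above captures the lambda's arguments by value in a std::tuple and replays the call later with std::apply, prepending the assembler pointer via std::tuple_cat. Below is a self-contained sketch of the same pattern in plain C++, not part of the commit; DeferredCall and the string output are invented for illustration.

#include <iostream>
#include <string>
#include <tuple>
#include <utility>

// Arguments are copied by value into a tuple at registration time; the
// stored function pointer is invoked later, with an extra leading argument
// prepended via std::tuple_cat, just like DeferredCodeInfoImpl::Generate.
template <typename... Args>
class DeferredCall {
 public:
  using FunctionPointer = void (*)(std::string*, Args...);

  DeferredCall(FunctionPointer function, Args... args)
      : function_(function), args_(std::move(args)...) {}

  void Generate(std::string* out) {
    std::apply(function_,
               std::tuple_cat(std::make_tuple(out), std::move(args_)));
  }

 private:
  FunctionPointer function_;
  std::tuple<Args...> args_;
};

int main() {
  // A non-capturing lambda converts to a plain function pointer, matching
  // the "no capturing lambdas by design" note in the diff.
  auto deferred = DeferredCall<int, int>(
      +[](std::string* out, int flags, int feedback) {
        *out += "deferred(" + std::to_string(flags) + ", " +
                std::to_string(feedback) + ")";
      },
      1, 2);
  std::string output;
  deferred.Generate(&output);  // runs later, with the copied arguments
  std::cout << output << "\n";
  return 0;
}
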
@@ -558,6 +558,7 @@ class ExceptionHandlerTrampolineBuilder {
// talking about a presumably infrequent case for exception handlers.

__ RecordComment("EmitMaterialisationsAndPushResults");

if (save_accumulator) __ Push(kReturnRegister0);
for (const Move& move : moves) {
// We consider constants after all other operations, since constants

@@ -586,7 +587,6 @@ class ExceptionHandlerTrampolineBuilder {
__ Move(masm_->ToMemOperand(target.operand()), kScratchRegister);
}
}

if (save_accumulator) __ Pop(kReturnRegister0);
}

@@ -598,7 +598,16 @@ class MaglevCodeGeneratingNodeProcessor {
explicit MaglevCodeGeneratingNodeProcessor(MaglevAssembler* masm)
: masm_(masm) {}

void PreProcessGraph(Graph* graph) { __ Prologue(graph); }
void PreProcessGraph(Graph* graph) {
code_gen_state()->set_untagged_slots(graph->untagged_stack_slots());
code_gen_state()->set_tagged_slots(graph->tagged_stack_slots());

if (v8_flags.maglev_break_on_entry) {
__ DebugBreak();
}

__ Prologue(graph);
}

void PostProcessGraph(Graph* graph) {}

@@ -1228,9 +1237,14 @@ void MaglevCodeGenerator::EmitDeopts() {
deopt_index);
}
__ bind(deopt_info->deopt_entry_label());

#ifndef V8_TARGET_ARCH_ARM64
// TODO(victorgomes): Implement jump deoptimizer entry label mechanism.
__ CallForDeoptimization(Builtin::kDeoptimizationEntry_Eager, deopt_index,
deopt_info->deopt_entry_label(),
DeoptimizeKind::kEager, nullptr, nullptr);
#endif

deopt_index++;
}

@@ -1246,9 +1260,13 @@ void MaglevCodeGenerator::EmitDeopts() {
deopt_index);
}
__ bind(deopt_info->deopt_entry_label());

#ifndef V8_TARGET_ARCH_ARM64
// TODO(victorgomes): Implement jump deoptimizer entry label mechanism.
__ CallForDeoptimization(Builtin::kDeoptimizationEntry_Lazy, deopt_index,
deopt_info->deopt_entry_label(),
DeoptimizeKind::kLazy, nullptr, nullptr);
#endif

last_updated_safepoint = safepoint_table_builder_.UpdateDeoptimizationInfo(
deopt_info->deopting_call_return_pc(),

@@ -8,6 +8,8 @@
#include "src/heap/local-heap.h"
#include "src/heap/parked-scope.h"
#include "src/maglev/maglev-graph-labeller.h"
#include "src/maglev/maglev-graph-processor.h"
#include "src/maglev/maglev-vreg-allocator.h"

#ifdef V8_TARGET_ARCH_ARM64
#include "src/maglev/arm64/maglev-assembler-arm64-inl.h"

@@ -468,6 +470,117 @@ void RootConstant::DoLoadToRegister(MaglevAssembler* masm, Register reg) {
__ LoadRoot(reg, index());
}

// ---
// Arch agnostic nodes
// ---

void SmiConstant::AllocateVreg(MaglevVregAllocationState* vreg_state) {
DefineAsConstant(vreg_state, this);
}
void SmiConstant::GenerateCode(MaglevAssembler* masm,
const ProcessingState& state) {}

void Int32Constant::AllocateVreg(MaglevVregAllocationState* vreg_state) {
DefineAsConstant(vreg_state, this);
}
void Int32Constant::GenerateCode(MaglevAssembler* masm,
const ProcessingState& state) {}

void Float64Constant::AllocateVreg(MaglevVregAllocationState* vreg_state) {
DefineAsConstant(vreg_state, this);
}
void Float64Constant::GenerateCode(MaglevAssembler* masm,
const ProcessingState& state) {}

void Constant::AllocateVreg(MaglevVregAllocationState* vreg_state) {
DefineAsConstant(vreg_state, this);
}
void Constant::GenerateCode(MaglevAssembler* masm,
const ProcessingState& state) {}

void RootConstant::AllocateVreg(MaglevVregAllocationState* vreg_state) {
DefineAsConstant(vreg_state, this);
}
void RootConstant::GenerateCode(MaglevAssembler* masm,
const ProcessingState& state) {}

void InitialValue::AllocateVreg(MaglevVregAllocationState* vreg_state) {
// TODO(leszeks): Make this nicer.
result().SetUnallocated(compiler::UnallocatedOperand::FIXED_SLOT,
(StandardFrameConstants::kExpressionsOffset -
UnoptimizedFrameConstants::kRegisterFileFromFp) /
kSystemPointerSize +
source().index(),
vreg_state->AllocateVirtualRegister());
}
void InitialValue::GenerateCode(MaglevAssembler* masm,
const ProcessingState& state) {
// No-op, the value is already in the appropriate slot.
}

void ConstantGapMove::AllocateVreg(MaglevVregAllocationState* vreg_state) {
UNREACHABLE();
}

namespace {
template <typename T>
struct GetRegister;
template <>
struct GetRegister<Register> {
static Register Get(compiler::AllocatedOperand target) {
return target.GetRegister();
}
};
template <>
struct GetRegister<DoubleRegister> {
static DoubleRegister Get(compiler::AllocatedOperand target) {
return target.GetDoubleRegister();
}
};
} // namespace
void ConstantGapMove::GenerateCode(MaglevAssembler* masm,
const ProcessingState& state) {
switch (node_->opcode()) {
#define CASE(Name) \
case Opcode::k##Name: \
return node_->Cast<Name>()->DoLoadToRegister( \
masm, GetRegister<Name::OutputRegister>::Get(target()));
CONSTANT_VALUE_NODE_LIST(CASE)
#undef CASE
default:
UNREACHABLE();
}
}

// ---
// Arch agnostic control nodes
// ---

void Jump::AllocateVreg(MaglevVregAllocationState* vreg_state) {}
void Jump::GenerateCode(MaglevAssembler* masm, const ProcessingState& state) {
// Avoid emitting a jump to the next block.
if (target() != state.next_block()) {
__ Jump(target()->label());
}
}

void JumpToInlined::AllocateVreg(MaglevVregAllocationState* vreg_state) {}
void JumpToInlined::GenerateCode(MaglevAssembler* masm,
const ProcessingState& state) {
// Avoid emitting a jump to the next block.
if (target() != state.next_block()) {
__ Jump(target()->label());
}
}
void JumpFromInlined::AllocateVreg(MaglevVregAllocationState* vreg_state) {}
void JumpFromInlined::GenerateCode(MaglevAssembler* masm,
const ProcessingState& state) {
// Avoid emitting a jump to the next block.
if (target() != state.next_block()) {
__ Jump(target()->label());
}
}

// ---
// Print params
// ---

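Jump, JumpToInlined and JumpFromInlined above all skip the branch when the target is the block laid out immediately after the current one. Here is a small standalone model of that fall-through check, not part of the commit; Block, EmitBranches and the sample blocks are invented, and the real code compares against state.next_block().

#include <iostream>
#include <string>
#include <vector>

struct Block {
  std::string name;
  int jump_target;  // index of the successor block, -1 if none
};

void EmitBranches(const std::vector<Block>& blocks) {
  for (size_t i = 0; i < blocks.size(); ++i) {
    const Block& block = blocks[i];
    std::cout << block.name << ":\n";
    if (block.jump_target < 0) continue;
    // A branch to the block laid out immediately afterwards can fall through.
    bool is_fall_through = static_cast<size_t>(block.jump_target) == i + 1;
    if (!is_fall_through) {
      std::cout << "  b " << blocks[block.jump_target].name << "\n";
    }
  }
}

int main() {
  // Only B0 needs an explicit branch; B1 falls through into B2.
  EmitBranches({{"B0", 2}, {"B1", 2}, {"B2", -1}});
  return 0;
}
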
@@ -48,6 +48,64 @@ class MaglevVregAllocator {
MaglevVregAllocationState state_;
};

// ---
// Vreg allocation helpers.
// ---

inline int GetVirtualRegister(Node* node) {
return compiler::UnallocatedOperand::cast(node->result().operand())
.virtual_register();
}

inline void DefineAsRegister(MaglevVregAllocationState* vreg_state,
Node* node) {
node->result().SetUnallocated(
compiler::UnallocatedOperand::MUST_HAVE_REGISTER,
vreg_state->AllocateVirtualRegister());
}
inline void DefineAsConstant(MaglevVregAllocationState* vreg_state,
Node* node) {
node->result().SetUnallocated(compiler::UnallocatedOperand::NONE,
vreg_state->AllocateVirtualRegister());
}

inline void DefineAsFixed(MaglevVregAllocationState* vreg_state, Node* node,
Register reg) {
node->result().SetUnallocated(compiler::UnallocatedOperand::FIXED_REGISTER,
reg.code(),
vreg_state->AllocateVirtualRegister());
}

inline void DefineSameAsFirst(MaglevVregAllocationState* vreg_state,
Node* node) {
node->result().SetUnallocated(vreg_state->AllocateVirtualRegister(), 0);
}

inline void UseRegister(Input& input) {
input.SetUnallocated(compiler::UnallocatedOperand::MUST_HAVE_REGISTER,
compiler::UnallocatedOperand::USED_AT_END,
GetVirtualRegister(input.node()));
}
inline void UseAndClobberRegister(Input& input) {
input.SetUnallocated(compiler::UnallocatedOperand::MUST_HAVE_REGISTER,
compiler::UnallocatedOperand::USED_AT_START,
GetVirtualRegister(input.node()));
}
inline void UseAny(Input& input) {
input.SetUnallocated(
compiler::UnallocatedOperand::REGISTER_OR_SLOT_OR_CONSTANT,
compiler::UnallocatedOperand::USED_AT_END,
GetVirtualRegister(input.node()));
}
inline void UseFixed(Input& input, Register reg) {
input.SetUnallocated(compiler::UnallocatedOperand::FIXED_REGISTER, reg.code(),
GetVirtualRegister(input.node()));
}
inline void UseFixed(Input& input, DoubleRegister reg) {
input.SetUnallocated(compiler::UnallocatedOperand::FIXED_FP_REGISTER,
reg.code(), GetVirtualRegister(input.node()));
}

} // namespace maglev
} // namespace internal
} // namespace v8

@@ -235,6 +235,16 @@ inline void MaglevAssembler::Move(Register dst, Handle<HeapObject> obj) {
MacroAssembler::Move(dst, obj);
}

inline void MaglevAssembler::Jump(Label* target) { jmp(target); }

inline void MaglevAssembler::JumpIf(Condition cond, Label* target) {
j(cond, target);
}

inline void MaglevAssembler::Push(Register src) { MacroAssembler::Push(src); }

inline void MaglevAssembler::Pop(Register dst) { MacroAssembler::Pop(dst); }

inline void MaglevAssembler::MaterialiseValueNode(Register dst,
ValueNode* value) {
switch (value->opcode()) {

@@ -315,218 +325,6 @@ inline void MaglevAssembler::AssertStackSizeCorrect() {
}
}

// ---
// Deferred code handling.
// ---

namespace detail {

// Base case provides an error.
template <typename T, typename Enable = void>
struct CopyForDeferredHelper {
template <typename U>
struct No_Copy_Helper_Implemented_For_Type;
static void Copy(MaglevCompilationInfo* compilation_info,
No_Copy_Helper_Implemented_For_Type<T>);
};

// Helper for copies by value.
template <typename T, typename Enable = void>
struct CopyForDeferredByValue {
static T Copy(MaglevCompilationInfo* compilation_info, T node) {
return node;
}
};

// Node pointers are copied by value.
template <typename T>
struct CopyForDeferredHelper<
T*, typename std::enable_if<std::is_base_of<NodeBase, T>::value>::type>
: public CopyForDeferredByValue<T*> {};
// Arithmetic values and enums are copied by value.
template <typename T>
struct CopyForDeferredHelper<
T, typename std::enable_if<std::is_arithmetic<T>::value>::type>
: public CopyForDeferredByValue<T> {};
template <typename T>
struct CopyForDeferredHelper<
T, typename std::enable_if<std::is_enum<T>::value>::type>
: public CopyForDeferredByValue<T> {};
// MaglevCompilationInfos are copied by value.
template <>
struct CopyForDeferredHelper<MaglevCompilationInfo*>
: public CopyForDeferredByValue<MaglevCompilationInfo*> {};
// Machine registers are copied by value.
template <>
struct CopyForDeferredHelper<Register>
: public CopyForDeferredByValue<Register> {};
template <>
struct CopyForDeferredHelper<DoubleRegister>
: public CopyForDeferredByValue<DoubleRegister> {};
// Bytecode offsets are copied by value.
template <>
struct CopyForDeferredHelper<BytecodeOffset>
: public CopyForDeferredByValue<BytecodeOffset> {};
// EagerDeoptInfo pointers are copied by value.
template <>
struct CopyForDeferredHelper<EagerDeoptInfo*>
: public CopyForDeferredByValue<EagerDeoptInfo*> {};
// ZoneLabelRef is copied by value.
template <>
struct CopyForDeferredHelper<ZoneLabelRef>
: public CopyForDeferredByValue<ZoneLabelRef> {};
// Register snapshots are copied by value.
template <>
struct CopyForDeferredHelper<RegisterSnapshot>
: public CopyForDeferredByValue<RegisterSnapshot> {};
// Feedback slots are copied by value.
template <>
struct CopyForDeferredHelper<FeedbackSlot>
: public CopyForDeferredByValue<FeedbackSlot> {};

template <typename T>
T CopyForDeferred(MaglevCompilationInfo* compilation_info, T&& value) {
return CopyForDeferredHelper<T>::Copy(compilation_info,
std::forward<T>(value));
}

template <typename T>
T CopyForDeferred(MaglevCompilationInfo* compilation_info, T& value) {
return CopyForDeferredHelper<T>::Copy(compilation_info, value);
}

template <typename T>
T CopyForDeferred(MaglevCompilationInfo* compilation_info, const T& value) {
return CopyForDeferredHelper<T>::Copy(compilation_info, value);
}

template <typename Function>
struct FunctionArgumentsTupleHelper
: public FunctionArgumentsTupleHelper<decltype(&Function::operator())> {};

template <typename C, typename R, typename... A>
struct FunctionArgumentsTupleHelper<R (C::*)(A...) const> {
using FunctionPointer = R (*)(A...);
using Tuple = std::tuple<A...>;
static constexpr size_t kSize = sizeof...(A);
};

template <typename R, typename... A>
struct FunctionArgumentsTupleHelper<R (&)(A...)> {
using FunctionPointer = R (*)(A...);
using Tuple = std::tuple<A...>;
static constexpr size_t kSize = sizeof...(A);
};

template <typename T>
struct StripFirstTupleArg;

template <typename T1, typename... T>
struct StripFirstTupleArg<std::tuple<T1, T...>> {
using Stripped = std::tuple<T...>;
};

template <typename Function>
class DeferredCodeInfoImpl final : public DeferredCodeInfo {
public:
using FunctionPointer =
typename FunctionArgumentsTupleHelper<Function>::FunctionPointer;
using Tuple = typename StripFirstTupleArg<
typename FunctionArgumentsTupleHelper<Function>::Tuple>::Stripped;

template <typename... InArgs>
explicit DeferredCodeInfoImpl(MaglevCompilationInfo* compilation_info,
FunctionPointer function, InArgs&&... args)
: function(function),
args(CopyForDeferred(compilation_info, std::forward<InArgs>(args))...) {
}

DeferredCodeInfoImpl(DeferredCodeInfoImpl&&) = delete;
DeferredCodeInfoImpl(const DeferredCodeInfoImpl&) = delete;

void Generate(MaglevAssembler* masm) override {
std::apply(function,
std::tuple_cat(std::make_tuple(masm), std::move(args)));
}

private:
FunctionPointer function;
Tuple args;
};

} // namespace detail

template <typename Function, typename... Args>
inline DeferredCodeInfo* MaglevAssembler::PushDeferredCode(
Function&& deferred_code_gen, Args&&... args) {
using FunctionPointer =
typename detail::FunctionArgumentsTupleHelper<Function>::FunctionPointer;
static_assert(
std::is_invocable_v<FunctionPointer, MaglevAssembler*,
decltype(detail::CopyForDeferred(
std::declval<MaglevCompilationInfo*>(),
std::declval<Args>()))...>,
"Parameters of deferred_code_gen function should match arguments into "
"PushDeferredCode");

using DeferredCodeInfoT = detail::DeferredCodeInfoImpl<Function>;
DeferredCodeInfoT* deferred_code =
compilation_info()->zone()->New<DeferredCodeInfoT>(
compilation_info(), deferred_code_gen, std::forward<Args>(args)...);

code_gen_state()->PushDeferredCode(deferred_code);
return deferred_code;
}

// Note this doesn't take capturing lambdas by design, since state may
// change until `deferred_code_gen` is actually executed. Use either a
// non-capturing lambda, or a plain function pointer.
template <typename Function, typename... Args>
inline void MaglevAssembler::JumpToDeferredIf(Condition cond,
Function&& deferred_code_gen,
Args&&... args) {
DeferredCodeInfo* deferred_code = PushDeferredCode<Function, Args...>(
std::forward<Function>(deferred_code_gen), std::forward<Args>(args)...);
if (v8_flags.code_comments) {
RecordComment("-- Jump to deferred code");
}
j(cond, &deferred_code->deferred_code_label);
}

// ---
// Deopt
// ---

inline void MaglevAssembler::RegisterEagerDeopt(EagerDeoptInfo* deopt_info,
DeoptimizeReason reason) {
if (deopt_info->reason() != DeoptimizeReason::kUnknown) {
DCHECK_EQ(deopt_info->reason(), reason);
}
if (deopt_info->deopt_entry_label()->is_unused()) {
code_gen_state()->PushEagerDeopt(deopt_info);
deopt_info->set_reason(reason);
}
}

template <typename NodeT>
inline void MaglevAssembler::EmitEagerDeopt(NodeT* node,
DeoptimizeReason reason) {
static_assert(NodeT::kProperties.can_eager_deopt());
RegisterEagerDeopt(node->eager_deopt_info(), reason);
RecordComment("-- Jump to eager deopt");
jmp(node->eager_deopt_info()->deopt_entry_label());
}

template <typename NodeT>
inline void MaglevAssembler::EmitEagerDeoptIf(Condition cond,
DeoptimizeReason reason,
NodeT* node) {
static_assert(NodeT::kProperties.can_eager_deopt());
RegisterEagerDeopt(node->eager_deopt_info(), reason);
RecordComment("-- Jump to eager deopt");
j(cond, node->eager_deopt_info()->deopt_entry_label());
}

} // namespace maglev
} // namespace internal
} // namespace v8

@ -379,136 +379,130 @@ void MaglevAssembler::TruncateDoubleToInt32(Register dst, DoubleRegister src) {
|
||||
}
|
||||
|
||||
void MaglevAssembler::Prologue(Graph* graph) {
|
||||
code_gen_state()->set_untagged_slots(graph->untagged_stack_slots());
|
||||
code_gen_state()->set_tagged_slots(graph->tagged_stack_slots());
|
||||
|
||||
if (v8_flags.maglev_break_on_entry) {
|
||||
int3();
|
||||
}
|
||||
|
||||
if (v8_flags.maglev_ool_prologue) {
|
||||
// Call the out-of-line prologue (with parameters passed on the stack).
|
||||
Push(Immediate(code_gen_state()->stack_slots() * kSystemPointerSize));
|
||||
Push(Immediate(code_gen_state()->tagged_slots() * kSystemPointerSize));
|
||||
CallBuiltin(Builtin::kMaglevOutOfLinePrologue);
|
||||
} else {
|
||||
BailoutIfDeoptimized(rbx);
|
||||
return;
|
||||
}
|
||||
|
||||
// Tiering support.
|
||||
// TODO(jgruber): Extract to a builtin (the tiering prologue is ~230 bytes
|
||||
// per Maglev code object on x64).
|
||||
{
|
||||
// Scratch registers. Don't clobber regs related to the calling
|
||||
// convention (e.g. kJavaScriptCallArgCountRegister). Keep up-to-date
|
||||
// with deferred flags code.
|
||||
Register flags = rcx;
|
||||
Register feedback_vector = r9;
|
||||
BailoutIfDeoptimized(rbx);
|
||||
|
||||
// Load the feedback vector.
|
||||
LoadTaggedPointerField(
|
||||
feedback_vector,
|
||||
FieldOperand(kJSFunctionRegister, JSFunction::kFeedbackCellOffset));
|
||||
LoadTaggedPointerField(feedback_vector,
|
||||
FieldOperand(feedback_vector, Cell::kValueOffset));
|
||||
AssertFeedbackVector(feedback_vector);
|
||||
// Tiering support.
|
||||
// TODO(jgruber): Extract to a builtin (the tiering prologue is ~230 bytes
|
||||
// per Maglev code object on x64).
|
||||
{
|
||||
// Scratch registers. Don't clobber regs related to the calling
|
||||
// convention (e.g. kJavaScriptCallArgCountRegister). Keep up-to-date
|
||||
// with deferred flags code.
|
||||
Register flags = rcx;
|
||||
Register feedback_vector = r9;
|
||||
|
||||
DeferredCodeInfo* deferred_flags_need_processing = PushDeferredCode(
|
||||
[](MaglevAssembler* masm, Register flags, Register feedback_vector) {
|
||||
ASM_CODE_COMMENT_STRING(masm, "Optimized marker check");
|
||||
// TODO(leszeks): This could definitely be a builtin that we
|
||||
// tail-call.
|
||||
__ OptimizeCodeOrTailCallOptimizedCodeSlot(
|
||||
flags, feedback_vector, kJSFunctionRegister, JumpMode::kJump);
|
||||
__ Trap();
|
||||
},
|
||||
flags, feedback_vector);
|
||||
// Load the feedback vector.
|
||||
LoadTaggedPointerField(
|
||||
feedback_vector,
|
||||
FieldOperand(kJSFunctionRegister, JSFunction::kFeedbackCellOffset));
|
||||
LoadTaggedPointerField(feedback_vector,
|
||||
FieldOperand(feedback_vector, Cell::kValueOffset));
|
||||
AssertFeedbackVector(feedback_vector);
|
||||
|
||||
LoadFeedbackVectorFlagsAndJumpIfNeedsProcessing(
|
||||
flags, feedback_vector, CodeKind::MAGLEV,
|
||||
&deferred_flags_need_processing->deferred_code_label);
|
||||
}
|
||||
DeferredCodeInfo* deferred_flags_need_processing = PushDeferredCode(
|
||||
[](MaglevAssembler* masm, Register flags, Register feedback_vector) {
|
||||
ASM_CODE_COMMENT_STRING(masm, "Optimized marker check");
|
||||
// TODO(leszeks): This could definitely be a builtin that we
|
||||
// tail-call.
|
||||
__ OptimizeCodeOrTailCallOptimizedCodeSlot(
|
||||
flags, feedback_vector, kJSFunctionRegister, JumpMode::kJump);
|
||||
__ Trap();
|
||||
},
|
||||
flags, feedback_vector);
|
||||
|
||||
EnterFrame(StackFrame::MAGLEV);
|
||||
LoadFeedbackVectorFlagsAndJumpIfNeedsProcessing(
|
||||
flags, feedback_vector, CodeKind::MAGLEV,
|
||||
&deferred_flags_need_processing->deferred_code_label);
|
||||
}
|
||||
|
||||
// Save arguments in frame.
|
||||
// TODO(leszeks): Consider eliding this frame if we don't make any calls
|
||||
// that could clobber these registers.
|
||||
Push(kContextRegister);
|
||||
  EnterFrame(StackFrame::MAGLEV);

  // Save arguments in frame.
  // TODO(leszeks): Consider eliding this frame if we don't make any calls
  // that could clobber these registers.
  Push(kContextRegister);
  Push(kJSFunctionRegister);              // Callee's JS function.
  Push(kJavaScriptCallArgCountRegister);  // Actual argument count.

  {
    ASM_CODE_COMMENT_STRING(this, " Stack/interrupt check");
    // Stack check. This folds the checks for both the interrupt stack limit
    // check and the real stack limit into one by just checking for the
    // interrupt limit. The interrupt limit is either equal to the real
    // stack limit or tighter. By ensuring we have space until that limit
    // after building the frame we can quickly precheck both at once.
    Move(kScratchRegister, rsp);
    // TODO(leszeks): Include a max call argument size here.
    subq(kScratchRegister,
         Immediate(code_gen_state()->stack_slots() * kSystemPointerSize));
    cmpq(kScratchRegister,
         StackLimitAsOperand(StackLimitKind::kInterruptStackLimit));

    ZoneLabelRef deferred_call_stack_guard_return(this);
    JumpToDeferredIf(
        below,
        [](MaglevAssembler* masm, ZoneLabelRef done, int stack_slots) {
          ASM_CODE_COMMENT_STRING(masm, "Stack/interrupt call");
          // Save any registers that can be referenced by RegisterInput.
          // TODO(leszeks): Only push those that are used by the graph.
          __ PushAll(RegisterInput::kAllowedRegisters);
          // Push the frame size
          __ Push(Immediate(Smi::FromInt(stack_slots * kSystemPointerSize)));
          __ CallRuntime(Runtime::kStackGuardWithGap, 1);
          __ PopAll(RegisterInput::kAllowedRegisters);
          __ jmp(*done);
        },
        deferred_call_stack_guard_return, code_gen_state()->stack_slots());
    bind(*deferred_call_stack_guard_return);
  }

  // Initialize stack slots.
  if (graph->tagged_stack_slots() > 0) {
    ASM_CODE_COMMENT_STRING(this, "Initializing stack slots");
    // TODO(leszeks): Consider filling with xmm + movdqa instead.
    Move(rax, Immediate(0));

    // Magic value. Experimentally, an unroll size of 8 doesn't seem any
    // worse than fully unrolled pushes.
    const int kLoopUnrollSize = 8;
    int tagged_slots = graph->tagged_stack_slots();
    if (tagged_slots < 2 * kLoopUnrollSize) {
      // If the frame is small enough, just unroll the frame fill
      // completely.
      for (int i = 0; i < tagged_slots; ++i) {
        pushq(rax);
      }
    } else {
      // Extract the first few slots to round to the unroll size.
      int first_slots = tagged_slots % kLoopUnrollSize;
      for (int i = 0; i < first_slots; ++i) {
        pushq(rax);
      }
      Move(rbx, Immediate(tagged_slots / kLoopUnrollSize));
      // We enter the loop unconditionally, so make sure we need to loop at
      // least once.
      DCHECK_GT(tagged_slots / kLoopUnrollSize, 0);
      Label loop;
      bind(&loop);
      for (int i = 0; i < kLoopUnrollSize; ++i) {
        pushq(rax);
      }
      decl(rbx);
      j(greater, &loop);
    }
  }
  if (graph->untagged_stack_slots() > 0) {
    // Extend rsp by the size of the remaining untagged part of the frame,
    // no need to initialise these.
    subq(rsp, Immediate(graph->untagged_stack_slots() * kSystemPointerSize));
  }
}
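
The prologue above relies on two tricks: a single comparison against the interrupt stack limit covers both the interrupt and the real-stack-limit check, and the tagged-slot fill is unrolled in groups of eight with the remainder peeled off first. A rough standalone sketch of the same arithmetic (plain C++, invented names such as FrameNeedsStackGuard and FillTaggedSlots, not V8 code):

#include <cstdint>
#include <vector>

// Sketch-only constants mirroring the values used in the prologue above.
constexpr int kSystemPointerSize = 8;
constexpr int kLoopUnrollSize = 8;

// One compare against the interrupt limit suffices, because the interrupt
// limit is always at or above the real stack limit.
bool FrameNeedsStackGuard(uintptr_t sp, uintptr_t interrupt_limit,
                          int stack_slots) {
  return sp - static_cast<uintptr_t>(stack_slots) * kSystemPointerSize <
         interrupt_limit;
}

// Unroll-by-8 fill: peel the remainder first so the main loop runs a whole
// number of eight-slot groups (mirrors the pushq(rax) loops above).
void FillTaggedSlots(std::vector<uintptr_t>& frame, int tagged_slots) {
  int i = 0;
  for (int n = tagged_slots % kLoopUnrollSize; n > 0; --n) frame[i++] = 0;
  for (int g = tagged_slots / kLoopUnrollSize; g > 0; --g) {
    for (int k = 0; k < kLoopUnrollSize; ++k) frame[i++] = 0;
  }
}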
@ -40,61 +40,6 @@ namespace maglev {

namespace {

// ---
// Vreg allocation helpers.
// ---

int GetVirtualRegister(Node* node) {
  return compiler::UnallocatedOperand::cast(node->result().operand())
      .virtual_register();
}

void DefineAsRegister(MaglevVregAllocationState* vreg_state, Node* node) {
  node->result().SetUnallocated(
      compiler::UnallocatedOperand::MUST_HAVE_REGISTER,
      vreg_state->AllocateVirtualRegister());
}
void DefineAsConstant(MaglevVregAllocationState* vreg_state, Node* node) {
  node->result().SetUnallocated(compiler::UnallocatedOperand::NONE,
                                vreg_state->AllocateVirtualRegister());
}

void DefineAsFixed(MaglevVregAllocationState* vreg_state, Node* node,
                   Register reg) {
  node->result().SetUnallocated(compiler::UnallocatedOperand::FIXED_REGISTER,
                                reg.code(),
                                vreg_state->AllocateVirtualRegister());
}

void DefineSameAsFirst(MaglevVregAllocationState* vreg_state, Node* node) {
  node->result().SetUnallocated(vreg_state->AllocateVirtualRegister(), 0);
}

void UseRegister(Input& input) {
  input.SetUnallocated(compiler::UnallocatedOperand::MUST_HAVE_REGISTER,
                       compiler::UnallocatedOperand::USED_AT_END,
                       GetVirtualRegister(input.node()));
}
void UseAndClobberRegister(Input& input) {
  input.SetUnallocated(compiler::UnallocatedOperand::MUST_HAVE_REGISTER,
                       compiler::UnallocatedOperand::USED_AT_START,
                       GetVirtualRegister(input.node()));
}
void UseAny(Input& input) {
  input.SetUnallocated(
      compiler::UnallocatedOperand::REGISTER_OR_SLOT_OR_CONSTANT,
      compiler::UnallocatedOperand::USED_AT_END,
      GetVirtualRegister(input.node()));
}
void UseFixed(Input& input, Register reg) {
  input.SetUnallocated(compiler::UnallocatedOperand::FIXED_REGISTER, reg.code(),
                       GetVirtualRegister(input.node()));
}
void UseFixed(Input& input, DoubleRegister reg) {
  input.SetUnallocated(compiler::UnallocatedOperand::FIXED_FP_REGISTER,
                       reg.code(), GetVirtualRegister(input.node()));
}
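
These helpers only record constraints that the later register-allocation pass must satisfy; they do not assign machine registers themselves. A minimal standalone model of such a constraint record (plain C++, all names invented, not the real compiler::UnallocatedOperand API, and the register code below is an arbitrary example):

#include <cstdio>

// Simplified mirror of the policies used by the helpers above.
enum class Policy {
  kNone,                     // anything, even a constant (DefineAsConstant)
  kMustHaveRegister,         // some register (UseRegister / DefineAsRegister)
  kFixedRegister,            // a specific register (UseFixed / DefineAsFixed)
  kRegisterOrSlotOrConstant  // anything addressable (UseAny)
};
enum class Lifetime { kUsedAtStart, kUsedAtEnd };  // clobberable vs. live to the end

struct SketchOperand {
  int vreg;                 // virtual register id
  Policy policy;            // what the allocator must provide
  Lifetime lifetime;        // USED_AT_START lets the node reuse/clobber the register
  int fixed_reg_code = -1;  // only meaningful for Policy::kFixedRegister
};

int main() {
  // Roughly what UseFixed(context(), kContextRegister) records for an input.
  SketchOperand context{0, Policy::kFixedRegister, Lifetime::kUsedAtEnd, 6};
  // Roughly what DefineAsRegister(...) records for a result.
  SketchOperand result{1, Policy::kMustHaveRegister, Lifetime::kUsedAtEnd};
  std::printf("v%d pinned to register code %d, v%d in any register\n",
              context.vreg, context.fixed_reg_code, result.vreg);
  return 0;
}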

void AddDeoptRegistersToSnapshot(RegisterSnapshot* snapshot,
                                 const EagerDeoptInfo* deopt_info) {
  detail::DeepForEachInput(deopt_info, [&](ValueNode* node,
@ -134,24 +79,6 @@ RegList GetGeneralRegistersUsedAsInputs(const EagerDeoptInfo* deopt_info) {
// Nodes
// ---

void SmiConstant::AllocateVreg(MaglevVregAllocationState* vreg_state) {
  DefineAsConstant(vreg_state, this);
}
void SmiConstant::GenerateCode(MaglevAssembler* masm,
                               const ProcessingState& state) {}

void Float64Constant::AllocateVreg(MaglevVregAllocationState* vreg_state) {
  DefineAsConstant(vreg_state, this);
}
void Float64Constant::GenerateCode(MaglevAssembler* masm,
                                   const ProcessingState& state) {}

void Constant::AllocateVreg(MaglevVregAllocationState* vreg_state) {
  DefineAsConstant(vreg_state, this);
}
void Constant::GenerateCode(MaglevAssembler* masm,
                            const ProcessingState& state) {}

void DeleteProperty::AllocateVreg(MaglevVregAllocationState* vreg_state) {
  using D = CallInterfaceDescriptorFor<Builtin::kDeleteProperty>::type;
  UseFixed(context(), kContextRegister);
@ -412,20 +339,6 @@ void GetSecondReturnedValue::GenerateCode(MaglevAssembler* masm,
#endif  // DEBUG
}

void InitialValue::AllocateVreg(MaglevVregAllocationState* vreg_state) {
  // TODO(leszeks): Make this nicer.
  result().SetUnallocated(compiler::UnallocatedOperand::FIXED_SLOT,
                          (StandardFrameConstants::kExpressionsOffset -
                           UnoptimizedFrameConstants::kRegisterFileFromFp) /
                                  kSystemPointerSize +
                              source().index(),
                          vreg_state->AllocateVirtualRegister());
}
void InitialValue::GenerateCode(MaglevAssembler* masm,
                                const ProcessingState& state) {
  // No-op, the value is already in the appropriate slot.
}

void LoadGlobal::AllocateVreg(MaglevVregAllocationState* vreg_state) {
  UseFixed(context(), kContextRegister);
  DefineAsFixed(vreg_state, this, kReturnRegister0);
@ -486,12 +399,6 @@ void RegisterInput::GenerateCode(MaglevAssembler* masm,
  // Nothing to be done, the value is already in the register.
}

void RootConstant::AllocateVreg(MaglevVregAllocationState* vreg_state) {
  DefineAsConstant(vreg_state, this);
}
void RootConstant::GenerateCode(MaglevAssembler* masm,
                                const ProcessingState& state) {}

void CreateEmptyArrayLiteral::AllocateVreg(
    MaglevVregAllocationState* vreg_state) {
  DefineAsFixed(vreg_state, this, kReturnRegister0);
@ -2190,40 +2097,6 @@ void GapMove::GenerateCode(MaglevAssembler* masm,
  }
}

void ConstantGapMove::AllocateVreg(MaglevVregAllocationState* vreg_state) {
  UNREACHABLE();
}

namespace {
template <typename T>
struct GetRegister;
template <>
struct GetRegister<Register> {
  static Register Get(compiler::AllocatedOperand target) {
    return target.GetRegister();
  }
};
template <>
struct GetRegister<DoubleRegister> {
  static DoubleRegister Get(compiler::AllocatedOperand target) {
    return target.GetDoubleRegister();
  }
};
}  // namespace
void ConstantGapMove::GenerateCode(MaglevAssembler* masm,
                                   const ProcessingState& state) {
  switch (node_->opcode()) {
#define CASE(Name)                                \
  case Opcode::k##Name:                           \
    return node_->Cast<Name>()->DoLoadToRegister( \
        masm, GetRegister<Name::OutputRegister>::Get(target()));
    CONSTANT_VALUE_NODE_LIST(CASE)
#undef CASE
    default:
      UNREACHABLE();
  }
}
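
The CASE / CONSTANT_VALUE_NODE_LIST pairing above is an X-macro dispatch: the node-list macro stamps out one switch case per constant node kind, so new kinds only need to be added to the list. A self-contained sketch of the same pattern, with an invented list and names:

#include <cstdio>

// Invented stand-in for CONSTANT_VALUE_NODE_LIST: one entry per node kind.
#define SKETCH_CONSTANT_NODE_LIST(V) \
  V(SmiConstant) V(Int32Constant) V(RootConstant)

enum class Opcode {
#define DEFINE_ENUM(Name) k##Name,
  SKETCH_CONSTANT_NODE_LIST(DEFINE_ENUM)
#undef DEFINE_ENUM
};

// Adding a kind to the list automatically extends this dispatch.
void LoadToRegister(Opcode op) {
  switch (op) {
#define CASE(Name)                                 \
  case Opcode::k##Name:                            \
    std::printf("load %s into register\n", #Name); \
    return;
    SKETCH_CONSTANT_NODE_LIST(CASE)
#undef CASE
  }
}

int main() {
  LoadToRegister(Opcode::kInt32Constant);
  return 0;
}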

namespace {

constexpr Builtin BuiltinFor(Operation operation) {
@ -3037,12 +2910,6 @@ void UnsafeSmiTag::GenerateCode(MaglevAssembler* masm,
  }
}

void Int32Constant::AllocateVreg(MaglevVregAllocationState* vreg_state) {
  DefineAsConstant(vreg_state, this);
}
void Int32Constant::GenerateCode(MaglevAssembler* masm,
                                 const ProcessingState& state) {}

void Int32ToNumber::AllocateVreg(MaglevVregAllocationState* vreg_state) {
  UseRegister(input());
  DefineAsRegister(vreg_state, this);
@ -4367,31 +4234,6 @@ void Switch::GenerateCode(MaglevAssembler* masm, const ProcessingState& state) {
  }
}

void Jump::AllocateVreg(MaglevVregAllocationState* vreg_state) {}
void Jump::GenerateCode(MaglevAssembler* masm, const ProcessingState& state) {
  // Avoid emitting a jump to the next block.
  if (target() != state.next_block()) {
    __ jmp(target()->label());
  }
}

void JumpToInlined::AllocateVreg(MaglevVregAllocationState* vreg_state) {}
void JumpToInlined::GenerateCode(MaglevAssembler* masm,
                                 const ProcessingState& state) {
  // Avoid emitting a jump to the next block.
  if (target() != state.next_block()) {
    __ jmp(target()->label());
  }
}
void JumpFromInlined::AllocateVreg(MaglevVregAllocationState* vreg_state) {}
void JumpFromInlined::GenerateCode(MaglevAssembler* masm,
                                   const ProcessingState& state) {
  // Avoid emitting a jump to the next block.
  if (target() != state.next_block()) {
    __ jmp(target()->label());
  }
}

namespace {

void AttemptOnStackReplacement(MaglevAssembler* masm,