[maglev][arm64] Fix some alignment issues with stack slot count

We make sure the number of stack slots in the graph will yield
an aligned stack in the prologue.

We also guarantee the number of extra slots used in a safepoint is
even.

Bug: v8:7700
Change-Id: Ib59fdc5e81dc8f0cf97b7122346cb2decbc58609
Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/4079164
Auto-Submit: Victor Gomes <victorgomes@chromium.org>
Commit-Queue: Victor Gomes <victorgomes@chromium.org>
Reviewed-by: Darius Mercadier <dmercadier@chromium.org>
Cr-Commit-Position: refs/heads/main@{#84667}
Author: Victor Gomes <victorgomes@chromium.org>, 2022-12-05 18:56:43 +01:00 (committed by V8 LUCI CQ)
Parent: d0d6ed66fd
Commit: acfc169b0c
4 changed files with 21 additions and 13 deletions
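
Why the parity matters: arm64 requires the stack pointer to stay 16-byte
aligned, so the total number of 8-byte frame slots must be even. The fixed
frame portion has an odd slot count, hence the register allocator now pads
the graph's slot count to be odd (see the last hunk below). A minimal
standalone sketch of that invariant, using illustrative constants rather
than V8's actual values:

    #include <cassert>

    constexpr int kFixedSlotCount = 5;  // stand-in value; assumed odd, as the CL asserts

    // Pads the untagged slot count so the whole frame
    // (fixed part + graph slots) ends up with an even slot count.
    int PadUntaggedSlots(int tagged, int untagged) {
      if ((tagged + untagged) % 2 == 0) untagged++;  // make graph slots odd
      assert((tagged + untagged + kFixedSlotCount) % 2 == 0);
      return untagged;
    }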

@@ -64,10 +64,10 @@ void MaglevAssembler::Prologue(Graph* graph) {
   //  that could clobber these registers.
   // Push the context and the JSFunction.
   Push(kContextRegister, kJSFunctionRegister);
-  // Push the actual argument count a _possible_ stack slot.
+  // Push the actual argument count and a _possible_ stack slot.
   Push(kJavaScriptCallArgCountRegister, xzr);
   int remaining_stack_slots = code_gen_state()->stack_slots() - 1;
   DCHECK_GE(remaining_stack_slots, 0);
   {
     ASM_CODE_COMMENT_STRING(this, " Stack/interrupt check");
     // Stack check. This folds the checks for both the interrupt stack limit
@@ -103,8 +103,7 @@ void MaglevAssembler::Prologue(Graph* graph) {
           __ PopAll(RegisterInput::kAllowedRegisters);
           __ B(*done);
         },
-        deferred_call_stack_guard_return,
-        remaining_stack_slots > 0 ? remaining_stack_slots : 0);
+        deferred_call_stack_guard_return, remaining_stack_slots);
     bind(*deferred_call_stack_guard_return);
   }
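
The prologue relies on that invariant: arm64 pushes registers in pairs
(xzr fills the second slot next to the argument count), so with
stack_slots() now guaranteed odd, that padding slot doubles as the first
graph slot and the remainder is even. A hedged sketch of the accounting,
not V8 code:

    #include <cassert>

    // stack_slots plays the role of code_gen_state()->stack_slots().
    void CheckPrologueSlotAccounting(int stack_slots) {
      assert(stack_slots % 2 == 1);     // odd on arm64 after this change
      int remaining = stack_slots - 1;  // the xzr push claimed one slot
      assert(remaining >= 0);           // why the "> 0 ?" guard could go
      assert(remaining % 2 == 0);       // the rest can be reserved in pairs
    }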

@@ -213,7 +213,13 @@ class SaveRegisterStateForCall {
       }
       pushed_reg_index++;
     }
+#ifdef V8_TARGET_ARCH_ARM64
+    pushed_reg_index = RoundUp<2>(pushed_reg_index);
+#endif
     int num_pushed_double_reg = snapshot_.live_double_registers.Count();
+#ifdef V8_TARGET_ARCH_ARM64
+    num_pushed_double_reg = RoundUp<2>(num_pushed_double_reg);
+#endif
     safepoint.SetNumPushedRegisters(pushed_reg_index + num_pushed_double_reg);
     return safepoint;
   }
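
RoundUp<2> rounds each pushed-register count up to the next even number,
so the safepoint also covers the padding slot that an arm64 push sequence
inserts for an odd register count. A minimal stand-in for the helper,
shown only for illustration (V8 ships its own RoundUp utility):

    template <int N>
    constexpr int RoundUp(int n) {
      return (n + N - 1) / N * N;
    }
    static_assert(RoundUp<2>(3) == 4 && RoundUp<2>(4) == 4);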

@@ -40,14 +40,7 @@ class MaglevCodeGenerator final {
   int stack_slot_count() const { return code_gen_state_.stack_slots(); }
   int stack_slot_count_with_fixed_frame() const {
-    // TODO(victorgomes): Find a better solution for this instead of ifdeffing
-    // the arch.
-#ifdef V8_TARGET_ARCH_ARM64
-    return RoundUp(stack_slot_count() + StandardFrameConstants::kFixedSlotCount,
-                   2);
-#else
     return stack_slot_count() + StandardFrameConstants::kFixedSlotCount;
-#endif
   }
   MaglevAssembler* masm() { return &masm_; }
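
With the allocator guaranteeing an odd graph slot count and
kFixedSlotCount odd, the sum is always even, so the arm64-only
RoundUp(..., 2) above became a no-op and could be dropped. Illustrative
check with assumed values:

    static_assert((3 /* odd graph slots */ + 5 /* odd fixed slots */) % 2 == 0,
                  "odd + odd is even, so no rounding is needed");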

@@ -171,8 +171,18 @@ StraightForwardRegisterAllocator::StraightForwardRegisterAllocator(
     : compilation_info_(compilation_info), graph_(graph) {
   ComputePostDominatingHoles();
   AllocateRegisters();
-  graph_->set_tagged_stack_slots(tagged_.top);
-  graph_->set_untagged_stack_slots(untagged_.top);
+  uint32_t tagged_stack_slots = tagged_.top;
+  uint32_t untagged_stack_slots = untagged_.top;
+#ifdef V8_TARGET_ARCH_ARM64
+  // Due to alignment constraints, we add one untagged slot if
+  // stack_slots + fixed_slot_count is odd.
+  static_assert(StandardFrameConstants::kFixedSlotCount % 2 == 1);
+  if ((tagged_stack_slots + untagged_stack_slots) % 2 == 0) {
+    untagged_stack_slots++;
+  }
+#endif  // V8_TARGET_ARCH_ARM64
+  graph_->set_tagged_stack_slots(tagged_stack_slots);
+  graph_->set_untagged_stack_slots(untagged_stack_slots);
 }

 StraightForwardRegisterAllocator::~StraightForwardRegisterAllocator() = default;