[maglev] Revive resumable loops

It's possible that the path into a resumable loop looks dead, while the
loop body itself is resumable and is being optimized due to an active
generator running the loop. By reviving resumable loop headers, we have
a chance to properly optimize such generators (and avoid deoptimizing
them prematurely).

Bug: v8:7700
Change-Id: Icf5dadba17a7fd38409193e1e3f702f108a5639e
Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/3918093
Commit-Queue: Toon Verwaest <verwaest@chromium.org>
Auto-Submit: Toon Verwaest <verwaest@chromium.org>
Reviewed-by: Leszek Swirski <leszeks@chromium.org>
Cr-Commit-Position: refs/heads/main@{#83432}
Authored by Toon Verwaest on 2022-09-26 16:06:19 +02:00; committed by V8 LUCI CQ
parent 2cda9fc676
commit 5e1ebeb9a5
6 changed files with 37 additions and 29 deletions
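
The core of the change is visible in the MergePointInterpreterFrameState hunks further down: a loop merge point whose only forward predecessor is dead used to be treated as unreachable, whereas a resumable loop header is now kept alive. Below is a minimal standalone sketch of that decision; the names (ToyLoopMergeState, etc.) are invented for illustration and are not the actual Maglev classes.

// Toy model of the check this CL relaxes; illustrative names only.
#include <cassert>
#include <iostream>

struct ToyLoopMergeState {
  int predecessor_count = 1;    // only the forward entry has been counted
  int predecessors_so_far = 0;  // and it never arrived (the path is dead)
  bool is_resumable = false;    // set when the loop body contains a resume point

  // Mirrors is_unreachable_loop(): a loop header with a single, never-merged
  // predecessor is dead -- unless the loop is resumable, in which case an
  // active generator can still jump back into its body.
  bool is_unreachable_loop() const {
    return !is_resumable && predecessor_count == 1 && predecessors_so_far == 0;
  }
};

int main() {
  ToyLoopMergeState plain_loop;
  ToyLoopMergeState generator_loop;
  generator_loop.is_resumable = true;

  assert(plain_loop.is_unreachable_loop());       // safe to drop
  assert(!generator_loop.is_unreachable_loop());  // must be revived
  std::cout << "resumable loop header stays alive\n";
}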


@@ -529,6 +529,10 @@ void BytecodeAnalysis::Analyze() {
ResumeJumpTarget::Leaf(suspend_id, resume_offset));
}
if (bytecode == Bytecode::kResumeGenerator) {
current_loop_info->mark_resumable();
}
// If we've reached the header of the loop, pop it off the stack.
if (current_offset == current_loop.header_offset) {
loop_stack_.pop();
@@ -536,6 +540,10 @@ void BytecodeAnalysis::Analyze() {
// If there is still an outer loop, propagate inner loop assignments.
LoopInfo* parent_loop_info = loop_stack_.top().loop_info;
if (current_loop_info->resumable()) {
parent_loop_info->mark_resumable();
}
parent_loop_info->assignments().Union(
current_loop_info->assignments());
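
The hunk above is where the flag gets set and propagated: seeing kResumeGenerator marks the innermost loop resumable, and when a loop header is popped off loop_stack_ the flag is folded into the parent loop. A small self-contained sketch of that propagation, with invented names (ToyLoopInfo, PropagateResumable) standing in for the real analysis:

#include <cassert>
#include <vector>

// Stand-in for LoopInfo; only the resumable flag is modeled here.
struct ToyLoopInfo {
  bool resumable = false;
  void mark_resumable() { resumable = true; }
};

// Pops loops from innermost to outermost, propagating resumability to the
// parent, so every loop enclosing a resume point ends up marked resumable.
void PropagateResumable(std::vector<ToyLoopInfo*>& loop_stack) {
  while (loop_stack.size() > 1) {
    ToyLoopInfo* current = loop_stack.back();
    loop_stack.pop_back();
    if (current->resumable) loop_stack.back()->mark_resumable();
  }
}

int main() {
  ToyLoopInfo outer, inner;
  inner.mark_resumable();  // as if kResumeGenerator was seen in the inner body
  std::vector<ToyLoopInfo*> stack = {&outer, &inner};
  PropagateResumable(stack);
  assert(outer.resumable);  // the enclosing loop is resumable too
}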


@@ -73,6 +73,8 @@ struct V8_EXPORT_PRIVATE LoopInfo {
resume_jump_targets_(zone) {}
int parent_offset() const { return parent_offset_; }
bool resumable() const { return resumable_; }
void mark_resumable() { resumable_ = true; }
const ZoneVector<ResumeJumpTarget>& resume_jump_targets() const {
return resume_jump_targets_;
@@ -87,6 +89,7 @@ struct V8_EXPORT_PRIVATE LoopInfo {
private:
// The offset to the parent loop, or -1 if there is no parent.
int parent_offset_;
bool resumable_ = false;
BytecodeLoopAssignments assignments_;
ZoneVector<ResumeJumpTarget> resume_jump_targets_;
};


@@ -187,8 +187,8 @@ void MaglevGraphBuilder::BuildMergeStates() {
std::cout << "- Creating loop merge state at @" << offset << std::endl;
}
merge_states_[offset] = MergePointInterpreterFrameState::NewForLoop(
*compilation_unit_, offset, NumPredecessors(offset), liveness,
&loop_info);
current_interpreter_frame_, *compilation_unit_, offset,
NumPredecessors(offset), liveness, &loop_info);
}
if (bytecode().handler_table_size() > 0) {


@@ -207,31 +207,6 @@ class MaglevGraphBuilder {
// Any other bytecode that doesn't return or throw will merge into the
// fallthrough.
MergeDeadIntoFrameState(iterator_.next_offset());
} else if (bytecode == interpreter::Bytecode::kSuspendGenerator) {
// Extra special case for SuspendGenerator, if the suspend is dead then
// the resume has to be dead too. However, the resume already has a merge
// state, with exactly one predecessor (the generator switch), so it will
// be revived along the standard path. This can cause havoc if e.g. the
// suspend/resume are inside a dead loop, because the JumpLoop can become
// live again.
//
// So, manually advance the iterator to the resume, go through the motions
// of processing the merge state, but immediately emit an unconditional
// deopt (which also kills the resume).
iterator_.Advance();
DCHECK_EQ(iterator_.current_bytecode(),
interpreter::Bytecode::kResumeGenerator);
int resume_offset = iterator_.current_offset();
DCHECK_EQ(NumPredecessors(resume_offset), 1);
ProcessMergePoint(resume_offset);
StartNewBlock(resume_offset);
// TODO(v8:7700): This approach is not ideal. We can create a deopt-reopt
// loop: the interpreted code runs, creates a generator while feedback is
// still not yet allocated, then suspends the generator, tiers up to
// maglev, and reaches this deopt. We then deopt, but since the generator
// is never created again, we re-opt without the suspend part and we loop!
EmitUnconditionalDeopt(DeoptimizeReason::kSuspendGeneratorIsDead);
return;
}
// TODO(leszeks): We could now continue iterating the bytecode


@@ -442,6 +442,7 @@ class MergePointInterpreterFrameState {
}
static MergePointInterpreterFrameState* NewForLoop(
const InterpreterFrameState& start_state,
const MaglevCompilationUnit& info, int merge_offset,
int predecessor_count, const compiler::BytecodeLivenessState* liveness,
const compiler::LoopInfo* loop_info) {
@@ -450,6 +451,11 @@ class MergePointInterpreterFrameState {
info, predecessor_count, 0,
info.zone()->NewArray<BasicBlock*>(predecessor_count),
BasicBlockType::kLoopHeader, liveness);
if (loop_info->resumable()) {
state->known_node_aspects_ =
info.zone()->New<KnownNodeAspects>(info.zone());
state->is_resumable_loop_ = true;
}
auto& assignments = loop_info->assignments();
auto& frame_state = state->frame_state_;
frame_state.ForEachParameter(
@@ -457,6 +463,10 @@ class MergePointInterpreterFrameState {
entry = nullptr;
if (assignments.ContainsParameter(reg.ToParameterIndex())) {
entry = state->NewLoopPhi(info.zone(), reg, merge_offset);
} else if (state->is_resumable_loop()) {
// Copy initial values out of the start state.
entry = start_state.get(reg);
DCHECK(entry->Is<InitialValue>());
}
});
// TODO(v8:7700): Add contexts into assignment analysis.
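
The two NewForLoop hunks above change what a loop-header frame slot starts out as: registers assigned inside the loop still get a loop phi, but in a resumable loop the untouched registers are seeded from the start state instead of being left empty. A compact sketch of that per-register decision, using invented stand-ins (ToyValue, LoopHeaderEntry) rather than the real ValueNode/InitialValue types:

#include <cassert>

// Illustrative value kinds; stand-ins for Maglev's Phi/InitialValue nodes.
struct ToyValue { const char* kind; };
ToyValue kInitial{"InitialValue"};
ToyValue kLoopPhi{"Phi"};

// Decide what a frame slot holds at a loop header.
const ToyValue* LoopHeaderEntry(bool assigned_in_loop, bool resumable_loop,
                                const ToyValue* start_state_value) {
  if (assigned_in_loop) return &kLoopPhi;        // written in the body: needs a phi
  if (resumable_loop) return start_state_value;  // copy the value from the start state
  return nullptr;                                // otherwise left empty, as before
}

int main() {
  // A resumable loop keeps initial values for untouched slots so the resume
  // path sees a fully populated frame.
  assert(LoopHeaderEntry(false, true, &kInitial) == &kInitial);
  // A slot written inside the loop becomes a phi either way.
  assert(LoopHeaderEntry(true, true, &kInitial) == &kLoopPhi);
  // Non-resumable loops still leave untouched slots empty.
  assert(LoopHeaderEntry(false, false, &kInitial) == nullptr);
}
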
@@ -566,7 +576,7 @@ class MergePointInterpreterFrameState {
// deopt).
void MergeDead(const MaglevCompilationUnit& compilation_unit,
int merge_offset) {
DCHECK_GT(predecessor_count_, 1);
DCHECK_GE(predecessor_count_, 1);
DCHECK_LT(predecessors_so_far_, predecessor_count_);
predecessor_count_--;
DCHECK_LE(predecessors_so_far_, predecessor_count_);
@@ -630,9 +640,12 @@ class MergePointInterpreterFrameState {
bool is_unreachable_loop() const {
// If there is only one predecessor, and it's not set, then this is a loop
// merge with no forward control flow entering it.
return is_loop() && predecessor_count_ == 1 && predecessors_so_far_ == 0;
return is_loop() && !is_resumable_loop() && predecessor_count_ == 1 &&
predecessors_so_far_ == 0;
}
bool is_resumable_loop() const { return is_resumable_loop_; }
private:
friend void InterpreterFrameState::CopyFrom(
const MaglevCompilationUnit& info,
@@ -832,6 +845,7 @@ class MergePointInterpreterFrameState {
int predecessor_count_;
int predecessors_so_far_;
bool is_resumable_loop_ = false;
BasicBlock** predecessors_;
BasicBlockType basic_block_type_;


@@ -319,6 +319,14 @@ void StraightForwardRegisterAllocator::AllocateRegisters() {
if (block->state()->is_exception_handler()) {
// Exceptions start from a blank state of register values.
ClearRegisterValues();
} else if (block->state()->is_resumable_loop() &&
block->state()->predecessor_count() <= 1) {
// Loops that are only reachable through JumpLoop start from a blank
// state of register values.
// This should actually only support predecessor_count == 1, but we
// currently don't eliminate resumable loop headers (and subsequent code
// until the next resume) that end up being unreachable from JumpLoop.
ClearRegisterValues();
} else {
InitializeRegisterValues(block->state()->register_state());
}
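
The register-allocator hunk picks one of three starting register states for a block: blank for exception handlers, blank for resumable loop headers that are only reachable through JumpLoop, and the merged register state otherwise. A sketch of that dispatch with invented names (ToyBlockState, PickRegisterStart), not the real allocator API:

#include <cassert>

// Illustrative block-state flags; not the real MergePointInterpreterFrameState API.
struct ToyBlockState {
  bool is_exception_handler = false;
  bool is_resumable_loop = false;
  int predecessor_count = 2;
};

enum class RegisterStart { kBlank, kFromMergedState };

// Exception handlers and resumable loops reachable only via JumpLoop start
// from scratch; every other block inherits the merged register state.
RegisterStart PickRegisterStart(const ToyBlockState& state) {
  if (state.is_exception_handler) return RegisterStart::kBlank;
  if (state.is_resumable_loop && state.predecessor_count <= 1)
    return RegisterStart::kBlank;
  return RegisterStart::kFromMergedState;
}

int main() {
  ToyBlockState resumable_header;
  resumable_header.is_resumable_loop = true;
  resumable_header.predecessor_count = 1;  // only the JumpLoop backedge remains
  assert(PickRegisterStart(resumable_header) == RegisterStart::kBlank);

  ToyBlockState normal_merge;  // a regular merge with live forward predecessors
  assert(PickRegisterStart(normal_merge) == RegisterStart::kFromMergedState);
}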