[maglev] Fix dead predecessors after EmitUnconditionalDeopt

Fixes the iteration after emitting an unconditional deopt to kill all
Jumps along the way, not just the ones preceding a merge point. This
fixes several issues:

  a) That Jump may be to a not-yet-created merge point, in which case we
     were getting a nullptr deref.
  b) Not-yet-created merge points would not be detected as merge points,
     so we'd skip over them and miss killing the control node before
     them.
  c) We weren't reducing predecessor counts, so even after fixing the
     nullptr deref above, merge states created later would have the wrong
     predecessor count.

Now, we check bytecode targets (including the fallthrough for
non-returning bytecodes) for every bytecode, and skip over both
not-yet-created merges and loop merges that have no predecessors other
than the loop jump itself.
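
To make that concrete, here is a minimal, self-contained sketch of the
idea (toy types and offsets, not V8 code; the real logic is the
EmitUnconditionalDeopt change in the diff below): every control-flow
target of a bytecode in the dead region gives up one expected
predecessor.

  // Toy model of the dead-region walk: each dead bytecode's targets
  // (jump target, switch targets, fallthrough) lose one predecessor.
  #include <cstdio>
  #include <map>
  #include <vector>

  struct DeadBytecode {
    int offset;
    std::vector<int> targets;  // jump/switch targets plus fallthrough
  };

  int main() {
    // Hypothetical predecessor counts, as seeded by a forward pass.
    std::map<int, int> predecessor_count = {{10, 2}, {20, 3}, {30, 1}};

    // Bytecodes sitting after an unconditional deopt: they never execute,
    // so each of their targets loses one expected predecessor.
    std::vector<DeadBytecode> dead_region = {
        {2, {10, 4}},  // conditional jump: target 10 plus fallthrough 4
        {4, {20}},     // unconditional jump to 20
    };
    for (const DeadBytecode& b : dead_region) {
      for (int target : b.targets) {
        auto it = predecessor_count.find(target);
        if (it != predecessor_count.end()) --it->second;
      }
    }

    for (const auto& [offset, count] : predecessor_count) {
      std::printf("offset %d: %d live predecessor(s)\n", offset, count);
    }
  }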

As part of this, the dead predecessor merging is changed: instead of
setting the predecessor to nullptr, we decrement the predecessor count
by one and trim the input count of any Phis.
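
The merge-state side of that change can be pictured with another small
toy (again not V8 code; the real logic is
MergePointInterpreterFrameState::MergeDead and
NodeBase::reduce_input_count below): when a predecessor turns out to be
dead, the expected predecessor count shrinks and every Phi at that merge
point drops one input.

  // Toy model: a dead predecessor shrinks the merge's expected predecessor
  // count and trims the matching Phi inputs, instead of storing a nullptr.
  #include <cassert>
  #include <cstdio>
  #include <vector>

  struct Phi {
    std::vector<int> inputs;  // one input per predecessor, in merge order
    void reduce_input_count() { inputs.pop_back(); }
  };

  struct MergeState {
    int predecessor_count;
    int predecessors_so_far;
    std::vector<Phi*> phis;

    void MergeDead() {
      assert(predecessors_so_far < predecessor_count);
      predecessor_count--;  // one fewer predecessor will ever arrive
      for (Phi* phi : phis) phi->reduce_input_count();
    }
  };

  int main() {
    Phi phi{{1, 2, 3}};              // sized for three predecessors
    MergeState state{3, 2, {&phi}};  // two predecessors merged so far
    state.MergeDead();               // the third predecessor is dead
    std::printf("predecessors: %d, phi inputs: %zu\n",
                state.predecessor_count, phi.inputs.size());
  }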

Bug: v8:7700
Change-Id: I904c82df7c5dd44d7637e07f6750b35e7e219284
Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/3599470
Reviewed-by: Victor Gomes <victorgomes@chromium.org>
Commit-Queue: Leszek Swirski <leszeks@chromium.org>
Cr-Commit-Position: refs/heads/main@{#80083}
Authored by Leszek Swirski on 2022-04-21 14:20:07 +02:00, committed by V8 LUCI CQ
parent 91badec697
commit f7cc70de2b
4 changed files with 108 additions and 38 deletions

@@ -955,6 +955,16 @@ void MaglevGraphBuilder::MergeIntoFrameState(BasicBlock* predecessor,
   }
 }
 
+void MaglevGraphBuilder::MergeDeadIntoFrameState(int target) {
+  // If there is no merge state yet, don't create one, but just reduce the
+  // number of possible predecessors to zero.
+  predecessors_[target]--;
+  if (merge_states_[target]) {
+    // If there already is a frame state, merge.
+    merge_states_[target]->MergeDead(*compilation_unit_, target);
+  }
+}
+
 void MaglevGraphBuilder::MergeIntoInlinedReturnFrameState(
     BasicBlock* predecessor) {
   int target = inline_exit_offset();

@@ -135,38 +135,54 @@ class MaglevGraphBuilder {
     BasicBlock* block = CreateBlock<Deopt>({});
     ResolveJumpsToBlockAtOffset(block, block_offset_);
 
-    // Skip any bytecodes remaining in the block, up to the next merge point.
-    while (!IsOffsetAMergePoint(iterator_.next_offset())) {
+    // Skip any bytecodes remaining in the block, up to the next merge point
+    // with non-dead predecessors.
+    int saved_offset = iterator_.current_offset();
+    while (true) {
       iterator_.Advance();
       if (iterator_.done()) break;
+      if (IsOffsetAMergePoint(iterator_.current_offset())) {
+        // Loops that are unreachable aside from their back-edge are going to
+        // be entirely unreachable, thanks to irreducibility.
+        if (!merge_states_[iterator_.current_offset()]->is_unreachable_loop()) {
+          break;
+        }
+      }
+
+      // If the current bytecode is a jump to elsewhere, then this jump is
+      // also dead and we should make sure to merge it as a dead predecessor.
+      interpreter::Bytecode bytecode = iterator_.current_bytecode();
+      if (interpreter::Bytecodes::IsForwardJump(bytecode)) {
+        // Jumps merge into their target, and conditional jumps also merge into
+        // the fallthrough.
+        MergeDeadIntoFrameState(iterator_.GetJumpTargetOffset());
+        if (interpreter::Bytecodes::IsConditionalJump(bytecode)) {
+          MergeDeadIntoFrameState(iterator_.next_offset());
+        }
+      } else if (bytecode == interpreter::Bytecode::kJumpLoop) {
+        // JumpLoop merges into its loop header, which has to be treated
+        // specially by the merge..
+        int target = iterator_.GetJumpTargetOffset();
+        merge_states_[target]->MergeDeadLoop();
+      } else if (interpreter::Bytecodes::IsSwitch(bytecode)) {
+        // Switches merge into their targets, and into the fallthrough.
+        for (auto offset : iterator_.GetJumpTableTargetOffsets()) {
+          MergeDeadIntoFrameState(offset.target_offset);
+        }
+        MergeDeadIntoFrameState(iterator_.next_offset());
+      } else if (!interpreter::Bytecodes::Returns(bytecode) &&
+                 !interpreter::Bytecodes::UnconditionallyThrows(bytecode)) {
+        // Any other bytecode that doesn't return or throw will merge into the
+        // fallthrough.
+        MergeDeadIntoFrameState(iterator_.next_offset());
+      }
+      saved_offset = iterator_.current_offset();
     }
 
-    // If there is control flow out of this block, we need to kill the merges
-    // into the control flow targets.
-    interpreter::Bytecode bytecode = iterator_.current_bytecode();
-    if (interpreter::Bytecodes::IsForwardJump(bytecode)) {
-      // Jumps merge into their target, and conditional jumps also merge into
-      // the fallthrough.
-      merge_states_[iterator_.GetJumpTargetOffset()]->MergeDead();
-      if (interpreter::Bytecodes::IsConditionalJump(bytecode)) {
-        merge_states_[iterator_.next_offset()]->MergeDead();
-      }
-    } else if (bytecode == interpreter::Bytecode::kJumpLoop) {
-      // JumpLoop merges into its loop header, which has to be treated specially
-      // by the merge..
-      merge_states_[iterator_.GetJumpTargetOffset()]->MergeDeadLoop();
-    } else if (interpreter::Bytecodes::IsSwitch(bytecode)) {
-      // Switches merge into their targets, and into the fallthrough.
-      for (auto offset : iterator_.GetJumpTableTargetOffsets()) {
-        merge_states_[offset.target_offset]->MergeDead();
-      }
-      merge_states_[iterator_.next_offset()]->MergeDead();
-    } else if (!interpreter::Bytecodes::Returns(bytecode) &&
-               !interpreter::Bytecodes::UnconditionallyThrows(bytecode)) {
-      // Any other bytecode that doesn't return or throw will merge into the
-      // fallthrough.
-      merge_states_[iterator_.next_offset()]->MergeDead();
-    }
+    // Restore the offset to before the Advance, so that the bytecode visiting
+    // loop can Advance it again.
+    iterator_.SetOffset(saved_offset);
   }
 
   void VisitSingleBytecode() {
@@ -496,6 +512,7 @@ class MaglevGraphBuilder {
   void VisitBinarySmiOperation();
 
   void MergeIntoFrameState(BasicBlock* block, int target);
+  void MergeDeadIntoFrameState(int target);
   void MergeIntoInlinedReturnFrameState(BasicBlock* block);
   void BuildBranchIfTrue(ValueNode* node, int true_target, int false_target);
   void BuildBranchIfToBooleanTrue(ValueNode* node, int true_target,

@@ -307,7 +307,7 @@ class MergePointInterpreterFrameState {
 
   // Merges an unmerged framestate with a possibly merged framestate into |this|
   // framestate.
-  void MergeLoop(const MaglevCompilationUnit& compilation_unit,
+  void MergeLoop(MaglevCompilationUnit& compilation_unit,
                  const InterpreterFrameState& loop_end_state,
                  BasicBlock* loop_end_block, int merge_offset) {
     DCHECK_EQ(predecessors_so_far_, predecessor_count_);
@@ -318,19 +318,25 @@ class MergePointInterpreterFrameState {
         compilation_unit, [&](ValueNode* value, interpreter::Register reg) {
           CheckIsLoopPhiIfNeeded(compilation_unit, merge_offset, reg, value);
-          MergeLoopValue(compilation_unit.zone(), reg, value,
-                         loop_end_state.get(reg), merge_offset);
+          MergeLoopValue(compilation_unit, reg, value, loop_end_state.get(reg),
+                         merge_offset);
         });
   }
 
   // Merges a dead framestate (e.g. one which has been early terminated with a
   // deopt).
-  void MergeDead() {
+  void MergeDead(const MaglevCompilationUnit& compilation_unit,
+                 int merge_offset) {
     DCHECK_GT(predecessor_count_, 1);
     DCHECK_LT(predecessors_so_far_, predecessor_count_);
-    predecessors_[predecessors_so_far_] = kDeadPredecessor;
-    predecessors_so_far_++;
+    predecessor_count_--;
     DCHECK_LE(predecessors_so_far_, predecessor_count_);
+
+    frame_state_.ForEachValue(
+        compilation_unit, [&](ValueNode* value, interpreter::Register reg) {
+          CheckIsLoopPhiIfNeeded(compilation_unit, merge_offset, reg, value);
+          ReducePhiPredecessorCount(reg, value, merge_offset);
+        });
   }
 
   // Merges a dead loop framestate (e.g. one where the block containing the
@@ -358,11 +364,18 @@ class MergePointInterpreterFrameState {
   int predecessor_count() const { return predecessor_count_; }
 
   BasicBlock* predecessor_at(int i) const {
-    DCHECK_EQ(predecessors_so_far_, predecessor_count_);
+    // DCHECK_EQ(predecessors_so_far_, predecessor_count_);
     DCHECK_LT(i, predecessor_count_);
     return predecessors_[i];
   }
 
+  bool is_unreachable_loop() const {
+    DCHECK_EQ(predecessors_so_far_, predecessor_count_);
+    // If there is only one predecessor, and it's not set, then this is a loop
+    // merge with no forward control flow entering it.
+    return predecessor_count_ == 1 && predecessors_[0] == nullptr;
+  }
+
  private:
   friend void InterpreterFrameState::CopyFrom(
       const MaglevCompilationUnit& info,
@@ -458,9 +471,28 @@ class MergePointInterpreterFrameState {
     return result;
   }
 
-  void MergeLoopValue(Zone* zone, interpreter::Register owner,
-                      ValueNode* merged, ValueNode* unmerged,
-                      int merge_offset) {
+  void ReducePhiPredecessorCount(interpreter::Register owner, ValueNode* merged,
+                                 int merge_offset) {
+    // If the merged node is null, this is a pre-created loop header merge
+    // frame with null values for anything that isn't a loop Phi.
+    if (merged == nullptr) {
+      DCHECK_NULL(predecessors_[0]);
+      DCHECK_EQ(predecessors_so_far_, 1);
+      return;
+    }
+    Phi* result = merged->TryCast<Phi>();
+    if (result != nullptr && result->merge_offset() == merge_offset) {
+      // It's possible that merged == unmerged at this point since loop-phis are
+      // not dropped if they are only assigned to themselves in the loop.
+      DCHECK_EQ(result->owner(), owner);
+      result->reduce_input_count();
+    }
+  }
+
+  void MergeLoopValue(MaglevCompilationUnit& compilation_unit,
+                      interpreter::Register owner, ValueNode* merged,
+                      ValueNode* unmerged, int merge_offset) {
     Phi* result = merged->TryCast<Phi>();
     if (result == nullptr || result->merge_offset() != merge_offset) {
       DCHECK_EQ(merged, unmerged);

@@ -579,6 +579,16 @@ class NodeBase : public ZoneObject {
     new (input_address(index)) Input(input);
   }
 
+  // For nodes that don't have data past the input, allow trimming the input
+  // count. This is used by Phis to reduce inputs when merging in dead control
+  // flow.
+  void reduce_input_count() {
+    DCHECK_EQ(opcode(), Opcode::kPhi);
+    DCHECK(!properties().can_lazy_deopt());
+    DCHECK(!properties().can_eager_deopt());
+    bit_field_ = InputCountField::update(bit_field_, input_count() - 1);
+  }
+
   void set_temporaries_needed(int value) {
 #ifdef DEBUG
     DCHECK_EQ(kTemporariesState, kUnset);
@@ -1311,6 +1321,7 @@ class Phi : public ValueNodeT<Phi> {
   interpreter::Register owner() const { return owner_; }
   int merge_offset() const { return merge_offset_; }
 
+  using Node::reduce_input_count;
   using Node::set_input;
 
   void AllocateVreg(MaglevVregAllocationState*, const ProcessingState&);
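
A final note on reduce_input_count: per the comment above, it is only
safe for nodes with no data past their inputs (hence the Phi DCHECK), so
shrinking the count stored in the bit field is enough. A rough sketch of
that bit-field update, with a made-up layout (V8 has its own BitField
helper; this is only an illustration):

  // Toy bit field (hypothetical layout, not V8's): the input count occupies
  // a few bits of bit_field_, so trimming an input just rewrites those bits.
  #include <cstdint>
  #include <cstdio>

  struct InputCountField {
    static constexpr uint32_t kShift = 3;
    static constexpr uint32_t kMask = 0xFFu << kShift;
    static uint32_t decode(uint32_t f) { return (f & kMask) >> kShift; }
    static uint32_t update(uint32_t f, uint32_t v) {
      return (f & ~kMask) | ((v << kShift) & kMask);
    }
  };

  int main() {
    uint32_t bit_field = InputCountField::update(0, 3);  // a Phi with 3 inputs
    // Analogue of NodeBase::reduce_input_count(): drop one input in place.
    bit_field = InputCountField::update(bit_field,
                                        InputCountField::decode(bit_field) - 1);
    std::printf("inputs: %u\n", InputCountField::decode(bit_field));
  }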