[turboshaft] Refactor Call and multi-output handling in GraphBuilder

Bug: v8:12783
Change-Id: I0f7896259f0c1344b0a1d9583093de7bec930a77
Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/4044221
Reviewed-by: Tobias Tebbi <tebbi@chromium.org>
Commit-Queue: Darius Mercadier <dmercadier@chromium.org>
Cr-Commit-Position: refs/heads/main@{#84486}
Author: Darius M, 2022-11-25 11:44:40 +01:00 (committed by V8 LUCI CQ)
Commit: 1618cb9f99, parent: d2305bebcd
9 changed files with 346 additions and 226 deletions


@ -75,8 +75,8 @@ class ReducerBaseForwarder : public Next {
};
// ReducerBase provides default implementations of Branch-related Operations
-// (Goto, Branch, Switch, CatchException), and takes care of updating Block
-// predecessors (and calls the Assembler to maintain split-edge form).
+// (Goto, Branch, Switch, CallAndCatchException), and takes care of updating
+// Block predecessors (and calls the Assembler to maintain split-edge form).
// ReducerBase is always added by Assembler at the bottom of the reducer stack.
template <class Next>
class ReducerBase : public ReducerBaseForwarder<Next> {
@ -135,15 +135,17 @@ class ReducerBase : public ReducerBaseForwarder<Next> {
return new_opindex;
}
-OpIndex ReduceCatchException(OpIndex call, Block* if_success,
-Block* if_exception) {
+OpIndex ReduceCallAndCatchException(OpIndex callee, OpIndex frame_state,
+base::Vector<const OpIndex> arguments,
+Block* if_success, Block* if_exception,
+const TSCallDescriptor* descriptor) {
// {if_success} and {if_exception} should never be the same. If we ever
// decide to lift this condition, then AddPredecessor and SplitEdge should
// be updated accordingly.
DCHECK_NE(if_success, if_exception);
Block* saved_current_block = Asm().current_block();
-OpIndex new_opindex =
-Base::ReduceCatchException(call, if_success, if_exception);
+OpIndex new_opindex = Base::ReduceCallAndCatchException(
+callee, frame_state, arguments, if_success, if_exception, descriptor);
Asm().AddPredecessor(saved_current_block, if_success, true);
Asm().AddPredecessor(saved_current_block, if_exception, true);
return new_opindex;
@ -784,9 +786,6 @@ class AssemblerOpInterface {
Block* default_case) {
stack().ReduceSwitch(input, cases, default_case);
}
-OpIndex CatchException(OpIndex call, Block* if_success, Block* if_exception) {
-return stack().ReduceCatchException(call, if_success, if_exception);
-}
void Unreachable() { stack().ReduceUnreachable(); }
OpIndex Parameter(int index, RegisterRepresentation rep,
@ -801,16 +800,17 @@ class AssemblerOpInterface {
Return(Word32Constant(0), base::VectorOf({result}));
}
-OpIndex Call(OpIndex callee, base::Vector<const OpIndex> arguments,
+OpIndex Call(OpIndex callee, OpIndex frame_state,
+base::Vector<const OpIndex> arguments,
const TSCallDescriptor* descriptor) {
-return stack().ReduceCall(callee, arguments, descriptor);
+return stack().ReduceCall(callee, frame_state, arguments, descriptor);
}
-OpIndex CallMaybeDeopt(OpIndex callee, base::Vector<const OpIndex> arguments,
-const TSCallDescriptor* descriptor,
-OpIndex frame_state) {
-OpIndex call = stack().ReduceCall(callee, arguments, descriptor);
-stack().ReduceCheckLazyDeopt(call, frame_state);
-return call;
+OpIndex CallAndCatchException(OpIndex callee, OpIndex frame_state,
+base::Vector<const OpIndex> arguments,
+Block* if_success, Block* if_exception,
+const TSCallDescriptor* descriptor) {
+return stack().ReduceCallAndCatchException(
+callee, frame_state, arguments, if_success, if_exception, descriptor);
}
void TailCall(OpIndex callee, base::Vector<const OpIndex> arguments,
const TSCallDescriptor* descriptor) {
@ -852,6 +852,9 @@ class AssemblerOpInterface {
return stack().ReducePendingLoopPhi(first, rep, old_backedge_index);
}
OpIndex Tuple(base::Vector<OpIndex> indices) {
return stack().ReduceTuple(indices);
}
OpIndex Tuple(OpIndex a, OpIndex b) {
return stack().ReduceTuple(base::VectorOf({a, b}));
}
@ -860,6 +863,8 @@ class AssemblerOpInterface {
return stack().ReduceProjection(tuple, index, rep);
}
OpIndex LoadException() { return stack().ReduceLoadException(); }
private:
Assembler& stack() { return *static_cast<Assembler*>(this); }
};
@ -911,6 +916,21 @@ class Assembler
Block* current_block() const { return current_block_; }
OpIndex current_operation_origin() const { return current_operation_origin_; }
// ReduceProjection eliminates projections to tuples and returns instead the
// corresponding tuple input. We do this at the top of the stack to avoid
// passing this Projection around needlessly. This is in particular important
// to ValueNumberingReducer, which assumes that it's at the bottom of the
// stack, and that the BaseReducer will actually emit an Operation. If we put
// this projection-to-tuple-simplification in the BaseReducer, then this
// assumption of the ValueNumberingReducer will break.
OpIndex ReduceProjection(OpIndex tuple, uint16_t index,
RegisterRepresentation rep) {
if (auto* tuple_op = this->template TryCast<TupleOp>(tuple)) {
return tuple_op->input(index);
}
return Stack::ReduceProjection(tuple, index, rep);
}
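As a standalone illustration of the shortcut above, here is a minimal sketch with simplified, hypothetical types (an OpIndex becomes a plain int, the graph a vector): Projection(Tuple(a, b), i) resolves straight to the i-th tuple input, and anything else falls back to emitting a real ProjectionOp.

#include <cassert>
#include <cstdint>
#include <optional>
#include <vector>

// Each node is either a Tuple of earlier node indices or something opaque.
struct Node {
  bool is_tuple = false;
  std::vector<int> inputs;  // only meaningful when is_tuple
};

// Mirrors the shortcut: if {tuple} is a TupleOp, return the corresponding
// tuple input; otherwise signal the caller to emit a real Projection.
std::optional<int> TryResolveProjection(const std::vector<Node>& graph,
                                        int tuple, uint16_t index) {
  const Node& n = graph[tuple];
  if (n.is_tuple) return n.inputs[index];
  return std::nullopt;
}

int main() {
  std::vector<Node> graph(3);
  graph[2] = Node{true, {0, 1}};  // node 2 = Tuple(node 0, node 1)
  assert(TryResolveProjection(graph, 2, 1) == 1);          // collapses to node 1
  assert(!TryResolveProjection(graph, 0, 0).has_value());  // needs a real op
}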
template <class Op, class... Args>
OpIndex Emit(Args... args) {
static_assert((std::is_base_of<Operation, Op>::value));
@ -984,8 +1004,8 @@ class Assembler
// block is not reachable.
intermediate_block->AddPredecessor(source);
-// Updating {source}'s last Branch/Switch/CatchException. Note that this
-// must be done before Binding {intermediate_block}, otherwise,
+// Updating {source}'s last Branch/Switch/CallAndCatchException. Note that
+// this must be done before Binding {intermediate_block}, otherwise,
// Reducer::Bind methods will see an invalid block being bound (because its
// predecessor would be a branch, but none of its targets would be the block
// being bound).
@ -1005,13 +1025,14 @@ class Assembler
}
break;
}
-case Opcode::kCatchException: {
-CatchExceptionOp& catch_exception = op.Cast<CatchExceptionOp>();
+case Opcode::kCallAndCatchException: {
+CallAndCatchExceptionOp& catch_exception =
+op.Cast<CallAndCatchExceptionOp>();
if (catch_exception.if_success == destination) {
catch_exception.if_success = intermediate_block;
-// We enforce that CatchException's if_success and if_exception can
-// never be the same (there is a DCHECK in Assembler::CatchException
-// enforcing that).
+// We enforce that CallAndCatchException's if_success and if_exception
+// can never be the same (there is a DCHECK in
+// Assembler::CallAndCatchException enforcing that).
DCHECK_NE(catch_exception.if_exception, destination);
} else {
DCHECK_EQ(catch_exception.if_exception, destination);


@ -28,6 +28,8 @@
#include "src/compiler/turboshaft/deopt-data.h"
#include "src/compiler/turboshaft/graph.h"
#include "src/compiler/turboshaft/operations.h"
#include "src/compiler/turboshaft/representations.h"
#include "src/zone/zone-containers.h"
namespace v8::internal::compiler::turboshaft {
@ -144,7 +146,23 @@ struct GraphBuilder {
}
OpIndex Process(Node* node, BasicBlock* block,
const base::SmallVector<int, 16>& predecessor_permutation,
-base::Optional<BailoutReason>* bailout);
+base::Optional<BailoutReason>* bailout,
+bool is_final_control = false);
OpIndex EmitProjectionsAndTuple(OpIndex op_idx) {
Operation& op = assembler.output_graph().Get(op_idx);
base::Vector<const RegisterRepresentation> outputs_rep = op.outputs_rep();
if (outputs_rep.size() <= 1) {
// If {op} has a single output, there is no need to emit Projections or
// Tuple, so we just return it.
return op_idx;
}
base::SmallVector<OpIndex, 16> tuple_inputs;
for (size_t i = 0; i < outputs_rep.size(); i++) {
tuple_inputs.push_back(assembler.Projection(op_idx, i, outputs_rep[i]));
}
return assembler.Tuple(base::VectorOf(tuple_inputs));
}
};
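A rough model of the policy this helper implements, using simplified, hypothetical types (the real code works on OpIndex and outputs_rep()): a multi-output operation is immediately followed by one Projection per output plus a Tuple that bundles them, while single-output operations pass through unchanged. The TUPLE_BINOP_CASE and TUPLE_UNARY_CASE macros later in this diff route overflow-checked binops and the TryTruncate conversions through exactly this helper.

#include <cassert>
#include <cstddef>
#include <string>
#include <vector>

struct Graph {
  std::vector<std::string> ops;  // toy output graph: opcode names only
  int Emit(std::string op) {
    ops.push_back(std::move(op));
    return static_cast<int>(ops.size()) - 1;
  }
};

// Toy counterpart of GraphBuilder::EmitProjectionsAndTuple.
int EmitProjectionsAndTuple(Graph& g, int op, std::size_t output_count) {
  if (output_count <= 1) return op;  // single output: use the op directly
  for (std::size_t i = 0; i < output_count; ++i) {
    g.Emit("Projection");  // one projection per output
  }
  return g.Emit("Tuple");  // bundle the projections
}

int main() {
  Graph g;
  int add = g.Emit("Int32AddCheckOverflow");  // 2 outputs: value + overflow
  int tuple = EmitProjectionsAndTuple(g, add, 2);
  assert(tuple != add && g.ops.back() == "Tuple");
  int plain = g.Emit("Word32Add");  // 1 output: passes through
  assert(EmitProjectionsAndTuple(g, plain, 1) == plain);
}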
base::Optional<BailoutReason> GraphBuilder::Run() {
@ -189,7 +207,7 @@ base::Optional<BailoutReason> GraphBuilder::Run() {
decltype(Operation::input_count)>::max()})) {
return BailoutReason::kTooManyArguments;
}
-OpIndex i = Process(node, block, predecessor_permutation, &bailout);
+OpIndex i = Process(node, block, predecessor_permutation, &bailout, true);
if (V8_UNLIKELY(bailout)) return bailout;
op_mapping.Set(node, i);
}
@ -209,21 +227,9 @@ base::Optional<BailoutReason> GraphBuilder::Run() {
case BasicBlock::kReturn:
case BasicBlock::kDeoptimize:
case BasicBlock::kThrow:
+case BasicBlock::kCall:
case BasicBlock::kTailCall:
break;
-case BasicBlock::kCall: {
-Node* call = block->control_input();
-DCHECK_EQ(call->opcode(), IrOpcode::kCall);
-DCHECK_EQ(block->SuccessorCount(), 2);
-Block* if_success = Map(block->SuccessorAt(0));
-Block* if_exception = Map(block->SuccessorAt(1));
-OpIndex catch_exception =
-assembler.CatchException(Map(call), if_success, if_exception);
-Node* if_exception_node = block->SuccessorAt(1)->NodeAt(0);
-DCHECK_EQ(if_exception_node->opcode(), IrOpcode::kIfException);
-op_mapping.Set(if_exception_node, catch_exception);
-break;
-}
case BasicBlock::kNone:
UNREACHABLE();
}
@ -253,7 +259,7 @@ base::Optional<BailoutReason> GraphBuilder::Run() {
OpIndex GraphBuilder::Process(
Node* node, BasicBlock* block,
const base::SmallVector<int, 16>& predecessor_permutation,
-base::Optional<BailoutReason>* bailout) {
+base::Optional<BailoutReason>* bailout, bool is_final_control) {
assembler.SetCurrentOrigin(OpIndex::EncodeTurbofanNodeId(node->id()));
const Operator* op = node->op();
Operator::Opcode opcode = op->opcode();
@ -273,16 +279,41 @@ OpIndex GraphBuilder::Process(
case IrOpcode::kArgumentsLengthState:
case IrOpcode::kEffectPhi:
case IrOpcode::kTerminate:
-case IrOpcode::kIfSuccess:
return OpIndex::Invalid();
case IrOpcode::kIfException: {
-// Use the `CatchExceptionOp` that has already been produced when
-// processing the call.
-OpIndex catch_exception = Map(node);
-DCHECK(
-assembler.output_graph().Get(catch_exception).Is<CatchExceptionOp>());
-return catch_exception;
+return assembler.LoadException();
}
+case IrOpcode::kIfSuccess: {
+// We emit all of the value projections of the call now, emit a Tuple with
+// all of those projections, and remap the old call to this new Tuple
+// instead of the CallAndCatchExceptionOp.
+Node* call = node->InputAt(0);
+DCHECK_EQ(call->opcode(), IrOpcode::kCall);
+OpIndex call_idx = Map(call);
+CallAndCatchExceptionOp& op = assembler.output_graph()
+.Get(call_idx)
+.Cast<CallAndCatchExceptionOp>();
+size_t return_count = op.outputs_rep().size();
+DCHECK_EQ(return_count, op.descriptor->descriptor->ReturnCount());
+if (return_count <= 1) {
+// Calls with one output (or zero) do not require Projections.
+return OpIndex::Invalid();
+}
+base::Vector<OpIndex> projections =
+graph_zone->NewVector<OpIndex>(return_count);
+for (size_t i = 0; i < return_count; i++) {
+projections[i] = assembler.Projection(call_idx, i, op.outputs_rep()[i]);
+}
+OpIndex tuple_idx = assembler.Tuple(projections);
+// Re-mapping {call} to {tuple_idx} so that subsequent projections are not
+// emitted.
+op_mapping.Set(call, tuple_idx);
+return OpIndex::Invalid();
+}
case IrOpcode::kParameter: {
@ -385,13 +416,6 @@ OpIndex GraphBuilder::Process(
BINOP_CASE(Float64Pow, Float64Power)
BINOP_CASE(Float64Atan2, Float64Atan2)
-BINOP_CASE(Int32AddWithOverflow, Int32AddCheckOverflow)
-BINOP_CASE(Int64AddWithOverflow, Int64AddCheckOverflow)
-BINOP_CASE(Int32MulWithOverflow, Int32MulCheckOverflow)
-BINOP_CASE(Int64MulWithOverflow, Int64MulCheckOverflow)
-BINOP_CASE(Int32SubWithOverflow, Int32SubCheckOverflow)
-BINOP_CASE(Int64SubWithOverflow, Int64SubCheckOverflow)
BINOP_CASE(Word32Shr, Word32ShiftRightLogical)
BINOP_CASE(Word64Shr, Word64ShiftRightLogical)
@ -424,6 +448,20 @@ OpIndex GraphBuilder::Process(
BINOP_CASE(Float64LessThanOrEqual, Float64LessThanOrEqual)
#undef BINOP_CASE
+#define TUPLE_BINOP_CASE(opcode, assembler_op) \
+case IrOpcode::k##opcode: { \
+OpIndex idx = \
+assembler.assembler_op(Map(node->InputAt(0)), Map(node->InputAt(1))); \
+return EmitProjectionsAndTuple(idx); \
+}
+TUPLE_BINOP_CASE(Int32AddWithOverflow, Int32AddCheckOverflow)
+TUPLE_BINOP_CASE(Int64AddWithOverflow, Int64AddCheckOverflow)
+TUPLE_BINOP_CASE(Int32MulWithOverflow, Int32MulCheckOverflow)
+TUPLE_BINOP_CASE(Int64MulWithOverflow, Int64MulCheckOverflow)
+TUPLE_BINOP_CASE(Int32SubWithOverflow, Int32SubCheckOverflow)
+TUPLE_BINOP_CASE(Int64SubWithOverflow, Int64SubCheckOverflow)
+#undef TUPLE_BINOP_CASE
case IrOpcode::kWord64Sar:
case IrOpcode::kWord32Sar: {
WordRepresentation rep = opcode == IrOpcode::kWord64Sar
@ -445,6 +483,11 @@ OpIndex GraphBuilder::Process(
#define UNARY_CASE(opcode, assembler_op) \
case IrOpcode::k##opcode: \
return assembler.assembler_op(Map(node->InputAt(0)));
+#define TUPLE_UNARY_CASE(opcode, assembler_op) \
+case IrOpcode::k##opcode: { \
+OpIndex idx = assembler.assembler_op(Map(node->InputAt(0))); \
+return EmitProjectionsAndTuple(idx); \
+}
UNARY_CASE(Word32ReverseBytes, Word32ReverseBytes)
UNARY_CASE(Word64ReverseBytes, Word64ReverseBytes)
@ -525,16 +568,18 @@ OpIndex GraphBuilder::Process(
UNARY_CASE(TruncateFloat64ToUint32,
TruncateFloat64ToUint32OverflowUndefined)
UNARY_CASE(TruncateFloat64ToWord32, JSTruncateFloat64ToWord32)
-UNARY_CASE(TryTruncateFloat32ToInt64, TryTruncateFloat32ToInt64)
-UNARY_CASE(TryTruncateFloat32ToUint64, TryTruncateFloat32ToUint64)
-UNARY_CASE(TryTruncateFloat64ToInt32, TryTruncateFloat64ToInt32)
-UNARY_CASE(TryTruncateFloat64ToInt64, TryTruncateFloat64ToInt64)
-UNARY_CASE(TryTruncateFloat64ToUint32, TryTruncateFloat64ToUint32)
-UNARY_CASE(TryTruncateFloat64ToUint64, TryTruncateFloat64ToUint64)
+TUPLE_UNARY_CASE(TryTruncateFloat32ToInt64, TryTruncateFloat32ToInt64)
+TUPLE_UNARY_CASE(TryTruncateFloat32ToUint64, TryTruncateFloat32ToUint64)
+TUPLE_UNARY_CASE(TryTruncateFloat64ToInt32, TryTruncateFloat64ToInt32)
+TUPLE_UNARY_CASE(TryTruncateFloat64ToInt64, TryTruncateFloat64ToInt64)
+TUPLE_UNARY_CASE(TryTruncateFloat64ToUint32, TryTruncateFloat64ToUint32)
+TUPLE_UNARY_CASE(TryTruncateFloat64ToUint64, TryTruncateFloat64ToUint64)
UNARY_CASE(Float64ExtractLowWord32, Float64ExtractLowWord32)
UNARY_CASE(Float64ExtractHighWord32, Float64ExtractHighWord32)
#undef UNARY_CASE
+#undef TUPLE_UNARY_CASE
case IrOpcode::kTruncateInt64ToInt32:
// 64- to 32-bit truncation is implicit in Turboshaft.
return Map(node->InputAt(0));
@ -750,14 +795,28 @@ OpIndex GraphBuilder::Process(
TSCallDescriptor* ts_descriptor =
graph_zone->New<TSCallDescriptor>(call_descriptor, output_reps);
+OpIndex frame_state_idx = OpIndex::Invalid();
if (call_descriptor->NeedsFrameState()) {
FrameState frame_state{
node->InputAt(static_cast<int>(call_descriptor->InputCount()))};
-return assembler.CallMaybeDeopt(callee, base::VectorOf(arguments),
-ts_descriptor, Map(frame_state));
+frame_state_idx = Map(frame_state);
}
-return assembler.Call(callee, base::VectorOf(arguments), ts_descriptor);
+if (!is_final_control) {
+return EmitProjectionsAndTuple(assembler.Call(
+callee, frame_state_idx, base::VectorOf(arguments), ts_descriptor));
+} else {
+DCHECK_EQ(block->SuccessorCount(), 2);
+Block* if_success = Map(block->SuccessorAt(0));
+Block* if_exception = Map(block->SuccessorAt(1));
+// CallAndCatchException is a block terminator, so we can't generate the
+// projections right away. We'll generate them in the IfSuccess
+// successor.
+return assembler.CallAndCatchException(
+callee, frame_state_idx, base::VectorOf(arguments), if_success,
+if_exception, ts_descriptor);
+}
}
case IrOpcode::kTailCall: {

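Taken together, the kCall and kIfSuccess cases above implement a two-step scheme. A toy model of it follows (hedged sketch; node ids and OpIndexes are plain ints here, not V8's types): the call block ends with the CallAndCatchException terminator, the IfSuccess successor then emits the projections plus a Tuple, and the TurboFan call node is remapped to that Tuple so later kProjection uses resolve against it.

#include <cassert>
#include <string>
#include <unordered_map>
#include <vector>

struct Graph {
  std::vector<std::string> ops;
  int Emit(std::string op) {
    ops.push_back(std::move(op));
    return static_cast<int>(ops.size()) - 1;
  }
};

int main() {
  Graph g;
  std::unordered_map<int, int> op_mapping;  // TurboFan node id -> OpIndex
  const int kCallNodeId = 42;

  // In the call block: the terminator itself (assume 2 return values).
  op_mapping[kCallNodeId] = g.Emit("CallAndCatchException");

  // In the IfSuccess block (the kIfSuccess case above):
  g.Emit("Projection");  // return value 0
  g.Emit("Projection");  // return value 1
  int tuple = g.Emit("Tuple");
  op_mapping[kCallNodeId] = tuple;  // op_mapping.Set(call, tuple_idx)

  // A later kProjection use of the call now resolves against the Tuple,
  // where the Assembler's ReduceProjection collapses it to a tuple input.
  assert(op_mapping[kCallNodeId] == tuple);
}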

@ -368,7 +368,7 @@ class Block : public RandomAccessStackDominatorNode<Block> {
switch (LastOperation(graph).opcode) {
case Opcode::kBranch:
case Opcode::kSwitch:
-case Opcode::kCatchException:
+case Opcode::kCallAndCatchException:
return true;
default:
return false;


@ -964,14 +964,6 @@ class MachineOptimizationReducer : public Next {
}
}
-OpIndex ReduceProjection(OpIndex tuple, uint16_t index,
-RegisterRepresentation rep) {
-if (auto* tuple_op = Asm().template TryCast<TupleOp>(tuple)) {
-return tuple_op->input(index);
-}
-return Next::ReduceProjection(tuple, index, rep);
-}
OpIndex ReduceOverflowCheckedBinop(OpIndex left, OpIndex right,
OverflowCheckedBinopOp::Kind kind,
WordRepresentation rep) {


@ -561,22 +561,16 @@ std::ostream& operator<<(std::ostream& os, const Block* b) {
}
std::ostream& operator<<(std::ostream& os, OpProperties opProperties) {
-if (opProperties == OpProperties::Pure()) {
-os << "Pure";
-} else if (opProperties == OpProperties::Reading()) {
-os << "Reading";
-} else if (opProperties == OpProperties::Writing()) {
-os << "Writing";
-} else if (opProperties == OpProperties::CanAbort()) {
-os << "CanAbort";
-} else if (opProperties == OpProperties::AnySideEffects()) {
-os << "AnySideEffects";
-} else if (opProperties == OpProperties::BlockTerminator()) {
-os << "BlockTerminator";
-} else {
-UNREACHABLE();
+#define PRINT_PROPERTY(Name, ...) \
+if (opProperties == OpProperties::Name()) { \
+return os << #Name; \
}
-return os;
+ALL_OP_PROPERTIES(PRINT_PROPERTY)
+#undef PRINT_PROPERTY
+UNREACHABLE();
}
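The old if/else chain and the hand-written factory methods in operations.h enumerated the same property list twice, so the two could drift apart. The new ALL_OP_PROPERTIES X-macro expands once into the factories and once into this printer. Below is a self-contained demonstration of the idiom with a shortened, illustrative list (DEMO_PROPERTIES stands in for the real ALL_OP_PROPERTIES):

#include <cassert>
#include <string>

struct Props {
  bool can_read, can_write, can_abort, is_block_terminator;
  bool operator==(const Props& o) const {
    return can_read == o.can_read && can_write == o.can_write &&
           can_abort == o.can_abort &&
           is_block_terminator == o.is_block_terminator;
  }
};

// One list drives everything; adding a property is a one-line change.
#define DEMO_PROPERTIES(V)            \
  V(Pure, false, false, false, false) \
  V(Reading, true, false, false, false)

// Expansion 1: the named factory functions.
#define DEFINE_FACTORY(Name, r, w, a, t) \
  constexpr Props Name() { return {r, w, a, t}; }
DEMO_PROPERTIES(DEFINE_FACTORY)
#undef DEFINE_FACTORY

// Expansion 2: the printer, mirroring PRINT_PROPERTY above.
std::string ToString(const Props& p) {
#define PRINT_PROPERTY(Name, ...) \
  if (p == Name()) return #Name;
  DEMO_PROPERTIES(PRINT_PROPERTY)
#undef PRINT_PROPERTY
  return "unknown";
}

int main() { assert(ToString(Reading()) == "Reading"); }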
void SwitchOp::PrintOptions(std::ostream& os) const {


@ -92,18 +92,18 @@ class Graph;
V(StackPointerGreaterThan) \
V(StackSlot) \
V(FrameConstant) \
-V(CheckLazyDeopt) \
V(Deoptimize) \
V(DeoptimizeIf) \
V(TrapIf) \
V(Phi) \
V(FrameState) \
V(Call) \
+V(CallAndCatchException) \
+V(LoadException) \
V(TailCall) \
V(Unreachable) \
V(Return) \
V(Branch) \
-V(CatchException) \
V(Switch) \
V(Tuple) \
V(Projection) \
@ -154,31 +154,26 @@ struct OpProperties {
can_abort(can_abort),
is_block_terminator(is_block_terminator) {}
-static constexpr OpProperties Pure() { return {false, false, false, false}; }
-static constexpr OpProperties Reading() {
-return {true, false, false, false};
-}
-static constexpr OpProperties Writing() {
-return {false, true, false, false};
-}
-static constexpr OpProperties CanAbort() {
-return {false, false, true, false};
-}
-static constexpr OpProperties AnySideEffects() {
-return {true, true, true, false};
-}
-static constexpr OpProperties BlockTerminator() {
-return {false, false, false, true};
-}
-static constexpr OpProperties BlockTerminatorWithAnySideEffect() {
-return {true, true, true, true};
-}
-static constexpr OpProperties ReadingAndCanAbort() {
-return {true, false, true, false};
-}
-static constexpr OpProperties WritingAndCanAbort() {
-return {false, true, true, false};
+#define ALL_OP_PROPERTIES(V) \
+V(Pure, false, false, false, false) \
+V(Reading, true, false, false, false) \
+V(Writing, false, true, false, false) \
+V(CanAbort, false, false, true, false) \
+V(AnySideEffects, true, true, true, false) \
+V(BlockTerminator, false, false, false, true) \
+V(BlockTerminatorWithAnySideEffect, true, true, true, true) \
+V(ReadingAndCanAbort, true, false, true, false) \
+V(WritingAndCanAbort, false, true, true, false)
+#define DEFINE_OP_PROPERTY(Name, can_read, can_write, can_abort, \
+is_block_terminator) \
+static constexpr OpProperties Name() { \
+return {can_read, can_write, can_abort, is_block_terminator}; \
+}
+ALL_OP_PROPERTIES(DEFINE_OP_PROPERTY)
+#undef DEFINE_OP_PROPERTY
bool operator==(const OpProperties& other) const {
return can_read == other.can_read && can_write == other.can_write &&
can_abort == other.can_abort &&
@ -1562,7 +1557,9 @@ struct StackSlotOp : FixedArityOperationT<0, StackSlotOp> {
int alignment;
static constexpr OpProperties properties = OpProperties::Writing();
-base::Vector<const RegisterRepresentation> outputs_rep() const { return {}; }
+base::Vector<const RegisterRepresentation> outputs_rep() const {
+return RepVector<RegisterRepresentation::PointerSized()>();
+}
StackSlotOp(int size, int alignment) : size(size), alignment(alignment) {}
auto options() const { return std::tuple{size, alignment}; }
@ -1619,21 +1616,6 @@ struct FrameStateOp : OperationT<FrameStateOp> {
auto options() const { return std::tuple{inlined, data}; }
};
-// CheckLazyDeoptOp should always immediately follow a call.
-// Semantically, it deopts if the current code object has been
-// deoptimized. But this might also be implemented differently.
-struct CheckLazyDeoptOp : FixedArityOperationT<2, CheckLazyDeoptOp> {
-static constexpr OpProperties properties = OpProperties::CanAbort();
-base::Vector<const RegisterRepresentation> outputs_rep() const { return {}; }
-OpIndex call() const { return input(0); }
-OpIndex frame_state() const { return input(1); }
-CheckLazyDeoptOp(OpIndex call, OpIndex frame_state)
-: Base(call, frame_state) {}
-auto options() const { return std::tuple{}; }
-};
struct DeoptimizeOp : FixedArityOperationT<1, DeoptimizeOp> {
const DeoptimizeParameters* parameters;
@ -1728,27 +1710,106 @@ struct CallOp : OperationT<CallOp> {
return descriptor->out_reps;
}
-OpIndex callee() const { return input(0); }
-base::Vector<const OpIndex> arguments() const {
-return inputs().SubVector(1, input_count);
+bool HasFrameState() const {
+return descriptor->descriptor->NeedsFrameState();
}
-CallOp(OpIndex callee, base::Vector<const OpIndex> arguments,
+OpIndex callee() const { return input(0); }
+OpIndex frame_state() const {
+return HasFrameState() ? input(1) : OpIndex::Invalid();
+}
+base::Vector<const OpIndex> arguments() const {
+return inputs().SubVector(1 + HasFrameState(), input_count);
+}
+CallOp(OpIndex callee, OpIndex frame_state,
+base::Vector<const OpIndex> arguments,
const TSCallDescriptor* descriptor)
-: Base(1 + arguments.size()), descriptor(descriptor) {
+: Base(1 + frame_state.valid() + arguments.size()),
+descriptor(descriptor) {
base::Vector<OpIndex> inputs = this->inputs();
inputs[0] = callee;
-inputs.SubVector(1, inputs.size()).OverwriteWith(arguments);
+if (frame_state.valid()) {
+inputs[1] = frame_state;
+}
+inputs.SubVector(1 + frame_state.valid(), inputs.size())
+.OverwriteWith(arguments);
}
-static CallOp& New(Graph* graph, OpIndex callee,
+static CallOp& New(Graph* graph, OpIndex callee, OpIndex frame_state,
base::Vector<const OpIndex> arguments,
const TSCallDescriptor* descriptor) {
-return Base::New(graph, 1 + arguments.size(), callee, arguments,
-descriptor);
+return Base::New(graph, 1 + frame_state.valid() + arguments.size(), callee,
+frame_state, arguments, descriptor);
}
auto options() const { return std::tuple{descriptor}; }
};
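The delicate part of the new CallOp (and of CallAndCatchExceptionOp just below) is the input layout: input 0 is the callee, input 1 is the frame state only when the descriptor needs one, and the arguments start right after. A runnable toy model of that indexing, with OpIndex and OpIndex::Invalid() modeled as int and -1:

#include <cassert>
#include <cstddef>
#include <vector>

struct CallShape {
  bool needs_frame_state;   // stand-in for descriptor->NeedsFrameState()
  std::vector<int> inputs;  // stand-in for base::Vector<OpIndex>

  int callee() const { return inputs[0]; }
  int frame_state() const {
    return needs_frame_state ? inputs[1] : -1;  // -1 ~ OpIndex::Invalid()
  }
  // Arguments follow the callee and the optional frame state.
  std::size_t first_argument_index() const {
    return 1 + (needs_frame_state ? 1 : 0);
  }
};

int main() {
  // callee = 10, frame state = 11, arguments = {12, 13}.
  CallShape with_fs{true, {10, 11, 12, 13}};
  assert(with_fs.callee() == 10);
  assert(with_fs.frame_state() == 11);
  assert(with_fs.first_argument_index() == 2);

  CallShape without_fs{false, {10, 12, 13}};
  assert(without_fs.frame_state() == -1);
  assert(without_fs.first_argument_index() == 1);
}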
struct CallAndCatchExceptionOp : OperationT<CallAndCatchExceptionOp> {
const TSCallDescriptor* descriptor;
Block* if_success;
Block* if_exception;
static constexpr OpProperties properties =
OpProperties::BlockTerminatorWithAnySideEffect();
base::Vector<const RegisterRepresentation> outputs_rep() const {
return descriptor->out_reps;
}
bool HasFrameState() const {
return descriptor->descriptor->NeedsFrameState();
}
OpIndex callee() const { return input(0); }
OpIndex frame_state() const {
return HasFrameState() ? input(1) : OpIndex::Invalid();
}
base::Vector<const OpIndex> arguments() const {
return inputs().SubVector(1 + HasFrameState(), input_count);
}
CallAndCatchExceptionOp(OpIndex callee, OpIndex frame_state,
base::Vector<const OpIndex> arguments,
Block* if_success, Block* if_exception,
const TSCallDescriptor* descriptor)
: Base(1 + frame_state.valid() + arguments.size()),
descriptor(descriptor),
if_success(if_success),
if_exception(if_exception) {
base::Vector<OpIndex> inputs = this->inputs();
inputs[0] = callee;
if (frame_state.valid()) {
inputs[1] = frame_state;
}
inputs.SubVector(1 + frame_state.valid(), inputs.size())
.OverwriteWith(arguments);
}
static CallAndCatchExceptionOp& New(Graph* graph, OpIndex callee,
OpIndex frame_state,
base::Vector<const OpIndex> arguments,
Block* if_success, Block* if_exception,
const TSCallDescriptor* descriptor) {
return Base::New(graph, 1 + frame_state.valid() + arguments.size(), callee,
frame_state, arguments, if_success, if_exception,
descriptor);
}
auto options() const {
return std::tuple{descriptor, if_success, if_exception};
}
};
struct LoadExceptionOp : FixedArityOperationT<0, LoadExceptionOp> {
static constexpr OpProperties properties = OpProperties::Reading();
base::Vector<const RegisterRepresentation> outputs_rep() const {
return RepVector<RegisterRepresentation::Tagged()>();
}
explicit LoadExceptionOp() : Base() {}
auto options() const { return std::tuple{}; }
};
struct TailCallOp : OperationT<TailCallOp> {
const TSCallDescriptor* descriptor;
@ -1836,26 +1897,6 @@ struct BranchOp : FixedArityOperationT<1, BranchOp> {
auto options() const { return std::tuple{if_true, if_false}; }
};
-// `CatchExceptionOp` has to follow a `CallOp` with a subsequent
-// `CheckLazyDeoptOp`. It provides the exception value, which might only be used
-// from the `if_exception` successor.
-struct CatchExceptionOp : FixedArityOperationT<1, CatchExceptionOp> {
-Block* if_success;
-Block* if_exception;
-static constexpr OpProperties properties = OpProperties::BlockTerminator();
-base::Vector<const RegisterRepresentation> outputs_rep() const {
-return RepVector<RegisterRepresentation::Tagged()>();
-}
-OpIndex call() const { return input(0); }
-explicit CatchExceptionOp(OpIndex call, Block* if_success,
-Block* if_exception)
-: Base(call), if_success(if_success), if_exception(if_exception) {}
-auto options() const { return std::tuple{if_success, if_exception}; }
-};
struct SwitchOp : FixedArityOperationT<1, SwitchOp> {
struct Case {
int32_t value;
@ -1995,33 +2036,16 @@ inline size_t Operation::StorageSlotCount(Opcode opcode, size_t input_count) {
return std::max<size_t>(2, (r - 1 + size + input_count) / r);
}
// CanBeUsedAsInput<Op> returns true if Op can be used as an input for another
// Operation. For instance, a WordBinop can be used as an input of another
// Operation, but a Goto cannot.
template <class Op>
-constexpr inline bool CanBeUsedAsInput() {
-// CatchException is the only block terminator that can be used as input.
-if (std::is_same<Op, CatchExceptionOp>::value) return true;
-if (kOperationPropertiesTable[static_cast<uint8_t>(Op::opcode)].has_value() &&
-kOperationPropertiesTable[static_cast<uint8_t>(Op::opcode)]
-->is_block_terminator) {
-return false;
+V8_INLINE bool CanBeUsedAsInput(const Op& op) {
+if (std::is_same<Op, FrameStateOp>::value) {
+// FrameStateOp is the only Operation that can be used as an input but has
+// empty `outputs_rep`.
+return true;
}
-#define FALSE_CASE(op_to_test) \
-if constexpr (std::is_same<Op, op_to_test>::value) { \
-return false; \
-}
-FALSE_CASE(StoreOp);
-FALSE_CASE(RetainOp);
-FALSE_CASE(CheckLazyDeoptOp);
-FALSE_CASE(DeoptimizeIfOp);
-FALSE_CASE(TrapIfOp);
-#undef FALSE_CASE
-// All other types of Op can be used as input for other Operations.
-return true;
+// For all other Operations, they can only be used as an input if they have at
+// least one output.
+return op.outputs_rep().size() > 0;
}
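The new predicate trades the hand-maintained FALSE_CASE list for a data-driven rule: an Operation can feed another Operation if and only if it produces at least one output, with FrameStateOp as the single deliberate exception. A simplified model, where OpInfo is a hypothetical stand-in for the real Operation:

#include <cassert>
#include <cstddef>

struct OpInfo {
  bool is_frame_state;
  std::size_t output_count;  // stand-in for outputs_rep().size()
};

bool CanBeUsedAsInput(const OpInfo& op) {
  // FrameStateOp has no outputs yet is consumed by deopting operations.
  if (op.is_frame_state) return true;
  // Everything else qualifies only if it produces a value.
  return op.output_count > 0;
}

int main() {
  assert(CanBeUsedAsInput({false, 1}));   // e.g. a WordBinop
  assert(!CanBeUsedAsInput({false, 0}));  // e.g. a Goto or a Store
  assert(CanBeUsedAsInput({true, 0}));    // FrameStateOp
}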
inline base::Vector<const RegisterRepresentation> Operation::outputs_rep()


@ -219,6 +219,7 @@ class GraphVisitor {
template <bool can_be_invalid = false>
OpIndex MapToNewGraph(OpIndex old_index, int predecessor_index = -1) {
DCHECK(old_index.valid());
OpIndex result = op_mapping_[old_index.id()];
if (!result.valid()) {
// {op_mapping} doesn't have a mapping for {old_index}. The assembler
@ -229,6 +230,7 @@ class GraphVisitor {
return OpIndex::Invalid();
}
}
DCHECK(var.has_value());
if (predecessor_index == -1) {
result = assembler().Get(var.value());
} else {
@ -319,7 +321,7 @@ class GraphVisitor {
#define EMIT_INSTR_CASE(Name) \
case Opcode::k##Name: \
new_index = this->Visit##Name(op.Cast<Name##Op>()); \
-if constexpr (CanBeUsedAsInput<Name##Op>()) { \
+if (CanBeUsedAsInput(op.Cast<Name##Op>())) { \
CreateOldToNewMapping(index, new_index); \
} \
break;
@ -385,12 +387,6 @@ class GraphVisitor {
return assembler().ReduceBranch(MapToNewGraph(op.condition()), if_true,
if_false);
}
-OpIndex VisitCatchException(const CatchExceptionOp& op) {
-Block* if_success = MapToNewGraph(op.if_success->index());
-Block* if_exception = MapToNewGraph(op.if_exception->index());
-return assembler().ReduceCatchException(MapToNewGraph(op.call()),
-if_success, if_exception);
-}
OpIndex VisitSwitch(const SwitchOp& op) {
base::SmallVector<SwitchOp::Case, 16> cases;
for (SwitchOp::Case c : op.cases) {
@ -511,9 +507,23 @@ class GraphVisitor {
}
OpIndex VisitCall(const CallOp& op) {
OpIndex callee = MapToNewGraph(op.callee());
+OpIndex frame_state = MapToNewGraphIfValid(op.frame_state());
auto arguments = MapToNewGraph<16>(op.arguments());
-return assembler().ReduceCall(callee, base::VectorOf(arguments),
-op.descriptor);
+return assembler().ReduceCall(callee, frame_state,
+base::VectorOf(arguments), op.descriptor);
}
OpIndex VisitCallAndCatchException(const CallAndCatchExceptionOp& op) {
OpIndex callee = MapToNewGraph(op.callee());
Block* if_success = MapToNewGraph(op.if_success->index());
Block* if_exception = MapToNewGraph(op.if_exception->index());
OpIndex frame_state = MapToNewGraphIfValid(op.frame_state());
auto arguments = MapToNewGraph<16>(op.arguments());
return assembler().ReduceCallAndCatchException(
callee, frame_state, base::VectorOf(arguments), if_success,
if_exception, op.descriptor);
}
OpIndex VisitLoadException(const LoadExceptionOp& op) {
return assembler().ReduceLoadException();
}
OpIndex VisitTailCall(const TailCallOp& op) {
OpIndex callee = MapToNewGraph(op.callee());
@ -578,14 +588,12 @@ class GraphVisitor {
}
OpIndex VisitLoad(const LoadOp& op) {
return assembler().ReduceLoad(
-MapToNewGraph(op.base()),
-op.index().valid() ? MapToNewGraph(op.index()) : OpIndex::Invalid(),
-op.kind, op.loaded_rep, op.result_rep, op.offset, op.element_size_log2);
+MapToNewGraph(op.base()), MapToNewGraphIfValid(op.index()), op.kind,
+op.loaded_rep, op.result_rep, op.offset, op.element_size_log2);
}
OpIndex VisitStore(const StoreOp& op) {
return assembler().ReduceStore(
-MapToNewGraph(op.base()),
-op.index().valid() ? MapToNewGraph(op.index()) : OpIndex::Invalid(),
+MapToNewGraph(op.base()), MapToNewGraphIfValid(op.index()),
MapToNewGraph(op.value()), op.kind, op.stored_rep, op.write_barrier,
op.offset, op.element_size_log2);
}
@ -609,10 +617,6 @@ class GraphVisitor {
OpIndex VisitFrameConstant(const FrameConstantOp& op) {
return assembler().ReduceFrameConstant(op.kind);
}
-OpIndex VisitCheckLazyDeopt(const CheckLazyDeoptOp& op) {
-return assembler().ReduceCheckLazyDeopt(MapToNewGraph(op.call()),
-MapToNewGraph(op.frame_state()));
-}
OpIndex VisitDeoptimize(const DeoptimizeOp& op) {
return assembler().ReduceDeoptimize(MapToNewGraph(op.frame_state()),
op.parameters);
@ -674,6 +678,13 @@ class GraphVisitor {
return result;
}
template <bool can_be_invalid = false>
OpIndex MapToNewGraphIfValid(OpIndex old_index, int predecessor_index = -1) {
return old_index.valid()
? MapToNewGraph<can_be_invalid>(old_index, predecessor_index)
: OpIndex::Invalid();
}
MaybeVariable GetVariableFor(OpIndex old_index) const {
return old_opindex_to_variables[old_index];
}


@ -1018,12 +1018,6 @@ Node* ScheduleBuilder::ProcessOperation(const FrameConstantOp& op) {
return AddNode(machine.LoadParentFramePointer(), {});
}
}
-Node* ScheduleBuilder::ProcessOperation(const CheckLazyDeoptOp& op) {
-Node* call = GetNode(op.call());
-Node* frame_state = GetNode(op.frame_state());
-call->AppendInput(graph_zone, frame_state);
-return nullptr;
-}
Node* ScheduleBuilder::ProcessOperation(const DeoptimizeIfOp& op) {
Node* condition = GetNode(op.condition());
Node* frame_state = GetNode(op.frame_state());
@ -1216,9 +1210,46 @@ Node* ScheduleBuilder::ProcessOperation(const CallOp& op) {
for (OpIndex i : op.arguments()) {
inputs.push_back(GetNode(i));
}
if (op.HasFrameState()) {
DCHECK(op.frame_state().valid());
inputs.push_back(GetNode(op.frame_state()));
}
return AddNode(common.Call(op.descriptor->descriptor),
base::VectorOf(inputs));
}
Node* ScheduleBuilder::ProcessOperation(const CallAndCatchExceptionOp& op) {
// Re-building the call
base::SmallVector<Node*, 16> inputs;
inputs.push_back(GetNode(op.callee()));
for (OpIndex i : op.arguments()) {
inputs.push_back(GetNode(i));
}
if (op.HasFrameState()) {
DCHECK(op.frame_state().valid());
inputs.push_back(GetNode(op.frame_state()));
}
Node* call =
AddNode(common.Call(op.descriptor->descriptor), base::VectorOf(inputs));
// Re-building the IfSuccess/IfException mechanism.
BasicBlock* success_block = GetBlock(*op.if_success);
BasicBlock* exception_block = GetBlock(*op.if_exception);
schedule->AddCall(current_block, call, success_block, exception_block);
// Pass `call` as the control input of `IfSuccess` and as both the effect and
// control input of `IfException`.
Node* if_success = MakeNode(common.IfSuccess(), {call});
Node* if_exception = MakeNode(common.IfException(), {call, call});
schedule->AddNode(success_block, if_success);
schedule->AddNode(exception_block, if_exception);
current_block = nullptr;
return call;
}
Node* ScheduleBuilder::ProcessOperation(const LoadExceptionOp& op) {
Node* if_exception = current_block->NodeAt(0);
DCHECK(if_exception != nullptr &&
if_exception->opcode() == IrOpcode::kIfException);
return if_exception;
}
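LoadException's lowering relies on an invariant established just above: AddNode(exception_block, if_exception) makes the IfException node, which carries the exception value in TurboFan, the first node of the handler block, so NodeAt(0) retrieves it. A toy model of that lookup (blocks reduced to vectors of opcode names):

#include <cassert>
#include <string>
#include <vector>

using Block = std::vector<std::string>;  // nodes in schedule order

// Mirrors ProcessOperation(const LoadExceptionOp&): the handler block must
// start with the IfException node, which doubles as the exception value.
std::string LoadException(const Block& handler_block) {
  assert(!handler_block.empty() && handler_block.front() == "IfException");
  return handler_block.front();
}

int main() {
  Block handler{"IfException", "Phi", "Return"};
  assert(LoadException(handler) == "IfException");
}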
Node* ScheduleBuilder::ProcessOperation(const TailCallOp& op) {
base::SmallVector<Node*, 16> inputs;
inputs.push_back(GetNode(op.callee()));
@ -1260,19 +1291,6 @@ Node* ScheduleBuilder::ProcessOperation(const BranchOp& op) {
current_block = nullptr;
return nullptr;
}
-Node* ScheduleBuilder::ProcessOperation(const CatchExceptionOp& op) {
-Node* call = GetNode(op.call());
-BasicBlock* success_block = GetBlock(*op.if_success);
-BasicBlock* exception_block = GetBlock(*op.if_exception);
-schedule->AddCall(current_block, call, success_block, exception_block);
-Node* if_success = MakeNode(common.IfSuccess(), {call});
-Node* if_exception = MakeNode(common.IfException(), {call, call});
-schedule->AddNode(success_block, if_success);
-// Pass `call` as both the effect and control input of `IfException`.
-schedule->AddNode(exception_block, if_exception);
-current_block = nullptr;
-return if_exception;
-}
Node* ScheduleBuilder::ProcessOperation(const SwitchOp& op) {
size_t succ_count = op.cases.size() + 1;
Node* switch_node =


@ -80,10 +80,13 @@ class VariableReducer : public Next {
auto merge_variables = [&](Variable var,
base::Vector<OpIndex> predecessors) -> OpIndex {
-ConstantOp* first_constant = Asm()
-.output_graph()
-.Get(predecessors[0])
-.template TryCast<ConstantOp>();
+ConstantOp* first_constant = nullptr;
+if (predecessors[0].valid()) {
+first_constant = Asm()
+.output_graph()
+.Get(predecessors[0])
+.template TryCast<ConstantOp>();
+}
bool all_are_same_constant = first_constant != nullptr;
for (OpIndex idx : predecessors) {
@ -188,7 +191,6 @@ class VariableReducer : public Next {
case Opcode::kStore:
case Opcode::kRetain:
case Opcode::kStackSlot:
-case Opcode::kCheckLazyDeopt:
case Opcode::kDeoptimize:
case Opcode::kDeoptimizeIf:
case Opcode::kTrapIf:
@ -200,7 +202,6 @@ class VariableReducer : public Next {
case Opcode::kReturn:
case Opcode::kGoto:
case Opcode::kBranch:
-case Opcode::kCatchException:
case Opcode::kSwitch:
case Opcode::kTuple:
case Opcode::kProjection: