[turboshaft] Refactor OptimizationPhaseImpl
OptimizationPhaseImpl is now called GraphVisitor. Its ReduceXXX methods are now called VisitXXX, to avoid name conflicts with Assembler/Reducer methods. Its non-template-dependent fields have been moved out to a separate class (which will be easier to use in some contexts). Assembler now inherits from GraphVisitor (ex OptimizationPhaseImpl), which allows it to trigger visitation of Blocks or Operations.

Bug: v8:12783
Change-Id: I14f57621c62fc83f27fae1169be514a400396ecd
Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/3985908
Reviewed-by: Tobias Tebbi <tebbi@chromium.org>
Commit-Queue: Darius Mercadier <dmercadier@chromium.org>
Auto-Submit: Darius Mercadier <dmercadier@chromium.org>
Cr-Commit-Position: refs/heads/main@{#84089}
This commit is contained in:
parent bbcf8b6c3a
commit 0c9ca252b8
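As background for the diff below, here is a minimal, self-contained C++ sketch of the layering this change introduces. It is not the actual turboshaft code: Graph, Operation, DoublingReducer and ReduceOperation are simplified stand-ins invented for illustration; only the shape (a CRTP GraphVisitor base, a ReducerStack of reducers, and an Assembler deriving from both so it can drive visitation itself) mirrors the description above.

// Simplified sketch of the GraphVisitor / ReducerStack / Assembler layering.
// None of these types are the real turboshaft classes.
#include <iostream>
#include <vector>

struct Operation { int value; };                   // stand-in for a turboshaft Operation
struct Graph { std::vector<Operation> ops; };      // stand-in for a turboshaft Graph

// Walks the input graph; the derived Assembler is reachable via CRTP, so the
// visitor can hand each visited operation to the reducer stack.
template <class AssemblerT>
class GraphVisitor {
 public:
  GraphVisitor(Graph& input, Graph& output) : input_(input), output_(output) {}
  Graph& input_graph() { return input_; }
  Graph& output_graph() { return output_; }

  // VisitGraph drives the traversal; each operation is dispatched to a
  // Visit step, which calls the reducer stack's Reduce method.
  void VisitGraph() {
    for (const Operation& op : input_.ops) VisitOperation(op);
  }

 private:
  void VisitOperation(const Operation& op) {
    output_.ops.push_back({assembler().ReduceOperation(op.value)});
  }
  AssemblerT& assembler() { return static_cast<AssemblerT&>(*this); }

  Graph& input_;
  Graph& output_;
};

// Empty reducer stack: bottoms out the CRTP chain with an identity reduction.
template <class AssemblerT, template <class> class... Reducers>
class ReducerStack {
 public:
  int ReduceOperation(int value) { return value; }
};

// Non-empty stack: the first reducer wraps the rest of the stack.
template <class AssemblerT, template <class> class First,
          template <class> class... Rest>
class ReducerStack<AssemblerT, First, Rest...>
    : public First<ReducerStack<AssemblerT, Rest...>> {};

// Example reducer (invented for the sketch): doubles every value before
// passing it on to the next reducer in the stack.
template <class Next>
class DoublingReducer : public Next {
 public:
  int ReduceOperation(int value) { return Next::ReduceOperation(2 * value); }
};

// The assembler inherits from GraphVisitor (so it can visit blocks and
// operations itself) and from the reducer stack (so ReduceXXX is available).
template <template <class> class... Reducers>
class Assembler : public GraphVisitor<Assembler<Reducers...>>,
                  public ReducerStack<Assembler<Reducers...>, Reducers...> {
 public:
  Assembler(Graph& input, Graph& output)
      : GraphVisitor<Assembler<Reducers...>>(input, output) {}
};

int main() {
  Graph input{{ {1}, {2}, {3} }};
  Graph output;
  Assembler<DoublingReducer> assembler(input, output);
  assembler.VisitGraph();                       // visit + reduce in one pass
  for (const Operation& op : output.ops) std::cout << op.value << " ";  // 2 4 6
  std::cout << "\n";
}

With this shape, constructing an Assembler over an input and output graph and calling VisitGraph() both walks the old graph and reduces each operation into the new one, which is the pattern OptimizationPhase::Run switches to in the diff below.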
@@ -1965,10 +1965,11 @@ struct LateOptimizationPhase {
if (data->HasTurboshaftGraph()) {
// TODO(dmercadier,tebbi): add missing reducers (LateEscapeAnalysis,
// BranchElimination, MachineOperatorReducer and CommonOperatorReducer).
turboshaft::OptimizationPhase<turboshaft::SelectLoweringReducer,
turboshaft::ValueNumberingReducer>::
Run(&data->turboshaft_graph(), temp_zone, data->node_origins(),
turboshaft::VisitOrder::kDominator);
turboshaft::OptimizationPhase<
turboshaft::SelectLoweringReducer,
turboshaft::ValueNumberingReducer>::Run(&data->turboshaft_graph(),
temp_zone,
data->node_origins());
} else {
GraphReducer graph_reducer(temp_zone, data->graph(),
&data->info()->tick_counter(), data->broker(),
@@ -2081,9 +2082,9 @@ struct OptimizeTurboshaftPhase {
v8_flags.turboshaft_trace_reduction);
turboshaft::OptimizationPhase<
turboshaft::MachineOptimizationReducerSignallingNanImpossible,
turboshaft::ValueNumberingReducer>::
Run(&data->turboshaft_graph(), temp_zone, data->node_origins(),
turboshaft::VisitOrder::kDominator);
turboshaft::ValueNumberingReducer>::Run(&data->turboshaft_graph(),
temp_zone,
data->node_origins());
}
};

@@ -24,26 +24,18 @@

namespace v8::internal::compiler::turboshaft {

// Forward declarations
struct OptimizationPhaseState;

template <class Assembler, template <class> class... Reducers>
class ReducerStack {};

template <class Assembler, template <class> class FirstReducer,
template <class> class... Reducers>
class ReducerStack<Assembler, FirstReducer, Reducers...>
: public FirstReducer<ReducerStack<Assembler, Reducers...>> {
using FirstReducer<ReducerStack<Assembler, Reducers...>>::FirstReducer;
};
: public FirstReducer<ReducerStack<Assembler, Reducers...>> {};

template <class Assembler>
class ReducerStack<Assembler> {
public:
Assembler& Asm() { return *static_cast<Assembler*>(this); }

template <typename... Args>
explicit ReducerStack(Args&&...) {}
};

// This empty base-class is used to provide default-implementations of plain
@@ -58,8 +50,6 @@ class ReducerBaseForwarder : public Next {
}
TURBOSHAFT_OPERATION_LIST(EMIT_OP)
#undef EMIT_OP

using Next::Next;
};

// ReducerBase provides default implementations of Branch-related Operations
@@ -72,8 +62,6 @@ class ReducerBase : public ReducerBaseForwarder<Next> {
using Next::Asm;
using Base = ReducerBaseForwarder<Next>;

using Base::Base;

void Bind(Block*, const Block*) {}

OpIndex ReducePhi(base::Vector<const OpIndex> inputs,
@@ -809,35 +797,38 @@ class AssemblerOpInterface {

template <template <class> class... Reducers>
class Assembler
: public ReducerStack<Assembler<Reducers...>, Reducers..., ReducerBase>,
: public GraphVisitor<Assembler<Reducers...>>,
public ReducerStack<Assembler<Reducers...>, Reducers..., ReducerBase>,
public OperationMatching<Assembler<Reducers...>>,
public AssemblerOpInterface<Assembler<Reducers...>> {
using Stack = ReducerStack<Assembler<Reducers...>, Reducers...,
v8::internal::compiler::turboshaft::ReducerBase>;

public:
explicit Assembler(Graph* graph, Zone* phase_zone,
explicit Assembler(Graph& input_graph, Graph& output_graph, Zone* phase_zone,
compiler::NodeOriginTable* origins = nullptr)
: Stack(graph, phase_zone), graph_(*graph), phase_zone_(phase_zone) {
graph_.Reset();
: GraphVisitor<Assembler>(input_graph, output_graph, phase_zone,
origins) {
SupportedOperations::Initialize();
}

Block* NewBlock(Block::Kind kind) { return graph_.NewBlock(kind); }
Block* NewBlock(Block::Kind kind) {
return this->output_graph().NewBlock(kind);
}

using OperationMatching<Assembler<Reducers...>>::Get;

V8_INLINE V8_WARN_UNUSED_RESULT bool Bind(Block* block,
const Block* origin = nullptr) {
if (!graph().Add(block)) return false;
if (!this->output_graph().Add(block)) return false;
DCHECK_NULL(current_block_);
current_block_ = block;
Stack::Bind(block, origin);
return true;
}

V8_INLINE void BindReachable(Block* block) {
bool bound = Bind(block);
V8_INLINE void BindReachable(Block* block, const Block* origin = nullptr) {
bool bound = Bind(block, origin);
DCHECK(bound);
USE(bound);
}
@@ -847,32 +838,31 @@ class Assembler
}

Block* current_block() const { return current_block_; }
Zone* graph_zone() const { return graph().graph_zone(); }
Graph& graph() const { return graph_; }
Zone* phase_zone() { return phase_zone_; }
OpIndex current_operation_origin() const { return current_operation_origin_; }

template <class Op, class... Args>
OpIndex Emit(Args... args) {
static_assert((std::is_base_of<Operation, Op>::value));
static_assert(!(std::is_same<Op, Operation>::value));
DCHECK_NOT_NULL(current_block_);
OpIndex result = graph().next_operation_index();
Op& op = graph().template Add<Op>(args...);
graph().operation_origins()[result] = current_operation_origin_;
OpIndex result = this->output_graph().next_operation_index();
Op& op = this->output_graph().template Add<Op>(args...);
this->output_graph().operation_origins()[result] =
current_operation_origin_;
if (op.Properties().is_block_terminator) FinalizeBlock();
return result;
}

private:
void FinalizeBlock() {
graph().Finalize(current_block_);
this->output_graph().Finalize(current_block_);
current_block_ = nullptr;
}

Block* current_block_ = nullptr;
Graph& graph_;
// TODO(dmercadier,tebbi): remove {current_operation_origin_} and pass instead
// additional parameters to ReduceXXX methods.
OpIndex current_operation_origin_ = OpIndex::Invalid();
Zone* const phase_zone_;
};

} // namespace v8::internal::compiler::turboshaft

@@ -48,7 +48,7 @@ struct GraphBuilder {
private:
OpIndex Map(Node* old_node) {
OpIndex result = op_mapping.Get(old_node);
DCHECK(assembler.graph().IsValid(result));
DCHECK(assembler.output_graph().IsValid(result));
return result;
}
Block* Map(BasicBlock* block) {
@@ -59,11 +59,11 @@ struct GraphBuilder {

void FixLoopPhis(Block* loop, Block* backedge) {
DCHECK(loop->IsLoop());
for (Operation& op : assembler.graph().operations(*loop)) {
for (Operation& op : assembler.output_graph().operations(*loop)) {
if (!op.Is<PendingLoopPhiOp>()) continue;
auto& pending_phi = op.Cast<PendingLoopPhiOp>();
assembler.graph().Replace<PhiOp>(
assembler.graph().Index(pending_phi),
assembler.output_graph().Replace<PhiOp>(
assembler.output_graph().Index(pending_phi),
base::VectorOf(
{pending_phi.first(), Map(pending_phi.old_backedge_node)}),
pending_phi.rep);
@@ -223,17 +223,18 @@ base::Optional<BailoutReason> GraphBuilder::Run() {
}

if (source_positions->IsEnabled()) {
for (OpIndex index : assembler.graph().AllOperationIndices()) {
compiler::NodeId origin =
assembler.graph().operation_origins()[index].DecodeTurbofanNodeId();
assembler.graph().source_positions()[index] =
for (OpIndex index : assembler.output_graph().AllOperationIndices()) {
compiler::NodeId origin = assembler.output_graph()
.operation_origins()[index]
.DecodeTurbofanNodeId();
assembler.output_graph().source_positions()[index] =
source_positions->GetSourcePosition(origin);
}
}

if (origins) {
for (OpIndex index : assembler.graph().AllOperationIndices()) {
OpIndex origin = assembler.graph().operation_origins()[index];
for (OpIndex index : assembler.output_graph().AllOperationIndices()) {
OpIndex origin = assembler.output_graph().operation_origins()[index];
origins->SetNodeOrigin(index.id(), origin.DecodeTurbofanNodeId());
}
}
@@ -270,7 +271,8 @@ OpIndex GraphBuilder::Process(
// Use the `CatchExceptionOp` that has already been produced when
// processing the call.
OpIndex catch_exception = Map(node);
DCHECK(assembler.graph().Get(catch_exception).Is<CatchExceptionOp>());
DCHECK(
assembler.output_graph().Get(catch_exception).Is<CatchExceptionOp>());
return catch_exception;
}

@@ -817,9 +819,10 @@ base::Optional<BailoutReason> BuildGraph(Schedule* schedule, Zone* graph_zone,
Zone* phase_zone, Graph* graph,
SourcePositionTable* source_positions,
NodeOriginTable* origins) {
GraphBuilder builder{graph_zone, phase_zone,
*schedule, Assembler<>(graph, phase_zone),
source_positions, origins};
GraphBuilder builder{
graph_zone, phase_zone,
*schedule, Assembler<>(*graph, *graph, phase_zone),
source_positions, origins};
return builder.Run();
}

@@ -413,10 +413,6 @@ class Graph {
DCHECK_LT(i.id(), bound_blocks_.size());
return *bound_blocks_[i.id()];
}
Block* GetPtr(uint32_t index) {
DCHECK_LT(index, bound_blocks_.size());
return bound_blocks_[index];
}

OpIndex Index(const Operation& op) const { return operations_.Index(op); }

@@ -24,6 +24,7 @@
#include "src/compiler/backend/instruction.h"
#include "src/compiler/turboshaft/assembler.h"
#include "src/compiler/turboshaft/operations.h"
#include "src/compiler/turboshaft/representations.h"
#include "src/numbers/conversions.h"

namespace v8::internal::compiler::turboshaft {
@@ -47,9 +48,6 @@ class MachineOptimizationReducer : public Next {
public:
using Next::Asm;

MachineOptimizationReducer(Graph* graph, Zone* phase_zone)
: Next(graph, phase_zone) {}

OpIndex ReduceChange(OpIndex input, ChangeOp::Kind kind,
ChangeOp::Assumption assumption,
RegisterRepresentation from, RegisterRepresentation to) {

@@ -14,21 +14,21 @@ class OperationMatching {
public:
template <class Op>
bool Is(OpIndex op_idx) {
return assembler().graph().Get(op_idx).template Is<Op>();
return assembler().output_graph().Get(op_idx).template Is<Op>();
}

template <class Op>
const Op* TryCast(OpIndex op_idx) {
return assembler().graph().Get(op_idx).template TryCast<Op>();
return assembler().output_graph().Get(op_idx).template TryCast<Op>();
}

template <class Op>
const Op& Cast(OpIndex op_idx) {
return assembler().graph().Get(op_idx).template Cast<Op>();
return assembler().output_graph().Get(op_idx).template Cast<Op>();
}

const Operation& Get(OpIndex op_idx) {
return assembler().graph().Get(op_idx);
return assembler().output_graph().Get(op_idx);
}

bool MatchZero(OpIndex matched) {

@@ -42,6 +42,8 @@ struct AnalyzerBase {
: phase_zone(phase_zone), graph(graph) {}
};

// TODO(dmercadier, tebbi): transform this analyzer into a reducer, and plug in
// into some reducer stacks.
struct LivenessAnalyzer : AnalyzerBase {
using Base = AnalyzerBase;
// Using `uint8_t` instead of `bool` prevents `std::vector` from using a
@@ -100,170 +102,168 @@ struct LivenessAnalyzer : AnalyzerBase {
}
};

enum class VisitOrder { kAsEmitted, kDominator };

template <template <class> class... Reducers>
class OptimizationPhase {
private:
struct Impl;

public:
static void Run(Graph* input, Zone* phase_zone, NodeOriginTable* origins,
VisitOrder visit_order = VisitOrder::kAsEmitted) {
Impl phase{*input, phase_zone, origins, visit_order};
static void Run(Graph* input, Zone* phase_zone, NodeOriginTable* origins) {
Assembler<Reducers...> phase(*input, input->GetOrCreateCompanion(),
phase_zone, origins);
if (v8_flags.turboshaft_trace_reduction) {
phase.template Run<true>();
phase.template VisitGraph<true>();
} else {
phase.template Run<false>();
phase.template VisitGraph<false>();
}
}
static void RunWithoutTracing(
Graph* input, Zone* phase_zone,
VisitOrder visit_order = VisitOrder::kAsEmitted) {
Impl phase{*input, phase_zone, visit_order};
phase.template Run<false>();
static void RunWithoutTracing(Graph* input, Zone* phase_zone) {
Assembler<Reducers...> phase(input, input->GetOrCreateCompanion(),
phase_zone);
phase->template VisitGraph<false>();
}
};

template <template <class> class... Reducers>
struct OptimizationPhase<Reducers...>::Impl {
Graph& input_graph;
Zone* phase_zone;
compiler::NodeOriginTable* origins;
VisitOrder visit_order;

LivenessAnalyzer analyzer{input_graph, phase_zone};
Assembler<Reducers...> assembler{&input_graph.GetOrCreateCompanion(),
phase_zone};
const Block* current_input_block = nullptr;
// Mappings from the old graph to the new graph.
std::vector<Block*> block_mapping{input_graph.block_count(), nullptr};
std::vector<OpIndex> op_mapping{input_graph.op_id_count(),
OpIndex::Invalid()};
template <class Assembler>
class GraphVisitor {
public:
GraphVisitor(Graph& input_graph, Graph& output_graph, Zone* phase_zone,
compiler::NodeOriginTable* origins = nullptr)
: input_graph_(input_graph),
output_graph_(output_graph),
phase_zone_(phase_zone),
origins_(origins),
current_input_block_(nullptr),
block_mapping_(input_graph.block_count(), nullptr, phase_zone),
op_mapping_(input_graph.op_id_count(), OpIndex::Invalid(), phase_zone) {
output_graph_.Reset();
}

// `trace_reduction` is a template parameter to avoid paying for tracing at
// runtime.
template <bool trace_reduction>
void Run() {
analyzer.Run();

for (const Block& input_block : input_graph.blocks()) {
block_mapping[input_block.index().id()] =
assembler.NewBlock(input_block.kind());
void VisitGraph() {
// Creating initial old-to-new Block mapping.
for (const Block& input_block : input_graph().blocks()) {
block_mapping_[input_block.index().id()] =
assembler().NewBlock(input_block.kind());
}

if (visit_order == VisitOrder::kDominator) {
RunDominatorOrder<trace_reduction>();
} else {
RunAsEmittedOrder<trace_reduction>();
}
// Visiting the graph.
VisitAllBlocks<trace_reduction>();

if (!input_graph.source_positions().empty()) {
for (OpIndex index : assembler.graph().AllOperationIndices()) {
OpIndex origin = assembler.graph().operation_origins()[index];
assembler.graph().source_positions()[index] =
input_graph.source_positions()[origin];
// Updating the source_positions.
if (!input_graph().source_positions().empty()) {
for (OpIndex index : output_graph_.AllOperationIndices()) {
OpIndex origin = output_graph_.operation_origins()[index];
output_graph_.source_positions()[index] =
input_graph().source_positions()[origin];
}
}
if (origins) {
for (OpIndex index : assembler.graph().AllOperationIndices()) {
OpIndex origin = assembler.graph().operation_origins()[index];
origins->SetNodeOrigin(index.id(), origin.id());
// Updating the operation origins.
if (origins_) {
for (OpIndex index : assembler().output_graph().AllOperationIndices()) {
OpIndex origin = assembler().output_graph().operation_origins()[index];
origins_->SetNodeOrigin(index.id(), origin.id());
}
}

input_graph.SwapWithCompanion();
input_graph_.SwapWithCompanion();
}

template <bool trace_reduction>
void RunAsEmittedOrder() {
for (const Block& input_block : input_graph.blocks()) {
VisitBlock<trace_reduction>(input_block);
}
}
Zone* graph_zone() const { return input_graph().graph_zone(); }
const Graph& input_graph() const { return input_graph_; }
Graph& output_graph() const { return output_graph_; }
Zone* phase_zone() { return phase_zone_; }

private:
template <bool trace_reduction>
void RunDominatorOrder() {
base::SmallVector<Block*, 128> dominator_visit_stack;
void VisitAllBlocks() {
base::SmallVector<const Block*, 128> visit_stack;

dominator_visit_stack.push_back(input_graph.GetPtr(0));
while (!dominator_visit_stack.empty()) {
Block* block = dominator_visit_stack.back();
dominator_visit_stack.pop_back();
VisitBlock<trace_reduction>(*block);
visit_stack.push_back(&input_graph().StartBlock());
while (!visit_stack.empty()) {
const Block* block = visit_stack.back();
visit_stack.pop_back();
VisitBlock<trace_reduction>(block);

for (Block* child = block->LastChild(); child != nullptr;
child = child->NeighboringChild()) {
dominator_visit_stack.push_back(child);
visit_stack.push_back(child);
}
}
}

template <bool trace_reduction>
void VisitBlock(const Block& input_block) {
current_input_block = &input_block;
void VisitBlock(const Block* input_block) {
current_input_block_ = input_block;
if constexpr (trace_reduction) {
std::cout << PrintAsBlockHeader{input_block} << "\n";
std::cout << PrintAsBlockHeader{*input_block} << "\n";
}
if (!assembler.Bind(MapToNewGraph(input_block.index()))) {
if (!assembler().Bind(MapToNewGraph(input_block->index()))) {
if constexpr (trace_reduction) TraceBlockUnreachable();
// If we eliminate a loop backedge, we need to turn the loop into a
// single-predecessor merge block.
const Operation& last_op =
*base::Reversed(input_graph.operations(input_block)).begin();
*base::Reversed(input_graph().operations(*input_block)).begin();
if (auto* final_goto = last_op.TryCast<GotoOp>()) {
if (final_goto->destination->IsLoop()) {
Block* new_loop = MapToNewGraph(final_goto->destination->index());
DCHECK(new_loop->IsLoop());
if (new_loop->IsLoop() && new_loop->PredecessorCount() == 1) {
assembler.graph().TurnLoopIntoMerge(new_loop);
output_graph_.TurnLoopIntoMerge(new_loop);
}
}
}
return;
}
assembler.current_block()->SetDeferred(input_block.IsDeferred());
for (OpIndex index : input_graph.OperationIndices(input_block)) {
if (!assembler.current_block()) break;
assembler.SetCurrentOrigin(index);
OpIndex first_output_index = assembler.graph().next_operation_index();
USE(first_output_index);
if constexpr (trace_reduction) TraceReductionStart(index);
if (!analyzer.OpIsUsed(index)) {
if constexpr (trace_reduction) TraceOperationUnused();
continue;
}
const Operation& op = input_graph.Get(index);
OpIndex new_index;
if (input_block.IsLoop() && op.Is<PhiOp>()) {
const PhiOp& phi = op.Cast<PhiOp>();
new_index = assembler.PendingLoopPhi(MapToNewGraph(phi.inputs()[0]),
phi.rep, phi.inputs()[1]);
if constexpr (trace_reduction) {
TraceReductionResult(first_output_index, new_index);
}
} else {
switch (op.opcode) {
#define EMIT_INSTR_CASE(Name) \
case Opcode::k##Name: \
new_index = this->Reduce##Name(op.Cast<Name##Op>()); \
break;
TURBOSHAFT_OPERATION_LIST(EMIT_INSTR_CASE)
#undef EMIT_INSTR_CASE
}
if constexpr (trace_reduction) {
TraceReductionResult(first_output_index, new_index);
}
}
op_mapping[index.id()] = new_index;
assembler().current_block()->SetDeferred(input_block->IsDeferred());
for (OpIndex index : input_graph().OperationIndices(*input_block)) {
if (!VisitOp<trace_reduction>(index, input_block)) break;
}
if constexpr (trace_reduction) TraceBlockFinished();
}

template <bool trace_reduction>
bool VisitOp(OpIndex index, const Block* input_block) {
if (!assembler().current_block()) return false;
assembler().SetCurrentOrigin(index);
OpIndex first_output_index =
assembler().output_graph().next_operation_index();
USE(first_output_index);
const Operation& op = input_graph().Get(index);
if (op.saturated_use_count == 0 &&
!op.Properties().is_required_when_unused) {
if constexpr (trace_reduction) TraceOperationUnused();
return true;
}
if constexpr (trace_reduction) TraceReductionStart(index);
OpIndex new_index;
if (input_block->IsLoop() && op.Is<PhiOp>()) {
const PhiOp& phi = op.Cast<PhiOp>();
new_index = assembler().PendingLoopPhi(MapToNewGraph(phi.inputs()[0]),
phi.rep, phi.inputs()[1]);
if constexpr (trace_reduction) {
TraceReductionResult(first_output_index, new_index);
}
} else {
switch (op.opcode) {
#define EMIT_INSTR_CASE(Name) \
case Opcode::k##Name: \
new_index = this->Visit##Name(op.Cast<Name##Op>()); \
break;
TURBOSHAFT_OPERATION_LIST(EMIT_INSTR_CASE)
#undef EMIT_INSTR_CASE
}
if constexpr (trace_reduction) {
TraceReductionResult(first_output_index, new_index);
}
}
op_mapping_[index.id()] = new_index;
return true;
}

void TraceReductionStart(OpIndex index) {
std::cout << "╭── o" << index.id() << ": "
<< PaddingSpace{5 - CountDecimalDigits(index.id())}
<< OperationPrintStyle{input_graph.Get(index), "#o"} << "\n";
<< OperationPrintStyle{input_graph().Get(index), "#o"} << "\n";
}
void TraceOperationUnused() { std::cout << "╰─> unused\n\n"; }
void TraceBlockUnreachable() { std::cout << "╰─> unreachable\n\n"; }
@@ -273,9 +273,9 @@ struct OptimizationPhase<Reducers...>::Impl {
std::cout << "╰─> #n" << new_index.id() << "\n";
}
bool before_arrow = new_index >= first_output_index;
for (const Operation& op : assembler.graph().operations(
first_output_index, assembler.graph().next_operation_index())) {
OpIndex index = assembler.graph().Index(op);
for (const Operation& op : output_graph_.operations(
first_output_index, output_graph_.next_operation_index())) {
OpIndex index = output_graph_.Index(op);
const char* prefix;
if (index == new_index) {
prefix = "╰─>";
@@ -287,8 +287,7 @@ struct OptimizationPhase<Reducers...>::Impl {
}
std::cout << prefix << " n" << index.id() << ": "
<< PaddingSpace{5 - CountDecimalDigits(index.id())}
<< OperationPrintStyle{assembler.graph().Get(index), "#n"}
<< "\n";
<< OperationPrintStyle{output_graph_.Get(index), "#n"} << "\n";
}
std::cout << "\n";
}
@@ -298,51 +297,51 @@ struct OptimizationPhase<Reducers...>::Impl {
// to emit a corresponding operation in the new graph, translating inputs and
// blocks accordingly.

V8_INLINE OpIndex ReduceGoto(const GotoOp& op) {
V8_INLINE OpIndex VisitGoto(const GotoOp& op) {
Block* destination = MapToNewGraph(op.destination->index());
assembler.current_block()->SetOrigin(current_input_block);
assembler.ReduceGoto(destination);
assembler().current_block()->SetOrigin(current_input_block_);
assembler().ReduceGoto(destination);
if (destination->IsBound()) {
DCHECK(destination->IsLoop());
FixLoopPhis(destination);
}
return OpIndex::Invalid();
}
V8_INLINE OpIndex ReduceBranch(const BranchOp& op) {
V8_INLINE OpIndex VisitBranch(const BranchOp& op) {
Block* if_true = MapToNewGraph(op.if_true->index());
Block* if_false = MapToNewGraph(op.if_false->index());
return assembler.ReduceBranch(MapToNewGraph(op.condition()), if_true,
if_false);
return assembler().ReduceBranch(MapToNewGraph(op.condition()), if_true,
if_false);
}
OpIndex ReduceCatchException(const CatchExceptionOp& op) {
OpIndex VisitCatchException(const CatchExceptionOp& op) {
Block* if_success = MapToNewGraph(op.if_success->index());
Block* if_exception = MapToNewGraph(op.if_exception->index());
return assembler.ReduceCatchException(MapToNewGraph(op.call()), if_success,
if_exception);
return assembler().ReduceCatchException(MapToNewGraph(op.call()),
if_success, if_exception);
}
OpIndex ReduceSwitch(const SwitchOp& op) {
OpIndex VisitSwitch(const SwitchOp& op) {
base::SmallVector<SwitchOp::Case, 16> cases;
for (SwitchOp::Case c : op.cases) {
cases.emplace_back(c.value, MapToNewGraph(c.destination->index()));
}
return assembler.ReduceSwitch(
return assembler().ReduceSwitch(
MapToNewGraph(op.input()),
assembler.graph_zone()->CloneVector(base::VectorOf(cases)),
graph_zone()->CloneVector(base::VectorOf(cases)),
MapToNewGraph(op.default_case->index()));
}
OpIndex ReducePhi(const PhiOp& op) {
OpIndex VisitPhi(const PhiOp& op) {
base::Vector<const OpIndex> old_inputs = op.inputs();
base::SmallVector<OpIndex, 8> new_inputs;
Block* old_pred = current_input_block->LastPredecessor();
Block* new_pred = assembler.current_block()->LastPredecessor();
Block* old_pred = current_input_block_->LastPredecessor();
Block* new_pred = assembler().current_block()->LastPredecessor();
// Control predecessors might be missing after the optimization phase. So we
// need to skip phi inputs that belong to control predecessors that have no
// equivalent in the new graph.

// When iterating the graph in kAsEmitted order (ie, going through all of
// the blocks in linear order), we assume that the order of control
// predecessors did not change. In kDominator order, the order of control
// predecessor might or might not change.
// We first assume that the order if the predecessors of the current block
// did not change. If it did, {new_pred} won't be nullptr at the end of this
// loop, and we'll instead fall back to the slower code below to compute the
// inputs of the Phi.
for (OpIndex input : base::Reversed(old_inputs)) {
if (new_pred && new_pred->Origin() == old_pred) {
new_inputs.push_back(MapToNewGraph(input));
@@ -353,10 +352,9 @@ struct OptimizationPhase<Reducers...>::Impl {
DCHECK_IMPLIES(new_pred == nullptr, old_pred == nullptr);

if (new_pred != nullptr) {
DCHECK_EQ(visit_order, VisitOrder::kDominator);
// If {new_pred} is nullptr, then the order of the predecessors changed.
// This should only happen when {visit_order} is kDominator. For instance,
// consider this (partial) dominator tree:
// This should only happen with blocks that were introduced in the
// previous graph. For instance, consider this (partial) dominator tree:
//
// ╠ 7
// ║ ╠ 8
@@ -365,7 +363,7 @@ struct OptimizationPhase<Reducers...>::Impl {
// ╚ 11
//
// Where the predecessors of block 11 are blocks 9 and 10 (in that order).
// In kDominator visit order, block 10 will be visited before block 9.
// In dominator visit order, block 10 will be visited before block 9.
// Since blocks are added to predecessors when the predecessors are
// visited, it means that in the new graph, the predecessors of block 11
// are [10, 9] rather than [9, 10].
@@ -373,7 +371,7 @@ struct OptimizationPhase<Reducers...>::Impl {
// inputs from blocks that vanished.

base::SmallVector<uint32_t, 16> old_pred_vec;
for (old_pred = current_input_block->LastPredecessor();
for (old_pred = current_input_block_->LastPredecessor();
old_pred != nullptr; old_pred = old_pred->NeighboringPredecessor()) {
old_pred_vec.push_back(old_pred->index().id());
// Checking that predecessors are indeed sorted.
@@ -387,9 +385,10 @@ struct OptimizationPhase<Reducers...>::Impl {
// predecessor, we check the index of the input corresponding to the old
// predecessor, and we put it next in {new_inputs}.
new_inputs.clear();
for (new_pred = assembler.current_block()->LastPredecessor();
for (new_pred = assembler().current_block()->LastPredecessor();
new_pred != nullptr; new_pred = new_pred->NeighboringPredecessor()) {
const Block* origin = new_pred->Origin();
DCHECK_NOT_NULL(origin);
// {old_pred_vec} is sorted. We can thus use a binary search to find the
// index of {origin} in {old_pred_vec}: the index is the index of the
// old input corresponding to {new_pred}.
@@ -401,151 +400,168 @@ struct OptimizationPhase<Reducers...>::Impl {
}
}

DCHECK_EQ(new_inputs.size(),
assembler().current_block()->PredecessorCount());

if (new_inputs.size() == 1) {
// This Operation used to be a Phi in a Merge, but since one (or more) of
// the inputs of the merge have been removed, there is no need for a Phi
// anymore.
return new_inputs[0];
}

std::reverse(new_inputs.begin(), new_inputs.end());
return assembler.ReducePhi(base::VectorOf(new_inputs), op.rep);
return assembler().ReducePhi(base::VectorOf(new_inputs), op.rep);
}
OpIndex ReducePendingLoopPhi(const PendingLoopPhiOp& op) { UNREACHABLE(); }
V8_INLINE OpIndex ReduceFrameState(const FrameStateOp& op) {
OpIndex VisitPendingLoopPhi(const PendingLoopPhiOp& op) { UNREACHABLE(); }
V8_INLINE OpIndex VisitFrameState(const FrameStateOp& op) {
auto inputs = MapToNewGraph<32>(op.inputs());
return assembler.ReduceFrameState(base::VectorOf(inputs), op.inlined,
op.data);
return assembler().ReduceFrameState(base::VectorOf(inputs), op.inlined,
op.data);
}
OpIndex ReduceCall(const CallOp& op) {
OpIndex VisitCall(const CallOp& op) {
OpIndex callee = MapToNewGraph(op.callee());
auto arguments = MapToNewGraph<16>(op.arguments());
return assembler.ReduceCall(callee, base::VectorOf(arguments),
op.descriptor);
return assembler().ReduceCall(callee, base::VectorOf(arguments),
op.descriptor);
}
OpIndex ReduceTailCall(const TailCallOp& op) {
OpIndex VisitTailCall(const TailCallOp& op) {
OpIndex callee = MapToNewGraph(op.callee());
auto arguments = MapToNewGraph<16>(op.arguments());
return assembler.ReduceTailCall(callee, base::VectorOf(arguments),
op.descriptor);
return assembler().ReduceTailCall(callee, base::VectorOf(arguments),
op.descriptor);
}
OpIndex ReduceReturn(const ReturnOp& op) {
OpIndex VisitReturn(const ReturnOp& op) {
// We very rarely have tuples longer than 4.
auto return_values = MapToNewGraph<4>(op.return_values());
return assembler.ReduceReturn(MapToNewGraph(op.pop_count()),
base::VectorOf(return_values));
return assembler().ReduceReturn(MapToNewGraph(op.pop_count()),
base::VectorOf(return_values));
}
OpIndex ReduceOverflowCheckedBinop(const OverflowCheckedBinopOp& op) {
return assembler.ReduceOverflowCheckedBinop(
OpIndex VisitOverflowCheckedBinop(const OverflowCheckedBinopOp& op) {
return assembler().ReduceOverflowCheckedBinop(
MapToNewGraph(op.left()), MapToNewGraph(op.right()), op.kind, op.rep);
}
OpIndex ReduceWordUnary(const WordUnaryOp& op) {
return assembler.ReduceWordUnary(MapToNewGraph(op.input()), op.kind,
op.rep);
OpIndex VisitWordUnary(const WordUnaryOp& op) {
return assembler().ReduceWordUnary(MapToNewGraph(op.input()), op.kind,
op.rep);
}
OpIndex ReduceFloatUnary(const FloatUnaryOp& op) {
return assembler.ReduceFloatUnary(MapToNewGraph(op.input()), op.kind,
op.rep);
OpIndex VisitFloatUnary(const FloatUnaryOp& op) {
return assembler().ReduceFloatUnary(MapToNewGraph(op.input()), op.kind,
op.rep);
}
OpIndex ReduceShift(const ShiftOp& op) {
return assembler.ReduceShift(MapToNewGraph(op.left()),
MapToNewGraph(op.right()), op.kind, op.rep);
OpIndex VisitShift(const ShiftOp& op) {
return assembler().ReduceShift(MapToNewGraph(op.left()),
MapToNewGraph(op.right()), op.kind, op.rep);
}
OpIndex ReduceEqual(const EqualOp& op) {
return assembler.ReduceEqual(MapToNewGraph(op.left()),
MapToNewGraph(op.right()), op.rep);
OpIndex VisitEqual(const EqualOp& op) {
return assembler().ReduceEqual(MapToNewGraph(op.left()),
MapToNewGraph(op.right()), op.rep);
}
OpIndex ReduceComparison(const ComparisonOp& op) {
return assembler.ReduceComparison(
OpIndex VisitComparison(const ComparisonOp& op) {
return assembler().ReduceComparison(
MapToNewGraph(op.left()), MapToNewGraph(op.right()), op.kind, op.rep);
}
OpIndex ReduceChange(const ChangeOp& op) {
return assembler.ReduceChange(MapToNewGraph(op.input()), op.kind,
op.assumption, op.from, op.to);
OpIndex VisitChange(const ChangeOp& op) {
return assembler().ReduceChange(MapToNewGraph(op.input()), op.kind,
op.assumption, op.from, op.to);
}
OpIndex ReduceTryChange(const TryChangeOp& op) {
return assembler.ReduceTryChange(MapToNewGraph(op.input()), op.kind,
op.from, op.to);
OpIndex VisitTryChange(const TryChangeOp& op) {
return assembler().ReduceTryChange(MapToNewGraph(op.input()), op.kind,
op.from, op.to);
}

OpIndex ReduceFloat64InsertWord32(const Float64InsertWord32Op& op) {
return assembler.ReduceFloat64InsertWord32(
OpIndex VisitFloat64InsertWord32(const Float64InsertWord32Op& op) {
return assembler().ReduceFloat64InsertWord32(
MapToNewGraph(op.float64()), MapToNewGraph(op.word32()), op.kind);
}
OpIndex ReduceTaggedBitcast(const TaggedBitcastOp& op) {
return assembler.ReduceTaggedBitcast(MapToNewGraph(op.input()), op.from,
op.to);
OpIndex VisitTaggedBitcast(const TaggedBitcastOp& op) {
return assembler().ReduceTaggedBitcast(MapToNewGraph(op.input()), op.from,
op.to);
}
OpIndex ReduceSelect(const SelectOp& op) {
return assembler.ReduceSelect(
OpIndex VisitSelect(const SelectOp& op) {
return assembler().ReduceSelect(
MapToNewGraph(op.cond()), MapToNewGraph(op.vtrue()),
MapToNewGraph(op.vfalse()), op.rep, op.hint, op.implem);
}
OpIndex ReduceConstant(const ConstantOp& op) {
return assembler.ReduceConstant(op.kind, op.storage);
OpIndex VisitConstant(const ConstantOp& op) {
return assembler().ReduceConstant(op.kind, op.storage);
}
OpIndex ReduceLoad(const LoadOp& op) {
return assembler.ReduceLoad(
OpIndex VisitLoad(const LoadOp& op) {
return assembler().ReduceLoad(
MapToNewGraph(op.base()),
op.index().valid() ? MapToNewGraph(op.index()) : OpIndex::Invalid(),
op.kind, op.loaded_rep, op.result_rep, op.offset, op.element_size_log2);
}
OpIndex ReduceStore(const StoreOp& op) {
return assembler.ReduceStore(
OpIndex VisitStore(const StoreOp& op) {
return assembler().ReduceStore(
MapToNewGraph(op.base()),
op.index().valid() ? MapToNewGraph(op.index()) : OpIndex::Invalid(),
MapToNewGraph(op.value()), op.kind, op.stored_rep, op.write_barrier,
op.offset, op.element_size_log2);
}
OpIndex ReduceRetain(const RetainOp& op) {
return assembler.ReduceRetain(MapToNewGraph(op.retained()));
OpIndex VisitRetain(const RetainOp& op) {
return assembler().ReduceRetain(MapToNewGraph(op.retained()));
}
OpIndex ReduceParameter(const ParameterOp& op) {
return assembler.ReduceParameter(op.parameter_index, op.debug_name);
OpIndex VisitParameter(const ParameterOp& op) {
return assembler().ReduceParameter(op.parameter_index, op.debug_name);
}
OpIndex ReduceOsrValue(const OsrValueOp& op) {
return assembler.ReduceOsrValue(op.index);
OpIndex VisitOsrValue(const OsrValueOp& op) {
return assembler().ReduceOsrValue(op.index);
}
OpIndex ReduceStackPointerGreaterThan(const StackPointerGreaterThanOp& op) {
return assembler.ReduceStackPointerGreaterThan(
OpIndex VisitStackPointerGreaterThan(const StackPointerGreaterThanOp& op) {
return assembler().ReduceStackPointerGreaterThan(
MapToNewGraph(op.stack_limit()), op.kind);
}
OpIndex ReduceStackSlot(const StackSlotOp& op) {
return assembler.ReduceStackSlot(op.size, op.alignment);
OpIndex VisitStackSlot(const StackSlotOp& op) {
return assembler().ReduceStackSlot(op.size, op.alignment);
}
OpIndex ReduceFrameConstant(const FrameConstantOp& op) {
return assembler.ReduceFrameConstant(op.kind);
OpIndex VisitFrameConstant(const FrameConstantOp& op) {
return assembler().ReduceFrameConstant(op.kind);
}
OpIndex ReduceCheckLazyDeopt(const CheckLazyDeoptOp& op) {
return assembler.ReduceCheckLazyDeopt(MapToNewGraph(op.call()),
MapToNewGraph(op.frame_state()));
OpIndex VisitCheckLazyDeopt(const CheckLazyDeoptOp& op) {
return assembler().ReduceCheckLazyDeopt(MapToNewGraph(op.call()),
MapToNewGraph(op.frame_state()));
}
OpIndex ReduceDeoptimize(const DeoptimizeOp& op) {
return assembler.ReduceDeoptimize(MapToNewGraph(op.frame_state()),
op.parameters);
OpIndex VisitDeoptimize(const DeoptimizeOp& op) {
return assembler().ReduceDeoptimize(MapToNewGraph(op.frame_state()),
op.parameters);
}
OpIndex ReduceDeoptimizeIf(const DeoptimizeIfOp& op) {
return assembler.ReduceDeoptimizeIf(MapToNewGraph(op.condition()),
MapToNewGraph(op.frame_state()),
op.negated, op.parameters);
OpIndex VisitDeoptimizeIf(const DeoptimizeIfOp& op) {
return assembler().ReduceDeoptimizeIf(MapToNewGraph(op.condition()),
MapToNewGraph(op.frame_state()),
op.negated, op.parameters);
}
OpIndex ReduceTrapIf(const TrapIfOp& op) {
return assembler.ReduceTrapIf(MapToNewGraph(op.condition()), op.negated,
op.trap_id);
OpIndex VisitTrapIf(const TrapIfOp& op) {
return assembler().ReduceTrapIf(MapToNewGraph(op.condition()), op.negated,
op.trap_id);
}
OpIndex ReduceTuple(const TupleOp& op) {
return assembler.ReduceTuple(base::VectorOf(MapToNewGraph<4>(op.inputs())));
OpIndex VisitTuple(const TupleOp& op) {
return assembler().ReduceTuple(
base::VectorOf(MapToNewGraph<4>(op.inputs())));
}
OpIndex ReduceProjection(const ProjectionOp& op) {
return assembler.ReduceProjection(MapToNewGraph(op.input()), op.index);
OpIndex VisitProjection(const ProjectionOp& op) {
return assembler().ReduceProjection(MapToNewGraph(op.input()), op.index);
}
OpIndex ReduceWordBinop(const WordBinopOp& op) {
return assembler.ReduceWordBinop(
OpIndex VisitWordBinop(const WordBinopOp& op) {
return assembler().ReduceWordBinop(
MapToNewGraph(op.left()), MapToNewGraph(op.right()), op.kind, op.rep);
}
OpIndex ReduceFloatBinop(const FloatBinopOp& op) {
return assembler.ReduceFloatBinop(
OpIndex VisitFloatBinop(const FloatBinopOp& op) {
return assembler().ReduceFloatBinop(
MapToNewGraph(op.left()), MapToNewGraph(op.right()), op.kind, op.rep);
}
OpIndex ReduceUnreachable(const UnreachableOp& op) {
return assembler.ReduceUnreachable();
OpIndex VisitUnreachable(const UnreachableOp& op) {
return assembler().ReduceUnreachable();
}

Block* MapToNewGraph(BlockIndex old_index) const {
Block* result = block_mapping_[old_index.id()];
DCHECK_NOT_NULL(result);
return result;
}

OpIndex MapToNewGraph(OpIndex old_index) {
OpIndex result = op_mapping[old_index.id()];
OpIndex result = op_mapping_[old_index.id()];
DCHECK(result.valid());
return result;
}
@@ -560,24 +576,33 @@ struct OptimizationPhase<Reducers...>::Impl {
return result;
}

Block* MapToNewGraph(BlockIndex old_index) {
Block* result = block_mapping[old_index.id()];
DCHECK_NOT_NULL(result);
return result;
}

void FixLoopPhis(Block* loop) {
DCHECK(loop->IsLoop());
for (Operation& op : assembler.graph().operations(*loop)) {
for (Operation& op : assembler().output_graph().operations(*loop)) {
if (auto* pending_phi = op.TryCast<PendingLoopPhiOp>()) {
assembler.graph().template Replace<PhiOp>(
assembler.graph().Index(*pending_phi),
assembler().output_graph().template Replace<PhiOp>(
assembler().output_graph().Index(*pending_phi),
base::VectorOf({pending_phi->first(),
MapToNewGraph(pending_phi->old_backedge_index)}),
pending_phi->rep);
}
}
}

// TODO(dmercadier,tebbi): unify the ways we refer to the Assembler.
// Currently, we have Asm(), assembler(), and to a certain extent, stack().
Assembler& assembler() { return static_cast<Assembler&>(*this); }

Graph& input_graph_;
Graph& output_graph_;
Zone* phase_zone_;
compiler::NodeOriginTable* origins_;

const Block* current_input_block_;

// Mappings from the old graph to the new graph.
ZoneVector<Block*> block_mapping_;
ZoneVector<OpIndex> op_mapping_;
};

} // namespace v8::internal::compiler::turboshaft

@@ -34,8 +34,6 @@ template <class Next>
class SelectLoweringReducer : public Next {
public:
using Next::Asm;
SelectLoweringReducer(Graph* graph, Zone* phase_zone)
: Next(graph, phase_zone) {}

OpIndex ReduceSelect(OpIndex cond, OpIndex vtrue, OpIndex vfalse,
RegisterRepresentation rep, BranchHint hint,

@@ -55,7 +55,7 @@ class GrowingSidetable {

// Returns `true` if the table never contained any values, even before
// `Reset()`.
bool empty() { return table_.empty(); }
bool empty() const { return table_.empty(); }

private:
mutable ZoneVector<T> table_;

@@ -72,26 +72,24 @@ template <class Next>
class ValueNumberingReducer : public Next {
public:
using Next::Asm;
ValueNumberingReducer(Graph* graph, Zone* phase_zone)
: Next(graph, phase_zone),
dominator_path_(phase_zone),
depths_heads_(phase_zone) {
table_ = phase_zone->NewVector<Entry>(
ValueNumberingReducer()
: dominator_path_(Asm().phase_zone()), depths_heads_(Asm().phase_zone()) {
table_ = Asm().phase_zone()->template NewVector<Entry>(
base::bits::RoundUpToPowerOfTwo(
std::max<size_t>(128, graph->op_id_capacity() / 2)),
std::max<size_t>(128, Asm().input_graph().op_id_capacity() / 2)),
Entry());
entry_count_ = 0;
mask_ = table_.size() - 1;
}

#define EMIT_OP(Name) \
template <class... Args> \
OpIndex Reduce##Name(Args... args) { \
OpIndex next_index = Asm().graph().next_operation_index(); \
USE(next_index); \
OpIndex result = Next::Reduce##Name(args...); \
DCHECK_EQ(next_index, result); \
return AddOrFind<Name##Op>(result); \
#define EMIT_OP(Name) \
template <class... Args> \
OpIndex Reduce##Name(Args... args) { \
OpIndex next_index = Asm().output_graph().next_operation_index(); \
USE(next_index); \
OpIndex result = Next::Reduce##Name(args...); \
DCHECK_EQ(next_index, result); \
return AddOrFind<Name##Op>(result); \
}
TURBOSHAFT_OPERATION_LIST(EMIT_OP)
#undef EMIT_OP
@@ -133,7 +131,7 @@ class ValueNumberingReducer : public Next {

template <class Op>
OpIndex AddOrFind(OpIndex op_idx) {
const Op& op = Asm().graph().Get(op_idx).template Cast<Op>();
const Op& op = Asm().output_graph().Get(op_idx).template Cast<Op>();
if (std::is_same<Op, PendingLoopPhiOp>::value ||
!op.Properties().can_be_eliminated) {
return op_idx;
@@ -154,12 +152,12 @@ class ValueNumberingReducer : public Next {
return op_idx;
}
if (entry.hash == hash) {
const Operation& entry_op = Asm().graph().Get(entry.value);
const Operation& entry_op = Asm().output_graph().Get(entry.value);
if (entry_op.Is<Op>() &&
(!same_block_only ||
entry.block == Asm().current_block()->index()) &&
entry_op.Cast<Op>() == op) {
Asm().graph().RemoveLast();
Asm().output_graph().RemoveLast();
return entry.value;
}
}