[turboshaft] Basic TypedOptimization and new DeadCodeElimination

This CL introduces typed optimizations for Turboshaft, which replace every operation that produces a constant output (and has no side effects) with the corresponding constant (e.g., an operation whose inferred Word32 type is the single value 5 is replaced by a Word32Constant of 5).

In addition, a new pass for eliminating dead code is introduced that can not only remove dead operations, but also rewrite branches that are not required into GotoOps.

Drive-by: Introduce -0 as a "special value" for Float32Type and Float64Type to fix a few issues where 0 and -0 were treated as identical.

Bug: v8:12783
Change-Id: Ia1450ad7a9abb5d58c7d753596ed08a33a73184f
Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/4110993
Reviewed-by: Darius Mercadier <dmercadier@chromium.org>
Commit-Queue: Nico Hartmann <nicohartmann@chromium.org>
Cr-Commit-Position: refs/heads/main@{#85143}
Nico Hartmann authored on 2023-01-09 13:23:28 +01:00; committed by V8 LUCI CQ
parent 322e42bf13
commit 88eac4b870
19 changed files with 1203 additions and 285 deletions

View File

@@ -2898,6 +2898,7 @@ filegroup(
"src/compiler/turboshaft/assembler.h",
"src/compiler/turboshaft/assert-types-reducer.h",
"src/compiler/turboshaft/branch-elimination-reducer.h",
"src/compiler/turboshaft/dead-code-elimination-reducer.h",
"src/compiler/turboshaft/decompression-optimization.cc",
"src/compiler/turboshaft/decompression-optimization.h",
"src/compiler/turboshaft/deopt-data.h",
@@ -2932,8 +2933,10 @@ filegroup(
"src/compiler/turboshaft/type-inference-reducer.h",
"src/compiler/turboshaft/type-parser.cc",
"src/compiler/turboshaft/type-parser.h",
"src/compiler/turboshaft/typed-optimizations-reducer.h",
"src/compiler/turboshaft/types.cc",
"src/compiler/turboshaft/types.h",
"src/compiler/turboshaft/uniform-reducer-adapter.h",
"src/compiler/turboshaft/utils.cc",
"src/compiler/turboshaft/utils.h",
"src/compiler/turboshaft/value-numbering-reducer.h",

View File

@@ -2987,6 +2987,7 @@ v8_header_set("v8_internal_headers") {
"src/compiler/turboshaft/assembler.h",
"src/compiler/turboshaft/assert-types-reducer.h",
"src/compiler/turboshaft/branch-elimination-reducer.h",
"src/compiler/turboshaft/dead-code-elimination-reducer.h",
"src/compiler/turboshaft/decompression-optimization.h",
"src/compiler/turboshaft/deopt-data.h",
"src/compiler/turboshaft/fast-hash.h",
@@ -3009,7 +3010,9 @@ v8_header_set("v8_internal_headers") {
"src/compiler/turboshaft/snapshot-table.h",
"src/compiler/turboshaft/type-inference-reducer.h",
"src/compiler/turboshaft/type-parser.h",
"src/compiler/turboshaft/typed-optimizations-reducer.h",
"src/compiler/turboshaft/types.h",
"src/compiler/turboshaft/uniform-reducer-adapater.h",
"src/compiler/turboshaft/utils.h",
"src/compiler/turboshaft/value-numbering-reducer.h",
"src/compiler/turboshaft/variable-reducer.h",

View File

@@ -82,10 +82,12 @@
#include "src/compiler/turboshaft/assembler.h"
#include "src/compiler/turboshaft/assert-types-reducer.h"
#include "src/compiler/turboshaft/branch-elimination-reducer.h"
#include "src/compiler/turboshaft/dead-code-elimination-reducer.h"
#include "src/compiler/turboshaft/decompression-optimization.h"
#include "src/compiler/turboshaft/graph-builder.h"
#include "src/compiler/turboshaft/graph-visualizer.h"
#include "src/compiler/turboshaft/graph.h"
#include "src/compiler/turboshaft/index.h"
#include "src/compiler/turboshaft/late-escape-analysis-reducer.h"
#include "src/compiler/turboshaft/machine-optimization-reducer.h"
#include "src/compiler/turboshaft/memory-optimization.h"
@@ -94,6 +96,7 @@
#include "src/compiler/turboshaft/select-lowering-reducer.h"
#include "src/compiler/turboshaft/simplify-tf-loops.h"
#include "src/compiler/turboshaft/type-inference-reducer.h"
#include "src/compiler/turboshaft/typed-optimizations-reducer.h"
#include "src/compiler/turboshaft/types.h"
#include "src/compiler/turboshaft/value-numbering-reducer.h"
#include "src/compiler/turboshaft/variable-reducer.h"
@@ -111,6 +114,7 @@
#include "src/logging/code-events.h"
#include "src/logging/counters.h"
#include "src/logging/runtime-call-stats-scope.h"
#include "src/logging/runtime-call-stats.h"
#include "src/objects/shared-function-info.h"
#include "src/tracing/trace-event.h"
#include "src/utils/ostreams.h"
@@ -2119,25 +2123,43 @@ struct OptimizeTurboshaftPhase {
}
};
struct TurboshaftTypeInferencePhase {
DECL_PIPELINE_PHASE_CONSTANTS(TurboshaftTypeInference)
struct TurboshaftTypedOptimizationsPhase {
DECL_PIPELINE_PHASE_CONSTANTS(TurboshaftTypedOptimizations)
void Run(PipelineData* data, Zone* temp_zone) {
DCHECK(data->HasTurboshaftGraph());
turboshaft::OptimizationPhase<turboshaft::TypedOptimizationsReducer,
turboshaft::TypeInferenceReducer>::
Run(&data->turboshaft_graph(), temp_zone, data->node_origins(),
std::tuple{turboshaft::TypeInferenceReducerArgs{data->isolate()}});
}
};
struct TurboshaftTypeAssertionsPhase {
DECL_PIPELINE_PHASE_CONSTANTS(TurboshaftTypeAssertions)
void Run(PipelineData* data, Zone* temp_zone) {
DCHECK(data->HasTurboshaftGraph());
UnparkedScopeIfNeeded scope(data->broker());
if (v8_flags.turboshaft_assert_types) {
turboshaft::OptimizationPhase<turboshaft::AssertTypesReducer,
turboshaft::ValueNumberingReducer,
turboshaft::TypeInferenceReducer>::
Run(&data->turboshaft_graph(), temp_zone, data->node_origins(),
std::tuple{turboshaft::TypeInferenceReducerArgs{data->isolate()},
turboshaft::AssertTypesReducerArgs{data->isolate()}});
} else {
turboshaft::OptimizationPhase<turboshaft::TypeInferenceReducer>::Run(
&data->turboshaft_graph(), temp_zone, data->node_origins(),
std::tuple{turboshaft::TypeInferenceReducerArgs{data->isolate()}});
}
};
struct TurboshaftDeadCodeEliminationPhase {
DECL_PIPELINE_PHASE_CONSTANTS(TurboshaftDeadCodeElimination)
void Run(PipelineData* data, Zone* temp_zone) {
DCHECK(data->HasTurboshaftGraph());
turboshaft::OptimizationPhase<turboshaft::DeadCodeEliminationReducer>::Run(
&data->turboshaft_graph(), temp_zone, data->node_origins(),
std::tuple{});
}
};
@@ -2741,6 +2763,13 @@ struct PrintTurboshaftGraphPhase {
}
return false;
});
PrintTurboshaftCustomDataPerOperation(
data->info(), "Use Count (saturated)", data->turboshaft_graph(),
[](std::ostream& stream, const turboshaft::Graph& graph,
turboshaft::OpIndex index) -> bool {
stream << static_cast<int>(graph.Get(index).saturated_use_count);
return true;
});
}
if (data->info()->trace_turbo_graph()) {
@@ -3119,8 +3148,19 @@ bool PipelineImpl::OptimizeGraph(Linkage* linkage) {
Run<PrintTurboshaftGraphPhase>(
DecompressionOptimizationPhase::phase_name());
Run<TurboshaftTypeInferencePhase>();
Run<PrintTurboshaftGraphPhase>(TurboshaftTypeInferencePhase::phase_name());
Run<TurboshaftTypedOptimizationsPhase>();
Run<PrintTurboshaftGraphPhase>(
TurboshaftTypedOptimizationsPhase::phase_name());
if (v8_flags.turboshaft_assert_types) {
Run<TurboshaftTypeAssertionsPhase>();
Run<PrintTurboshaftGraphPhase>(
TurboshaftTypeAssertionsPhase::phase_name());
}
Run<TurboshaftDeadCodeEliminationPhase>();
Run<PrintTurboshaftGraphPhase>(
TurboshaftDeadCodeEliminationPhase::phase_name());
Run<TurboshaftRecreateSchedulePhase>(linkage);
TraceSchedule(data->info(), data, data->schedule(),

View File

@@ -116,6 +116,15 @@ class ReducerBase : public ReducerBaseForwarder<Next> {
void Analyze() {}
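// Hooks used by the GraphVisitor to decide whether an operation or branch
// can be dropped. These defaults eliminate unused operations and never
// rewrite branches; reducers such as the DeadCodeEliminationReducer below
// shadow them with the results of a more precise analysis.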
bool ShouldEliminateOperation(OpIndex index, const Operation& op) {
return op.saturated_use_count == 0;
}
bool ShouldEliminateBranch(OpIndex index, const BranchOp& op,
BlockIndex& goto_block) {
return false;
}
// Get, GetPredecessorValue, Set and NewFreshVariable should be overridden
// by the VariableReducer. If the reducer stack has no VariableReducer, then
// those methods should not be called.

View File

@@ -326,8 +326,11 @@ class BranchEliminationReducer : public Next {
// inline the destination block in place of the Goto.
// We pass `false` to `direct_input` here, as we're looking one
// block ahead of the current one.
Asm().CloneAndInlineBlock(old_dst, false);
return OpIndex::Invalid();
// TODO(nicohartmann@): Temporarily disable this "optimization" because it
// prevents dead code elimination in some cases. Reevaluate this and
// reenable if phases have been reordered properly.
// Asm().CloneAndInlineBlock(old_dst, false);
// return OpIndex::Invalid();
}
goto no_change;

View File

@@ -0,0 +1,461 @@
// Copyright 2022 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifndef V8_COMPILER_TURBOSHAFT_DEAD_CODE_ELIMINATION_REDUCER_H_
#define V8_COMPILER_TURBOSHAFT_DEAD_CODE_ELIMINATION_REDUCER_H_
#include <iomanip>
#include "src/common/globals.h"
#include "src/compiler/backend/instruction-codes.h"
#include "src/compiler/turboshaft/assembler.h"
#include "src/compiler/turboshaft/graph.h"
#include "src/compiler/turboshaft/index.h"
#include "src/compiler/turboshaft/operations.h"
namespace v8::internal::compiler::turboshaft {
// General overview
//
// DeadCodeAnalysis iterates the graph backwards to propagate liveness
// information. This information consists of the ControlState and the
// OperationState.
//
// OperationState reflects the liveness of operations. An operation is live if
//
// 1) The operation has the `is_required_when_unused` property, or
// 2) Any of its outputs is live (is used in a live operation).
//
// We introduce the concept of `weak live`, which differs from (strong)
// liveness only in how it impacts the ControlState, but is otherwise
// identical. An operation is weak live if
//
// Any of its outputs is weak live (is used in a weak live operation) and the
// operation is not (strong) live.
//
// If the operation is neither strong nor weak live, the operation is dead and
// can be eliminated.
//
// ControlState describes to which block we could jump immediately without
// changing the program semantics, that is, without skipping any side effects,
// required control flow or any strong(!) live operations. This information is
// then used at BranchOps to rewrite them to a GotoOp towards the
// corresponding block. Weak live operations thus are not eliminated, but they
// allow control flow to be rewritten around them. Marking stack checks (and
// all operations that they depend on) as weak live allows otherwise empty
// loops to be eliminated.
// From the output control state(s) c after an operation, the control state c'
// before the operation is computed as follows:
//
//                           | Bi               if ct, cf are Bi or Unreachable
//  c' = [Branch](ct, cf) =  {
//                           | NotEliminatable  otherwise
//
// And if c' = Bi, then the BranchOp can be rewritten into GotoOp(Bi).
//
//                  | NotEliminatable  if Op is strong live
//  c' = [Op](c) =  {
//                  | c                otherwise
//
//                       | Bk               if c = Bk
//  c' = [Merge i](c) =  { Bi               if Merge i has no live phis
//                       | NotEliminatable  otherwise
//
// Where Merge is an imaginary operation at the start of every merge block. This
// is the important part for the analysis. If block `Merge i` does not have any
// strong live phi operations, then we don't necessarily need to distinguish the
// control flow paths going into that block and if we further don't encounter
// any (strong) live operations along any of the paths leading to `Merge i`
// starting at some BranchOp, we can skip both branches and eliminate the
// control flow entirely by rewriting the BranchOp into a GotoOp(Bi). Notice
// that if the control state already describes a potential Goto-target Bk, then
// we do not replace that in order to track the farthest block we can jump to.
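//
// Example (illustrative): consider a diamond
//
//   B1: Branch(cond, B2, B3)
//   B2: Goto(B4)
//   B3: Goto(B4)
//   B4: Merge with no live phis
//
// Iterating backwards, B4's entry control state becomes Block(B4). The Gotos
// in B2 and B3 are only weak live, so (assuming those blocks contain nothing
// else live) they propagate Block(B4) unchanged. At the BranchOp in B1 both
// successor states are Block(B4), hence the branch is rewritten into a
// GotoOp(B4) when the graph is rebuilt.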
struct ControlState {
// Lattice:
//
//                NotEliminatable
//               /       |       \
//              B1      ...      Bn
//               \       |       /
//                 Unreachable
//
// We use ControlState to propagate information during the analysis about how
// branches can be rewritten. Read the values like this:
// - NotEliminatable: We cannot rewrite a branch, because we need the control
// flow (e.g. because we have seen live operations on either branch or need
// the phi at the merge).
// - Bj: Control can be rewritten to go directly to Block Bj, because all
// paths to that block are free of live operations.
// - Unreachable: This is the bottom element and it represents that we haven't
// seen anything live yet and are free to rewrite branches to any block
// reachable from the current block.
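// For example, LeastUpperBound(Unreachable, B1) = B1,
// LeastUpperBound(B1, B1) = B1 and LeastUpperBound(B1, B2) = NotEliminatable
// (see LeastUpperBound below).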
enum Kind {
kUnreachable,
kBlock,
kNotEliminatable,
};
static ControlState NotEliminatable() {
return ControlState{kNotEliminatable};
}
static ControlState Block(BlockIndex block) {
return ControlState{kBlock, block};
}
static ControlState Unreachable() { return ControlState{kUnreachable}; }
explicit ControlState(Kind kind, BlockIndex block = BlockIndex::Invalid())
: kind(kind), block(block) {}
static ControlState LeastUpperBound(const ControlState& lhs,
const ControlState& rhs) {
switch (lhs.kind) {
case Kind::kUnreachable:
return rhs;
case Kind::kBlock: {
if (rhs.kind == Kind::kUnreachable) return lhs;
if (rhs.kind == Kind::kNotEliminatable) return rhs;
if (lhs.block == rhs.block) return lhs;
return NotEliminatable();
}
case Kind::kNotEliminatable:
return lhs;
}
}
Kind kind;
BlockIndex block;
};
inline std::ostream& operator<<(std::ostream& stream,
const ControlState& state) {
switch (state.kind) {
case ControlState::kNotEliminatable:
return stream << "NotEliminatable";
case ControlState::kBlock:
return stream << "Block(" << state.block << ")";
case ControlState::kUnreachable:
return stream << "Unreachable";
}
}
inline bool operator==(const ControlState& lhs, const ControlState& rhs) {
if (lhs.kind != rhs.kind) return false;
if (lhs.kind == ControlState::kBlock) {
DCHECK_EQ(rhs.kind, ControlState::kBlock);
return lhs.block == rhs.block;
}
return true;
}
inline bool operator!=(const ControlState& lhs, const ControlState& rhs) {
return !(lhs == rhs);
}
struct OperationState {
// Lattice:
//
//    Live
//     |
//  WeakLive
//     |
//    Dead
//
// Describes the liveness state of an operation. We use the notion of weak
// liveness to express that an operation needs to be kept only if we cannot
// eliminate (jump over) the entire basic block. In other words: a weak live
// operation will not be eliminated, but it doesn't prevent the propagation of
// the control state that allows jumping over the block if the block contains
// no (strong) live operations. This will be useful to eliminate loops that
// are kept alive only by the contained stack checks.
enum Liveness : uint8_t {
kDead,
kWeakLive,
kLive,
};
static Liveness LeastUpperBound(Liveness lhs, Liveness rhs) {
static_assert(kLive > kWeakLive && kWeakLive > kDead);
return std::max(lhs, rhs);
}
};
inline std::ostream& operator<<(std::ostream& stream,
OperationState::Liveness liveness) {
switch (liveness) {
case OperationState::kDead:
return stream << "Dead";
case OperationState::kWeakLive:
return stream << "WeakLive";
case OperationState::kLive:
return stream << "Live";
}
UNREACHABLE();
}
class DeadCodeAnalysis {
public:
explicit DeadCodeAnalysis(Graph& graph, Zone* phase_zone)
: graph_(graph),
liveness_(graph.op_id_count(), OperationState::kDead, phase_zone),
entry_control_state_(graph.block_count(), ControlState::Unreachable(),
phase_zone),
rewritable_branch_targets_(phase_zone) {}
template <bool trace_analysis>
std::pair<FixedSidetable<OperationState::Liveness>,
ZoneMap<uint32_t, BlockIndex>>
Run() {
if constexpr (trace_analysis) {
std::cout << "===== Running Dead Code Analysis =====\n";
}
for (uint32_t unprocessed_count = graph_.block_count();
unprocessed_count > 0;) {
BlockIndex block_index = static_cast<BlockIndex>(unprocessed_count - 1);
--unprocessed_count;
const Block& block = graph_.Get(block_index);
ProcessBlock<trace_analysis>(block, &unprocessed_count);
}
if constexpr (trace_analysis) {
std::cout << "===== Results =====\n== Operation State ==\n";
for (Block b : graph_.blocks()) {
std::cout << PrintAsBlockHeader{b} << ":\n";
for (OpIndex index : graph_.OperationIndices(b)) {
std::cout << " " << std::setw(8) << liveness_[index] << " "
<< std::setw(3) << index.id() << ": " << graph_.Get(index)
<< "\n";
}
}
std::cout << "== Rewritable Branches ==\n";
for (auto [branch_id, target] : rewritable_branch_targets_) {
DCHECK(target.valid());
std::cout << " " << std::setw(3) << branch_id << ": Branch ==> Goto "
<< target.id() << "\n";
}
std::cout << "==========\n";
}
return {std::move(liveness_), std::move(rewritable_branch_targets_)};
}
template <bool trace_analysis>
void ProcessBlock(const Block& block, uint32_t* unprocessed_count) {
if constexpr (trace_analysis) {
std::cout << "\n==========\n=== Processing " << PrintAsBlockHeader{block}
<< ":\n==========\nEXIT CONTROL STATE\n";
}
auto successors = SuccessorBlocks(block.LastOperation(graph_));
ControlState control_state = ControlState::Unreachable();
for (size_t i = 0; i < successors.size(); ++i) {
const auto& r = entry_control_state_[successors[i]->index()];
if constexpr (trace_analysis) {
std::cout << " Successor " << successors[i]->index() << ": " << r
<< "\n";
}
control_state = ControlState::LeastUpperBound(control_state, r);
}
if constexpr (trace_analysis)
std::cout << "Combined: " << control_state << "\n";
// If control_state == ControlState::Block(b), then the merge block b is
// reachable through every path starting at the current block without any
// live operations.
if constexpr (trace_analysis) std::cout << "OPERATION STATE\n";
auto op_range = graph_.OperationIndices(block);
bool has_live_phis = false;
for (auto it = op_range.end(); it != op_range.begin();) {
--it;
OpIndex index = *it;
const Operation& op = graph_.Get(index);
if constexpr (trace_analysis) std::cout << index << ":" << op << "\n";
OperationState::Liveness op_state = liveness_[index];
if (op.Is<BranchOp>()) {
if (control_state != ControlState::NotEliminatable()) {
// Branch is still dead.
op_state = OperationState::kWeakLive;
// If we know a target block we can rewrite into a goto.
if (control_state.kind == ControlState::kBlock) {
BlockIndex target = control_state.block;
DCHECK(target.valid());
rewritable_branch_targets_[index.id()] = target;
}
} else {
// Branch is live. We cannot rewrite it.
op_state = OperationState::kLive;
auto it = rewritable_branch_targets_.find(index.id());
if (it != rewritable_branch_targets_.end()) {
rewritable_branch_targets_.erase(it);
}
}
} else if (op.saturated_use_count == 0) {
// Operation is already recognized as dead by a previous analysis.
DCHECK_EQ(op_state, OperationState::kDead);
} else if (op.Is<GotoOp>()) {
// Gotos are WeakLive.
op_state = OperationState::kWeakLive;
} else if (op.Properties().is_required_when_unused) {
op_state = OperationState::kLive;
} else if (op.Is<PhiOp>()) {
has_live_phis = has_live_phis || (op_state == OperationState::kLive);
if (block.IsLoop()) {
const PhiOp& phi = op.Cast<PhiOp>();
// Check if the operation state of the input coming from the backedge
// changes the liveness of the phi. In that case, trigger a revisit of
// the loop.
if (liveness_[phi.inputs()[PhiOp::kLoopPhiBackEdgeIndex]] <
op_state) {
if constexpr (trace_analysis) {
std::cout
<< "Operation state has changed. Need to revisit loop.\n";
}
Block* backedge = block.LastPredecessor();
// Revisit the loop by increasing the {unprocessed_count} to include
// all blocks of the loop.
*unprocessed_count =
std::max(*unprocessed_count, backedge->index().id() + 1);
}
}
}
// TODO(nicohartmann@): Handle Stack Guards to allow elimination of
// otherwise empty loops.
//
// if(const CallOp* call = op.TryCast<CallOp>()) {
// if(std::string(call->descriptor->descriptor->debug_name())
// == "StackGuard") {
// DCHECK_EQ(op_state, OperationState::kLive);
// op_state = OperationState::kWeakLive;
// }
// }
DCHECK_LE(liveness_[index], op_state);
// If everything is still dead, we don't need to update anything.
if (op_state == OperationState::kDead) continue;
// We have a (possibly weak) live operation.
if constexpr (trace_analysis) {
std::cout << " " << op_state << " <== " << liveness_[index] << "\n";
}
liveness_[index] = op_state;
if constexpr (trace_analysis) {
if (op.input_count > 0) std::cout << " Updating inputs:\n";
}
for (OpIndex input : op.inputs()) {
auto old_input_state = liveness_[input];
auto new_input_state =
OperationState::LeastUpperBound(old_input_state, op_state);
if constexpr (trace_analysis) {
std::cout << " " << input << ": " << new_input_state
<< " <== " << old_input_state << " || " << op_state << "\n";
}
liveness_[input] = new_input_state;
}
if (op_state == OperationState::kLive &&
control_state != ControlState::NotEliminatable()) {
// This block has live operations, which means that we can't skip it.
// Reset the ControlState to NotEliminatable.
if constexpr (trace_analysis) {
std::cout << "Block has live operations. New control state: "
<< ControlState::NotEliminatable() << "\n";
}
control_state = ControlState::NotEliminatable();
}
}
if constexpr (trace_analysis) {
std::cout << "ENTRY CONTROL STATE\nAfter operations: " << control_state
<< "\n";
}
// If this block is a merge and we don't have any live phis, it is a
// potential target for branch redirection.
if (block.IsLoopOrMerge()) {
if (!has_live_phis) {
if (control_state.kind != ControlState::kBlock) {
control_state = ControlState::Block(block.index());
if constexpr (trace_analysis) {
std::cout
<< "Block is loop or merge and has no live phi operations.\n";
}
} else if constexpr (trace_analysis) {
std::cout << "Block is loop or merge and has no live phi "
"operations.\nControl state already has a goto block: "
<< control_state << "\n";
}
}
if (block.IsLoop() &&
entry_control_state_[block.index()] != control_state) {
if constexpr (trace_analysis) {
std::cout << "Control state has changed. Need to revisit loop.\n";
}
Block* backedge = block.LastPredecessor();
DCHECK_NOT_NULL(backedge);
// Revisit the loop by increasing the {unprocessed_count} to include
// all blocks of the loop.
*unprocessed_count =
std::max(*unprocessed_count, backedge->index().id() + 1);
}
}
if constexpr (trace_analysis) {
std::cout << "Final: " << control_state << "\n";
}
entry_control_state_[block.index()] = control_state;
}
private:
Graph& graph_;
FixedSidetable<OperationState::Liveness> liveness_;
FixedBlockSidetable<ControlState> entry_control_state_;
ZoneMap<uint32_t, BlockIndex> rewritable_branch_targets_;
};
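// Plugs the DeadCodeAnalysis into a reducer stack: Analyze() runs the
// backwards analysis once, and the Should* hooks below feed its results to
// the GraphVisitor while the output graph is built.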
template <class Next>
class DeadCodeEliminationReducer : public Next {
public:
using Next::Asm;
template <class... Args>
explicit DeadCodeEliminationReducer(const std::tuple<Args...>& args)
: Next(args),
branch_rewrite_targets_(Asm().phase_zone()),
analyzer_(Asm().modifiable_input_graph(), Asm().phase_zone()) {}
void Analyze() {
// TODO(nicohartmann@): We might want to make this a flag.
constexpr bool trace_analysis = false;
std::tie(liveness_, branch_rewrite_targets_) =
analyzer_.Run<trace_analysis>();
Next::Analyze();
}
bool ShouldEliminateOperation(OpIndex index, const Operation& op) {
DCHECK(!op.Is<BranchOp>());
return (*liveness_)[index] == OperationState::kDead;
}
bool ShouldEliminateBranch(OpIndex index, const BranchOp& op,
BlockIndex& goto_target) {
auto it = branch_rewrite_targets_.find(index.id());
if (it == branch_rewrite_targets_.end()) return false;
goto_target = it->second;
return true;
}
private:
base::Optional<FixedSidetable<OperationState::Liveness>> liveness_;
ZoneMap<uint32_t, BlockIndex> branch_rewrite_targets_;
DeadCodeAnalysis analyzer_;
};
} // namespace v8::internal::compiler::turboshaft
#endif // V8_COMPILER_TURBOSHAFT_DEAD_CODE_ELIMINATION_REDUCER_H_

View File

@@ -17,6 +17,7 @@
#include "src/base/vector.h"
#include "src/compiler/node-origin-table.h"
#include "src/compiler/turboshaft/graph.h"
#include "src/compiler/turboshaft/index.h"
#include "src/compiler/turboshaft/operations.h"
#include "src/compiler/turboshaft/snapshot-table.h"
@@ -56,66 +57,6 @@ V8_INLINE bool ShouldSkipOperation(const Operation& op) {
return op.saturated_use_count == 0;
}
// TODO(dmercadier, tebbi): transform this analyzer into a reducer, and plug
// it into some reducer stacks.
struct LivenessAnalyzer : AnalyzerBase {
using Base = AnalyzerBase;
// Using `uint8_t` instead of `bool` prevents `std::vector` from using a
// bitvector, which has worse performance.
std::vector<uint8_t> op_used;
LivenessAnalyzer(const Graph& graph, Zone* phase_zone)
: AnalyzerBase(graph, phase_zone), op_used(graph.op_id_count(), false) {}
bool OpIsUsed(OpIndex i) { return op_used[i.id()]; }
void Run() {
for (uint32_t unprocessed_count = graph.block_count();
unprocessed_count > 0;) {
BlockIndex block_index = static_cast<BlockIndex>(unprocessed_count - 1);
--unprocessed_count;
const Block& block = graph.Get(block_index);
if (V8_UNLIKELY(block.IsLoop())) {
ProcessBlock<true>(block, &unprocessed_count);
} else {
ProcessBlock<false>(block, &unprocessed_count);
}
}
}
template <bool is_loop>
void ProcessBlock(const Block& block, uint32_t* unprocessed_count) {
auto op_range = graph.OperationIndices(block);
for (auto it = op_range.end(); it != op_range.begin();) {
--it;
OpIndex index = *it;
const Operation& op = graph.Get(index);
if (op.Properties().is_required_when_unused) {
op_used[index.id()] = true;
} else if (!OpIsUsed(index)) {
continue;
}
if constexpr (is_loop) {
if (op.Is<PhiOp>()) {
const PhiOp& phi = op.Cast<PhiOp>();
// Mark the loop backedge as used. Trigger a revisit if it wasn't
// marked as used already.
if (!OpIsUsed(phi.inputs()[PhiOp::kLoopPhiBackEdgeIndex])) {
Block* backedge = block.LastPredecessor();
// Revisit the loop by increasing the `unprocessed_count` to include
// all blocks of the loop.
*unprocessed_count =
std::max(*unprocessed_count, backedge->index().id() + 1);
}
}
}
for (OpIndex input : op.inputs()) {
op_used[input.id()] = true;
}
}
}
};
template <template <class> class... Reducers>
class OptimizationPhase {
public:
@@ -337,7 +278,9 @@ class GraphVisitor {
USE(first_output_index);
const Operation& op = input_graph().Get(index);
if constexpr (trace_reduction) TraceReductionStart(index);
if (ShouldSkipOperation(op)) {
if (!op.Is<BranchOp>() && assembler().ShouldEliminateOperation(index, op)) {
// Branches are never eliminated here; they must instead be reduced to
// Goto operations. See VisitBranch and ShouldEliminateBranch.
if constexpr (trace_reduction) TraceOperationSkipped();
return true;
}
@@ -430,6 +373,12 @@ class GraphVisitor {
return OpIndex::Invalid();
}
V8_INLINE OpIndex VisitBranch(const BranchOp& op) {
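// If the analysis determined that this branch can be skipped, lower it to
// a Goto to the computed target block instead of emitting the branch.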
BlockIndex goto_block_index;
if (assembler().ShouldEliminateBranch(input_graph().Index(op), op,
goto_block_index)) {
DCHECK(goto_block_index.valid());
return assembler().ReduceGoto(MapToNewGraph(goto_block_index));
}
Block* if_true = MapToNewGraph(op.if_true->index());
Block* if_false = MapToNewGraph(op.if_false->index());
return assembler().ReduceBranch(MapToNewGraph(op.condition()), if_true,

View File

@@ -79,6 +79,8 @@ class FixedSidetable {
static_assert(std::is_same_v<Key, OpIndex> ||
std::is_same_v<Key, BlockIndex>);
explicit FixedSidetable(size_t size, Zone* zone) : table_(size, zone) {}
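// Initializes all {size} entries to {default_value}.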
FixedSidetable(size_t size, const T& default_value, Zone* zone)
: table_(size, default_value, zone) {}
T& operator[](Key op) {
DCHECK_LT(op.id(), table_.size());

View File

@@ -246,22 +246,26 @@ struct FloatOperationTyper {
static constexpr float_t inf = std::numeric_limits<float_t>::infinity();
static constexpr int kSetThreshold = type_t::kMaxSetSize;
static type_t Range(float_t min, float_t max, bool maybe_nan, Zone* zone) {
static type_t Range(float_t min, float_t max, uint32_t special_values,
Zone* zone) {
DCHECK_LE(min, max);
if (min == max) return Set({min}, maybe_nan, zone);
return type_t::Range(
min, max, maybe_nan ? type_t::kNaN : type_t::kNoSpecialValues, zone);
if (min == max) return Set({min}, special_values, zone);
return type_t::Range(min, max, special_values, zone);
}
static type_t Set(std::vector<float_t> elements, bool maybe_nan, Zone* zone) {
static type_t Set(std::vector<float_t> elements, uint32_t special_values,
Zone* zone) {
base::sort(elements);
elements.erase(std::unique(elements.begin(), elements.end()),
elements.end());
if (base::erase_if(elements, [](float_t v) { return std::isnan(v); }) > 0) {
maybe_nan = true;
special_values |= type_t::kNaN;
}
return type_t::Set(
elements, maybe_nan ? type_t::kNaN : type_t::kNoSpecialValues, zone);
if (base::erase_if(elements, [](float_t v) { return IsMinusZero(v); }) >
0) {
special_values |= type_t::kMinusZero;
}
return type_t::Set(elements, special_values, zone);
}
static bool IsIntegerSet(const type_t& t) {
@@ -272,10 +276,10 @@ struct FloatOperationTyper {
float_t unused_ipart;
float_t min = t.set_element(0);
if (std::modf(min, &unused_ipart) != 0.0) return false;
if (min == -std::numeric_limits<float_t>::infinity()) return false;
if (min == -inf) return false;
float_t max = t.set_element(size - 1);
if (std::modf(max, &unused_ipart) != 0.0) return false;
if (max == std::numeric_limits<float_t>::infinity()) return false;
if (max == inf) return false;
for (int i = 1; i < size - 1; ++i) {
if (std::modf(t.set_element(i), &unused_ipart) != 0.0) return false;
@@ -283,11 +287,15 @@ struct FloatOperationTyper {
return true;
}
static bool IsZeroish(const type_t& l) {
return l.has_nan() || l.has_minus_zero() || l.Contains(0);
}
// Tries to construct the product of two sets where values are generated using
// {combine}. Returns Type::Invalid() if a set cannot be constructed (e.g.
// because the result exceeds the maximal number of set elements).
static Type ProductSet(const type_t& l, const type_t& r, bool maybe_nan,
Zone* zone,
static Type ProductSet(const type_t& l, const type_t& r,
uint32_t special_values, Zone* zone,
std::function<float_t(float_t, float_t)> combine) {
DCHECK(l.is_set());
DCHECK(r.is_set());
@@ -297,26 +305,46 @@ struct FloatOperationTyper {
results.push_back(combine(l.set_element(i), r.set_element(j)));
}
}
maybe_nan = (base::erase_if(results,
[](float_t v) { return std::isnan(v); }) > 0) ||
maybe_nan;
if (base::erase_if(results, [](float_t v) { return std::isnan(v); }) > 0) {
special_values |= type_t::kNaN;
}
if (base::erase_if(results, [](float_t v) { return IsMinusZero(v); }) > 0) {
special_values |= type_t::kMinusZero;
}
base::sort(results);
auto it = std::unique(results.begin(), results.end());
if (std::distance(results.begin(), it) > kSetThreshold)
return Type::Invalid();
results.erase(it, results.end());
return Set(std::move(results),
maybe_nan ? type_t::kNaN : type_t::kNoSpecialValues, zone);
if (results.empty()) return type_t::OnlySpecialValues(special_values);
return Set(std::move(results), special_values, zone);
}
static Type Add(const type_t& l, const type_t& r, Zone* zone) {
static Type Add(type_t l, type_t r, Zone* zone) {
// Addition can return NaN if either input can be NaN or we try to compute
// the sum of two infinities of opposite sign.
if (l.is_only_nan() || r.is_only_nan()) return type_t::NaN();
bool maybe_nan = l.has_nan() || r.has_nan();
// Addition can yield minus zero only if both inputs can be minus zero.
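// (e.g. (-0.0) + (-0.0) == -0.0, but (-0.0) + 0.0 == +0.0).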
bool maybe_minuszero = true;
if (l.has_minus_zero()) {
l = type_t::LeastUpperBound(l, type_t::Constant(0), zone);
} else {
maybe_minuszero = false;
}
if (r.has_minus_zero()) {
r = type_t::LeastUpperBound(r, type_t::Constant(0), zone);
} else {
maybe_minuszero = false;
}
uint32_t special_values = (maybe_nan ? type_t::kNaN : 0) |
(maybe_minuszero ? type_t::kMinusZero : 0);
// If both sides are decently small sets, we produce the product set.
auto combine = [](float_t a, float_t b) { return a + b; };
if (l.is_set() && r.is_set()) {
auto result = ProductSet(l, r, maybe_nan, zone, combine);
auto result = ProductSet(l, r, special_values, zone, combine);
if (!result.IsInvalid()) return result;
}
@@ -334,24 +362,41 @@ struct FloatOperationTyper {
for (int i = 0; i < 4; ++i) {
if (std::isnan(results[i])) ++nans;
}
if (nans > 0) {
special_values |= type_t::kNaN;
if (nans >= 4) {
// All combinations of inputs produce NaN.
return type_t::NaN();
return type_t::OnlySpecialValues(special_values);
}
}
maybe_nan = maybe_nan || nans > 0;
const float_t result_min = array_min(results);
const float_t result_max = array_max(results);
return Range(result_min, result_max, maybe_nan, zone);
return Range(result_min, result_max, special_values, zone);
}
static Type Subtract(const type_t& l, const type_t& r, Zone* zone) {
static Type Subtract(type_t l, type_t r, Zone* zone) {
// Subtraction can return NaN if either input can be NaN or we try to
// compute the difference of two infinities of the same sign.
if (l.is_only_nan() || r.is_only_nan()) return type_t::NaN();
bool maybe_nan = l.has_nan() || r.has_nan();
// Subtraction can yield minus zero if {lhs} can be minus zero and {rhs}
// can be zero.
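// (e.g. (-0.0) - 0.0 == -0.0, but (-0.0) - (-0.0) == +0.0).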
bool maybe_minuszero = false;
if (l.has_minus_zero()) {
l = type_t::LeastUpperBound(l, type_t::Constant(0), zone);
maybe_minuszero = r.Contains(0);
}
if (r.has_minus_zero()) {
r = type_t::LeastUpperBound(r, type_t::Constant(0), zone);
}
uint32_t special_values = (maybe_nan ? type_t::kNaN : 0) |
(maybe_minuszero ? type_t::kMinusZero : 0);
// If both sides are decently small sets, we produce the product set.
auto combine = [](float_t a, float_t b) { return a - b; };
if (l.is_set() && r.is_set()) {
auto result = ProductSet(l, r, maybe_nan, zone, combine);
auto result = ProductSet(l, r, special_values, zone, combine);
if (!result.IsInvalid()) return result;
}
@@ -369,24 +414,44 @@ struct FloatOperationTyper {
for (int i = 0; i < 4; ++i) {
if (std::isnan(results[i])) ++nans;
}
if (nans > 0) {
special_values |= type_t::kNaN;
if (nans >= 4) {
// All combinations of inputs produce NaN.
return type_t::NaN();
}
maybe_nan = maybe_nan || nans > 0;
}
const float_t result_min = array_min(results);
const float_t result_max = array_max(results);
return Range(result_min, result_max, maybe_nan, zone);
return Range(result_min, result_max, special_values, zone);
}
static Type Multiply(const type_t& l, const type_t& r, Zone* zone) {
static Type Multiply(type_t l, type_t r, Zone* zone) {
// Multiplication propagates NaN:
// NaN * x = NaN (regardless of sign of x)
// 0 * Infinity = NaN (regardless of signs)
if (l.is_only_nan() || r.is_only_nan()) return type_t::NaN();
bool maybe_nan = l.has_nan() || r.has_nan();
bool maybe_nan = l.has_nan() || r.has_nan() ||
(IsZeroish(l) && (r.min() == -inf || r.max() == inf)) ||
(IsZeroish(r) && (l.min() == -inf || l.max() == inf));
// Try to rule out -0.
bool maybe_minuszero = l.has_minus_zero() || r.has_minus_zero() ||
(IsZeroish(l) && r.min() < 0.0) ||
(IsZeroish(r) && l.min() < 0.0);
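// (e.g. 0.0 * -1.0 == -0.0 and -0.0 * 5.0 == -0.0).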
if (l.has_minus_zero()) {
l = type_t::LeastUpperBound(l, type_t::Constant(0), zone);
}
if (r.has_minus_zero()) {
r = type_t::LeastUpperBound(r, type_t::Constant(0), zone);
}
uint32_t special_values = (maybe_nan ? type_t::kNaN : 0) |
(maybe_minuszero ? type_t::kMinusZero : 0);
// If both sides are decently small sets, we produce the product set.
auto combine = [](float_t a, float_t b) { return a * b; };
if (l.is_set() && r.is_set()) {
auto result = ProductSet(l, r, maybe_nan, zone, combine);
auto result = ProductSet(l, r, special_values, zone, combine);
if (!result.IsInvalid()) return result;
}
@@ -406,63 +471,58 @@ struct FloatOperationTyper {
}
}
float_t result_min = array_min(results);
float_t result_max = array_max(results);
if (result_min <= 0.0 && 0.0 <= result_max &&
(l_min < 0.0 || r_min < 0.0)) {
special_values |= type_t::kMinusZero;
// Remove -0.
result_min += 0.0;
result_max += 0.0;
}
// 0 * V8_INFINITY is NaN, regardless of sign
if (((l_min == -inf || l_max == inf) && (r_min <= 0.0 && 0.0 <= r_max)) ||
((r_min == -inf || r_max == inf) && (l_min <= 0.0 && 0.0 <= l_max))) {
maybe_nan = true;
special_values |= type_t::kNaN;
}
const float_t result_min = array_min(results);
const float_t result_max = array_max(results);
type_t type = Range(result_min, result_max, maybe_nan, zone);
DCHECK_IMPLIES(
result_min <= 0.0 && 0.0 <= result_max && (l_min < 0.0 || r_min < 0.0),
type.Contains(-0.0));
type_t type = Range(result_min, result_max, special_values, zone);
return type;
}
static Type Divide(const type_t& l, const type_t& r, Zone* zone) {
// Division is tricky, so all we do is try ruling out -0 and NaN.
if (l.is_only_nan() || r.is_only_nan()) return type_t::NaN();
bool maybe_nan = l.has_nan() || r.has_nan();
auto [l_min, l_max] = l.minmax();
auto [r_min, r_max] = r.minmax();
bool maybe_nan =
(IsZeroish(l) && IsZeroish(r)) ||
((l_min == -inf || l_max == inf) && (r_min == -inf || r_max == inf));
// Try to rule out -0.
bool maybe_minuszero =
(IsZeroish(l) && r.min() < 0.0) || (r.min() == -inf || r.max() == inf);
uint32_t special_values = (maybe_nan ? type_t::kNaN : 0) |
(maybe_minuszero ? type_t::kMinusZero : 0);
// If both sides are decently small sets, we produce the product set.
auto combine = [](float_t a, float_t b) {
if (b == 0) return nan_v<Bits>;
return a / b;
};
if (l.is_set() && r.is_set()) {
auto result = ProductSet(l, r, maybe_nan, zone, combine);
auto result = ProductSet(l, r, special_values, zone, combine);
if (!result.IsInvalid()) return result;
}
// Otherwise try to construct a range.
auto [l_min, l_max] = l.minmax();
auto [r_min, r_max] = r.minmax();
maybe_nan =
maybe_nan || (l.Contains(0) && r.Contains(0)) ||
((l_min == -inf || l_max == inf) && (r_min == -inf || r_max == inf));
// If the divisor spans across 0, we give up on a precise type.
if (std::signbit(r_min) != std::signbit(r_max)) {
return type_t::Any(maybe_nan ? type_t::kNaN : type_t::kNoSpecialValues);
}
// If divisor includes 0, we can try to at least infer sign of the result.
if (r.Contains(0)) {
DCHECK_EQ(r_min, 0);
if (l_max < 0) {
// All values are negative.
return Range(-inf, next_smaller(float_t{0}),
maybe_nan ? type_t::kNaN : type_t::kNoSpecialValues, zone);
}
if (r_min >= 0) {
// All values are positive.
return Range(0, inf,
maybe_nan ? type_t::kNaN : type_t::kNoSpecialValues, zone);
}
return type_t::Any(maybe_nan ? type_t::kNaN : type_t::kNoSpecialValues);
}
const bool r_all_positive = r_min >= 0 && !r.has_minus_zero();
const bool r_all_negative = r_max < 0;
// If r doesn't span 0, we can try to compute a more precise type.
if (r_all_positive || r_all_negative) {
// If r does not contain 0 or -0, we can compute a range.
if (r_min > 0 && !r.has_minus_zero()) {
std::array<float_t, 4> results;
results[0] = l_min / r_min;
results[1] = l_min / r_max;
@@ -471,23 +531,56 @@ struct FloatOperationTyper {
const float_t result_min = array_min(results);
const float_t result_max = array_max(results);
return Range(result_min, result_max,
maybe_nan ? type_t::kNaN : type_t::kNoSpecialValues, zone);
return Range(result_min, result_max, special_values, zone);
}
static Type Modulus(const type_t& l, const type_t& r, Zone* zone) {
// Otherwise we try to check for the sign of the result.
if (l_max < 0) {
if (r_all_positive) {
// All values are negative.
DCHECK_NE(special_values & type_t::kMinusZero, 0);
return Range(-inf, next_smaller(float_t{0}), special_values, zone);
} else {
DCHECK(r_all_negative);
// All values are positive.
return Range(0, inf, special_values, zone);
}
} else if (l_min >= 0 && !l.has_minus_zero()) {
if (r_all_positive) {
// All values are positive.
DCHECK_EQ(special_values & type_t::kMinusZero, 0);
return Range(0, inf, special_values, zone);
} else {
DCHECK(r_all_negative);
// All values are negative.
return Range(-inf, next_smaller(float_t{0}), special_values, zone);
}
}
}
// Otherwise we give up on a precise type.
return type_t::Any(special_values);
}
static Type Modulus(type_t l, type_t r, Zone* zone) {
// Modulus can yield NaN if either {lhs} or {rhs} are NaN, or
// {lhs} is not finite, or the {rhs} is a zero value.
if (l.is_only_nan() || r.is_only_nan()) return type_t::NaN();
bool maybe_nan =
l.has_nan() || r.has_nan() || l.Contains(-inf) || l.Contains(inf);
if (r.Contains(0)) {
if (r.IsSubtypeOf(type_t::Set({0}, type_t::kNaN, zone))) {
// If rhs contains nothing but 0 and NaN, the result will always be NaN.
return type_t::NaN();
l.has_nan() || IsZeroish(r) || l.min() == -inf || l.max() == inf;
// Deal with -0 inputs: only the sign bit of {lhs} matters for the result.
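// (e.g. fmod(-0.0, 3.0) == -0.0, while fmod(3.0, 0.0) is NaN).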
bool maybe_minuszero = false;
if (l.has_minus_zero()) {
maybe_minuszero = true;
l = type_t::LeastUpperBound(l, type_t::Constant(0), zone);
}
maybe_nan = true;
if (r.has_minus_zero()) {
r = type_t::LeastUpperBound(r, type_t::Constant(0), zone);
}
uint32_t special_values = (maybe_nan ? type_t::kNaN : 0) |
(maybe_minuszero ? type_t::kMinusZero : 0);
// For integer inputs {l} and {r} we can infer a precise type.
if (IsIntegerSet(l) && IsIntegerSet(r)) {
auto [l_min, l_max] = l.minmax();
@@ -507,22 +600,37 @@ struct FloatOperationTyper {
min = 0.0 - abs;
max = abs;
}
if (min == max) return Set({min}, maybe_nan, zone);
return Range(min, max, maybe_nan, zone);
if (min == max) return Set({min}, special_values, zone);
return Range(min, max, special_values, zone);
}
return type_t::Any(maybe_nan ? type_t::kNaN : type_t::kNoSpecialValues);
// Otherwise, we give up.
return type_t::Any(special_values);
}
static Type Min(const type_t& l, const type_t& r, Zone* zone) {
static Type Min(type_t l, type_t r, Zone* zone) {
if (l.is_only_nan() || r.is_only_nan()) return type_t::NaN();
bool maybe_nan = l.has_nan() || r.has_nan();
// In order to ensure monotonicity of the computation below, we additionally
// pretend +0 is present (for simplicity on both sides).
bool maybe_minuszero = false;
if (l.has_minus_zero() && !(r.max() < 0.0)) {
maybe_minuszero = true;
l = type_t::LeastUpperBound(l, type_t::Constant(0), zone);
}
if (r.has_minus_zero() && !(l.max() < 0.0)) {
maybe_minuszero = true;
r = type_t::LeastUpperBound(r, type_t::Constant(0), zone);
}
uint32_t special_values = (maybe_nan ? type_t::kNaN : 0) |
(maybe_minuszero ? type_t::kMinusZero : 0);
// If both sides are decently small sets, we produce the product set.
auto combine = [](float_t a, float_t b) { return std::min(a, b); };
if (l.is_set() && r.is_set()) {
// TODO(nicohartmann@): There is a faster way to compute this set.
auto result = ProductSet(l, r, maybe_nan, zone, combine);
auto result = ProductSet(l, r, special_values, zone, combine);
if (!result.IsInvalid()) return result;
}
@@ -532,18 +640,32 @@ struct FloatOperationTyper {
auto min = std::min(l_min, r_min);
auto max = std::min(l_max, r_max);
return Range(min, max, maybe_nan, zone);
return Range(min, max, special_values, zone);
}
static Type Max(const type_t& l, const type_t& r, Zone* zone) {
static Type Max(type_t l, type_t r, Zone* zone) {
if (l.is_only_nan() || r.is_only_nan()) return type_t::NaN();
bool maybe_nan = l.has_nan() || r.has_nan();
// In order to ensure monotonicity of the computation below, we additionally
// pretend +0 is present (for simplicity on both sides).
bool maybe_minuszero = false;
if (l.has_minus_zero() && !(r.min() > 0.0)) {
maybe_minuszero = true;
l = type_t::LeastUpperBound(l, type_t::Constant(0), zone);
}
if (r.has_minus_zero() && !(l.min() > 0.0)) {
maybe_minuszero = true;
r = type_t::LeastUpperBound(r, type_t::Constant(0), zone);
}
uint32_t special_values = (maybe_nan ? type_t::kNaN : 0) |
(maybe_minuszero ? type_t::kMinusZero : 0);
// If both sides are decently small sets, we produce the product set.
auto combine = [](float_t a, float_t b) { return std::max(a, b); };
if (l.is_set() && r.is_set()) {
// TODO(nicohartmann@): There is a faster way to compute this set.
auto result = ProductSet(l, r, maybe_nan, zone, combine);
auto result = ProductSet(l, r, special_values, zone, combine);
if (!result.IsInvalid()) return result;
}
@@ -553,25 +675,32 @@ struct FloatOperationTyper {
auto min = std::max(l_min, r_min);
auto max = std::max(l_max, r_max);
return Range(min, max, maybe_nan, zone);
return Range(min, max, special_values, zone);
}
static Type Power(const type_t& l, const type_t& r, Zone* zone) {
if (l.is_only_nan() || r.is_only_nan()) return type_t::NaN();
bool maybe_nan = l.has_nan() || r.has_nan();
// a ** b produces NaN if a < 0 && b is fraction.
if (l.min() <= 0.0 && !IsIntegerSet(r)) maybe_nan = true;
// a ** b produces -0 iff a == -0 and b is odd. Checking for all the cases
// where b contains only odd integer values does not seem worth the
// additional precision we would gain here. We accept this over-approximation
// for now and could refine it whenever we see a benefit.
uint32_t special_values =
(maybe_nan ? type_t::kNaN : 0) | l.special_values();
// If both sides are decently small sets, we produce the product set.
auto combine = [](float_t a, float_t b) { return std::pow(a, b); };
if (l.is_set() && r.is_set()) {
auto result = ProductSet(l, r, maybe_nan, zone, combine);
auto result = ProductSet(l, r, special_values, zone, combine);
if (!result.IsInvalid()) return result;
}
// a ** b produces NaN if a < 0 && b is fraction
if (l.min() <= 0.0 && !IsIntegerSet(r)) maybe_nan = true;
// TODO(nicohartmann@): Maybe we can produce a more precise range here.
return type_t::Any(maybe_nan ? type_t::kNaN : 0);
return type_t::Any(special_values);
}
static Type Atan2(const type_t& l, const type_t& r, Zone* zone) {
@@ -591,7 +720,9 @@ struct FloatOperationTyper {
// There is no value for lhs that could make (lhs < -inf) true.
restrict_lhs = Type::None();
} else {
restrict_lhs = type_t::Range(-inf, next_smaller(rhs.max()), zone);
const auto max = next_smaller(rhs.max());
uint32_t sv = max >= 0 ? type_t::kMinusZero : type_t::kNoSpecialValues;
restrict_lhs = type_t::Range(-inf, max, sv, zone);
}
Type restrict_rhs;
@@ -599,7 +730,9 @@ struct FloatOperationTyper {
// There is no value for rhs that could make (inf < rhs) true.
restrict_rhs = Type::None();
} else {
restrict_rhs = type_t::Range(next_larger(lhs.min()), inf, zone);
const auto min = next_larger(lhs.min());
uint32_t sv = min <= 0 ? type_t::kMinusZero : type_t::kNoSpecialValues;
restrict_rhs = type_t::Range(min, inf, sv, zone);
}
return {restrict_lhs, restrict_rhs};
@@ -611,8 +744,14 @@ struct FloatOperationTyper {
static std::pair<Type, Type> RestrictionForLessThan_False(const type_t& lhs,
const type_t& rhs,
Zone* zone) {
return {type_t::Range(rhs.min(), inf, type_t::kNaN, zone),
type_t::Range(-inf, lhs.max(), type_t::kNaN, zone)};
uint32_t lhs_sv =
type_t::kNaN |
(rhs.min() <= 0 ? type_t::kMinusZero : type_t::kNoSpecialValues);
uint32_t rhs_sv =
type_t::kNaN |
(lhs.max() >= 0 ? type_t::kMinusZero : type_t::kNoSpecialValues);
return {type_t::Range(rhs.min(), inf, lhs_sv, zone),
type_t::Range(-inf, lhs.max(), rhs_sv, zone)};
}
// Computes the ranges to which the sides of the comparison (lhs <= rhs) can
@@ -621,8 +760,12 @@ struct FloatOperationTyper {
// be NaN.
static std::pair<Type, Type> RestrictionForLessThanOrEqual_True(
const type_t& lhs, const type_t& rhs, Zone* zone) {
return {type_t::Range(-inf, rhs.max(), zone),
type_t::Range(lhs.min(), inf, zone)};
uint32_t lhs_sv =
rhs.max() >= 0 ? type_t::kMinusZero : type_t::kNoSpecialValues;
uint32_t rhs_sv =
lhs.min() <= 0 ? type_t::kMinusZero : type_t::kNoSpecialValues;
return {type_t::Range(-inf, rhs.max(), lhs_sv, zone),
type_t::Range(lhs.min(), inf, rhs_sv, zone)};
}
// Computes the ranges to which the sides of the comparison (lhs <= rhs) can
@@ -635,8 +778,10 @@ struct FloatOperationTyper {
// The only value for lhs that could make (lhs <= inf) false is NaN.
restrict_lhs = type_t::NaN();
} else {
restrict_lhs =
type_t::Range(next_larger(rhs.min()), inf, type_t::kNaN, zone);
const auto min = next_larger(rhs.min());
uint32_t sv = type_t::kNaN |
(min <= 0 ? type_t::kMinusZero : type_t::kNoSpecialValues);
restrict_lhs = type_t::Range(min, inf, sv, zone);
}
Type restrict_rhs;
@@ -644,8 +789,10 @@ struct FloatOperationTyper {
// The only value for rhs that could make (-inf <= rhs) false is NaN.
restrict_rhs = type_t::NaN();
} else {
restrict_rhs =
type_t::Range(-inf, next_smaller(lhs.max()), type_t::kNaN, zone);
const auto max = next_smaller(lhs.max());
uint32_t sv = type_t::kNaN |
(max >= 0 ? type_t::kMinusZero : type_t::kNoSpecialValues);
restrict_rhs = type_t::Range(-inf, max, sv, zone);
}
return {restrict_lhs, restrict_rhs};
@@ -658,9 +805,11 @@ class Typer {
switch (kind) {
case ConstantOp::Kind::kFloat32:
if (std::isnan(value.float32)) return Float32Type::NaN();
if (IsMinusZero(value.float32)) return Float32Type::MinusZero();
return Float32Type::Constant(value.float32);
case ConstantOp::Kind::kFloat64:
if (std::isnan(value.float64)) return Float64Type::NaN();
if (IsMinusZero(value.float64)) return Float64Type::MinusZero();
return Float64Type::Constant(value.float64);
case ConstantOp::Kind::kWord32:
return Word32Type::Constant(static_cast<uint32_t>(value.integral));

View File

@@ -0,0 +1,90 @@
// Copyright 2022 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifndef V8_COMPILER_TURBOSHAFT_TYPED_OPTIMIZATIONS_REDUCER_H_
#define V8_COMPILER_TURBOSHAFT_TYPED_OPTIMIZATIONS_REDUCER_H_
#include "src/compiler/turboshaft/assembler.h"
#include "src/compiler/turboshaft/index.h"
#include "src/compiler/turboshaft/operations.h"
#include "src/compiler/turboshaft/uniform-reducer-adapter.h"
namespace v8::internal::compiler::turboshaft {
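// Replaces each operation whose inferred type is a single constant value
// with that constant, based on the types computed by the
// TypeInferenceReducer.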
template <typename Next>
class TypedOptimizationsReducerImpl : public Next {
public:
using Next::Asm;
template <typename... Args>
explicit TypedOptimizationsReducerImpl(const std::tuple<Args...>& args)
: Next(args), types_(Asm().output_graph().operation_types()) {}
template <Opcode opcode, typename Continuation, typename... Args>
OpIndex ReduceOperation(Args... args) {
OpIndex index = Continuation{this}.Reduce(args...);
if (!index.valid()) return index;
if constexpr (opcode == Opcode::kConstant) {
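// A ConstantOp already is the constant its type describes; re-emitting
// it would just create an identical operation, so leave it unchanged.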
return index;
} else {
Type type = GetType(index);
if (type.IsInvalid()) return index;
switch (type.kind()) {
case Type::Kind::kWord32: {
auto w32 = type.AsWord32();
if (auto c = w32.try_get_constant()) {
return Asm().Word32Constant(*c);
}
break;
}
case Type::Kind::kWord64: {
auto w64 = type.AsWord64();
if (auto c = w64.try_get_constant()) {
return Asm().Word64Constant(*c);
}
break;
}
case Type::Kind::kFloat32: {
auto f32 = type.AsFloat32();
if (f32.is_only_nan()) {
return Asm().Float32Constant(nan_v<32>);
}
if (auto c = f32.try_get_constant()) {
return Asm().Float32Constant(*c);
}
break;
}
case Type::Kind::kFloat64: {
auto f64 = type.AsFloat64();
if (f64.is_only_nan()) {
return Asm().Float64Constant(nan_v<64>);
}
if (auto c = f64.try_get_constant()) {
return Asm().Float64Constant(*c);
}
break;
}
default:
break;
}
// Keep unchanged.
return index;
}
}
Type GetType(const OpIndex index) { return types_[index]; }
private:
GrowingSidetable<Type>& types_;
};
template <typename Next>
using TypedOptimizationsReducer =
UniformReducerAdapter<TypedOptimizationsReducerImpl, Next>;
} // namespace v8::internal::compiler::turboshaft
#endif // V8_COMPILER_TURBOSHAFT_TYPED_OPTIMIZATIONS_REDUCER_H_

View File

@@ -426,9 +426,10 @@ Handle<TurboshaftType> WordType<Bits>::AllocateOnHeap(Factory* factory) const {
template <size_t Bits>
bool FloatType<Bits>::Contains(float_t value) const {
if (IsMinusZero(value)) return has_minus_zero();
if (std::isnan(value)) return has_nan();
switch (sub_kind()) {
case SubKind::kOnlyNan:
case SubKind::kOnlySpecialValues:
return false;
case SubKind::kRange: {
return range_min() <= value && value <= range_max();
@@ -447,7 +448,7 @@ bool FloatType<Bits>::Equals(const FloatType<Bits>& other) const {
if (sub_kind() != other.sub_kind()) return false;
if (special_values() != other.special_values()) return false;
switch (sub_kind()) {
case SubKind::kOnlyNan:
case SubKind::kOnlySpecialValues:
return true;
case SubKind::kRange: {
return range() == other.range();
@@ -466,10 +467,9 @@ bool FloatType<Bits>::Equals(const FloatType<Bits>& other) const {
template <size_t Bits>
bool FloatType<Bits>::IsSubtypeOf(const FloatType<Bits>& other) const {
if (has_nan() && !other.has_nan()) return false;
if (special_values() & ~other.special_values()) return false;
switch (sub_kind()) {
case SubKind::kOnlyNan:
DCHECK(other.has_nan());
case SubKind::kOnlySpecialValues:
return true;
case SubKind::kRange:
if (!other.is_range()) {
@@ -481,7 +481,7 @@ bool FloatType<Bits>::IsSubtypeOf(const FloatType<Bits>& other) const {
range_max() <= other.range_max();
case SubKind::kSet: {
switch (other.sub_kind()) {
case SubKind::kOnlyNan:
case SubKind::kOnlySpecialValues:
return false;
case SubKind::kRange:
return other.range_min() <= min() && max() <= other.range_max();
@@ -500,22 +500,20 @@ template <size_t Bits>
FloatType<Bits> FloatType<Bits>::LeastUpperBound(const FloatType<Bits>& lhs,
const FloatType<Bits>& rhs,
Zone* zone) {
uint32_t special_values =
(lhs.has_nan() || rhs.has_nan()) ? Special::kNaN : 0;
uint32_t special_values = lhs.special_values() | rhs.special_values();
if (lhs.is_any() || rhs.is_any()) {
return Any(special_values);
}
const bool lhs_finite = lhs.is_set() || lhs.is_only_nan();
const bool rhs_finite = rhs.is_set() || rhs.is_only_nan();
const bool lhs_finite = lhs.is_set() || lhs.is_only_special_values();
const bool rhs_finite = rhs.is_set() || rhs.is_only_special_values();
if (lhs_finite && rhs_finite) {
base::SmallVector<float_t, kMaxSetSize * 2> result_elements;
if (lhs.is_set()) base::vector_append(result_elements, lhs.set_elements());
if (rhs.is_set()) base::vector_append(result_elements, rhs.set_elements());
if (result_elements.empty()) {
DCHECK_EQ(special_values, Special::kNaN);
return NaN();
return OnlySpecialValues(special_values);
}
base::sort(result_elements);
auto it = std::unique(result_elements.begin(), result_elements.end());
@@ -538,18 +536,17 @@ template <size_t Bits>
Type FloatType<Bits>::Intersect(const FloatType<Bits>& lhs,
const FloatType<Bits>& rhs, Zone* zone) {
auto UpdateSpecials = [](const FloatType& t, uint32_t special_values) {
if (t.special_values() == special_values) return t;
auto result = t;
result.bitfield_ = special_values;
DCHECK_EQ(result.bitfield_, result.special_values());
return result;
};
const bool has_nan = lhs.has_nan() && rhs.has_nan();
if (lhs.is_any()) return UpdateSpecials(rhs, has_nan ? kNaN : 0);
if (rhs.is_any()) return UpdateSpecials(lhs, has_nan ? kNaN : 0);
if (lhs.is_only_nan() || rhs.is_only_nan()) {
return has_nan ? NaN() : Type::None();
const uint32_t special_values = lhs.special_values() & rhs.special_values();
if (lhs.is_any()) return UpdateSpecials(rhs, special_values);
if (rhs.is_any()) return UpdateSpecials(lhs, special_values);
if (lhs.is_only_special_values() || rhs.is_only_special_values()) {
return special_values ? OnlySpecialValues(special_values) : Type::None();
}
if (lhs.is_set() || rhs.is_set()) {
@@ -561,34 +558,43 @@ Type FloatType<Bits>::Intersect(const FloatType<Bits>& lhs,
if (y.Contains(element)) result_elements.push_back(element);
}
if (result_elements.empty()) {
return has_nan ? NaN() : Type::None();
return special_values ? OnlySpecialValues(special_values) : Type::None();
}
DCHECK(detail::is_unique_and_sorted(result_elements));
return Set(result_elements, has_nan ? kNaN : 0, zone);
return Set(result_elements, special_values, zone);
}
DCHECK(lhs.is_range() && rhs.is_range());
const float_t result_min = std::max(lhs.min(), rhs.min());
const float_t result_max = std::min(lhs.max(), rhs.max());
if (result_min < result_max) {
return Range(result_min, result_max, has_nan ? kNaN : kNoSpecialValues,
zone);
return Range(result_min, result_max, special_values, zone);
} else if (result_min == result_max) {
return Set({result_min}, has_nan ? kNaN : 0, zone);
return Set({result_min}, special_values, zone);
}
return has_nan ? NaN() : Type::None();
return special_values ? OnlySpecialValues(special_values) : Type::None();
}
template <size_t Bits>
void FloatType<Bits>::PrintTo(std::ostream& stream) const {
auto PrintSpecials = [this](auto& stream) {
if (has_nan()) {
stream << "NaN" << (has_minus_zero() ? "|MinusZero" : "");
} else {
DCHECK(has_minus_zero());
stream << "MinusZero";
}
};
stream << (Bits == 32 ? "Float32" : "Float64");
switch (sub_kind()) {
case SubKind::kOnlyNan:
stream << "NaN";
case SubKind::kOnlySpecialValues:
PrintSpecials(stream);
break;
case SubKind::kRange:
stream << "[" << range_min() << ", " << range_max()
<< (has_nan() ? "]+NaN" : "]");
stream << "[" << range_min() << ", " << range_max() << "]";
if (has_special_values()) {
stream << "|";
PrintSpecials(stream);
}
break;
case SubKind::kSet:
stream << "{";
@@ -596,7 +602,12 @@ void FloatType<Bits>::PrintTo(std::ostream& stream) const {
if (i != 0) stream << ", ";
stream << set_element(i);
}
stream << (has_nan() ? "}+NaN" : "}");
if (has_special_values()) {
stream << "}|";
PrintSpecials(stream);
} else {
stream << "}";
}
break;
}
}
@@ -608,16 +619,16 @@ Handle<TurboshaftType> FloatType<Bits>::AllocateOnHeap(Factory* factory) const {
if (is_only_nan()) {
min = std::numeric_limits<float_t>::infinity();
max = -std::numeric_limits<float_t>::infinity();
return factory->NewTurboshaftFloat64RangeType(1, padding, min, max,
AllocationType::kYoung);
return factory->NewTurboshaftFloat64RangeType(
special_values(), padding, min, max, AllocationType::kYoung);
} else if (is_range()) {
std::tie(min, max) = minmax();
return factory->NewTurboshaftFloat64RangeType(
has_nan() ? 1 : 0, padding, min, max, AllocationType::kYoung);
special_values(), padding, min, max, AllocationType::kYoung);
} else {
DCHECK(is_set());
auto result = factory->NewTurboshaftFloat64SetType(
has_nan() ? 1 : 0, set_size(), AllocationType::kYoung);
special_values(), set_size(), AllocationType::kYoung);
for (int i = 0; i < set_size(); ++i) {
result->set_elements(i, set_element(i));
}


@ -14,6 +14,7 @@
#include "src/base/small-vector.h"
#include "src/common/globals.h"
#include "src/compiler/turboshaft/fast-hash.h"
#include "src/numbers/conversions.h"
#include "src/objects/turboshaft-types.h"
#include "src/utils/ostreams.h"
#include "src/zone/zone-containers.h"
@ -37,6 +38,11 @@ inline bool is_unique_and_sorted(const T& container) {
return true;
}
template <typename T>
inline bool is_float_special_value(T value) {
return std::isnan(value) || IsMinusZero(value);
}
template <size_t Bits>
struct TypeForBits;
template <>
@ -439,7 +445,7 @@ class EXPORT_TEMPLATE_DECLARE(V8_EXPORT_PRIVATE) FloatType : public Type {
enum class SubKind : uint8_t {
kRange,
kSet,
kOnlyNan,
kOnlySpecialValues,
};
public:
@ -450,13 +456,25 @@ class EXPORT_TEMPLATE_DECLARE(V8_EXPORT_PRIVATE) FloatType : public Type {
enum Special : uint32_t {
kNoSpecialValues = 0x0,
kNaN = 0x1,
kMinusZero = 0x2,
};
// Constructors
static FloatType NaN() {
return FloatType{SubKind::kOnlyNan, 0, Special::kNaN, Payload_OnlyNan{}};
static FloatType OnlySpecialValues(uint32_t special_values) {
DCHECK_NE(0, special_values);
return FloatType{SubKind::kOnlySpecialValues, 0, special_values,
Payload_OnlySpecial{}};
}
static FloatType Any(uint32_t special_values = Special::kNaN) {
static FloatType NaN() {
return FloatType{SubKind::kOnlySpecialValues, 0, Special::kNaN,
Payload_OnlySpecial{}};
}
static FloatType MinusZero() {
return FloatType{SubKind::kOnlySpecialValues, 0, Special::kMinusZero,
Payload_OnlySpecial{}};
}
static FloatType Any(uint32_t special_values = Special::kNaN |
Special::kMinusZero) {
return FloatType::Range(-std::numeric_limits<float_t>::infinity(),
std::numeric_limits<float_t>::infinity(),
special_values, nullptr);
@ -466,8 +484,8 @@ class EXPORT_TEMPLATE_DECLARE(V8_EXPORT_PRIVATE) FloatType : public Type {
}
static FloatType Range(float_t min, float_t max, uint32_t special_values,
Zone* zone) {
DCHECK(!std::isnan(min));
DCHECK(!std::isnan(max));
DCHECK(!detail::is_float_special_value(min));
DCHECK(!detail::is_float_special_value(max));
DCHECK_LE(min, max);
if (min == max) return Set({min}, zone);
return FloatType{SubKind::kRange, 0, special_values,
@ -501,7 +519,8 @@ class EXPORT_TEMPLATE_DECLARE(V8_EXPORT_PRIVATE) FloatType : public Type {
uint32_t special_values, Zone* zone) {
DCHECK(detail::is_unique_and_sorted(elements));
// NaN and -0 should be passed via {special_values} rather than {elements}.
DCHECK(base::none_of(elements, [](float_t f) { return std::isnan(f); }));
DCHECK(base::none_of(
elements, [](float_t f) { return detail::is_float_special_value(f); }));
DCHECK_IMPLIES(elements.size() > kMaxInlineSetSize, zone != nullptr);
DCHECK_GT(elements.size(), 0);
DCHECK_LE(elements.size(), kMaxSetSize);
@ -529,9 +548,12 @@ class EXPORT_TEMPLATE_DECLARE(V8_EXPORT_PRIVATE) FloatType : public Type {
}
// Checks
bool is_only_nan() const {
DCHECK_IMPLIES(sub_kind() == SubKind::kOnlyNan, has_nan());
return sub_kind() == SubKind::kOnlyNan;
bool is_only_special_values() const {
return sub_kind() == SubKind::kOnlySpecialValues;
}
bool is_only_nan() const { return is_only_special_values() && has_nan(); }
bool is_only_minus_zero() const {
return is_only_special_values() && has_minus_zero();
}
bool is_range() const { return sub_kind() == SubKind::kRange; }
bool is_set() const { return sub_kind() == SubKind::kSet; }
@ -542,10 +564,14 @@ class EXPORT_TEMPLATE_DECLARE(V8_EXPORT_PRIVATE) FloatType : public Type {
}
bool is_constant() const {
DCHECK_EQ(set_size_ > 0, is_set());
return set_size_ == 1 && !has_nan();
return set_size_ == 1 && !has_special_values();
}
uint32_t special_values() const { return bitfield_; }
bool has_special_values() const { return special_values() != 0; }
bool has_nan() const { return (special_values() & Special::kNaN) != 0; }
bool has_minus_zero() const {
return (special_values() & Special::kMinusZero) != 0;
}
// Accessors
float_t range_min() const {
@ -582,7 +608,9 @@ class EXPORT_TEMPLATE_DECLARE(V8_EXPORT_PRIVATE) FloatType : public Type {
}
float_t min() const {
switch (sub_kind()) {
case SubKind::kOnlyNan:
case SubKind::kOnlySpecialValues:
if (has_minus_zero()) return float_t{-0.0};
DCHECK(is_only_nan());
return nan_v<Bits>;
case SubKind::kRange:
return range_min();
@ -592,7 +620,9 @@ class EXPORT_TEMPLATE_DECLARE(V8_EXPORT_PRIVATE) FloatType : public Type {
}
float_t max() const {
switch (sub_kind()) {
case SubKind::kOnlyNan:
case SubKind::kOnlySpecialValues:
if (has_minus_zero()) return float_t{-0.0};
DCHECK(is_only_nan());
return nan_v<Bits>;
case SubKind::kRange:
return range_max();
@ -624,14 +654,14 @@ class EXPORT_TEMPLATE_DECLARE(V8_EXPORT_PRIVATE) FloatType : public Type {
using Payload_Range = detail::Payload_Range<float_t>;
using Payload_InlineSet = detail::Payload_InlineSet<float_t>;
using Payload_OutlineSet = detail::Payload_OutlineSet<float_t>;
using Payload_OnlyNan = detail::Payload_Empty;
using Payload_OnlySpecial = detail::Payload_Empty;
template <typename Payload>
FloatType(SubKind sub_kind, uint8_t set_size, uint32_t special_values,
const Payload& payload)
: Type(KIND, static_cast<uint8_t>(sub_kind), set_size, special_values, 0,
payload) {
DCHECK_EQ(special_values & ~Special::kNaN, 0);
DCHECK_EQ(special_values & ~(Special::kNaN | Special::kMinusZero), 0);
}
};
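
A hedged usage sketch of the new surface (names as declared above; `zone` assumed available; not part of the diff):

  Float64Type nan = Float64Type::NaN();        // prints as Float64NaN
  Float64Type mz = Float64Type::MinusZero();   // prints as Float64MinusZero
  Float64Type any = Float64Type::Any();        // now includes NaN and -0
  Float64Type r =
      Float64Type::Range(0.0, 4.0, Float64Type::kMinusZero, zone);
  // -0 travels in the special-value bits, not in the range endpoints:
  CHECK(mz.IsSubtypeOf(r));
  CHECK(!nan.IsSubtypeOf(r));  // r does not carry the kNaN bit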


@ -0,0 +1,73 @@
// Copyright 2022 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifndef V8_COMPILER_TURBOSHAFT_UNIFORM_REDUCER_ADAPTER_H_
#define V8_COMPILER_TURBOSHAFT_UNIFORM_REDUCER_ADAPTER_H_
#include "src/compiler/turboshaft/operations.h"
namespace v8::internal::compiler::turboshaft {
// UniformReducerAdapter allows handling all operations uniformly during a
// reduction by wiring all ReduceXyz calls through a single ReduceOperation
// method. Use it like this (MyReducer can then be used in a ReducerStack
// like any other reducer):
//
// template <typename Next>
// class MyReducerImpl : public Next {
// public:
// using Next::Asm;
// template <typename... Args>
// explicit MyReducerImpl(const std::tuple<Args...>& args)
// : Next(args) { /* ... */ }
//
// template <Opcode opcode, typename Continuation, typename... Args>
// OpIndex ReduceOperation(Args... args) {
//
// /* ... */
//
// // Forward to Next reducer.
// OpIndex index = Continuation{this}.Reduce(args...);
//
// /* ... */
//
// return index;
// }
//
// private:
// /* ... */
// };
//
// template <typename Next>
// using MyReducer = UniformReducerAdapter<MyReducerImpl, Next>;
//
template <template <typename> typename Impl, typename Next>
class UniformReducerAdapter : public Impl<Next> {
public:
template <typename... Args>
explicit UniformReducerAdapter(const std::tuple<Args...>& args)
: Impl<Next>(args) {}
#define REDUCE(op) \
struct Reduce##op##Continuation final { \
explicit Reduce##op##Continuation(Next* _this) : this_(_this) {} \
template <typename... Args> \
OpIndex Reduce(Args... args) const { \
return this_->Reduce##op(args...); \
} \
Next* this_; \
}; \
template <typename... Args> \
OpIndex Reduce##op(Args... args) { \
return Impl<Next>::template ReduceOperation<Opcode::k##op, \
Reduce##op##Continuation>( \
args...); \
}
TURBOSHAFT_OPERATION_LIST(REDUCE)
#undef REDUCE
};
} // namespace v8::internal::compiler::turboshaft
#endif // V8_COMPILER_TURBOSHAFT_UNIFORM_REDUCER_ADAPTER_H_
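
A concrete instantiation of the pattern described in the header comment: a hypothetical reducer (OpCounterImpl/OpCounter are illustrative names, not part of this CL) that counts every operation flowing through the stack:

  template <typename Next>
  class OpCounterImpl : public Next {
   public:
    using Next::Asm;
    template <typename... Args>
    explicit OpCounterImpl(const std::tuple<Args...>& args) : Next(args) {}

    template <Opcode opcode, typename Continuation, typename... Args>
    OpIndex ReduceOperation(Args... args) {
      ++count_;  // Runs uniformly for every ReduceXyz call.
      return Continuation{this}.Reduce(args...);  // Forward to Next reducer.
    }

    size_t count() const { return count_; }

   private:
    size_t count_ = 0;
  };

  template <typename Next>
  using OpCounter = UniformReducerAdapter<OpCounterImpl, Next>;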


@ -374,8 +374,10 @@ class RuntimeCallTimer final {
ADD_THREAD_SPECIFIC_COUNTER(V, Optimize, TraceScheduleAndVerify) \
ADD_THREAD_SPECIFIC_COUNTER(V, Optimize, BuildTurboshaft) \
ADD_THREAD_SPECIFIC_COUNTER(V, Optimize, OptimizeTurboshaft) \
ADD_THREAD_SPECIFIC_COUNTER(V, Optimize, TurboshaftDeadCodeElimination) \
ADD_THREAD_SPECIFIC_COUNTER(V, Optimize, TurboshaftRecreateSchedule) \
ADD_THREAD_SPECIFIC_COUNTER(V, Optimize, TurboshaftTypeInference) \
ADD_THREAD_SPECIFIC_COUNTER(V, Optimize, TurboshaftTypeAssertions) \
ADD_THREAD_SPECIFIC_COUNTER(V, Optimize, TurboshaftTypedOptimizations) \
ADD_THREAD_SPECIFIC_COUNTER(V, Optimize, TypeAssertions) \
ADD_THREAD_SPECIFIC_COUNTER(V, Optimize, TypedLowering) \
ADD_THREAD_SPECIFIC_COUNTER(V, Optimize, Typer) \


@ -16,6 +16,11 @@ namespace v8::internal {
#include "torque-generated/src/objects/turboshaft-types-tq.inc"
class TurboshaftFloatSpecialValues {
public:
DEFINE_TORQUE_GENERATED_TURBOSHAFT_FLOAT_SPECIAL_VALUES()
};
class TurboshaftType
: public TorqueGeneratedTurboshaftType<TurboshaftType, HeapObject> {
public:


@ -4,6 +4,12 @@
#include "src/objects/turboshaft-types.h"
bitfield struct TurboshaftFloatSpecialValues extends uint32 {
nan: bool: 1 bit;
minus_zero: bool: 1 bit;
_unused: uint32: 30 bit;
}
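
The bit layout implied by this declaration lines up with FloatType's Special enum, since the factories store special_values() into this field directly (illustrative mapping, not spelled out in the CL):

  // bit 0: nan         == FloatType::kNaN        (0x1)
  // bit 1: minus_zero  == FloatType::kMinusZero  (0x2)
  // bits 2..31: unused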
@abstract
extern class TurboshaftType extends HeapObject {
}
@ -59,7 +65,7 @@ extern class TurboshaftWord64SetType extends TurboshaftWord64Type {
@generateUniqueMap
@generateFactoryFunction
extern class TurboshaftFloat64Type extends TurboshaftType {
has_nan: uint32;
special_values: TurboshaftFloatSpecialValues;
}
@generateBodyDescriptor
@ -145,7 +151,8 @@ macro TestTurboshaftWord64Type(
macro TestTurboshaftFloat64Type(
value: float64, expected: TurboshaftFloat64Type): bool {
if (Float64IsNaN(value)) return expected.has_nan != 0;
if (Float64IsNaN(value)) return expected.special_values.nan;
if (IsMinusZero(value)) return expected.special_values.minus_zero;
typeswitch (expected) {
case (range: TurboshaftFloat64RangeType): {
return range.min <= value && value <= range.max;


@ -13,6 +13,13 @@
function use() {}
%NeverOptimizeFunction(use);
function constants() {
use(%CheckTypeOf(3, "Word64{6}")); // smi-tagged value 3 in 64 bit register
// Cannot check this currently, because NumberConstants are not yet supported
// in the typer.
// use(%CheckTypeOf(5.5, "Float64{5.5}"));
}
function add1(x) {
let a = x ? 3 : 7;
let r = -1;
@ -50,6 +57,7 @@ function div2(x) {
let result = r - 0.5;
return %CheckTypeOf(result, "Float64[2.49999,2.50001]");
}
*/
//function min2(x) {
// let a = x ? 3.3 : 6.6;
@ -69,7 +77,38 @@ function div2(x) {
// return %CheckTypeOf(result, "Float64{6}");
//}
let targets = [ constants, add1, add2, mul2, div2, min2, max2 ];
function add_dce(x) {
let a = x ? 3 : 7;
let r = -1;
if (a < 5) r = a + 2;
else r = a - 2;
let result = r + 1;
return result;
}
function loop_dce(x) {
let limit = x ? 50 : 100;
let sum = 0;
for(let i = 1; i <= limit; ++i) {
sum += i;
}
let a = sum > 5000 ? 3 : 7;
let r = -1;
if(a < 5) r = a + 2;
else r = a - 2;
let result = r + 1;
return result;
// TODO(nicohartmann@): DCE should support merging identical return blocks.
// if(sum > 5000) {
// return true;
// } else {
// return true;
// }
}
//let targets = [ constants, add1, add2, mul2, div2, /*min2, max2*/ ];
let targets = [ add_dce, loop_dce ];
for(let f of targets) {
%PrepareFunctionForOptimization(f);
f(true);
@ -77,4 +116,3 @@ for(let f of targets) {
%OptimizeFunctionOnNextCall(f);
f(true);
}
*/


@ -0,0 +1,39 @@
// Copyright 2022 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
//
// Flags: --turboshaft --allow-natives-syntax
function add1(x) {
let a = x ? 3 : 7; // a = {3, 7}
let r = -1; // r = {-1}
if (a < 5) // then: a = {3}
r = a + 2; // r = {5}
else // else: a = {7}
r = a - 2; // r = {5}
const result = r + 1; // result = {6}
// TODO(nicohartmann@): When we have a platform independent way to do that,
// add a %CheckTurboshaftTypeOf to verify the type.
return result;
}
function loop1(x) {
let result = 0;
for(let i = 0; i < 10; ++i) {
result += i;
}
// TODO(nicohartmann@): When we have a platform independent way to do that,
// add a %CheckTurboshaftTypeOf to verify the type.
return result;
}
let targets = [ add1, loop1 ];
for(let f of targets) {
%PrepareFunctionForOptimization(f);
const expected_true = f(true);
const expected_false = f(false);
%OptimizeFunctionOnNextCall(f);
assertEquals(expected_true, f(true));
assertEquals(expected_false, f(false));
}


@ -271,15 +271,16 @@ TEST_F(TurboshaftTypesTest, Float32) {
std::numeric_limits<Float32Type::float_t>::max() * 0.99f;
const auto inf = std::numeric_limits<Float32Type::float_t>::infinity();
const auto kNaN = Float32Type::kNaN;
const auto kMinusZero = Float32Type::kMinusZero;
const auto kNoSpecialValues = Float32Type::kNoSpecialValues;
// Complete range (with NaN)
for (bool with_nan : {false, true}) {
uint32_t sv = with_nan ? kNaN : kNoSpecialValues;
Float32Type t = Float32Type::Any(kNaN);
uint32_t sv = kMinusZero | (with_nan ? kNaN : kNoSpecialValues);
Float32Type t = Float32Type::Any(kNaN | kMinusZero);
EXPECT_TRUE(Float32Type::NaN().IsSubtypeOf(t));
EXPECT_TRUE(Float32Type::Constant(0.0f).IsSubtypeOf(t));
EXPECT_TRUE(Float32Type::Constant(-0.0f).IsSubtypeOf(t));
EXPECT_TRUE(Float32Type::MinusZero().IsSubtypeOf(t));
EXPECT_TRUE(Float32Type::Constant(391.113f).IsSubtypeOf(t));
EXPECT_TRUE(Float32Type::Set({0.13f, 91.0f}, sv, zone()).IsSubtypeOf(t));
EXPECT_TRUE(
@ -294,11 +295,11 @@ TEST_F(TurboshaftTypesTest, Float32) {
// Complete range (without NaN)
for (bool with_nan : {false, true}) {
uint32_t sv = with_nan ? kNaN : kNoSpecialValues;
Float32Type t = Float32Type::Any(kNoSpecialValues);
uint32_t sv = kMinusZero | (with_nan ? kNaN : kNoSpecialValues);
Float32Type t = Float32Type::Any(kMinusZero);
EXPECT_TRUE(!Float32Type::NaN().IsSubtypeOf(t));
EXPECT_TRUE(Float32Type::Constant(0.0f).IsSubtypeOf(t));
EXPECT_TRUE(Float32Type::Constant(-0.0f).IsSubtypeOf(t));
EXPECT_TRUE(Float32Type::MinusZero().IsSubtypeOf(t));
EXPECT_TRUE(Float32Type::Constant(391.113f).IsSubtypeOf(t));
EXPECT_EQ(!with_nan,
Float32Type::Set({0.13f, 91.0f}, sv, zone()).IsSubtypeOf(t));
@ -319,18 +320,19 @@ TEST_F(TurboshaftTypesTest, Float32) {
// Range (with NaN)
for (bool with_nan : {false, true}) {
uint32_t sv = with_nan ? kNaN : kNoSpecialValues;
Float32Type t = Float32Type::Range(-1.0f, 3.14159f, kNaN, zone());
uint32_t sv = kMinusZero | (with_nan ? kNaN : kNoSpecialValues);
Float32Type t =
Float32Type::Range(-1.0f, 3.14159f, kNaN | kMinusZero, zone());
EXPECT_TRUE(Float32Type::NaN().IsSubtypeOf(t));
EXPECT_TRUE(!Float32Type::Constant(-100.0f).IsSubtypeOf(t));
EXPECT_TRUE(!Float32Type::Constant(-1.01f).IsSubtypeOf(t));
EXPECT_TRUE(Float32Type::Constant(-1.0f).IsSubtypeOf(t));
EXPECT_TRUE(Float32Type::Constant(-0.99f).IsSubtypeOf(t));
EXPECT_TRUE(Float32Type::Constant(-0.0f).IsSubtypeOf(t));
EXPECT_TRUE(Float32Type::MinusZero().IsSubtypeOf(t));
EXPECT_TRUE(Float32Type::Constant(0.0f).IsSubtypeOf(t));
EXPECT_TRUE(Float32Type::Constant(3.14159f).IsSubtypeOf(t));
EXPECT_TRUE(!Float32Type::Constant(3.15f).IsSubtypeOf(t));
EXPECT_TRUE(Float32Type::Set({-0.5f, -0.0f}, sv, zone()).IsSubtypeOf(t));
EXPECT_TRUE(Float32Type::Set({-0.5f}, sv, zone()).IsSubtypeOf(t));
EXPECT_TRUE(!Float32Type::Set({-1.1f, 1.5f}, sv, zone()).IsSubtypeOf(t));
EXPECT_TRUE(Float32Type::Set({-0.9f, 1.88f}, sv, zone()).IsSubtypeOf(t));
EXPECT_TRUE(!Float32Type::Set({0.0f, 3.142f}, sv, zone()).IsSubtypeOf(t));
@ -347,20 +349,18 @@ TEST_F(TurboshaftTypesTest, Float32) {
// Range (without NaN)
for (bool with_nan : {false, true}) {
uint32_t sv = with_nan ? kNaN : kNoSpecialValues;
Float32Type t =
Float32Type::Range(-1.0f, 3.14159f, kNoSpecialValues, zone());
uint32_t sv = kMinusZero | (with_nan ? kNaN : kNoSpecialValues);
Float32Type t = Float32Type::Range(-1.0f, 3.14159f, kMinusZero, zone());
EXPECT_TRUE(!Float32Type::NaN().IsSubtypeOf(t));
EXPECT_TRUE(!Float32Type::Constant(-100.0f).IsSubtypeOf(t));
EXPECT_TRUE(!Float32Type::Constant(-1.01f).IsSubtypeOf(t));
EXPECT_TRUE(Float32Type::Constant(-1.0f).IsSubtypeOf(t));
EXPECT_TRUE(Float32Type::Constant(-0.99f).IsSubtypeOf(t));
EXPECT_TRUE(Float32Type::Constant(-0.0f).IsSubtypeOf(t));
EXPECT_TRUE(Float32Type::MinusZero().IsSubtypeOf(t));
EXPECT_TRUE(Float32Type::Constant(0.0f).IsSubtypeOf(t));
EXPECT_TRUE(Float32Type::Constant(3.14159f).IsSubtypeOf(t));
EXPECT_TRUE(!Float32Type::Constant(3.15f).IsSubtypeOf(t));
EXPECT_EQ(!with_nan,
Float32Type::Set({-0.5f, -0.0f}, sv, zone()).IsSubtypeOf(t));
EXPECT_EQ(!with_nan, Float32Type::Set({-0.5f}, sv, zone()).IsSubtypeOf(t));
EXPECT_TRUE(!Float32Type::Set({-1.1f, 1.5f}, sv, zone()).IsSubtypeOf(t));
EXPECT_EQ(!with_nan,
Float32Type::Set({-0.9f, 1.88f}, sv, zone()).IsSubtypeOf(t));
@ -434,14 +434,15 @@ TEST_F(TurboshaftTypesTest, Float32) {
// -0.0f corner cases
{
EXPECT_TRUE(Float32Type::Constant(-0.0f).IsSubtypeOf(
EXPECT_TRUE(!Float32Type::MinusZero().IsSubtypeOf(
Float32Type::Set({0.0f, 1.0f}, zone())));
EXPECT_TRUE(
Float32Type::Constant(0.0f).IsSubtypeOf(Float32Type::Constant(-0.0f)));
EXPECT_TRUE(Float32Type::Set({-0.0f, 3.2f}, zone())
.IsSubtypeOf(Float32Type::Range(0.0f, 4.0f, zone())));
EXPECT_TRUE(Float32Type::Set({-1.0f, 0.0f}, zone())
.IsSubtypeOf(Float32Type::Range(-inf, -0.0f, zone())));
!Float32Type::Constant(0.0f).IsSubtypeOf(Float32Type::MinusZero()));
EXPECT_TRUE(
Float32Type::Set({3.2f}, kMinusZero, zone())
.IsSubtypeOf(Float32Type::Range(0.0f, 4.0f, kMinusZero, zone())));
EXPECT_TRUE(!Float32Type::Set({-1.0f, 0.0f}, kMinusZero, zone())
.IsSubtypeOf(Float32Type::Range(-inf, 0.0f, zone())));
}
}
@ -450,15 +451,16 @@ TEST_F(TurboshaftTypesTest, Float64) {
std::numeric_limits<Float64Type::float_t>::max() * 0.99;
const auto inf = std::numeric_limits<Float64Type::float_t>::infinity();
const auto kNaN = Float64Type::kNaN;
const auto kMinusZero = Float64Type::kMinusZero;
const auto kNoSpecialValues = Float64Type::kNoSpecialValues;
// Complete range (with NaN)
for (bool with_nan : {false, true}) {
uint32_t sv = with_nan ? kNaN : kNoSpecialValues;
Float64Type t = Float64Type::Any(kNaN);
uint32_t sv = kMinusZero | (with_nan ? kNaN : kNoSpecialValues);
Float64Type t = Float64Type::Any(kNaN | kMinusZero);
EXPECT_TRUE(Float64Type::NaN().IsSubtypeOf(t));
EXPECT_TRUE(Float64Type::Constant(0.0).IsSubtypeOf(t));
EXPECT_TRUE(Float64Type::Constant(-0.0).IsSubtypeOf(t));
EXPECT_TRUE(Float64Type::MinusZero().IsSubtypeOf(t));
EXPECT_TRUE(Float64Type::Constant(391.113).IsSubtypeOf(t));
EXPECT_TRUE(Float64Type::Set({0.13, 91.0}, sv, zone()).IsSubtypeOf(t));
EXPECT_TRUE(
@ -473,11 +475,11 @@ TEST_F(TurboshaftTypesTest, Float64) {
// Complete range (without NaN)
for (bool with_nan : {false, true}) {
uint32_t sv = with_nan ? kNaN : kNoSpecialValues;
Float64Type t = Float64Type::Any(kNoSpecialValues);
uint32_t sv = kMinusZero | (with_nan ? kNaN : kNoSpecialValues);
Float64Type t = Float64Type::Any(kMinusZero);
EXPECT_TRUE(!Float64Type::NaN().IsSubtypeOf(t));
EXPECT_TRUE(Float64Type::Constant(0.0).IsSubtypeOf(t));
EXPECT_TRUE(Float64Type::Constant(-0.0).IsSubtypeOf(t));
EXPECT_TRUE(Float64Type::MinusZero().IsSubtypeOf(t));
EXPECT_TRUE(Float64Type::Constant(391.113).IsSubtypeOf(t));
EXPECT_EQ(!with_nan,
Float64Type::Set({0.13, 91.0}, sv, zone()).IsSubtypeOf(t));
@ -498,18 +500,19 @@ TEST_F(TurboshaftTypesTest, Float64) {
// Range (with NaN)
for (bool with_nan : {false, true}) {
uint32_t sv = with_nan ? kNaN : kNoSpecialValues;
Float64Type t = Float64Type::Range(-1.0, 3.14159, kNaN, zone());
uint32_t sv = kMinusZero | (with_nan ? kNaN : kNoSpecialValues);
Float64Type t =
Float64Type::Range(-1.0, 3.14159, kNaN | kMinusZero, zone());
EXPECT_TRUE(Float64Type::NaN().IsSubtypeOf(t));
EXPECT_TRUE(!Float64Type::Constant(-100.0).IsSubtypeOf(t));
EXPECT_TRUE(!Float64Type::Constant(-1.01).IsSubtypeOf(t));
EXPECT_TRUE(Float64Type::Constant(-1.0).IsSubtypeOf(t));
EXPECT_TRUE(Float64Type::Constant(-0.99).IsSubtypeOf(t));
EXPECT_TRUE(Float64Type::Constant(-0.0).IsSubtypeOf(t));
EXPECT_TRUE(Float64Type::MinusZero().IsSubtypeOf(t));
EXPECT_TRUE(Float64Type::Constant(0.0).IsSubtypeOf(t));
EXPECT_TRUE(Float64Type::Constant(3.14159).IsSubtypeOf(t));
EXPECT_TRUE(!Float64Type::Constant(3.15).IsSubtypeOf(t));
EXPECT_TRUE(Float64Type::Set({-0.5, -0.0}, sv, zone()).IsSubtypeOf(t));
EXPECT_TRUE(Float64Type::Set({-0.5}, sv, zone()).IsSubtypeOf(t));
EXPECT_TRUE(!Float64Type::Set({-1.1, 1.5}, sv, zone()).IsSubtypeOf(t));
EXPECT_TRUE(Float64Type::Set({-0.9, 1.88}, sv, zone()).IsSubtypeOf(t));
EXPECT_TRUE(!Float64Type::Set({0.0, 3.142}, sv, zone()).IsSubtypeOf(t));
@ -526,19 +529,18 @@ TEST_F(TurboshaftTypesTest, Float64) {
// Range (without NaN)
for (bool with_nan : {false, true}) {
uint32_t sv = with_nan ? kNaN : kNoSpecialValues;
Float64Type t = Float64Type::Range(-1.0, 3.14159, kNoSpecialValues, zone());
uint32_t sv = kMinusZero | (with_nan ? kNaN : kNoSpecialValues);
Float64Type t = Float64Type::Range(-1.0, 3.14159, kMinusZero, zone());
EXPECT_TRUE(!Float64Type::NaN().IsSubtypeOf(t));
EXPECT_TRUE(!Float64Type::Constant(-100.0).IsSubtypeOf(t));
EXPECT_TRUE(!Float64Type::Constant(-1.01).IsSubtypeOf(t));
EXPECT_TRUE(Float64Type::Constant(-1.0).IsSubtypeOf(t));
EXPECT_TRUE(Float64Type::Constant(-0.99).IsSubtypeOf(t));
EXPECT_TRUE(Float64Type::Constant(-0.0).IsSubtypeOf(t));
EXPECT_TRUE(Float64Type::MinusZero().IsSubtypeOf(t));
EXPECT_TRUE(Float64Type::Constant(0.0).IsSubtypeOf(t));
EXPECT_TRUE(Float64Type::Constant(3.14159).IsSubtypeOf(t));
EXPECT_TRUE(!Float64Type::Constant(3.15).IsSubtypeOf(t));
EXPECT_EQ(!with_nan,
Float64Type::Set({-0.5, -0.0}, sv, zone()).IsSubtypeOf(t));
EXPECT_EQ(!with_nan, Float64Type::Set({-0.5}, sv, zone()).IsSubtypeOf(t));
EXPECT_TRUE(!Float64Type::Set({-1.1, 1.5}, sv, zone()).IsSubtypeOf(t));
EXPECT_EQ(!with_nan,
Float64Type::Set({-0.9, 1.88}, sv, zone()).IsSubtypeOf(t));
@ -607,16 +609,18 @@ TEST_F(TurboshaftTypesTest, Float64) {
EXPECT_TRUE(t.IsSubtypeOf(t));
}
// -0.0f corner cases
// -0.0 corner cases
{
EXPECT_TRUE(Float64Type::Constant(-0.0).IsSubtypeOf(
EXPECT_TRUE(!Float64Type::MinusZero().IsSubtypeOf(
Float64Type::Set({0.0, 1.0}, zone())));
EXPECT_TRUE(
Float64Type::Constant(0.0).IsSubtypeOf(Float64Type::Constant(-0.0)));
EXPECT_TRUE(Float64Type::Set({-0.0, 3.2}, zone())
.IsSubtypeOf(Float64Type::Range(0.0, 4.0, zone())));
EXPECT_TRUE(Float64Type::Set({-1.0, 0.0}, zone())
.IsSubtypeOf(Float64Type::Range(-inf, -0.0, zone())));
!Float64Type::Constant(0.0).IsSubtypeOf(Float64Type::MinusZero()));
EXPECT_TRUE(
Float64Type::Set({3.2}, kMinusZero, zone())
.IsSubtypeOf(Float64Type::Range(0.0, 4.0, kMinusZero, zone())));
EXPECT_TRUE(
Float64Type::Set({0.0}, kMinusZero, zone())
.IsSubtypeOf(Float64Type::Range(-inf, 0.0, kMinusZero, zone())));
}
}