Reland "[turboshaft] port MachineOperatorReducer"

This is a reland of commit ea67ec63d2

Original change's description:
> [turboshaft] port MachineOperatorReducer
>
> Bug: v8:12783
> Change-Id: I9b3db78d8a70aead38836e6ccd4b2a76d6f1eb94
> Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/3872269
> Reviewed-by: Manos Koukoutos <manoskouk@chromium.org>
> Reviewed-by: Clemens Backes <clemensb@chromium.org>
> Commit-Queue: Tobias Tebbi <tebbi@chromium.org>
> Cr-Commit-Position: refs/heads/main@{#83602}

Bug: v8:12783
Change-Id: I9d7110dbd26a8f617e191a6d662ea73b322f71bd
Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/3942386
Auto-Submit: Tobias Tebbi <tebbi@chromium.org>
Reviewed-by: Clemens Backes <clemensb@chromium.org>
Commit-Queue: Manos Koukoutos <manoskouk@chromium.org>
Reviewed-by: Manos Koukoutos <manoskouk@chromium.org>
Cr-Commit-Position: refs/heads/main@{#83624}
Tobias Tebbi 2022-10-10 17:54:40 +02:00 committed by V8 LUCI CQ
parent 524b663f63
commit 52b85cbfde
24 changed files with 2908 additions and 258 deletions

View File

@ -2885,8 +2885,10 @@ filegroup(
"src/compiler/turboshaft/graph.h",
"src/compiler/turboshaft/graph-visualizer.cc",
"src/compiler/turboshaft/graph-visualizer.h",
"src/compiler/turboshaft/machine-optimization-assembler.h",
"src/compiler/turboshaft/operations.cc",
"src/compiler/turboshaft/operations.h",
"src/compiler/turboshaft/operation-matching.h",
"src/compiler/turboshaft/optimization-phase.cc",
"src/compiler/turboshaft/optimization-phase.h",
"src/compiler/turboshaft/recreate-schedule.cc",
@ -2896,6 +2898,7 @@ filegroup(
"src/compiler/turboshaft/sidetable.h",
"src/compiler/turboshaft/simplify-tf-loops.cc",
"src/compiler/turboshaft/simplify-tf-loops.h",
"src/compiler/turboshaft/utils.cc",
"src/compiler/turboshaft/utils.h",
"src/compiler/turboshaft/value-numbering-assembler.h",
"src/compiler/type-cache.cc",

View File

@ -2921,6 +2921,8 @@ v8_header_set("v8_internal_headers") {
"src/compiler/turboshaft/graph-builder.h",
"src/compiler/turboshaft/graph-visualizer.h",
"src/compiler/turboshaft/graph.h",
"src/compiler/turboshaft/machine-optimization-assembler.h",
"src/compiler/turboshaft/operation-matching.h",
"src/compiler/turboshaft/operations.h",
"src/compiler/turboshaft/optimization-phase.h",
"src/compiler/turboshaft/recreate-schedule.h",
@ -4232,6 +4234,7 @@ v8_source_set("v8_turboshaft") {
"src/compiler/turboshaft/recreate-schedule.cc",
"src/compiler/turboshaft/representations.cc",
"src/compiler/turboshaft/simplify-tf-loops.cc",
"src/compiler/turboshaft/utils.cc",
]
public_deps = [

View File

@ -106,12 +106,22 @@ int32_t SignedDiv32(int32_t lhs, int32_t rhs) {
return lhs / rhs;
}
int64_t SignedDiv64(int64_t lhs, int64_t rhs) {
if (rhs == 0) return 0;
if (rhs == -1) return lhs == std::numeric_limits<int64_t>::min() ? lhs : -lhs;
return lhs / rhs;
}
int32_t SignedMod32(int32_t lhs, int32_t rhs) {
if (rhs == 0 || rhs == -1) return 0;
return lhs % rhs;
}
int64_t SignedMod64(int64_t lhs, int64_t rhs) {
if (rhs == 0 || rhs == -1) return 0;
return lhs % rhs;
}
int64_t SignedSaturatedAdd64(int64_t lhs, int64_t rhs) {
using limits = std::numeric_limits<int64_t>;
// Underflow if {lhs + rhs < min}. In that case, return {min}.

View File

@ -70,6 +70,30 @@ T ReverseBits(T value) {
return result;
}
// ReverseBytes(value) returns |value| in reverse byte order.
template <typename T>
T ReverseBytes(T value) {
static_assert((sizeof(value) == 1) || (sizeof(value) == 2) ||
(sizeof(value) == 4) || (sizeof(value) == 8));
T result = 0;
for (unsigned i = 0; i < sizeof(value); i++) {
result = (result << 8) | (value & 0xff);
value >>= 8;
}
return result;
}
template <class T>
inline constexpr std::make_unsigned_t<T> Unsigned(T value) {
static_assert(std::is_signed_v<T>);
return static_cast<std::make_unsigned_t<T>>(value);
}
template <class T>
inline constexpr std::make_signed_t<T> Signed(T value) {
static_assert(std::is_unsigned_v<T>);
return static_cast<std::make_signed_t<T>>(value);
}
// CountLeadingZeros(value) returns the number of zero bits following the most
// significant 1 bit in |value| if |value| is non-zero, otherwise it returns
// {sizeof(T) * 8}.
@ -104,6 +128,15 @@ inline constexpr unsigned CountLeadingZeros64(uint64_t value) {
return CountLeadingZeros(value);
}
// The number of leading zeros for a positive number,
// the number of leading ones for a negative number.
template <class T>
constexpr unsigned CountLeadingSignBits(T value) {
static_assert(std::is_signed_v<T>);
return value < 0 ? CountLeadingZeros(~Unsigned(value))
: CountLeadingZeros(Unsigned(value));
}
// CountTrailingZeros(value) returns the number of zero bits preceding the
// least significant 1 bit in |value| if |value| is non-zero, otherwise it
// returns {sizeof(T) * 8}.
@ -323,11 +356,21 @@ V8_BASE_EXPORT int32_t SignedMulHighAndAdd32(int32_t lhs, int32_t rhs,
// is minint and |rhs| is -1, it returns minint.
V8_BASE_EXPORT int32_t SignedDiv32(int32_t lhs, int32_t rhs);
// SignedDiv64(lhs, rhs) divides |lhs| by |rhs| and returns the quotient
// truncated to int64. If |rhs| is zero, then zero is returned. If |lhs|
// is minint and |rhs| is -1, it returns minint.
V8_BASE_EXPORT int64_t SignedDiv64(int64_t lhs, int64_t rhs);
// SignedMod32(lhs, rhs) divides |lhs| by |rhs| and returns the remainder
// truncated to int32. If either |rhs| is zero or |lhs| is minint and |rhs|
// is -1, it returns zero.
V8_BASE_EXPORT int32_t SignedMod32(int32_t lhs, int32_t rhs);
// SignedMod64(lhs, rhs) divides |lhs| by |rhs| and returns the remainder
// truncated to int64. If either |rhs| is zero or |lhs| is minint and |rhs|
// is -1, it returns zero.
V8_BASE_EXPORT int64_t SignedMod64(int64_t lhs, int64_t rhs);
// UnsignedAddOverflow32(lhs,rhs,val) performs an unsigned summation of |lhs|
// and |rhs| and stores the result into the variable pointed to by |val| and
// returns true if the unsigned summation resulted in an overflow.
@ -347,6 +390,11 @@ inline uint32_t UnsignedDiv32(uint32_t lhs, uint32_t rhs) {
return rhs ? lhs / rhs : 0u;
}
// UnsignedDiv64(lhs, rhs) divides |lhs| by |rhs| and returns the quotient
// truncated to uint64. If |rhs| is zero, then zero is returned.
inline uint64_t UnsignedDiv64(uint64_t lhs, uint64_t rhs) {
return rhs ? lhs / rhs : 0u;
}
// UnsignedMod32(lhs, rhs) divides |lhs| by |rhs| and returns the remainder
// truncated to uint32. If |rhs| is zero, then zero is returned.
@ -354,6 +402,12 @@ inline uint32_t UnsignedMod32(uint32_t lhs, uint32_t rhs) {
return rhs ? lhs % rhs : 0u;
}
// UnsignedMod64(lhs, rhs) divides |lhs| by |rhs| and returns the remainder
// truncated to uint64. If |rhs| is zero, then zero is returned.
inline uint64_t UnsignedMod64(uint64_t lhs, uint64_t rhs) {
return rhs ? lhs % rhs : 0u;
}
// Wraparound integer arithmetic without undefined behavior.
inline int32_t WraparoundAdd32(int32_t lhs, int32_t rhs) {
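
A minimal, self-contained sketch (not part of this CL) of the CountLeadingSignBits() contract documented above, using std::countl_zero from C++20 in place of V8's CountLeadingZeros; the Example* name is invented for illustration.
#include <bit>
#include <cstdint>
// Mirrors the comment above: leading zeros for a non-negative value,
// leading ones (sign bits) for a negative value.
constexpr unsigned ExampleCountLeadingSignBits(int32_t value) {
  const uint32_t u = static_cast<uint32_t>(value);
  return value < 0 ? std::countl_zero(~u) : std::countl_zero(u);
}
static_assert(ExampleCountLeadingSignBits(1) == 31);   // 0x00000001
static_assert(ExampleCountLeadingSignBits(-1) == 32);  // 0xFFFFFFFF: all sign bits
static_assert(ExampleCountLeadingSignBits(-2) == 31);  // 0xFFFFFFFE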

View File

@ -6,15 +6,16 @@
#include <stdint.h>
#include <type_traits>
#include "src/base/logging.h"
#include "src/base/macros.h"
namespace v8 {
namespace base {
template <class T>
template <class T, std::enable_if_t<std::is_unsigned_v<T>, bool>>
MagicNumbersForDivision<T> SignedDivisionByConstant(T d) {
static_assert(static_cast<T>(0) < static_cast<T>(-1));
DCHECK(d != static_cast<T>(-1) && d != 0 && d != 1);
const unsigned bits = static_cast<unsigned>(sizeof(T)) * 8;
const T min = (static_cast<T>(1) << (bits - 1));
@ -48,11 +49,10 @@ MagicNumbersForDivision<T> SignedDivisionByConstant(T d) {
return MagicNumbersForDivision<T>(neg ? (0 - mul) : mul, p - bits, false);
}
template <class T>
MagicNumbersForDivision<T> UnsignedDivisionByConstant(T d,
unsigned leading_zeros) {
static_assert(static_cast<T>(0) < static_cast<T>(-1));
static_assert(std::is_unsigned_v<T>);
DCHECK_NE(d, 0);
const unsigned bits = static_cast<unsigned>(sizeof(T)) * 8;
const T ones = ~static_cast<T>(0) >> leading_zeros;

View File

@ -7,6 +7,9 @@
#include <stdint.h>
#include <tuple>
#include <type_traits>
#include "src/base/base-export.h"
#include "src/base/export-template.h"
@ -16,10 +19,10 @@ namespace base {
// ----------------------------------------------------------------------------
// The magic numbers for division via multiplication, see Warren's "Hacker's
// Delight", chapter 10. The template parameter must be one of the unsigned
// integral types.
// Delight", chapter 10.
template <class T>
struct EXPORT_TEMPLATE_DECLARE(V8_BASE_EXPORT) MagicNumbersForDivision {
static_assert(std::is_integral_v<T>);
MagicNumbersForDivision(T m, unsigned s, bool a)
: multiplier(m), shift(s), add(a) {}
bool operator==(const MagicNumbersForDivision& rhs) const {
@ -31,13 +34,20 @@ struct EXPORT_TEMPLATE_DECLARE(V8_BASE_EXPORT) MagicNumbersForDivision {
bool add;
};
// Calculate the multiplier and shift for signed division via multiplication.
// The divisor must not be -1, 0 or 1 when interpreted as a signed value.
template <class T>
template <class T, std::enable_if_t<std::is_unsigned_v<T>, bool> = true>
EXPORT_TEMPLATE_DECLARE(V8_BASE_EXPORT)
MagicNumbersForDivision<T> SignedDivisionByConstant(T d);
template <class T, std::enable_if_t<std::is_signed_v<T>, bool> = true>
MagicNumbersForDivision<T> SignedDivisionByConstant(T d) {
using Unsigned = std::make_unsigned_t<T>;
MagicNumbersForDivision<Unsigned> magic =
SignedDivisionByConstant(static_cast<Unsigned>(d));
return {static_cast<T>(magic.multiplier), magic.shift, magic.add};
}
// Calculate the multiplier and shift for unsigned division via multiplication,
// see Warren's "Hacker's Delight", chapter 10. The divisor must not be 0 and
// leading_zeros can be used to speed up the calculation if the given number of
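
A self-contained sketch (not part of this CL) of the technique these helpers parameterize: replacing an unsigned division by a constant with a multiply-high, a fix-up add, and a shift, using the classic constants for x / 7 from "Hacker's Delight". The DivideBy7 name and the inlined constants are illustrative only; the real UnsignedDivisionByConstant/SignedDivisionByConstant compute the multiplier/shift/add triple generically.
#include <cstdint>
constexpr uint32_t DivideBy7(uint32_t x) {
  const uint64_t multiplier = 0x24924925u;  // well-known 32-bit magic constant for x / 7
  const uint32_t hi = static_cast<uint32_t>((multiplier * x) >> 32);  // multiply-high
  const uint32_t t = hi + ((x - hi) >> 1);  // fix-up step for the "add" case
  return t >> 2;                            // final shift
}
static_assert(DivideBy7(21) == 3);
static_assert(DivideBy7(0xFFFFFFFFu) == 0xFFFFFFFFu / 7);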

View File

@ -97,4 +97,12 @@ const int kMaxFastLiteralProperties = JSObject::kMaxInObjectProperties;
#define V8_ENABLE_FP_PARAMS_IN_C_LINKAGE
#endif
// The biggest double value that fits within the int64_t/uint64_t value range.
// This is different from the safe integer range in that there are gaps of
// integers in-between that cannot be represented as a double.
constexpr double kMaxDoubleRepresentableInt64 = 9223372036854774784.0;
constexpr double kMinDoubleRepresentableInt64 =
std::numeric_limits<int64_t>::min();
constexpr double kMaxDoubleRepresentableUint64 = 18446744073709549568.0;
#endif // V8_COMPILER_GLOBALS_H_
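
A short, self-contained check (not part of this CL) of why 9223372036854774784.0 is the chosen bound: doubles in [2^62, 2^63) are spaced 1024 apart, so the largest double that still fits in int64_t is 2^63 - 1024; analogously, 18446744073709549568.0 is 2^64 - 2048 for uint64_t.
#include <cstdint>
constexpr double kTwoPow63 = 9223372036854775808.0;  // 2^63, exactly representable as a double
static_assert(9223372036854774784.0 == kTwoPow63 - 1024.0);  // largest double below 2^63
static_assert(static_cast<int64_t>(9223372036854774784.0) ==
              int64_t{9223372036854774784});                 // the conversion is exact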

View File

@ -83,6 +83,7 @@
#include "src/compiler/turboshaft/graph-builder.h"
#include "src/compiler/turboshaft/graph-visualizer.h"
#include "src/compiler/turboshaft/graph.h"
#include "src/compiler/turboshaft/machine-optimization-assembler.h"
#include "src/compiler/turboshaft/optimization-phase.h"
#include "src/compiler/turboshaft/recreate-schedule.h"
#include "src/compiler/turboshaft/simplify-tf-loops.h"
@ -2047,9 +2048,12 @@ struct BuildTurboshaftPhase {
Schedule* schedule = data->schedule();
data->reset_schedule();
data->CreateTurboshaftGraph();
return turboshaft::BuildGraph(
schedule, data->graph_zone(), temp_zone, &data->turboshaft_graph(),
data->source_positions(), data->node_origins());
if (auto bailout = turboshaft::BuildGraph(
schedule, data->graph_zone(), temp_zone, &data->turboshaft_graph(),
data->source_positions(), data->node_origins())) {
return bailout;
}
return {};
}
};
@ -2057,8 +2061,12 @@ struct OptimizeTurboshaftPhase {
DECL_PIPELINE_PHASE_CONSTANTS(OptimizeTurboshaft)
void Run(PipelineData* data, Zone* temp_zone) {
turboshaft::OptimizationPhase<turboshaft::LivenessAnalyzer,
turboshaft::ValueNumberingAssembler>::
UnparkedScopeIfNeeded scope(data->broker(),
FLAG_turboshaft_trace_reduction);
turboshaft::OptimizationPhase<
turboshaft::AnalyzerBase,
turboshaft::MachineOptimizationAssembler<
turboshaft::ValueNumberingAssembler, false>>::
Run(&data->turboshaft_graph(), temp_zone, data->node_origins(),
turboshaft::VisitOrder::kDominator);
}
@ -2968,13 +2976,13 @@ bool PipelineImpl::OptimizeGraph(Linkage* linkage) {
Run<MemoryOptimizationPhase>();
RunPrintAndVerify(MemoryOptimizationPhase::phase_name(), true);
// Run value numbering and machine operator reducer to optimize load/store
// address computation (in particular, reuse the address computation whenever
// possible).
Run<MachineOperatorOptimizationPhase>();
RunPrintAndVerify(MachineOperatorOptimizationPhase::phase_name(), true);
if (!v8_flags.turboshaft) {
// Run value numbering and machine operator reducer to optimize load/store
// address computation (in particular, reuse the address computation
// whenever possible).
Run<MachineOperatorOptimizationPhase>();
RunPrintAndVerify(MachineOperatorOptimizationPhase::phase_name(), true);
Run<DecompressionOptimizationPhase>();
RunPrintAndVerify(DecompressionOptimizationPhase::phase_name(), true);
}

View File

@ -17,6 +17,7 @@
#include "src/base/template-utils.h"
#include "src/codegen/reloc-info.h"
#include "src/compiler/turboshaft/graph.h"
#include "src/compiler/turboshaft/operation-matching.h"
#include "src/compiler/turboshaft/operations.h"
namespace v8::internal::compiler::turboshaft {
@ -414,7 +415,6 @@ class AssemblerInterface : public Superclass {
OpIndex WordConstant(uint64_t value, WordRepresentation rep) {
switch (rep.value()) {
case WordRepresentation::Word32():
DCHECK(value <= WordRepresentation::Word32().MaxUnsignedValue());
return Word32Constant(static_cast<uint32_t>(value));
case WordRepresentation::Word64():
return Word64Constant(value);
@ -458,37 +458,101 @@ class AssemblerInterface : public Superclass {
static_cast<uint64_t>(value));
}
#define DECL_CHANGE(name, kind, from, to) \
OpIndex name(OpIndex input) { \
return subclass().Change(input, ChangeOp::Kind::k##kind, \
RegisterRepresentation::from(), \
RegisterRepresentation::to()); \
#define DECL_CHANGE(name, kind, assumption, from, to) \
OpIndex name(OpIndex input) { \
return subclass().Change( \
input, ChangeOp::Kind::kind, ChangeOp::Assumption::assumption, \
RegisterRepresentation::from(), RegisterRepresentation::to()); \
}
#define DECL_TRY_CHANGE(name, kind, from, to) \
OpIndex name(OpIndex input) { \
return subclass().TryChange(input, TryChangeOp::Kind::kind, \
FloatRepresentation::from(), \
WordRepresentation::to()); \
}
DECL_CHANGE(BitcastWord32ToWord64, Bitcast, Word32, Word64)
DECL_CHANGE(BitcastFloat32ToWord32, Bitcast, Float32, Word32)
DECL_CHANGE(BitcastWord32ToFloat32, Bitcast, Word32, Float32)
DECL_CHANGE(BitcastFloat64ToWord64, Bitcast, Float64, Word64)
DECL_CHANGE(BitcastWord64ToFloat64, Bitcast, Word64, Float64)
DECL_CHANGE(ChangeUint32ToUint64, ZeroExtend, Word32, Word64)
DECL_CHANGE(ChangeInt32ToInt64, SignExtend, Word32, Word64)
DECL_CHANGE(ChangeInt32ToFloat64, SignedToFloat, Word32, Float64)
DECL_CHANGE(ChangeInt64ToFloat64, SignedToFloat, Word64, Float64)
DECL_CHANGE(ChangeUint32ToFloat64, UnsignedToFloat, Word32, Float64)
DECL_CHANGE(ChangeFloat64ToFloat32, FloatConversion, Float64, Float32)
DECL_CHANGE(ChangeFloat32ToFloat64, FloatConversion, Float32, Float64)
DECL_CHANGE(JSTruncateFloat64ToWord32, JSFloatTruncate, Float64, Word32)
DECL_CHANGE(TruncateFloat64ToInt32OverflowUndefined, SignedFloatTruncate,
DECL_CHANGE(BitcastWord32ToWord64, kBitcast, kNoAssumption, Word32, Word64)
DECL_CHANGE(BitcastFloat32ToWord32, kBitcast, kNoAssumption, Float32, Word32)
DECL_CHANGE(BitcastWord32ToFloat32, kBitcast, kNoAssumption, Word32, Float32)
DECL_CHANGE(BitcastFloat64ToWord64, kBitcast, kNoAssumption, Float64, Word64)
DECL_CHANGE(BitcastWord64ToFloat64, kBitcast, kNoAssumption, Word64, Float64)
DECL_CHANGE(ChangeUint32ToUint64, kZeroExtend, kNoAssumption, Word32, Word64)
DECL_CHANGE(ChangeInt32ToInt64, kSignExtend, kNoAssumption, Word32, Word64)
DECL_CHANGE(ChangeInt32ToFloat64, kSignedToFloat, kNoAssumption, Word32,
Float64)
DECL_CHANGE(ChangeInt64ToFloat64, kSignedToFloat, kNoAssumption, Word64,
Float64)
DECL_CHANGE(ChangeInt32ToFloat32, kSignedToFloat, kNoAssumption, Word32,
Float32)
DECL_CHANGE(ChangeInt64ToFloat32, kSignedToFloat, kNoAssumption, Word64,
Float32)
DECL_CHANGE(ChangeUint32ToFloat32, kUnsignedToFloat, kNoAssumption, Word32,
Float32)
DECL_CHANGE(ChangeUint64ToFloat32, kUnsignedToFloat, kNoAssumption, Word64,
Float32)
DECL_CHANGE(ReversibleInt64ToFloat64, kSignedToFloat, kReversible, Word64,
Float64)
DECL_CHANGE(ChangeUint64ToFloat64, kUnsignedToFloat, kNoAssumption, Word64,
Float64)
DECL_CHANGE(ReversibleUint64ToFloat64, kUnsignedToFloat, kReversible, Word64,
Float64)
DECL_CHANGE(ChangeUint32ToFloat64, kUnsignedToFloat, kNoAssumption, Word32,
Float64)
DECL_CHANGE(ChangeFloat64ToFloat32, kFloatConversion, kNoAssumption, Float64,
Float32)
DECL_CHANGE(ChangeFloat32ToFloat64, kFloatConversion, kNoAssumption, Float32,
Float64)
DECL_CHANGE(JSTruncateFloat64ToWord32, kJSFloatTruncate, kNoAssumption,
Float64, Word32)
#define DECL_SIGNED_FLOAT_TRUNCATE(FloatBits, ResultBits) \
DECL_CHANGE(TruncateFloat##FloatBits##ToInt##ResultBits##OverflowUndefined, \
kSignedFloatTruncateOverflowToMin, kNoOverflow, \
Float##FloatBits, Word##ResultBits) \
DECL_CHANGE(TruncateFloat##FloatBits##ToInt##ResultBits##OverflowToMin, \
kSignedFloatTruncateOverflowToMin, kNoAssumption, \
Float##FloatBits, Word##ResultBits) \
DECL_TRY_CHANGE(TryTruncateFloat##FloatBits##ToInt##ResultBits, \
kSignedFloatTruncateOverflowUndefined, Float##FloatBits, \
Word##ResultBits)
DECL_SIGNED_FLOAT_TRUNCATE(64, 64)
DECL_SIGNED_FLOAT_TRUNCATE(64, 32)
DECL_SIGNED_FLOAT_TRUNCATE(32, 64)
DECL_SIGNED_FLOAT_TRUNCATE(32, 32)
#undef DECL_SIGNED_FLOAT_TRUNCATE
#define DECL_UNSIGNED_FLOAT_TRUNCATE(FloatBits, ResultBits) \
DECL_CHANGE(TruncateFloat##FloatBits##ToUint##ResultBits##OverflowUndefined, \
kUnsignedFloatTruncateOverflowToMin, kNoOverflow, \
Float##FloatBits, Word##ResultBits) \
DECL_CHANGE(TruncateFloat##FloatBits##ToUint##ResultBits##OverflowToMin, \
kUnsignedFloatTruncateOverflowToMin, kNoAssumption, \
Float##FloatBits, Word##ResultBits) \
DECL_TRY_CHANGE(TryTruncateFloat##FloatBits##ToUint##ResultBits, \
kUnsignedFloatTruncateOverflowUndefined, Float##FloatBits, \
Word##ResultBits)
DECL_UNSIGNED_FLOAT_TRUNCATE(64, 64)
DECL_UNSIGNED_FLOAT_TRUNCATE(64, 32)
DECL_UNSIGNED_FLOAT_TRUNCATE(32, 64)
DECL_UNSIGNED_FLOAT_TRUNCATE(32, 32)
#undef DECL_UNSIGNED_FLOAT_TRUNCATE
DECL_CHANGE(ReversibleFloat64ToInt32, kSignedFloatTruncateOverflowToMin,
kReversible, Float64, Word32)
DECL_CHANGE(ReversibleFloat64ToUint32, kUnsignedFloatTruncateOverflowToMin,
kReversible, Float64, Word32)
DECL_CHANGE(ReversibleFloat64ToInt64, kSignedFloatTruncateOverflowToMin,
kReversible, Float64, Word64)
DECL_CHANGE(ReversibleFloat64ToUint64, kUnsignedFloatTruncateOverflowToMin,
kReversible, Float64, Word64)
DECL_CHANGE(Float64ExtractLowWord32, kExtractLowHalf, kNoAssumption, Float64,
Word32)
DECL_CHANGE(Float64ExtractHighWord32, kExtractHighHalf, kNoAssumption,
Float64, Word32)
DECL_CHANGE(TruncateFloat64ToInt32OverflowToMin,
SignedFloatTruncateOverflowToMin, Float64, Word32)
DECL_CHANGE(NarrowFloat64ToInt32, SignedNarrowing, Float64, Word32)
DECL_CHANGE(NarrowFloat64ToUint32, UnsignedNarrowing, Float64, Word32)
DECL_CHANGE(NarrowFloat64ToInt64, SignedNarrowing, Float64, Word64)
DECL_CHANGE(NarrowFloat64ToUint64, UnsignedNarrowing, Float64, Word64)
DECL_CHANGE(Float64ExtractLowWord32, ExtractLowHalf, Float64, Word32)
DECL_CHANGE(Float64ExtractHighWord32, ExtractHighHalf, Float64, Word32)
#undef DECL_CHANGE
#undef DECL_TRY_CHANGE
using Base::Tuple;
OpIndex Tuple(OpIndex a, OpIndex b) {
@ -514,7 +578,8 @@ class AssemblerBase {
};
class Assembler
: public AssemblerInterface<Assembler, AssemblerBase<Assembler>> {
: public AssemblerInterface<Assembler, AssemblerBase<Assembler>>,
public OperationMatching<Assembler> {
public:
Block* NewBlock(Block::Kind kind) { return graph_.NewBlock(kind); }
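
For readability, one instantiation of the new four-argument DECL_CHANGE macro above expands roughly to the following member of AssemblerInterface (a hand expansion for illustration, not generated output):
// DECL_CHANGE(ChangeUint32ToUint64, kZeroExtend, kNoAssumption, Word32, Word64)
OpIndex ChangeUint32ToUint64(OpIndex input) {
  return subclass().Change(input, ChangeOp::Kind::kZeroExtend,
                           ChangeOp::Assumption::kNoAssumption,
                           RegisterRepresentation::Word32(),
                           RegisterRepresentation::Word64());
}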

View File

@ -477,94 +477,78 @@ OpIndex GraphBuilder::Process(
UNARY_CASE(Float64Atan, Float64Atan)
UNARY_CASE(Float64Atanh, Float64Atanh)
UNARY_CASE(Float64Cbrt, Float64Cbrt)
UNARY_CASE(BitcastWord32ToWord64, BitcastWord32ToWord64)
UNARY_CASE(BitcastFloat32ToInt32, BitcastFloat32ToWord32)
UNARY_CASE(BitcastInt32ToFloat32, BitcastWord32ToFloat32)
UNARY_CASE(BitcastFloat64ToInt64, BitcastFloat64ToWord64)
UNARY_CASE(BitcastInt64ToFloat64, BitcastWord64ToFloat64)
UNARY_CASE(ChangeUint32ToUint64, ChangeUint32ToUint64)
UNARY_CASE(ChangeInt32ToInt64, ChangeInt32ToInt64)
UNARY_CASE(SignExtendWord32ToInt64, ChangeInt32ToInt64)
UNARY_CASE(ChangeFloat32ToFloat64, ChangeFloat32ToFloat64)
UNARY_CASE(ChangeFloat64ToInt32, ReversibleFloat64ToInt32)
UNARY_CASE(ChangeFloat64ToInt64, ReversibleFloat64ToInt64)
UNARY_CASE(ChangeFloat64ToUint32, ReversibleFloat64ToUint32)
UNARY_CASE(ChangeFloat64ToUint64, ReversibleFloat64ToUint64)
UNARY_CASE(ChangeInt32ToFloat64, ChangeInt32ToFloat64)
UNARY_CASE(ChangeInt64ToFloat64, ReversibleInt64ToFloat64)
UNARY_CASE(ChangeUint32ToFloat64, ChangeUint32ToFloat64)
UNARY_CASE(RoundFloat64ToInt32, TruncateFloat64ToInt32OverflowUndefined)
UNARY_CASE(RoundInt32ToFloat32, ChangeInt32ToFloat32)
UNARY_CASE(RoundInt64ToFloat32, ChangeInt64ToFloat32)
UNARY_CASE(RoundInt64ToFloat64, ChangeInt64ToFloat64)
UNARY_CASE(RoundUint32ToFloat32, ChangeUint32ToFloat32)
UNARY_CASE(RoundUint64ToFloat32, ChangeUint64ToFloat32)
UNARY_CASE(RoundUint64ToFloat64, ChangeUint64ToFloat64)
UNARY_CASE(TruncateFloat64ToFloat32, ChangeFloat64ToFloat32)
UNARY_CASE(TruncateFloat64ToUint32,
TruncateFloat64ToUint32OverflowUndefined)
UNARY_CASE(TruncateFloat64ToWord32, JSTruncateFloat64ToWord32)
UNARY_CASE(TryTruncateFloat32ToInt64, TryTruncateFloat32ToInt64)
UNARY_CASE(TryTruncateFloat32ToUint64, TryTruncateFloat32ToUint64)
UNARY_CASE(TryTruncateFloat64ToInt32, TryTruncateFloat64ToInt32)
UNARY_CASE(TryTruncateFloat64ToInt64, TryTruncateFloat64ToInt64)
UNARY_CASE(TryTruncateFloat64ToUint32, TryTruncateFloat64ToUint32)
UNARY_CASE(TryTruncateFloat64ToUint64, TryTruncateFloat64ToUint64)
UNARY_CASE(Float64ExtractLowWord32, Float64ExtractLowWord32)
UNARY_CASE(Float64ExtractHighWord32, Float64ExtractHighWord32)
#undef UNARY_CASE
#define CHANGE_CASE(opcode, kind, from, to) \
case IrOpcode::k##opcode: \
return assembler.Change(Map(node->InputAt(0)), ChangeOp::Kind::k##kind, \
RegisterRepresentation::from(), \
RegisterRepresentation::to());
CHANGE_CASE(BitcastWord32ToWord64, Bitcast, Word32, Word64)
CHANGE_CASE(BitcastFloat32ToInt32, Bitcast, Float32, Word32)
CHANGE_CASE(BitcastInt32ToFloat32, Bitcast, Word32, Float32)
CHANGE_CASE(BitcastFloat64ToInt64, Bitcast, Float64, Word64)
CHANGE_CASE(BitcastInt64ToFloat64, Bitcast, Word64, Float64)
CHANGE_CASE(ChangeUint32ToUint64, ZeroExtend, Word32, Word64)
CHANGE_CASE(ChangeInt32ToInt64, SignExtend, Word32, Word64)
CHANGE_CASE(SignExtendWord32ToInt64, SignExtend, Word32, Word64)
CHANGE_CASE(ChangeInt32ToFloat64, SignedNarrowing, Word32, Float64)
CHANGE_CASE(ChangeInt64ToFloat64, SignedNarrowing, Word64, Float64)
CHANGE_CASE(ChangeUint32ToFloat64, UnsignedToFloat, Word32, Float64)
CHANGE_CASE(RoundInt64ToFloat64, SignedToFloat, Word64, Float64)
CHANGE_CASE(RoundUint64ToFloat64, UnsignedToFloat, Word64, Float64)
CHANGE_CASE(RoundInt32ToFloat32, SignedToFloat, Word32, Float32)
CHANGE_CASE(RoundUint32ToFloat32, UnsignedToFloat, Word32, Float32)
CHANGE_CASE(RoundInt64ToFloat32, SignedToFloat, Word64, Float32)
CHANGE_CASE(RoundUint64ToFloat32, UnsignedToFloat, Word64, Float32)
CHANGE_CASE(TruncateFloat64ToWord32, JSFloatTruncate, Float64, Word32)
CHANGE_CASE(TruncateFloat64ToFloat32, FloatConversion, Float64, Float32)
CHANGE_CASE(ChangeFloat32ToFloat64, FloatConversion, Float32, Float64)
CHANGE_CASE(RoundFloat64ToInt32, SignedFloatTruncate, Float64, Word32)
CHANGE_CASE(ChangeFloat64ToInt32, SignedNarrowing, Float64, Word32)
CHANGE_CASE(ChangeFloat64ToUint32, UnsignedNarrowing, Float64, Word32)
CHANGE_CASE(ChangeFloat64ToInt64, SignedNarrowing, Float64, Word64)
CHANGE_CASE(ChangeFloat64ToUint64, UnsignedNarrowing, Float64, Word64)
CHANGE_CASE(TryTruncateFloat64ToUint64, UnsignedFloatTruncateSat, Float64,
Word64)
CHANGE_CASE(TryTruncateFloat64ToUint32, UnsignedFloatTruncateSat, Float64,
Word32)
CHANGE_CASE(TryTruncateFloat32ToUint64, UnsignedFloatTruncateSat, Float32,
Word64)
CHANGE_CASE(TryTruncateFloat64ToInt64, SignedFloatTruncateSat, Float64,
Word64)
CHANGE_CASE(TryTruncateFloat64ToInt32, SignedFloatTruncateSat, Float64,
Word32)
CHANGE_CASE(TryTruncateFloat32ToInt64, SignedFloatTruncateSat, Float32,
Word64)
CHANGE_CASE(Float64ExtractLowWord32, ExtractLowHalf, Float64, Word32)
CHANGE_CASE(Float64ExtractHighWord32, ExtractHighHalf, Float64, Word32)
#undef CHANGE_CASE
case IrOpcode::kTruncateInt64ToInt32:
// 64- to 32-bit truncation is implicit in Turboshaft.
return Map(node->InputAt(0));
case IrOpcode::kTruncateFloat32ToInt32: {
ChangeOp::Kind kind =
OpParameter<TruncateKind>(node->op()) ==
TruncateKind::kArchitectureDefault
? ChangeOp::Kind::kSignedFloatTruncate
: ChangeOp::Kind::kSignedFloatTruncateOverflowToMin;
return assembler.Change(Map(node->InputAt(0)), kind,
RegisterRepresentation::Float32(),
RegisterRepresentation::Word32());
}
case IrOpcode::kTruncateFloat32ToUint32: {
ChangeOp::Kind kind =
OpParameter<TruncateKind>(node->op()) ==
TruncateKind::kArchitectureDefault
? ChangeOp::Kind::kUnsignedFloatTruncate
: ChangeOp::Kind::kUnsignedFloatTruncateOverflowToMin;
return assembler.Change(Map(node->InputAt(0)), kind,
RegisterRepresentation::Float32(),
RegisterRepresentation::Word32());
}
case IrOpcode::kTruncateFloat64ToInt64: {
ChangeOp::Kind kind =
OpParameter<TruncateKind>(node->op()) ==
TruncateKind::kArchitectureDefault
? ChangeOp::Kind::kSignedFloatTruncate
: ChangeOp::Kind::kSignedFloatTruncateOverflowToMin;
return assembler.Change(Map(node->InputAt(0)), kind,
RegisterRepresentation::Float64(),
RegisterRepresentation::Word64());
}
case IrOpcode::kTruncateFloat64ToUint32: {
return assembler.Change(
Map(node->InputAt(0)), ChangeOp::Kind::kUnsignedFloatTruncate,
RegisterRepresentation::Float64(), RegisterRepresentation::Word32());
}
case IrOpcode::kTruncateFloat32ToInt32:
switch (OpParameter<TruncateKind>(node->op())) {
case TruncateKind::kArchitectureDefault:
return assembler.TruncateFloat32ToInt32OverflowUndefined(
Map(node->InputAt(0)));
case TruncateKind::kSetOverflowToMin:
return assembler.TruncateFloat32ToInt32OverflowToMin(
Map(node->InputAt(0)));
}
case IrOpcode::kTruncateFloat32ToUint32:
switch (OpParameter<TruncateKind>(node->op())) {
case TruncateKind::kArchitectureDefault:
return assembler.TruncateFloat32ToUint32OverflowUndefined(
Map(node->InputAt(0)));
case TruncateKind::kSetOverflowToMin:
return assembler.TruncateFloat32ToUint32OverflowToMin(
Map(node->InputAt(0)));
}
case IrOpcode::kTruncateFloat64ToInt64:
switch (OpParameter<TruncateKind>(node->op())) {
case TruncateKind::kArchitectureDefault:
return assembler.TruncateFloat64ToInt64OverflowUndefined(
Map(node->InputAt(0)));
case TruncateKind::kSetOverflowToMin:
return assembler.TruncateFloat64ToInt64OverflowToMin(
Map(node->InputAt(0)));
}
case IrOpcode::kFloat64InsertLowWord32:
return assembler.Float64InsertWord32(
Map(node->InputAt(0)), Map(node->InputAt(1)),

View File

@ -287,8 +287,7 @@ class Block : public RandomAccessStackDominatorNode<Block> {
return result;
}
#ifdef DEBUG
int PredecessorCount() {
int PredecessorCount() const {
int count = 0;
for (Block* pred = last_predecessor_; pred != nullptr;
pred = pred->neighboring_predecessor_) {
@ -296,7 +295,6 @@ class Block : public RandomAccessStackDominatorNode<Block> {
}
return count;
}
#endif
Block* LastPredecessor() const { return last_predecessor_; }
Block* NeighboringPredecessor() const { return neighboring_predecessor_; }
@ -483,6 +481,19 @@ class Graph {
block->end_ = next_operation_index();
}
void TurnLoopIntoMerge(Block* loop) {
DCHECK(loop->IsLoop());
DCHECK_EQ(loop->PredecessorCount(), 1);
loop->kind_ = Block::Kind::kMerge;
for (Operation& op : operations(*loop)) {
if (auto* pending_phi = op.TryCast<PendingLoopPhiOp>()) {
Replace<PhiOp>(Index(*pending_phi),
base::VectorOf({pending_phi->first()}),
pending_phi->rep);
}
}
}
OpIndex next_operation_index() const { return operations_.EndIndex(); }
Zone* graph_zone() const { return graph_zone_; }

File diff suppressed because it is too large

View File

@ -0,0 +1,315 @@
// Copyright 2022 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifndef V8_COMPILER_TURBOSHAFT_OPERATION_MATCHING_H_
#define V8_COMPILER_TURBOSHAFT_OPERATION_MATCHING_H_
#include "src/compiler/turboshaft/operations.h"
namespace v8::internal::compiler::turboshaft {
template <class Assembler>
class OperationMatching {
public:
template <class Op>
bool Is(OpIndex op_idx) {
return assembler().graph().Get(op_idx).template Is<Op>();
}
template <class Op>
const Op* TryCast(OpIndex op_idx) {
return assembler().graph().Get(op_idx).template TryCast<Op>();
}
template <class Op>
const Op& Cast(OpIndex op_idx) {
return assembler().graph().Get(op_idx).template Cast<Op>();
}
const Operation& Get(OpIndex op_idx) {
return assembler().graph().Get(op_idx);
}
bool MatchZero(OpIndex matched) {
const ConstantOp* op = TryCast<ConstantOp>(matched);
if (!op) return false;
switch (op->kind) {
case ConstantOp::Kind::kWord32:
case ConstantOp::Kind::kWord64:
return op->integral() == 0;
case ConstantOp::Kind::kFloat32:
return op->float32() == 0;
case ConstantOp::Kind::kFloat64:
return op->float64() == 0;
default:
return false;
}
}
bool MatchFloat32Constant(OpIndex matched, float* constant) {
const ConstantOp* op = TryCast<ConstantOp>(matched);
if (!op) return false;
if (op->kind != ConstantOp::Kind::kFloat32) return false;
*constant = op->float32();
return true;
}
bool MatchFloat64Constant(OpIndex matched, double* constant) {
const ConstantOp* op = TryCast<ConstantOp>(matched);
if (!op) return false;
if (op->kind != ConstantOp::Kind::kFloat64) return false;
*constant = op->float64();
return true;
}
bool MatchFloat(OpIndex matched, double* value) {
const ConstantOp* op = TryCast<ConstantOp>(matched);
if (!op) return false;
if (op->kind == ConstantOp::Kind::kFloat64) {
*value = op->float64();
return true;
} else if (op->kind == ConstantOp::Kind::kFloat32) {
*value = op->float32();
return true;
}
return false;
}
bool MatchFloat(OpIndex matched, double value) {
double k;
if (!MatchFloat(matched, &k)) return false;
return base::bit_cast<uint64_t>(value) == base::bit_cast<uint64_t>(k) ||
(std::isnan(k) && std::isnan(value));
}
bool MatchNaN(OpIndex matched) {
double k;
return MatchFloat(matched, &k) && std::isnan(k);
}
bool MatchWordConstant(OpIndex matched, WordRepresentation rep,
uint64_t* unsigned_constant,
int64_t* signed_constant = nullptr) {
const ConstantOp* op = TryCast<ConstantOp>(matched);
if (!op) return false;
switch (op->Representation()) {
case RegisterRepresentation::Word32():
if (rep != WordRepresentation::Word32()) return false;
break;
case RegisterRepresentation::Word64():
if (!(rep == any_of(WordRepresentation::Word64(),
WordRepresentation::Word32()))) {
return false;
}
break;
default:
return false;
}
if (unsigned_constant) *unsigned_constant = op->integral();
if (signed_constant) *signed_constant = op->signed_integral();
return true;
}
bool MatchWordConstant(OpIndex matched, WordRepresentation rep,
int64_t* signed_constant) {
return MatchWordConstant(matched, rep, nullptr, signed_constant);
}
bool MatchWord64Constant(OpIndex matched, uint64_t* constant) {
return MatchWordConstant(matched, WordRepresentation::Word64(), constant);
}
bool MatchWord32Constant(OpIndex matched, uint32_t* constant) {
if (uint64_t value;
MatchWordConstant(matched, WordRepresentation::Word32(), &value)) {
*constant = static_cast<uint32_t>(value);
return true;
}
return false;
}
bool MatchWord64Constant(OpIndex matched, int64_t* constant) {
return MatchWordConstant(matched, WordRepresentation::Word64(), constant);
}
bool MatchWord32Constant(OpIndex matched, int32_t* constant) {
if (int64_t value;
MatchWordConstant(matched, WordRepresentation::Word32(), &value)) {
*constant = static_cast<int32_t>(value);
return true;
}
return false;
}
bool MatchChange(OpIndex matched, OpIndex* input, ChangeOp::Kind kind,
RegisterRepresentation from, RegisterRepresentation to) {
const ChangeOp* op = TryCast<ChangeOp>(matched);
if (!op || op->kind != kind || op->from != from || op->to != to) {
return false;
}
*input = op->input();
return true;
}
bool MatchWordBinop(OpIndex matched, OpIndex* left, OpIndex* right,
WordBinopOp::Kind* kind, WordRepresentation* rep) {
const WordBinopOp* op = TryCast<WordBinopOp>(matched);
if (!op) return false;
*kind = op->kind;
*rep = op->rep;
*left = op->left();
*right = op->right();
return true;
}
bool MatchWordBinop(OpIndex matched, OpIndex* left, OpIndex* right,
WordBinopOp::Kind kind, WordRepresentation rep) {
const WordBinopOp* op = TryCast<WordBinopOp>(matched);
if (!op || kind != op->kind) {
return false;
}
if (!(rep == op->rep ||
(WordBinopOp::AllowsWord64ToWord32Truncation(kind) &&
rep == WordRepresentation::Word32() &&
op->rep == WordRepresentation::Word64()))) {
return false;
}
*left = op->left();
*right = op->right();
return true;
}
bool MatchWordAdd(OpIndex matched, OpIndex* left, OpIndex* right,
WordRepresentation rep) {
return MatchWordBinop(matched, left, right, WordBinopOp::Kind::kAdd, rep);
}
bool MatchWordSub(OpIndex matched, OpIndex* left, OpIndex* right,
WordRepresentation rep) {
return MatchWordBinop(matched, left, right, WordBinopOp::Kind::kSub, rep);
}
bool MatchBitwiseAnd(OpIndex matched, OpIndex* left, OpIndex* right,
WordRepresentation rep) {
return MatchWordBinop(matched, left, right, WordBinopOp::Kind::kBitwiseAnd,
rep);
}
bool MatchEqual(OpIndex matched, OpIndex* left, OpIndex* right,
WordRepresentation rep) {
const EqualOp* op = TryCast<EqualOp>(matched);
if (!op || rep != op->rep) return false;
*left = op->left();
*right = op->right();
return true;
}
bool MatchComparison(OpIndex matched, OpIndex* left, OpIndex* right,
ComparisonOp::Kind* kind, RegisterRepresentation* rep) {
const ComparisonOp* op = TryCast<ComparisonOp>(matched);
if (!op) return false;
*kind = op->kind;
*rep = op->rep;
*left = op->left();
*right = op->right();
return true;
}
bool MatchFloatUnary(OpIndex matched, OpIndex* input, FloatUnaryOp::Kind kind,
FloatRepresentation rep) {
const FloatUnaryOp* op = TryCast<FloatUnaryOp>(matched);
if (!op || op->kind != kind || op->rep != rep) return false;
*input = op->input();
return true;
}
bool MatchFloatRoundDown(OpIndex matched, OpIndex* input,
FloatRepresentation rep) {
return MatchFloatUnary(matched, input, FloatUnaryOp::Kind::kRoundDown, rep);
}
bool MatchFloatBinary(OpIndex matched, OpIndex* left, OpIndex* right,
FloatBinopOp::Kind kind, FloatRepresentation rep) {
const FloatBinopOp* op = TryCast<FloatBinopOp>(matched);
if (!op || op->kind != kind || op->rep != rep) return false;
*left = op->left();
*right = op->right();
return true;
}
bool MatchFloatSub(OpIndex matched, OpIndex* left, OpIndex* right,
FloatRepresentation rep) {
return MatchFloatBinary(matched, left, right, FloatBinopOp::Kind::kSub,
rep);
}
bool MatchConstantShift(OpIndex matched, OpIndex* input, ShiftOp::Kind* kind,
WordRepresentation* rep, int* amount) {
const ShiftOp* op = TryCast<ShiftOp>(matched);
if (uint32_t rhs_constant;
op && MatchWord32Constant(op->right(), &rhs_constant) &&
rhs_constant < static_cast<uint64_t>(op->rep.bit_width())) {
*input = op->left();
*kind = op->kind;
*rep = op->rep;
*amount = static_cast<int>(rhs_constant);
return true;
}
return false;
}
bool MatchConstantShift(OpIndex matched, OpIndex* input, ShiftOp::Kind kind,
WordRepresentation rep, int* amount) {
const ShiftOp* op = TryCast<ShiftOp>(matched);
if (uint32_t rhs_constant;
op && op->kind == kind &&
(op->rep == rep || (ShiftOp::AllowsWord64ToWord32Truncation(kind) &&
rep == WordRepresentation::Word32() &&
op->rep == WordRepresentation::Word64())) &&
MatchWord32Constant(op->right(), &rhs_constant) &&
rhs_constant < static_cast<uint64_t>(rep.bit_width())) {
*input = op->left();
*amount = static_cast<int>(rhs_constant);
return true;
}
return false;
}
bool MatchConstantRightShift(OpIndex matched, OpIndex* input,
WordRepresentation rep, int* amount) {
const ShiftOp* op = TryCast<ShiftOp>(matched);
if (uint32_t rhs_constant;
op && ShiftOp::IsRightShift(op->kind) && op->rep == rep &&
MatchWord32Constant(op->right(), &rhs_constant) &&
rhs_constant < static_cast<uint32_t>(rep.bit_width())) {
*input = op->left();
*amount = static_cast<int>(rhs_constant);
return true;
}
return false;
}
bool MatchConstantShiftRightArithmeticShiftOutZeros(OpIndex matched,
OpIndex* input,
WordRepresentation rep,
uint16_t* amount) {
const ShiftOp* op = TryCast<ShiftOp>(matched);
if (uint32_t rhs_constant;
op && op->kind == ShiftOp::Kind::kShiftRightArithmeticShiftOutZeros &&
op->rep == rep && MatchWord32Constant(op->right(), &rhs_constant) &&
rhs_constant < static_cast<uint64_t>(rep.bit_width())) {
*input = op->left();
*amount = static_cast<uint16_t>(rhs_constant);
return true;
}
return false;
}
private:
Assembler& assembler() { return *static_cast<Assembler*>(this); }
};
} // namespace v8::internal::compiler::turboshaft
#endif // V8_COMPILER_TURBOSHAFT_OPERATION_MATCHING_H_
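
A small, self-contained sketch (not part of this CL) of the CRTP pattern OperationMatching relies on: the mixin has no state of its own and reaches the derived assembler's graph by downcasting `this`. All names here (MatchingMixin, MiniAssembler) are invented for illustration.
#include <cstddef>
#include <utility>
#include <vector>
template <class Derived>
class MatchingMixin {
 public:
  // Queries the derived assembler's graph, mirroring the matcher methods above.
  bool MatchZeroAt(std::size_t index) { return derived().graph().at(index) == 0; }
 private:
  Derived& derived() { return *static_cast<Derived*>(this); }
};
class MiniAssembler : public MatchingMixin<MiniAssembler> {
 public:
  explicit MiniAssembler(std::vector<int> ops) : graph_(std::move(ops)) {}
  const std::vector<int>& graph() const { return graph_; }
 private:
  std::vector<int> graph_;
};
int main() {
  MiniAssembler assembler({0, 7});
  return assembler.MatchZeroAt(0) && !assembler.MatchZeroAt(1) ? 0 : 1;
}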

View File

@ -200,20 +200,12 @@ std::ostream& operator<<(std::ostream& os, ComparisonOp::Kind kind) {
std::ostream& operator<<(std::ostream& os, ChangeOp::Kind kind) {
switch (kind) {
case ChangeOp::Kind::kSignedNarrowing:
return os << "SignedNarrowing";
case ChangeOp::Kind::kUnsignedNarrowing:
return os << "UnsignedNarrowing";
case ChangeOp::Kind::kFloatConversion:
return os << "FloatConversion";
case ChangeOp::Kind::kJSFloatTruncate:
return os << "JSFloatTruncate";
case ChangeOp::Kind::kSignedFloatTruncate:
return os << "SignedFloatTruncate";
case ChangeOp::Kind::kSignedFloatTruncateOverflowToMin:
return os << "SignedFloatTruncateOverflowToMin";
case ChangeOp::Kind::kUnsignedFloatTruncate:
return os << "UnsignedFloatTruncate";
case ChangeOp::Kind::kUnsignedFloatTruncateOverflowToMin:
return os << "UnsignedFloatTruncateOverflowToMin";
case ChangeOp::Kind::kSignedToFloat:
@ -230,10 +222,26 @@ std::ostream& operator<<(std::ostream& os, ChangeOp::Kind kind) {
return os << "SignExtend";
case ChangeOp::Kind::kBitcast:
return os << "Bitcast";
case ChangeOp::Kind::kSignedFloatTruncateSat:
return os << "SignedFloatTruncateSat";
case ChangeOp::Kind::kUnsignedFloatTruncateSat:
return os << "UnsignedFloatTruncateSat";
}
}
std::ostream& operator<<(std::ostream& os, TryChangeOp::Kind kind) {
switch (kind) {
case TryChangeOp::Kind::kSignedFloatTruncateOverflowUndefined:
return os << "SignedFloatTruncateOverflowUndefined";
case TryChangeOp::Kind::kUnsignedFloatTruncateOverflowUndefined:
return os << "UnsignedFloatTruncateOverflowUndefined";
}
}
std::ostream& operator<<(std::ostream& os, ChangeOp::Assumption assumption) {
switch (assumption) {
case ChangeOp::Assumption::kNoAssumption:
return os << "NoAssumption";
case ChangeOp::Assumption::kNoOverflow:
return os << "NoOverflow";
case ChangeOp::Assumption::kReversible:
return os << "Reversible";
}
}

View File

@ -70,6 +70,7 @@ class Graph;
V(Equal) \
V(Comparison) \
V(Change) \
V(TryChange) \
V(Float64InsertWord32) \
V(TaggedBitcast) \
V(Select) \
@ -883,35 +884,44 @@ struct ComparisonOp : FixedArityOperationT<2, ComparisonOp> {
RegisterRepresentation::Word64(),
RegisterRepresentation::Float32(),
RegisterRepresentation::Float64()));
DCHECK_IMPLIES(
rep == any_of(RegisterRepresentation::Float32(),
RegisterRepresentation::Float64()),
kind == any_of(Kind::kSignedLessThan, Kind::kSignedLessThanOrEqual));
}
auto options() const { return std::tuple{kind, rep}; }
static bool IsSigned(Kind kind) {
switch (kind) {
case Kind::kSignedLessThan:
case Kind::kSignedLessThanOrEqual:
return true;
case Kind::kUnsignedLessThan:
case Kind::kUnsignedLessThanOrEqual:
return false;
}
}
static Kind SetSigned(Kind kind, bool is_signed) {
switch (kind) {
case Kind::kSignedLessThan:
case Kind::kUnsignedLessThan:
return is_signed ? Kind::kSignedLessThan : Kind::kUnsignedLessThan;
case Kind::kSignedLessThanOrEqual:
case Kind::kUnsignedLessThanOrEqual:
return is_signed ? Kind::kSignedLessThanOrEqual
: Kind::kUnsignedLessThanOrEqual;
}
}
};
std::ostream& operator<<(std::ostream& os, ComparisonOp::Kind kind);
struct ChangeOp : FixedArityOperationT<1, ChangeOp> {
enum class Kind : uint8_t {
// narrowing means undefined behavior if value cannot be represented
// precisely
kSignedNarrowing,
kUnsignedNarrowing,
// convert between different floating-point types
kFloatConversion,
// conversion to signed integer, rounding towards zero,
// overflow behavior system-specific
kSignedFloatTruncate,
// like kSignedFloatTruncate, but overflow guaranteed to result in the
// minimal integer
// overflow guaranteed to result in the minimal integer
kSignedFloatTruncateOverflowToMin,
// like kSignedFloatTruncate, but saturates to min/max value if overflow
kSignedFloatTruncateSat,
// conversion to unsigned integer, rounding towards zero,
// overflow behavior system-specific
kUnsignedFloatTruncate,
// like kUnsignedFloatTruncate, but overflow guaranteed to result in the
// minimal integer
kUnsignedFloatTruncateOverflowToMin,
// like kUnsignedFloatTruncate, but saturates to 0/max value if overflow
kUnsignedFloatTruncateSat,
// JS semantics float64 to word32 truncation
// https://tc39.es/ecma262/#sec-touint32
kJSFloatTruncate,
@ -928,20 +938,110 @@ struct ChangeOp : FixedArityOperationT<1, ChangeOp> {
// preserve bits, change meaning
kBitcast
};
// Violated assumptions result in undefined behavior.
enum class Assumption : uint8_t {
kNoAssumption,
// Used for conversions from floating-point to integer, assumes that the
// value doesn't exceed the integer range.
kNoOverflow,
// Assume that the original value can be recovered by a corresponding
// reverse transformation.
kReversible,
};
Kind kind;
// Reversible means undefined behavior if value cannot be represented
// precisely.
Assumption assumption;
RegisterRepresentation from;
RegisterRepresentation to;
static bool IsReversible(Kind kind, Assumption assumption,
RegisterRepresentation from,
RegisterRepresentation to, Kind reverse_kind,
bool signalling_nan_possible) {
switch (kind) {
case Kind::kFloatConversion:
return from == RegisterRepresentation::Float32() &&
to == RegisterRepresentation::Float64() &&
reverse_kind == Kind::kFloatConversion &&
!signalling_nan_possible;
case Kind::kSignedFloatTruncateOverflowToMin:
return assumption == Assumption::kReversible &&
reverse_kind == Kind::kSignedToFloat;
case Kind::kUnsignedFloatTruncateOverflowToMin:
return assumption == Assumption::kReversible &&
reverse_kind == Kind::kUnsignedToFloat;
case Kind::kJSFloatTruncate:
return false;
case Kind::kSignedToFloat:
if (from == RegisterRepresentation::Word32() &&
to == RegisterRepresentation::Float64()) {
return reverse_kind == any_of(Kind::kSignedFloatTruncateOverflowToMin,
Kind::kJSFloatTruncate);
} else {
return assumption == Assumption::kReversible &&
reverse_kind ==
any_of(Kind::kSignedFloatTruncateOverflowToMin);
}
case Kind::kUnsignedToFloat:
if (from == RegisterRepresentation::Word32() &&
to == RegisterRepresentation::Float64()) {
return reverse_kind ==
any_of(Kind::kUnsignedFloatTruncateOverflowToMin,
Kind::kJSFloatTruncate);
} else {
return assumption == Assumption::kReversible &&
reverse_kind == Kind::kUnsignedFloatTruncateOverflowToMin;
}
case Kind::kExtractHighHalf:
case Kind::kExtractLowHalf:
case Kind::kZeroExtend:
case Kind::kSignExtend:
return false;
case Kind::kBitcast:
return reverse_kind == Kind::kBitcast;
}
}
bool IsReversibleBy(Kind reverse_kind, bool signalling_nan_possible) const {
return IsReversible(kind, assumption, from, to, reverse_kind,
signalling_nan_possible);
}
static constexpr OpProperties properties = OpProperties::Pure();
OpIndex input() const { return Base::input(0); }
ChangeOp(OpIndex input, Kind kind, RegisterRepresentation from,
RegisterRepresentation to)
ChangeOp(OpIndex input, Kind kind, Assumption assumption,
RegisterRepresentation from, RegisterRepresentation to)
: Base(input), kind(kind), assumption(assumption), from(from), to(to) {}
auto options() const { return std::tuple{kind, assumption, from, to}; }
};
std::ostream& operator<<(std::ostream& os, ChangeOp::Kind kind);
std::ostream& operator<<(std::ostream& os, ChangeOp::Assumption assumption);
// Perform a conversion and return a pair of the result and a bit indicating
// whether the conversion was successful.
struct TryChangeOp : FixedArityOperationT<1, TryChangeOp> {
enum class Kind : uint8_t {
// The result of the truncation is undefined if the result is out of range.
kSignedFloatTruncateOverflowUndefined,
kUnsignedFloatTruncateOverflowUndefined,
};
Kind kind;
FloatRepresentation from;
WordRepresentation to;
static constexpr OpProperties properties = OpProperties::Pure();
OpIndex input() const { return Base::input(0); }
TryChangeOp(OpIndex input, Kind kind, FloatRepresentation from,
WordRepresentation to)
: Base(input), kind(kind), from(from), to(to) {}
auto options() const { return std::tuple{kind, from, to}; }
};
std::ostream& operator<<(std::ostream& os, ChangeOp::Kind kind);
std::ostream& operator<<(std::ostream& os, TryChangeOp::Kind kind);
// TODO(tebbi): Unify with other operations.
struct Float64InsertWord32Op : FixedArityOperationT<2, Float64InsertWord32Op> {
@ -970,7 +1070,12 @@ struct TaggedBitcastOp : FixedArityOperationT<1, TaggedBitcastOp> {
TaggedBitcastOp(OpIndex input, RegisterRepresentation from,
RegisterRepresentation to)
: Base(input), from(from), to(to) {}
: Base(input), from(from), to(to) {
DCHECK((from == RegisterRepresentation::PointerSized() &&
to == RegisterRepresentation::Tagged()) ||
(from == RegisterRepresentation::Tagged() &&
to == RegisterRepresentation::PointerSized()));
}
auto options() const { return std::tuple{from, to}; }
};
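
A self-contained illustration (not part of this CL) of why ChangeOp::Assumption::kReversible carries information: an int64 to float64 conversion is only lossless when the value survives the round trip, which is what the kReversible variants assert.
#include <cstdint>
constexpr bool RoundTripsThroughFloat64(int64_t x) {
  return static_cast<int64_t>(static_cast<double>(x)) == x;
}
static_assert(RoundTripsThroughFloat64(int64_t{1} << 53));         // 2^53 is exactly representable
static_assert(!RoundTripsThroughFloat64((int64_t{1} << 53) + 1));  // rounds away, not reversible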

View File

@ -208,6 +208,17 @@ struct OptimizationPhase<Analyzer, Assembler>::Impl {
}
if (!assembler.Bind(MapToNewGraph(input_block.index()))) {
if constexpr (trace_reduction) TraceBlockUnreachable();
// If we eliminate a loop backedge, we need to turn the loop into a
// single-predecessor merge block.
const Operation& last_op =
*base::Reversed(input_graph.operations(input_block)).begin();
if (auto* final_goto = last_op.TryCast<GotoOp>()) {
if (final_goto->destination->IsLoop() &&
final_goto->destination->PredecessorCount() == 1) {
assembler.graph().TurnLoopIntoMerge(
MapToNewGraph(final_goto->destination->index()));
}
}
assembler.ExitBlock(input_block);
return;
}
@ -437,8 +448,14 @@ struct OptimizationPhase<Analyzer, Assembler>::Impl {
MapToNewGraph(op.right()), op.kind, op.rep);
}
OpIndex ReduceChange(const ChangeOp& op) {
return assembler.Change(MapToNewGraph(op.input()), op.kind, op.from, op.to);
return assembler.Change(MapToNewGraph(op.input()), op.kind, op.assumption,
op.from, op.to);
}
OpIndex ReduceTryChange(const TryChangeOp& op) {
return assembler.TryChange(MapToNewGraph(op.input()), op.kind, op.from,
op.to);
}
OpIndex ReduceFloat64InsertWord32(const Float64InsertWord32Op& op) {
return assembler.Float64InsertWord32(MapToNewGraph(op.float64()),
MapToNewGraph(op.word32()), op.kind);

View File

@ -618,6 +618,7 @@ Node* ScheduleBuilder::ProcessOperation(const ChangeOp& op) {
const Operator* o;
switch (op.kind) {
using Kind = ChangeOp::Kind;
using Assumption = ChangeOp::Assumption;
case Kind::kFloatConversion:
if (op.from == FloatRepresentation::Float64() &&
op.to == FloatRepresentation::Float32()) {
@ -629,50 +630,55 @@ Node* ScheduleBuilder::ProcessOperation(const ChangeOp& op) {
UNIMPLEMENTED();
}
break;
case Kind::kSignedFloatTruncate:
if (op.from == FloatRepresentation::Float64() &&
op.to == WordRepresentation::Word64()) {
o = machine.TruncateFloat64ToInt64(TruncateKind::kArchitectureDefault);
} else if (op.from == FloatRepresentation::Float64() &&
op.to == WordRepresentation::Word32()) {
o = machine.RoundFloat64ToInt32();
} else if (op.from == FloatRepresentation::Float32() &&
op.to == WordRepresentation::Word32()) {
o = machine.TruncateFloat32ToInt32(TruncateKind::kArchitectureDefault);
} else {
UNIMPLEMENTED();
}
break;
case Kind::kSignedFloatTruncateOverflowToMin:
case Kind::kUnsignedFloatTruncateOverflowToMin: {
bool is_signed = op.kind == Kind::kSignedFloatTruncateOverflowToMin;
if (op.assumption == Assumption::kReversible) {
if (op.from == FloatRepresentation::Float64() &&
op.to == WordRepresentation::Word64()) {
o = is_signed ? machine.ChangeFloat64ToInt64()
: machine.ChangeFloat64ToUint64();
} else if (op.from == FloatRepresentation::Float64() &&
op.to == WordRepresentation::Word32()) {
o = is_signed ? machine.ChangeFloat64ToInt32()
: machine.ChangeFloat64ToUint32();
} else {
UNIMPLEMENTED();
}
break;
}
TruncateKind truncate_kind;
switch (op.assumption) {
case ChangeOp::Assumption::kReversible:
UNREACHABLE();
case ChangeOp::Assumption::kNoAssumption:
truncate_kind = TruncateKind::kSetOverflowToMin;
break;
case ChangeOp::Assumption::kNoOverflow:
truncate_kind = TruncateKind::kArchitectureDefault;
break;
}
if (op.from == FloatRepresentation::Float64() &&
op.to == WordRepresentation::Word64()) {
o = machine.TruncateFloat64ToInt64(TruncateKind::kSetOverflowToMin);
} else if (op.from == FloatRepresentation::Float32() &&
op.to == WordRepresentation::Word32()) {
o = machine.TruncateFloat32ToInt32(TruncateKind::kSetOverflowToMin);
} else {
UNIMPLEMENTED();
}
break;
case Kind::kUnsignedFloatTruncate:
if (op.from == FloatRepresentation::Float32() &&
op.to == WordRepresentation::Word32()) {
o = machine.TruncateFloat32ToUint32(TruncateKind::kArchitectureDefault);
DCHECK(is_signed);
o = machine.TruncateFloat64ToInt64(truncate_kind);
} else if (op.from == FloatRepresentation::Float64() &&
op.to == WordRepresentation::Word32()) {
o = machine.TruncateFloat64ToUint32();
} else {
UNIMPLEMENTED();
}
break;
case Kind::kUnsignedFloatTruncateOverflowToMin:
if (op.from == FloatRepresentation::Float32() &&
op.to == WordRepresentation::Word32()) {
o = machine.TruncateFloat32ToUint32(TruncateKind::kSetOverflowToMin);
if (is_signed) {
DCHECK_EQ(truncate_kind, TruncateKind::kArchitectureDefault);
o = machine.RoundFloat64ToInt32();
} else {
o = machine.TruncateFloat64ToUint32();
}
} else if (op.from == FloatRepresentation::Float32() &&
op.to == WordRepresentation::Word32()) {
o = is_signed ? machine.TruncateFloat32ToInt32(truncate_kind)
: machine.TruncateFloat32ToUint32(truncate_kind);
} else {
UNIMPLEMENTED();
}
break;
}
case Kind::kJSFloatTruncate:
if (op.from == FloatRepresentation::Float64() &&
op.to == WordRepresentation::Word32()) {
@ -684,10 +690,13 @@ Node* ScheduleBuilder::ProcessOperation(const ChangeOp& op) {
case Kind::kSignedToFloat:
if (op.from == WordRepresentation::Word32() &&
op.to == FloatRepresentation::Float64()) {
DCHECK_EQ(op.assumption, Assumption::kNoAssumption);
o = machine.ChangeInt32ToFloat64();
} else if (op.from == WordRepresentation::Word64() &&
op.to == FloatRepresentation::Float64()) {
o = machine.RoundInt64ToFloat64();
o = op.assumption == Assumption::kReversible
? machine.ChangeInt64ToFloat64()
: machine.RoundInt64ToFloat64();
} else if (op.from == WordRepresentation::Word32() &&
op.to == FloatRepresentation::Float32()) {
o = machine.RoundInt32ToFloat32();
@ -764,35 +773,14 @@ Node* ScheduleBuilder::ProcessOperation(const ChangeOp& op) {
UNIMPLEMENTED();
}
break;
case Kind::kSignedNarrowing:
if (op.from == FloatRepresentation::Float64() &&
op.to == WordRepresentation::Word64()) {
o = machine.ChangeFloat64ToInt64();
} else if (op.from == FloatRepresentation::Float64() &&
op.to == WordRepresentation::Word32()) {
o = machine.ChangeFloat64ToInt32();
} else if (op.from == WordRepresentation::Word32() &&
op.to == FloatRepresentation::Float64()) {
o = machine.ChangeInt32ToFloat64();
} else if (op.from == WordRepresentation::Word64() &&
op.to == FloatRepresentation::Float64()) {
o = machine.ChangeInt64ToFloat64();
} else {
UNIMPLEMENTED();
}
break;
case Kind::kUnsignedNarrowing:
if (op.from == FloatRepresentation::Float64() &&
op.to == WordRepresentation::Word64()) {
o = machine.ChangeFloat64ToUint64();
} else if (op.from == FloatRepresentation::Float64() &&
op.to == WordRepresentation::Word32()) {
o = machine.ChangeFloat64ToUint32();
} else {
UNIMPLEMENTED();
}
break;
case Kind::kSignedFloatTruncateSat:
}
return AddNode(o, {GetNode(op.input())});
}
Node* ScheduleBuilder::ProcessOperation(const TryChangeOp& op) {
const Operator* o;
switch (op.kind) {
using Kind = TryChangeOp::Kind;
case Kind::kSignedFloatTruncateOverflowUndefined:
if (op.from == FloatRepresentation::Float64() &&
op.to == WordRepresentation::Word64()) {
o = machine.TryTruncateFloat64ToInt64();
@ -806,7 +794,7 @@ Node* ScheduleBuilder::ProcessOperation(const ChangeOp& op) {
UNREACHABLE();
}
break;
case Kind::kUnsignedFloatTruncateSat:
case Kind::kUnsignedFloatTruncateOverflowUndefined:
if (op.from == FloatRepresentation::Float64() &&
op.to == WordRepresentation::Word64()) {
o = machine.TryTruncateFloat64ToUint64();

View File

@ -14,6 +14,9 @@
namespace v8::internal::compiler::turboshaft {
class WordRepresentation;
class FloatRepresentation;
class RegisterRepresentation {
public:
enum class Enum : uint8_t {
@ -26,7 +29,12 @@ class RegisterRepresentation {
};
explicit constexpr RegisterRepresentation(Enum value) : value_(value) {}
constexpr Enum value() const { return value_; }
RegisterRepresentation() : value_(kInvalid) {}
constexpr Enum value() const {
DCHECK_NE(value_, kInvalid);
return value_;
}
constexpr operator Enum() const { return value(); }
static constexpr RegisterRepresentation Word32() {
@ -62,6 +70,32 @@ class RegisterRepresentation {
}
}
bool IsWord() {
switch (*this) {
case Enum::kWord32:
case Enum::kWord64:
return true;
case Enum::kFloat32:
case Enum::kFloat64:
case Enum::kTagged:
case Enum::kCompressed:
return false;
}
}
bool IsFloat() {
switch (*this) {
case Enum::kFloat32:
case Enum::kFloat64:
return true;
case Enum::kWord32:
case Enum::kWord64:
case Enum::kTagged:
case Enum::kCompressed:
return false;
}
}
uint64_t MaxUnsignedValue() const {
switch (this->value()) {
case Word32():
@ -93,7 +127,7 @@ class RegisterRepresentation {
}
}
uint16_t bit_width() const {
constexpr uint16_t bit_width() const {
switch (*this) {
case Word32():
return 32;
@ -142,6 +176,8 @@ class RegisterRepresentation {
private:
Enum value_;
static constexpr Enum kInvalid = static_cast<Enum>(-1);
};
V8_INLINE bool operator==(RegisterRepresentation a, RegisterRepresentation b) {
@ -166,6 +202,11 @@ class WordRepresentation : public RegisterRepresentation {
explicit constexpr WordRepresentation(Enum value)
: RegisterRepresentation(
static_cast<RegisterRepresentation::Enum>(value)) {}
WordRepresentation() = default;
explicit constexpr WordRepresentation(RegisterRepresentation rep)
: WordRepresentation(static_cast<Enum>(rep.value())) {
DCHECK(rep.IsWord());
}
static constexpr WordRepresentation Word32() {
return WordRepresentation(Enum::kWord32);
@ -174,6 +215,10 @@ class WordRepresentation : public RegisterRepresentation {
return WordRepresentation(Enum::kWord64);
}
static constexpr WordRepresentation PointerSized() {
return WordRepresentation(RegisterRepresentation::PointerSized());
}
constexpr Enum value() const {
return static_cast<Enum>(RegisterRepresentation::value());
}
@ -187,6 +232,22 @@ class WordRepresentation : public RegisterRepresentation {
return std::numeric_limits<uint64_t>::max();
}
}
constexpr int64_t MinSignedValue() const {
switch (this->value()) {
case Word32():
return std::numeric_limits<int32_t>::min();
case Word64():
return std::numeric_limits<int64_t>::min();
}
}
constexpr int64_t MaxSignedValue() const {
switch (this->value()) {
case Word32():
return std::numeric_limits<int32_t>::max();
case Word64():
return std::numeric_limits<int64_t>::max();
}
}
};
class FloatRepresentation : public RegisterRepresentation {
@ -203,15 +264,15 @@ class FloatRepresentation : public RegisterRepresentation {
return FloatRepresentation(Enum::kFloat64);
}
explicit constexpr FloatRepresentation(Enum value)
: RegisterRepresentation(
static_cast<RegisterRepresentation::Enum>(value)) {}
FloatRepresentation() = default;
constexpr Enum value() const {
return static_cast<Enum>(RegisterRepresentation::value());
}
constexpr operator Enum() const { return value(); }
private:
explicit constexpr FloatRepresentation(Enum value)
: RegisterRepresentation(
static_cast<RegisterRepresentation::Enum>(value)) {}
};
class MemoryRepresentation {
@ -234,7 +295,11 @@ class MemoryRepresentation {
};
explicit constexpr MemoryRepresentation(Enum value) : value_(value) {}
constexpr Enum value() const { return value_; }
MemoryRepresentation() : value_(kInvalid) {}
constexpr Enum value() const {
DCHECK_NE(value_, kInvalid);
return value_;
}
constexpr operator Enum() const { return value(); }
static constexpr MemoryRepresentation Int8() {
@ -517,6 +582,8 @@ class MemoryRepresentation {
private:
Enum value_;
static constexpr Enum kInvalid = static_cast<Enum>(-1);
};
V8_INLINE bool operator==(MemoryRepresentation a, MemoryRepresentation b) {

View File

@ -0,0 +1,26 @@
// Copyright 2022 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "src/compiler/turboshaft/utils.h"
#include "src/base/platform/platform.h"
#include "src/flags/flags.h"
namespace v8::internal::compiler::turboshaft {
#ifdef DEBUG
bool ShouldSkipOptimizationStep() {
static std::atomic<uint64_t> counter{0};
uint64_t current = counter++;
if (current == FLAG_turboshaft_opt_bisect_break) {
base::OS::DebugBreak();
}
if (current >= FLAG_turboshaft_opt_bisect_limit) {
return true;
}
return false;
}
#endif // DEBUG
} // namespace v8::internal::compiler::turboshaft

View File

@ -53,11 +53,6 @@ struct all_of : std::tuple<const Ts&...> {
return ((value == std::get<indices>(*this)) && ...);
}
template <class T, size_t... indices>
bool AllNotEqualTo(const T& value, std::index_sequence<indices...>) {
return ((value != std::get<indices>(*this)) && ...);
}
template <size_t... indices>
std::ostream& PrintTo(std::ostream& os, std::index_sequence<indices...>) {
bool first = true;
@ -76,16 +71,17 @@ bool operator==(all_of<Ts...> values, const T& target) {
return values.AllEqualTo(target, std::index_sequence_for<Ts...>{});
}
template <class T, class... Ts>
bool operator!=(const T& target, all_of<Ts...> values) {
return values.AllNotEqualTo(target, std::index_sequence_for<Ts...>{});
}
template <class... Ts>
std::ostream& operator<<(std::ostream& os, all_of<Ts...> all) {
return all.PrintTo(os, std::index_sequence_for<Ts...>{});
}
#ifdef DEBUG
bool ShouldSkipOptimizationStep();
#else
inline bool ShouldSkipOptimizationStep() { return false; }
#endif
} // namespace v8::internal::compiler::turboshaft
#endif // V8_COMPILER_TURBOSHAFT_UTILS_H_
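
A simplified, self-contained sketch (not part of this CL) of the any_of comparator idiom that utils.h provides and that the matchers above use (e.g. rep == any_of(Word64(), Word32())); the real templates also provide all_of and stream printing, which are omitted here.
#include <cstddef>
#include <tuple>
#include <utility>
template <class... Ts>
struct any_of : std::tuple<const Ts&...> {
  explicit any_of(const Ts&... values) : std::tuple<const Ts&...>(values...) {}
  template <class T, std::size_t... I>
  bool Contains(const T& value, std::index_sequence<I...>) const {
    return ((value == std::get<I>(*this)) || ...);
  }
};
template <class T, class... Ts>
bool operator==(const T& value, const any_of<Ts...>& options) {
  return options.Contains(value, std::index_sequence_for<Ts...>{});
}
int main() {
  return (3 == any_of(1, 2, 3)) && !(4 == any_of(1, 2, 3)) ? 0 : 1;
}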

View File

@ -5,6 +5,7 @@
#ifndef V8_COMPILER_TYPE_CACHE_H_
#define V8_COMPILER_TYPE_CACHE_H_
#include "src/compiler/globals.h"
#include "src/compiler/types.h"
#include "src/date/date.h"
#include "src/objects/js-array-buffer.h"
@ -202,10 +203,6 @@ class V8_EXPORT_PRIVATE TypeCache final {
}
Zone* zone() { return &zone_; }
static constexpr double kMaxDoubleRepresentableInt64 = 9223372036854774784.0;
static constexpr double kMaxDoubleRepresentableUint64 =
18446744073709549568.0;
};
} // namespace compiler

View File

@ -972,6 +972,14 @@ DEFINE_BOOL(turboshaft_trace_reduction, false,
"trace individual Turboshaft reduction steps")
DEFINE_BOOL(turboshaft_wasm, false,
"enable TurboFan's Turboshaft phases for wasm")
#ifdef DEBUG
DEFINE_UINT64(turboshaft_opt_bisect_limit, std::numeric_limits<uint64_t>::max(),
"stop applying optional optimizations after a specified number "
"of steps, useful for bisecting optimization bugs")
DEFINE_UINT64(turboshaft_opt_bisect_break, std::numeric_limits<uint64_t>::max(),
"abort after a specified number of steps, useful for bisecting "
"optimization bugs")
#endif // DEBUG
// Favor memory over execution speed.
DEFINE_BOOL(optimize_for_size, false,

View File

@ -1516,6 +1516,11 @@ bool IsSpecialIndex(String string) {
}
return true;
}
float DoubleToFloat32_NoInline(double x) { return DoubleToFloat32(x); }
int32_t DoubleToInt32_NoInline(double x) { return DoubleToInt32(x); }
} // namespace internal
} // namespace v8

View File

@ -61,12 +61,14 @@ inline double FastUI2D(unsigned x) {
// This function should match the exact semantics of ECMA-262 20.2.2.17.
inline float DoubleToFloat32(double x);
float DoubleToFloat32_NoInline(double x);
// This function should match the exact semantics of ECMA-262 9.4.
inline double DoubleToInteger(double x);
// This function should match the exact semantics of ECMA-262 9.5.
inline int32_t DoubleToInt32(double x);
int32_t DoubleToInt32_NoInline(double x);
// This function should match the exact semantics of ECMA-262 9.6.
inline uint32_t DoubleToUint32(double x);