[turbofan] Do constant folding for Float64Pow.

Also unify the Pow implementation somewhat. There are still some
inconsistencies with the FPU version for x64/ia32, but that has
to be resolved separately.

R=ahaas@chromium.org, mvstanton@chromium.org
BUG=v8:5086

Review-Url: https://codereview.chromium.org/2333663002
Cr-Commit-Position: refs/heads/master@{#39368}
This commit is contained in:
bmeurer 2016-09-13 00:09:55 -07:00 committed by Commit bot
parent 9f42d6c2c0
commit e786ef2474
4 changed files with 38 additions and 5 deletions

View File

@@ -541,8 +541,9 @@ Reduction MachineOperatorReducer::Reduce(Node* node) {
}
case IrOpcode::kFloat64Pow: {
Float64BinopMatcher m(node);
// Constant folding is now possible since the Pow implementation is unified.
if (m.right().Is(0.0)) { // x ** +-0.0 => 1.0
if (m.IsFoldable()) {
return ReplaceFloat64(Pow(m.left().Value(), m.right().Value()));
} else if (m.right().Is(0.0)) { // x ** +-0.0 => 1.0
return ReplaceFloat64(1.0);
} else if (m.right().Is(-2.0)) { // x ** -2.0 => 1 / (x * x)
node->ReplaceInput(0, Float64Constant(1.0));

View File

@@ -234,6 +234,10 @@ inline double Floor(double x) {
}
inline double Pow(double x, double y) {
if (y == 0.0) return 1.0;
if (std::isnan(y) || ((x == 1 || x == -1) && std::isinf(y))) {
return std::numeric_limits<double>::quiet_NaN();
}
#if (defined(__MINGW64_VERSION_MAJOR) && \
(!defined(__MINGW64_VERSION_RC) || __MINGW64_VERSION_RC < 1)) || \
defined(V8_OS_AIX)

View File

@@ -206,9 +206,6 @@ uint32_t word64_popcnt_wrapper(uint64_t* input) {
void float64_pow_wrapper(double* param0, double* param1) {
double x = ReadDoubleValue(param0);
double y = ReadDoubleValue(param1);
if (std::isnan(y) || ((x == 1 || x == -1) && std::isinf(y))) {
WriteDoubleValue(param0, std::numeric_limits<double>::quiet_NaN());
}
WriteDoubleValue(param0, Pow(x, y));
}
} // namespace wasm

View File

@@ -1771,6 +1771,37 @@ TEST_F(MachineOperatorReducerTest, Float64Log1pWithConstant) {
}
}
// -----------------------------------------------------------------------------
// Float64Pow
// A Float64Pow node with two constant inputs must be folded to a
// Float64Constant carrying Pow(base, exponent), NaN-sensitively compared.
TEST_F(MachineOperatorReducerTest, Float64PowWithConstant) {
  TRACED_FOREACH(double, base, kFloat64Values) {
    TRACED_FOREACH(double, exponent, kFloat64Values) {
      Node* const pow_node =
          graph()->NewNode(machine()->Float64Pow(), Float64Constant(base),
                           Float64Constant(exponent));
      Reduction const reduction = Reduce(pow_node);
      ASSERT_TRUE(reduction.Changed());
      EXPECT_THAT(reduction.replacement(),
                  IsFloat64Constant(NanSensitiveDoubleEq(Pow(base, exponent))));
    }
  }
}
// x ** exponent with exponent in {+0.0, -0.0} must be strength-reduced to
// the constant 1.0, regardless of the (non-constant) base.
TEST_F(MachineOperatorReducerTest, Float64PowWithZeroExponent) {
  Node* const base = Parameter(0);
  {
    Reduction const reduction = Reduce(
        graph()->NewNode(machine()->Float64Pow(), base, Float64Constant(0.0)));
    ASSERT_TRUE(reduction.Changed());
    EXPECT_THAT(reduction.replacement(), IsFloat64Constant(1.0));
  }
  {
    Reduction const reduction = Reduce(
        graph()->NewNode(machine()->Float64Pow(), base, Float64Constant(-0.0)));
    ASSERT_TRUE(reduction.Changed());
    EXPECT_THAT(reduction.replacement(), IsFloat64Constant(1.0));
  }
}
// -----------------------------------------------------------------------------
// Float64Sin