[turbofan] Add support for overflow add/sub to the MachineOperatorReducer.
TEST=base-unittests,compiler-unittests,cctest
R=svenpanne@chromium.org

Review URL: https://codereview.chromium.org/555833002

git-svn-id: https://v8.googlecode.com/svn/branches/bleeding_edge@23809 ce2b1a6d-e550-0410-aec6-3dcde31c8c00
Commit: bbd96b97e4 (parent: cb6ea19859)
@@ -179,6 +179,8 @@
 // V8_HAS_BUILTIN_CTZ - __builtin_ctz() supported
 // V8_HAS_BUILTIN_EXPECT - __builtin_expect() supported
 // V8_HAS_BUILTIN_POPCOUNT - __builtin_popcount() supported
+// V8_HAS_BUILTIN_SADD_OVERFLOW - __builtin_sadd_overflow() supported
+// V8_HAS_BUILTIN_SSUB_OVERFLOW - __builtin_ssub_overflow() supported
 // V8_HAS_DECLSPEC_ALIGN - __declspec(align(n)) supported
 // V8_HAS_DECLSPEC_DEPRECATED - __declspec(deprecated) supported
 // V8_HAS_DECLSPEC_NOINLINE - __declspec(noinline) supported
@@ -213,6 +215,8 @@
 # define V8_HAS_BUILTIN_CTZ (__has_builtin(__builtin_ctz))
 # define V8_HAS_BUILTIN_EXPECT (__has_builtin(__builtin_expect))
 # define V8_HAS_BUILTIN_POPCOUNT (__has_builtin(__builtin_popcount))
+# define V8_HAS_BUILTIN_SADD_OVERFLOW (__has_builtin(__builtin_sadd_overflow))
+# define V8_HAS_BUILTIN_SSUB_OVERFLOW (__has_builtin(__builtin_ssub_overflow))

 # define V8_HAS_CXX11_ALIGNAS (__has_feature(cxx_alignas))
 # define V8_HAS_CXX11_STATIC_ASSERT (__has_feature(cxx_static_assert))
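
Note on the two new macros: V8_HAS_BUILTIN_SADD_OVERFLOW and V8_HAS_BUILTIN_SSUB_OVERFLOW evaluate to 1 only when clang reports the corresponding builtin via __has_builtin, so callers can pick the intrinsic at preprocessing time and fall back to portable code otherwise. A minimal standalone sketch of that pattern follows; everything except the __builtin_sadd_overflow intrinsic itself is illustrative and not part of this patch, and the fallback here widens to 64 bits instead of the sign-bit check V8 uses below.

#include <climits>

// Detect the builtin only if the compiler exposes __has_builtin at all
// (clang, newer gcc); otherwise assume it is unavailable.
#if defined(__has_builtin)
#define HAS_SADD_OVERFLOW (__has_builtin(__builtin_sadd_overflow))
#else
#define HAS_SADD_OVERFLOW 0
#endif

// Checked addition: returns true on signed overflow and stores the wrapped
// result either way (two's-complement wrap assumed in the fallback).
inline bool CheckedAdd(int lhs, int rhs, int* out) {
#if HAS_SADD_OVERFLOW
  return __builtin_sadd_overflow(lhs, rhs, out);  // compiler intrinsic
#else
  long long wide = static_cast<long long>(lhs) + rhs;  // portable fallback
  *out = static_cast<int>(wide);
  return wide > INT_MAX || wide < INT_MIN;
#endif
}
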
@@ -2,6 +2,8 @@
 // Use of this source code is governed by a BSD-style license that can be
 // found in the LICENSE file.

+#include <limits>
+
 #include "src/base/bits.h"
 #include "src/base/macros.h"
 #include "testing/gtest-support.h"
@@ -119,6 +121,47 @@ TEST(Bits, RotateRight64) {
   EXPECT_EQ(V8_UINT64_C(0x8000000000000000), RotateRight64(1, 1));
 }
+
+
+TEST(Bits, SignedAddOverflow32) {
+  int32_t val = 0;
+  EXPECT_FALSE(SignedAddOverflow32(0, 0, &val));
+  EXPECT_EQ(0, val);
+  EXPECT_TRUE(
+      SignedAddOverflow32(std::numeric_limits<int32_t>::max(), 1, &val));
+  EXPECT_EQ(std::numeric_limits<int32_t>::min(), val);
+  EXPECT_TRUE(
+      SignedAddOverflow32(std::numeric_limits<int32_t>::min(), -1, &val));
+  EXPECT_EQ(std::numeric_limits<int32_t>::max(), val);
+  EXPECT_TRUE(SignedAddOverflow32(std::numeric_limits<int32_t>::max(),
+                                  std::numeric_limits<int32_t>::max(), &val));
+  EXPECT_EQ(-2, val);
+  TRACED_FORRANGE(int32_t, i, 1, 50) {
+    TRACED_FORRANGE(int32_t, j, 1, i) {
+      EXPECT_FALSE(SignedAddOverflow32(i, j, &val));
+      EXPECT_EQ(i + j, val);
+    }
+  }
+}
+
+
+TEST(Bits, SignedSubOverflow32) {
+  int32_t val = 0;
+  EXPECT_FALSE(SignedSubOverflow32(0, 0, &val));
+  EXPECT_EQ(0, val);
+  EXPECT_TRUE(
+      SignedSubOverflow32(std::numeric_limits<int32_t>::min(), 1, &val));
+  EXPECT_EQ(std::numeric_limits<int32_t>::max(), val);
+  EXPECT_TRUE(
+      SignedSubOverflow32(std::numeric_limits<int32_t>::max(), -1, &val));
+  EXPECT_EQ(std::numeric_limits<int32_t>::min(), val);
+  TRACED_FORRANGE(int32_t, i, 1, 50) {
+    TRACED_FORRANGE(int32_t, j, 1, i) {
+      EXPECT_FALSE(SignedSubOverflow32(i, j, &val));
+      EXPECT_EQ(i - j, val);
+    }
+  }
+}

 } // namespace bits
 } // namespace base
 } // namespace v8
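
For reference, the expected values asserted in the two tests above follow directly from 32-bit two's-complement wraparound (all arithmetic mod 2^32):

INT32_MAX + 1         : 0x7FFFFFFF + 0x00000001 = 0x80000000 -> INT32_MIN, overflow
INT32_MIN + (-1)      : 0x80000000 + 0xFFFFFFFF = 0x7FFFFFFF -> INT32_MAX, overflow
INT32_MAX + INT32_MAX : 0x7FFFFFFF + 0x7FFFFFFF = 0xFFFFFFFE -> -2, overflow
INT32_MIN - 1         : 0x80000000 - 0x00000001 = 0x7FFFFFFF -> INT32_MAX, overflow
INT32_MAX - (-1)      : 0x7FFFFFFF - 0xFFFFFFFF = 0x80000000 -> INT32_MIN, overflow

The TRACED_FORRANGE loops only exercise small sums and differences that cannot overflow, so they cover the non-overflow return value and the stored result.
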
@@ -6,6 +6,7 @@
 #define V8_BASE_BITS_H_

 #include "include/v8stdint.h"
+#include "src/base/macros.h"
 #if V8_CC_MSVC
 #include <intrin.h>
 #endif
@@ -114,6 +115,34 @@ inline uint64_t RotateRight64(uint64_t value, uint64_t shift) {
   return (value >> shift) | (value << (64 - shift));
 }

+
+// SignedAddOverflow32(lhs,rhs,val) performs a signed summation of |lhs| and
+// |rhs| and stores the result into the variable pointed to by |val| and
+// returns true if the signed summation resulted in an overflow.
+inline bool SignedAddOverflow32(int32_t lhs, int32_t rhs, int32_t* val) {
+#if V8_HAS_BUILTIN_SADD_OVERFLOW
+  return __builtin_sadd_overflow(lhs, rhs, val);
+#else
+  uint32_t res = static_cast<uint32_t>(lhs) + static_cast<uint32_t>(rhs);
+  *val = bit_cast<int32_t>(res);
+  return ((res ^ lhs) & (res ^ rhs) & (1U << 31)) != 0;
+#endif
+}
+
+
+// SignedSubOverflow32(lhs,rhs,val) performs a signed subtraction of |lhs| and
+// |rhs| and stores the result into the variable pointed to by |val| and
+// returns true if the signed subtraction resulted in an overflow.
+inline bool SignedSubOverflow32(int32_t lhs, int32_t rhs, int32_t* val) {
+#if V8_HAS_BUILTIN_SSUB_OVERFLOW
+  return __builtin_ssub_overflow(lhs, rhs, val);
+#else
+  uint32_t res = static_cast<uint32_t>(lhs) - static_cast<uint32_t>(rhs);
+  *val = bit_cast<int32_t>(res);
+  return ((res ^ lhs) & (res ^ ~rhs) & (1U << 31)) != 0;
+#endif
+}
+
 } // namespace bits
 } // namespace base
 } // namespace v8
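
A note on the fallback paths above: the sum or difference is computed in uint32_t, so the wraparound itself is well defined, and overflow is then read off the sign bits. Signed addition overflows exactly when both operands have the same sign and the result's sign differs from it, which is what (res ^ lhs) & (res ^ rhs) tests in bit 31; for subtraction the right-hand operand's sign is flipped, hence res ^ ~rhs. A small standalone sketch of the addition check (illustrative only, not V8 code):

#include <cstdint>
#include <cstdio>

// Mirrors the non-builtin branch of SignedAddOverflow32: wrap in unsigned
// arithmetic, then flag overflow when the result's sign differs from the
// sign of both inputs (bit 31 set in both XOR terms).
static bool AddOverflows(int32_t lhs, int32_t rhs, int32_t* out) {
  uint32_t res = static_cast<uint32_t>(lhs) + static_cast<uint32_t>(rhs);
  *out = static_cast<int32_t>(res);  // the real code uses bit_cast
  return ((res ^ static_cast<uint32_t>(lhs)) &
          (res ^ static_cast<uint32_t>(rhs)) & (1U << 31)) != 0;
}

int main() {
  int32_t v;
  std::printf("%d\n", AddOverflows(0x7FFFFFFF, 1, &v));    // 1: wraps to INT32_MIN
  std::printf("%d\n", AddOverflows(-1, 1, &v));            // 0: mixed signs never overflow
  std::printf("%d\n", AddOverflows(-2, -0x7FFFFFFF, &v));  // 1: negative overflow
  return 0;
}
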
@@ -443,6 +443,98 @@ TEST_F(MachineOperatorReducerTest, Word32RorWithConstants) {
   }
 }
+
+
+// -----------------------------------------------------------------------------
+// Int32AddWithOverflow
+
+
+TEST_F(MachineOperatorReducerTest, Int32AddWithOverflowWithZero) {
+  Node* p0 = Parameter(0);
+  {
+    Node* add = graph()->NewNode(machine()->Int32AddWithOverflow(),
+                                 Int32Constant(0), p0);
+
+    Reduction r = Reduce(graph()->NewNode(common()->Projection(1), add));
+    ASSERT_TRUE(r.Changed());
+    EXPECT_THAT(r.replacement(), IsInt32Constant(0));
+
+    r = Reduce(graph()->NewNode(common()->Projection(0), add));
+    ASSERT_TRUE(r.Changed());
+    EXPECT_EQ(p0, r.replacement());
+  }
+  {
+    Node* add = graph()->NewNode(machine()->Int32AddWithOverflow(), p0,
+                                 Int32Constant(0));
+
+    Reduction r = Reduce(graph()->NewNode(common()->Projection(1), add));
+    ASSERT_TRUE(r.Changed());
+    EXPECT_THAT(r.replacement(), IsInt32Constant(0));
+
+    r = Reduce(graph()->NewNode(common()->Projection(0), add));
+    ASSERT_TRUE(r.Changed());
+    EXPECT_EQ(p0, r.replacement());
+  }
+}
+
+
+TEST_F(MachineOperatorReducerTest, Int32AddWithOverflowWithConstant) {
+  TRACED_FOREACH(int32_t, x, kInt32Values) {
+    TRACED_FOREACH(int32_t, y, kInt32Values) {
+      int32_t z;
+      Node* add = graph()->NewNode(machine()->Int32AddWithOverflow(),
+                                   Int32Constant(x), Int32Constant(y));
+
+      Reduction r = Reduce(graph()->NewNode(common()->Projection(1), add));
+      ASSERT_TRUE(r.Changed());
+      EXPECT_THAT(r.replacement(),
+                  IsInt32Constant(base::bits::SignedAddOverflow32(x, y, &z)));
+
+      r = Reduce(graph()->NewNode(common()->Projection(0), add));
+      ASSERT_TRUE(r.Changed());
+      EXPECT_THAT(r.replacement(), IsInt32Constant(z));
+    }
+  }
+}
+
+
+// -----------------------------------------------------------------------------
+// Int32SubWithOverflow
+
+
+TEST_F(MachineOperatorReducerTest, Int32SubWithOverflowWithZero) {
+  Node* p0 = Parameter(0);
+  Node* add =
+      graph()->NewNode(machine()->Int32SubWithOverflow(), p0, Int32Constant(0));
+
+  Reduction r = Reduce(graph()->NewNode(common()->Projection(1), add));
+  ASSERT_TRUE(r.Changed());
+  EXPECT_THAT(r.replacement(), IsInt32Constant(0));
+
+  r = Reduce(graph()->NewNode(common()->Projection(0), add));
+  ASSERT_TRUE(r.Changed());
+  EXPECT_EQ(p0, r.replacement());
+}
+
+
+TEST_F(MachineOperatorReducerTest, Int32SubWithOverflowWithConstant) {
+  TRACED_FOREACH(int32_t, x, kInt32Values) {
+    TRACED_FOREACH(int32_t, y, kInt32Values) {
+      int32_t z;
+      Node* add = graph()->NewNode(machine()->Int32SubWithOverflow(),
+                                   Int32Constant(x), Int32Constant(y));
+
+      Reduction r = Reduce(graph()->NewNode(common()->Projection(1), add));
+      ASSERT_TRUE(r.Changed());
+      EXPECT_THAT(r.replacement(),
+                  IsInt32Constant(base::bits::SignedSubOverflow32(x, y, &z)));
+
+      r = Reduce(graph()->NewNode(common()->Projection(0), add));
+      ASSERT_TRUE(r.Changed());
+      EXPECT_THAT(r.replacement(), IsInt32Constant(z));
+    }
+  }
+}

 } // namespace compiler
 } // namespace internal
 } // namespace v8
@@ -39,6 +39,8 @@ Node* MachineOperatorReducer::Int64Constant(int64_t value) {
 // Perform constant folding and strength reduction on machine operators.
 Reduction MachineOperatorReducer::Reduce(Node* node) {
   switch (node->opcode()) {
+    case IrOpcode::kProjection:
+      return ReduceProjection(OpParameter<size_t>(node), node->InputAt(0));
     case IrOpcode::kWord32And: {
       Int32BinopMatcher m(node);
       if (m.right().Is(0)) return Replace(m.right().node()); // x & 0 => 0
@@ -433,6 +435,43 @@
 }


+Reduction MachineOperatorReducer::ReduceProjection(size_t index, Node* node) {
+  switch (node->opcode()) {
+    case IrOpcode::kInt32AddWithOverflow: {
+      DCHECK(index == 0 || index == 1);
+      Int32BinopMatcher m(node);
+      if (m.IsFoldable()) {
+        int32_t val;
+        bool ovf = base::bits::SignedAddOverflow32(m.left().Value(),
+                                                   m.right().Value(), &val);
+        return ReplaceInt32((index == 0) ? val : ovf);
+      }
+      if (m.right().Is(0)) {
+        return (index == 0) ? Replace(m.left().node()) : ReplaceInt32(0);
+      }
+      break;
+    }
+    case IrOpcode::kInt32SubWithOverflow: {
+      DCHECK(index == 0 || index == 1);
+      Int32BinopMatcher m(node);
+      if (m.IsFoldable()) {
+        int32_t val;
+        bool ovf = base::bits::SignedSubOverflow32(m.left().Value(),
+                                                   m.right().Value(), &val);
+        return ReplaceInt32((index == 0) ? val : ovf);
+      }
+      if (m.right().Is(0)) {
+        return (index == 0) ? Replace(m.left().node()) : ReplaceInt32(0);
+      }
+      break;
+    }
+    default:
+      break;
+  }
+  return NoChange();
+}
+
+
 CommonOperatorBuilder* MachineOperatorReducer::common() const {
   return jsgraph()->common();
 }
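
The reducer never rewrites the Int32AddWithOverflow node itself; it rewrites the Projection nodes that consume it. Projection(0) selects the 32-bit result and Projection(1) the overflow bit, so with two constant inputs each projection can be replaced by a constant independently, and for x + 0 the value projection collapses to x while the overflow projection becomes the constant 0. A standalone sketch of that folding, detached from the TurboFan graph (the struct and helper below are illustrative, not V8 API):

#include <cstdint>
#include <cstdio>

// Illustrative stand-in for what the two projections of a constant
// Int32AddWithOverflow node are replaced with (the m.IsFoldable() case).
struct FoldedProjections {
  int32_t value;  // replacement for Projection(0)
  int32_t ovf;    // replacement for Projection(1): 0 or 1
};

static FoldedProjections FoldInt32AddWithOverflow(int32_t lhs, int32_t rhs) {
  // Same arithmetic as the fallback path of base::bits::SignedAddOverflow32.
  uint32_t res = static_cast<uint32_t>(lhs) + static_cast<uint32_t>(rhs);
  bool ovf = ((res ^ static_cast<uint32_t>(lhs)) &
              (res ^ static_cast<uint32_t>(rhs)) & (1U << 31)) != 0;
  return {static_cast<int32_t>(res), ovf ? 1 : 0};
}

int main() {
  FoldedProjections f = FoldInt32AddWithOverflow(0x7FFFFFFF, 1);
  std::printf("value=%d ovf=%d\n", f.value, f.ovf);  // value=-2147483648 ovf=1
  return 0;
}
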
@@ -42,6 +42,8 @@ class MachineOperatorReducer FINAL : public Reducer {
     return Replace(Int64Constant(value));
   }

+  Reduction ReduceProjection(size_t index, Node* node);
+
   Graph* graph() const;
   JSGraph* jsgraph() const { return jsgraph_; }
   CommonOperatorBuilder* common() const;
@@ -4016,22 +4016,6 @@ TEST(RunSpillLotsOfThingsWithCall) {
 #endif // MACHINE_ASSEMBLER_SUPPORTS_CALL_C


-static bool sadd_overflow(int32_t x, int32_t y, int32_t* val) {
-  int32_t v =
-      static_cast<int32_t>(static_cast<uint32_t>(x) + static_cast<uint32_t>(y));
-  *val = v;
-  return (((v ^ x) & (v ^ y)) >> 31) & 1;
-}
-
-
-static bool ssub_overflow(int32_t x, int32_t y, int32_t* val) {
-  int32_t v =
-      static_cast<int32_t>(static_cast<uint32_t>(x) - static_cast<uint32_t>(y));
-  *val = v;
-  return (((v ^ x) & (v ^ ~y)) >> 31) & 1;
-}
-
-
 TEST(RunInt32AddWithOverflowP) {
   int32_t actual_val = -1;
   RawMachineAssemblerTester<int32_t> m;
@@ -4044,7 +4028,7 @@ TEST(RunInt32AddWithOverflowP) {
   FOR_INT32_INPUTS(i) {
     FOR_INT32_INPUTS(j) {
       int32_t expected_val;
-      int expected_ovf = sadd_overflow(*i, *j, &expected_val);
+      int expected_ovf = bits::SignedAddOverflow32(*i, *j, &expected_val);
       CHECK_EQ(expected_ovf, bt.call(*i, *j));
       CHECK_EQ(expected_val, actual_val);
     }
@@ -4063,7 +4047,7 @@ TEST(RunInt32AddWithOverflowImm) {
     m.StoreToPointer(&actual_val, kMachInt32, val);
     m.Return(ovf);
     FOR_INT32_INPUTS(j) {
-      int expected_ovf = sadd_overflow(*i, *j, &expected_val);
+      int expected_ovf = bits::SignedAddOverflow32(*i, *j, &expected_val);
       CHECK_EQ(expected_ovf, m.Call(*j));
       CHECK_EQ(expected_val, actual_val);
     }
@@ -4076,7 +4060,7 @@ TEST(RunInt32AddWithOverflowImm) {
     m.StoreToPointer(&actual_val, kMachInt32, val);
     m.Return(ovf);
     FOR_INT32_INPUTS(j) {
-      int expected_ovf = sadd_overflow(*i, *j, &expected_val);
+      int expected_ovf = bits::SignedAddOverflow32(*i, *j, &expected_val);
       CHECK_EQ(expected_ovf, m.Call(*j));
       CHECK_EQ(expected_val, actual_val);
     }
@@ -4089,7 +4073,7 @@ TEST(RunInt32AddWithOverflowImm) {
       Node* ovf = m.Projection(1, add);
       m.StoreToPointer(&actual_val, kMachInt32, val);
       m.Return(ovf);
-      int expected_ovf = sadd_overflow(*i, *j, &expected_val);
+      int expected_ovf = bits::SignedAddOverflow32(*i, *j, &expected_val);
       CHECK_EQ(expected_ovf, m.Call());
       CHECK_EQ(expected_val, actual_val);
     }
@@ -4113,7 +4097,7 @@ TEST(RunInt32AddWithOverflowInBranchP) {
   FOR_INT32_INPUTS(i) {
     FOR_INT32_INPUTS(j) {
       int32_t expected;
-      if (sadd_overflow(*i, *j, &expected)) expected = constant;
+      if (bits::SignedAddOverflow32(*i, *j, &expected)) expected = constant;
       CHECK_EQ(expected, bt.call(*i, *j));
     }
   }
@@ -4132,7 +4116,7 @@ TEST(RunInt32SubWithOverflowP) {
   FOR_INT32_INPUTS(i) {
     FOR_INT32_INPUTS(j) {
      int32_t expected_val;
-      int expected_ovf = ssub_overflow(*i, *j, &expected_val);
+      int expected_ovf = bits::SignedSubOverflow32(*i, *j, &expected_val);
       CHECK_EQ(expected_ovf, bt.call(*i, *j));
       CHECK_EQ(expected_val, actual_val);
     }
@@ -4151,7 +4135,7 @@ TEST(RunInt32SubWithOverflowImm) {
     m.StoreToPointer(&actual_val, kMachInt32, val);
     m.Return(ovf);
     FOR_INT32_INPUTS(j) {
-      int expected_ovf = ssub_overflow(*i, *j, &expected_val);
+      int expected_ovf = bits::SignedSubOverflow32(*i, *j, &expected_val);
       CHECK_EQ(expected_ovf, m.Call(*j));
       CHECK_EQ(expected_val, actual_val);
     }
@@ -4164,7 +4148,7 @@
     m.StoreToPointer(&actual_val, kMachInt32, val);
     m.Return(ovf);
     FOR_INT32_INPUTS(j) {
-      int expected_ovf = ssub_overflow(*j, *i, &expected_val);
+      int expected_ovf = bits::SignedSubOverflow32(*j, *i, &expected_val);
       CHECK_EQ(expected_ovf, m.Call(*j));
       CHECK_EQ(expected_val, actual_val);
     }
@@ -4177,7 +4161,7 @@
       Node* ovf = m.Projection(1, add);
       m.StoreToPointer(&actual_val, kMachInt32, val);
       m.Return(ovf);
-      int expected_ovf = ssub_overflow(*i, *j, &expected_val);
+      int expected_ovf = bits::SignedSubOverflow32(*i, *j, &expected_val);
       CHECK_EQ(expected_ovf, m.Call());
       CHECK_EQ(expected_val, actual_val);
     }
@@ -4201,7 +4185,7 @@ TEST(RunInt32SubWithOverflowInBranchP) {
   FOR_INT32_INPUTS(i) {
     FOR_INT32_INPUTS(j) {
       int32_t expected;
-      if (ssub_overflow(*i, *j, &expected)) expected = constant;
+      if (bits::SignedSubOverflow32(*i, *j, &expected)) expected = constant;
       CHECK_EQ(expected, bt.call(*i, *j));
     }
   }
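
The local sadd_overflow/ssub_overflow helpers deleted above and the new base::bits functions compute the same result and overflow flag; the tests only change which symbol they call. A quick standalone equivalence check one could compile locally (illustrative only, not part of the patch; both helpers are re-implemented here so the snippet is self-contained, two's-complement behaviour assumed):

#include <cassert>
#include <cstdint>

// Re-implementation of the deleted cctest helper.
static bool OldSaddOverflow(int32_t x, int32_t y, int32_t* val) {
  int32_t v =
      static_cast<int32_t>(static_cast<uint32_t>(x) + static_cast<uint32_t>(y));
  *val = v;
  return (((v ^ x) & (v ^ y)) >> 31) & 1;
}

// Re-implementation of the base::bits::SignedAddOverflow32 fallback path.
static bool NewSignedAddOverflow32(int32_t lhs, int32_t rhs, int32_t* val) {
  uint32_t res = static_cast<uint32_t>(lhs) + static_cast<uint32_t>(rhs);
  *val = static_cast<int32_t>(res);
  return ((res ^ static_cast<uint32_t>(lhs)) &
          (res ^ static_cast<uint32_t>(rhs)) & (1U << 31)) != 0;
}

int main() {
  const int32_t samples[] = {0, 1, -1, 2, -2, 0x7FFFFFFF, -0x7FFFFFFF - 1};
  for (int32_t x : samples) {
    for (int32_t y : samples) {
      int32_t a = 0, b = 0;
      assert(OldSaddOverflow(x, y, &a) == NewSignedAddOverflow32(x, y, &b));
      assert(a == b);
    }
  }
  return 0;
}
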