diff --git a/src/codegen/code-stub-assembler.cc b/src/codegen/code-stub-assembler.cc
index d514b73327..8e1e85de7e 100644
--- a/src/codegen/code-stub-assembler.cc
+++ b/src/codegen/code-stub-assembler.cc
@@ -763,8 +763,8 @@ TNode<BoolT> CodeStubAssembler::IsValidSmiIndex(TNode<Smi> smi) {
 
 TNode<IntPtrT> CodeStubAssembler::TaggedIndexToIntPtr(
     TNode<TaggedIndex> value) {
-  return Signed(WordSar(BitcastTaggedToWordForTagAndSmiBits(value),
-                        IntPtrConstant(kSmiTagSize)));
+  return Signed(WordSarShiftOutZeros(BitcastTaggedToWordForTagAndSmiBits(value),
+                                     IntPtrConstant(kSmiTagSize)));
 }
 
 TNode<TaggedIndex> CodeStubAssembler::IntPtrToTaggedIndex(
@@ -859,16 +859,17 @@ TNode<IntPtrT> CodeStubAssembler::SmiUntag(SloppyTNode<Smi> value) {
   if (ToIntPtrConstant(value, &constant_value)) {
     return IntPtrConstant(constant_value >> (kSmiShiftSize + kSmiTagSize));
   }
+  TNode<IntPtrT> raw_bits = BitcastTaggedToWordForTagAndSmiBits(value);
   if (COMPRESS_POINTERS_BOOL) {
-    return ChangeInt32ToIntPtr(SmiToInt32(value));
+    // Clear the upper half using sign-extension.
+    raw_bits = ChangeInt32ToIntPtr(TruncateIntPtrToInt32(raw_bits));
   }
-  return Signed(WordSar(BitcastTaggedToWordForTagAndSmiBits(value),
-                        SmiShiftBitsConstant()));
+  return Signed(WordSarShiftOutZeros(raw_bits, SmiShiftBitsConstant()));
 }
 
 TNode<Int32T> CodeStubAssembler::SmiToInt32(SloppyTNode<Smi> value) {
   if (COMPRESS_POINTERS_BOOL) {
-    return Signed(Word32Sar(
+    return Signed(Word32SarShiftOutZeros(
         TruncateIntPtrToInt32(BitcastTaggedToWordForTagAndSmiBits(value)),
         SmiShiftBitsConstant32()));
   }
diff --git a/src/compiler/backend/instruction-selector.cc b/src/compiler/backend/instruction-selector.cc
index 6a1a170189..027d0c3d43 100644
--- a/src/compiler/backend/instruction-selector.cc
+++ b/src/compiler/backend/instruction-selector.cc
@@ -12,6 +12,7 @@
 #include "src/compiler/backend/instruction-selector-impl.h"
 #include "src/compiler/compiler-source-position-table.h"
 #include "src/compiler/node-matchers.h"
+#include "src/compiler/node-properties.h"
 #include "src/compiler/pipeline.h"
 #include "src/compiler/schedule.h"
 #include "src/compiler/state-values-utils.h"
@@ -3014,7 +3015,8 @@ void InstructionSelector::VisitUnreachable(Node* node) {
 }
 
 void InstructionSelector::VisitStaticAssert(Node* node) {
-  node->InputAt(0)->Print();
+  Node* asserted = node->InputAt(0);
+  asserted->Print(2);
   FATAL("Expected turbofan static assert to hold, but got non-true input!\n");
 }
 
diff --git a/src/compiler/code-assembler.h b/src/compiler/code-assembler.h
index 95c38b577e..d9d81cfe30 100644
--- a/src/compiler/code-assembler.h
+++ b/src/compiler/code-assembler.h
@@ -273,6 +273,7 @@ class CodeAssemblerParameterizedLabel;
   V(WordShl, WordT, WordT, IntegralT)                \
   V(WordShr, WordT, WordT, IntegralT)                \
   V(WordSar, WordT, WordT, IntegralT)                \
+  V(WordSarShiftOutZeros, WordT, WordT, IntegralT)   \
   V(Word32Or, Word32T, Word32T, Word32T)             \
   V(Word32And, Word32T, Word32T, Word32T)            \
   V(Word32Xor, Word32T, Word32T, Word32T)            \
@@ -280,6 +281,7 @@ class CodeAssemblerParameterizedLabel;
   V(Word32Shl, Word32T, Word32T, Word32T)            \
   V(Word32Shr, Word32T, Word32T, Word32T)            \
   V(Word32Sar, Word32T, Word32T, Word32T)            \
+  V(Word32SarShiftOutZeros, Word32T, Word32T, Word32T) \
   V(Word64And, Word64T, Word64T, Word64T)            \
   V(Word64Or, Word64T, Word64T, Word64T)             \
   V(Word64Xor, Word64T, Word64T, Word64T)            \
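For readers not steeped in Smi tagging: a Smi stores a small integer shifted left so that the low tag bit(s) are zero, and untagging is the inverse arithmetic shift right. That shift can, by construction, only shift out zero bits — precisely the guarantee the new `WordSarShiftOutZeros` operator communicates to the optimizer. A minimal standalone sketch of the arithmetic (plain C++ with hypothetical stand-in constants, assuming 31-bit Smis with `kSmiTagSize == 1` and `kSmiShiftSize == 0`; not the real V8 types):

```cpp
#include <cassert>
#include <cstdint>

// Hypothetical stand-ins for V8's tagging constants (31-bit Smi layout).
constexpr int kSmiTagSize = 1;    // one tag bit; 0 marks a Smi
constexpr int kSmiShiftSize = 0;
constexpr int kSmiShiftBits = kSmiTagSize + kSmiShiftSize;

int32_t SmiTag(int32_t value) {
  // Shift via unsigned to avoid signed-overflow UB, as V8's test helpers do.
  return static_cast<int32_t>(static_cast<uint32_t>(value) << kSmiShiftBits);
}

int32_t SmiUntag(int32_t tagged) {
  // Arithmetic shift right; the bits shifted out are the tag bits, which are
  // all zero -- the invariant that ShiftKind::kShiftOutZeros encodes.
  return tagged >> kSmiShiftBits;
}

int main() {
  for (int32_t v : {0, 1, -1, 42, -12345}) {
    int32_t tagged = SmiTag(v);
    assert((tagged & ((1 << kSmiShiftBits) - 1)) == 0);  // tag bits are zero
    assert(SmiUntag(tagged) == v);
  }
}
```

Because the shifted-out bits are known to be zero, the reducer can later cancel the untagging shift against a subsequent left shift, or drop it from both sides of a comparison; that is what the machine-operator-reducer changes below implement.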
diff --git a/src/compiler/effect-control-linearizer.cc b/src/compiler/effect-control-linearizer.cc
index d3344b9545..7bc8bcb1b1 100644
--- a/src/compiler/effect-control-linearizer.cc
+++ b/src/compiler/effect-control-linearizer.cc
@@ -4552,18 +4552,20 @@ Node* EffectControlLinearizer::ChangeUint32ToSmi(Node* value) {
 }
 
 Node* EffectControlLinearizer::ChangeSmiToIntPtr(Node* value) {
-  // Do shift on 32bit values if Smis are stored in the lower word.
   if (machine()->Is64() && SmiValuesAre31Bits()) {
-    return __ ChangeInt32ToInt64(
-        __ Word32Sar(__ TruncateInt64ToInt32(value), SmiShiftBitsConstant()));
+    // First sign-extend the upper half, then shift away the Smi tag.
+    return __ WordSarShiftOutZeros(
+        __ ChangeInt32ToInt64(__ TruncateInt64ToInt32(value)),
+        SmiShiftBitsConstant());
   }
-  return __ WordSar(value, SmiShiftBitsConstant());
+  return __ WordSarShiftOutZeros(value, SmiShiftBitsConstant());
 }
 
 Node* EffectControlLinearizer::ChangeSmiToInt32(Node* value) {
   // Do shift on 32bit values if Smis are stored in the lower word.
   if (machine()->Is64() && SmiValuesAre31Bits()) {
-    return __ Word32Sar(__ TruncateInt64ToInt32(value), SmiShiftBitsConstant());
+    return __ Word32SarShiftOutZeros(__ TruncateInt64ToInt32(value),
+                                     SmiShiftBitsConstant());
   }
   if (machine()->Is64()) {
     return __ TruncateInt64ToInt32(ChangeSmiToIntPtr(value));
diff --git a/src/compiler/graph-assembler.h b/src/compiler/graph-assembler.h
index b9f605ae6e..128e03357d 100644
--- a/src/compiler/graph-assembler.h
+++ b/src/compiler/graph-assembler.h
@@ -82,6 +82,7 @@ class BasicBlock;
  V(Word32Equal)                 \
  V(Word32Or)                    \
  V(Word32Sar)                   \
+ V(Word32SarShiftOutZeros)      \
  V(Word32Shl)                   \
  V(Word32Shr)                   \
  V(Word32Xor)                   \
@@ -91,6 +92,7 @@ class BasicBlock;
  V(WordAnd)                     \
  V(WordEqual)                   \
  V(WordSar)                     \
+ V(WordSarShiftOutZeros)        \
  V(WordShl)
 
 #define CHECKED_ASSEMBLER_MACH_BINOP_LIST(V) \
diff --git a/src/compiler/machine-operator-reducer.cc b/src/compiler/machine-operator-reducer.cc
index f24581eec7..1b60029169 100644
--- a/src/compiler/machine-operator-reducer.cc
+++ b/src/compiler/machine-operator-reducer.cc
@@ -4,6 +4,7 @@
 #include "src/compiler/machine-operator-reducer.h"
 
 #include <cmath>
+#include <limits>
 
 #include "src/base/bits.h"
 #include "src/base/division-by-constant.h"
@@ -14,6 +15,7 @@
 #include "src/compiler/machine-graph.h"
 #include "src/compiler/node-matchers.h"
 #include "src/compiler/node-properties.h"
+#include "src/compiler/opcodes.h"
 #include "src/numbers/conversions-inl.h"
 
 namespace v8 {
@@ -423,7 +425,7 @@ Reduction MachineOperatorReducer::Reduce(Node* node) {
           return ReplaceBool(true);
         }
       }
-      break;
+      return ReduceWord32Comparisons(node);
     }
     case IrOpcode::kInt32LessThanOrEqual: {
       Int32BinopMatcher m(node);
@@ -431,7 +433,7 @@ Reduction MachineOperatorReducer::Reduce(Node* node) {
         return ReplaceBool(m.left().Value() <= m.right().Value());
       }
       if (m.LeftEqualsRight()) return ReplaceBool(true);  // x <= x => true
-      break;
+      return ReduceWord32Comparisons(node);
    }
     case IrOpcode::kUint32LessThan: {
       Uint32BinopMatcher m(node);
@@ -456,7 +458,7 @@ Reduction MachineOperatorReducer::Reduce(Node* node) {
           // TODO(turbofan): else the comparison is always true.
         }
       }
-      break;
+      return ReduceWord32Comparisons(node);
     }
     case IrOpcode::kUint32LessThanOrEqual: {
       Uint32BinopMatcher m(node);
@@ -466,7 +468,7 @@ Reduction MachineOperatorReducer::Reduce(Node* node) {
         return ReplaceBool(m.left().Value() <= m.right().Value());
       }
       if (m.LeftEqualsRight()) return ReplaceBool(true);  // x <= x => true
-      break;
+      return ReduceWord32Comparisons(node);
     }
     case IrOpcode::kFloat32Sub: {
       Float32BinopMatcher m(node);
@@ -873,6 +875,11 @@ Reduction MachineOperatorReducer::Reduce(Node* node) {
    case IrOpcode::kTrapIf:
     case IrOpcode::kTrapUnless:
       return ReduceConditional(node);
+    case IrOpcode::kInt64LessThan:
+    case IrOpcode::kInt64LessThanOrEqual:
+    case IrOpcode::kUint64LessThan:
+    case IrOpcode::kUint64LessThanOrEqual:
+      return ReduceWord64Comparisons(node);
     default:
       break;
   }
@@ -1247,6 +1254,78 @@ Reduction MachineOperatorReducer::ReduceProjection(size_t index, Node* node) {
   return NoChange();
 }
 
+Reduction MachineOperatorReducer::ReduceWord32Comparisons(Node* node) {
+  DCHECK(node->opcode() == IrOpcode::kInt32LessThan ||
+         node->opcode() == IrOpcode::kInt32LessThanOrEqual ||
+         node->opcode() == IrOpcode::kUint32LessThan ||
+         node->opcode() == IrOpcode::kUint32LessThanOrEqual);
+  Int32BinopMatcher m(node);
+  // (x >>> K) < (y >>> K) => x < y   if only zeros shifted out
+  if (m.left().op() == machine()->Word32SarShiftOutZeros() &&
+      m.right().op() == machine()->Word32SarShiftOutZeros()) {
+    Int32BinopMatcher mleft(m.left().node());
+    Int32BinopMatcher mright(m.right().node());
+    if (mleft.right().HasValue() && mright.right().Is(mleft.right().Value())) {
+      node->ReplaceInput(0, mleft.left().node());
+      node->ReplaceInput(1, mright.left().node());
+      return Changed(node);
+    }
+  }
+  return NoChange();
+}
+
+const Operator* MachineOperatorReducer::Map64To32Comparison(
+    const Operator* op, bool sign_extended) {
+  switch (op->opcode()) {
+    case IrOpcode::kInt64LessThan:
+      return sign_extended ? machine()->Int32LessThan()
+                           : machine()->Uint32LessThan();
+    case IrOpcode::kInt64LessThanOrEqual:
+      return sign_extended ? machine()->Int32LessThanOrEqual()
+                           : machine()->Uint32LessThanOrEqual();
+    case IrOpcode::kUint64LessThan:
+      return machine()->Uint32LessThan();
+    case IrOpcode::kUint64LessThanOrEqual:
+      return machine()->Uint32LessThanOrEqual();
+    default:
+      UNREACHABLE();
+  }
+}
+
+Reduction MachineOperatorReducer::ReduceWord64Comparisons(Node* node) {
+  DCHECK(node->opcode() == IrOpcode::kInt64LessThan ||
+         node->opcode() == IrOpcode::kInt64LessThanOrEqual ||
+         node->opcode() == IrOpcode::kUint64LessThan ||
+         node->opcode() == IrOpcode::kUint64LessThanOrEqual);
+  Int64BinopMatcher m(node);
+
+  bool sign_extended =
+      m.left().IsChangeInt32ToInt64() && m.right().IsChangeInt32ToInt64();
+  if (sign_extended || (m.left().IsChangeUint32ToUint64() &&
+                        m.right().IsChangeUint32ToUint64())) {
+    node->ReplaceInput(0, NodeProperties::GetValueInput(m.left().node(), 0));
+    node->ReplaceInput(1, NodeProperties::GetValueInput(m.right().node(), 0));
+    NodeProperties::ChangeOp(node,
+                             Map64To32Comparison(node->op(), sign_extended));
+    return Changed(node).FollowedBy(Reduce(node));
+  }
+
+  // (x >>> K) < (y >>> K) => x < y   if only zeros shifted out
+  // This is useful for Smi untagging, which results in such a shift.
+  if (m.left().op() == machine()->Word64SarShiftOutZeros() &&
+      m.right().op() == machine()->Word64SarShiftOutZeros()) {
+    Int64BinopMatcher mleft(m.left().node());
+    Int64BinopMatcher mright(m.right().node());
+    if (mleft.right().HasValue() && mright.right().Is(mleft.right().Value())) {
+      node->ReplaceInput(0, mleft.left().node());
+      node->ReplaceInput(1, mright.left().node());
+      return Changed(node);
+    }
+  }
+
+  return NoChange();
+}
+
 Reduction MachineOperatorReducer::ReduceWord32Shifts(Node* node) {
   DCHECK((node->opcode() == IrOpcode::kWord32Shl) ||
          (node->opcode() == IrOpcode::kWord32Shr) ||
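Both reductions in ReduceWord64Comparisons above can be sanity-checked with ordinary integer arithmetic. A standalone sketch (plain C++, not the reducer API) of the two identities: a 64-bit comparison of sign- or zero-extended 32-bit values narrows to the corresponding 32-bit comparison, and a right shift that only shifts out zeros can be dropped from both sides of a comparison:

```cpp
#include <cassert>
#include <cstdint>

int main() {
  // Identity 1: if both operands are sign-extended from 32 bits, the 64-bit
  // signed comparison equals the 32-bit signed one (Map64To32Comparison).
  for (int32_t a : {-5, 0, 7}) {
    for (int32_t b : {-5, 0, 7}) {
      assert((int64_t{a} < int64_t{b}) == (a < b));
      // Zero-extended operands narrow to an unsigned 32-bit comparison.
      uint32_t ua = static_cast<uint32_t>(a), ub = static_cast<uint32_t>(b);
      assert((uint64_t{ua} < uint64_t{ub}) == (ua < ub));
    }
  }

  // Identity 2: if x and y have their low K bits zero (as after Smi tagging),
  // comparing x >> K against y >> K is the same as comparing x against y.
  constexpr int K = 1;
  for (int64_t x : {int64_t{-8}, int64_t{0}, int64_t{6}}) {
    for (int64_t y : {int64_t{-8}, int64_t{0}, int64_t{6}}) {
      assert((x & ((int64_t{1} << K) - 1)) == 0);  // precondition: low bits 0
      assert(((x >> K) < (y >> K)) == (x < y));
    }
  }
}
```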
@@ -1275,14 +1354,42 @@ Reduction MachineOperatorReducer::ReduceWord32Shl(Node* node) {
         base::ShlWithWraparound(m.left().Value(), m.right().Value()));
   }
   if (m.right().IsInRange(1, 31)) {
-    // (x >>> K) << K => x & ~(2^K - 1)
-    // (x >> K) << K => x & ~(2^K - 1)
     if (m.left().IsWord32Sar() || m.left().IsWord32Shr()) {
       Int32BinopMatcher mleft(m.left().node());
+
+      // If x >> K only shifted out zeros:
+      // (x >> K) << L => x          if K == L
+      // (x >> K) << L => x >> (K-L) if K > L
+      // (x >> K) << L => x << (L-K) if K < L
+      // Since this is used for Smi untagging, we currently only need it for
+      // signed shifts.
+      if (mleft.op() == machine()->Word32SarShiftOutZeros() &&
+          mleft.right().IsInRange(1, 31)) {
+        Node* x = mleft.left().node();
+        int k = mleft.right().Value();
+        int l = m.right().Value();
+        if (k == l) {
+          return Replace(x);
+        } else if (k > l) {
+          node->ReplaceInput(0, x);
+          node->ReplaceInput(1, Uint32Constant(k - l));
+          NodeProperties::ChangeOp(node, machine()->Word32Sar());
+          return Changed(node).FollowedBy(ReduceWord32Sar(node));
+        } else {
+          DCHECK(k < l);
+          node->ReplaceInput(0, x);
+          node->ReplaceInput(1, Uint32Constant(l - k));
+          return Changed(node);
+        }
+      }
+
+      // (x >>> K) << K => x & ~(2^K - 1)
+      // (x >> K) << K => x & ~(2^K - 1)
       if (mleft.right().Is(m.right().Value())) {
         node->ReplaceInput(0, mleft.left().node());
         node->ReplaceInput(1,
-                           Uint32Constant(~((1U << m.right().Value()) - 1U)));
+                           Uint32Constant(std::numeric_limits<uint32_t>::max()
+                                          << m.right().Value()));
         NodeProperties::ChangeOp(node, machine()->Word32And());
         return Changed(node).FollowedBy(ReduceWord32And(node));
       }
@@ -1299,6 +1406,46 @@ Reduction MachineOperatorReducer::ReduceWord64Shl(Node* node) {
     return ReplaceInt64(
         base::ShlWithWraparound(m.left().Value(), m.right().Value()));
   }
+  if (m.right().IsInRange(1, 63) &&
+      (m.left().IsWord64Sar() || m.left().IsWord64Shr())) {
+    Int64BinopMatcher mleft(m.left().node());
+
+    // If x >> K only shifted out zeros:
+    // (x >> K) << L => x          if K == L
+    // (x >> K) << L => x >> (K-L) if K > L
+    // (x >> K) << L => x << (L-K) if K < L
+    // Since this is used for Smi untagging, we currently only need it for
+    // signed shifts.
+    if (mleft.op() == machine()->Word64SarShiftOutZeros() &&
+        mleft.right().IsInRange(1, 63)) {
+      Node* x = mleft.left().node();
+      int64_t k = mleft.right().Value();
+      int64_t l = m.right().Value();
+      if (k == l) {
+        return Replace(x);
+      } else if (k > l) {
+        node->ReplaceInput(0, x);
+        node->ReplaceInput(1, Uint64Constant(k - l));
+        NodeProperties::ChangeOp(node, machine()->Word64Sar());
+        return Changed(node).FollowedBy(ReduceWord64Sar(node));
+      } else {
+        DCHECK(k < l);
+        node->ReplaceInput(0, x);
+        node->ReplaceInput(1, Uint64Constant(l - k));
+        return Changed(node);
+      }
+    }
+
+    // (x >>> K) << K => x & ~(2^K - 1)
+    // (x >> K) << K => x & ~(2^K - 1)
+    if (mleft.right().Is(m.right().Value())) {
+      node->ReplaceInput(0, mleft.left().node());
+      node->ReplaceInput(1, Uint64Constant(std::numeric_limits<uint64_t>::max()
+                                           << m.right().Value()));
+      NodeProperties::ChangeOp(node, machine()->Word64And());
+      return Changed(node).FollowedBy(ReduceWord64And(node));
+    }
+  }
   return NoChange();
 }
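The same style of check works for the (x >> K) << L rules that ReduceWord32Shl and ReduceWord64Shl now apply, and for the rewritten mask constant: under the shift-out-zeros precondition (the low K bits of x are zero), all three cases reduce to a single net shift. A standalone sketch (plain C++ with hypothetical helper names; assumes arithmetic right shift on signed types, as V8 does):

```cpp
#include <cassert>
#include <cstdint>

// Shift left without signed-overflow UB, like the Shl helper added to the
// unit tests further below.
int64_t Shl(int64_t x, int y) {
  return static_cast<int64_t>(static_cast<uint64_t>(x) << y);
}

// (x >> k) << l, rewritten assuming the low k bits of x are zero
// (the ShiftKind::kShiftOutZeros guarantee).
int64_t ReducedShlOfSar(int64_t x, int k, int l) {
  if (k == l) return x;            // the shifts cancel entirely
  if (k > l) return x >> (k - l);  // net arithmetic right shift
  return Shl(x, l - k);            // net left shift
}

int main() {
  for (int64_t v : {int64_t{-1234}, int64_t{0}, int64_t{99}}) {
    for (int k = 1; k <= 8; ++k) {
      for (int l = 0; l <= 8; ++l) {
        int64_t x = Shl(v, k);  // low k bits are zero by construction
        assert(Shl(x >> k, l) == ReducedShlOfSar(x, k, l));
      }
    }
  }
  // The new mask constant equals the value the old expression produced:
  // ~((1u << k) - 1u) == uint32-max << k.
  for (int k = 1; k <= 31; ++k) {
    assert(~((1u << k) - 1u) == (~0u << k));
  }
}
```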
diff --git a/src/compiler/machine-operator-reducer.h b/src/compiler/machine-operator-reducer.h
index 63a1f2f1ab..7970daefce 100644
--- a/src/compiler/machine-operator-reducer.h
+++ b/src/compiler/machine-operator-reducer.h
@@ -92,6 +92,9 @@ class V8_EXPORT_PRIVATE MachineOperatorReducer final
   Reduction ReduceUint32Mod(Node* node);
   Reduction ReduceStore(Node* node);
   Reduction ReduceProjection(size_t index, Node* node);
+  const Operator* Map64To32Comparison(const Operator* op, bool sign_extended);
+  Reduction ReduceWord32Comparisons(Node* node);
+  Reduction ReduceWord64Comparisons(Node* node);
   Reduction ReduceWord32Shifts(Node* node);
   Reduction ReduceWord32Shl(Node* node);
   Reduction ReduceWord64Shl(Node* node);
diff --git a/src/compiler/machine-operator.cc b/src/compiler/machine-operator.cc
index beece61f73..f3f12a5bb3 100644
--- a/src/compiler/machine-operator.cc
+++ b/src/compiler/machine-operator.cc
@@ -106,7 +106,6 @@ LoadRepresentation LoadRepresentationOf(Operator const* op) {
   return OpParameter<LoadRepresentation>(op);
 }
 
-
 StoreRepresentation const& StoreRepresentationOf(Operator const* op) {
   DCHECK(IrOpcode::kStore == op->opcode() ||
          IrOpcode::kProtectedStore == op->opcode());
@@ -150,6 +149,22 @@ MachineType AtomicOpType(Operator const* op) {
   return OpParameter<MachineType>(op);
 }
 
+size_t hash_value(ShiftKind kind) { return static_cast<size_t>(kind); }
+V8_EXPORT_PRIVATE std::ostream& operator<<(std::ostream& os, ShiftKind kind) {
+  switch (kind) {
+    case ShiftKind::kNormal:
+      return os << "Normal";
+    case ShiftKind::kShiftOutZeros:
+      return os << "ShiftOutZeros";
+  }
+}
+
+ShiftKind ShiftKindOf(Operator const* op) {
+  DCHECK(IrOpcode::kWord32Sar == op->opcode() ||
+         IrOpcode::kWord64Sar == op->opcode());
+  return OpParameter<ShiftKind>(op);
+}
+
 // The format is:
 // V(Name, properties, value_input_count, control_input_count, output_count)
 #define PURE_BINARY_OP_LIST_32(V)                                          \
@@ -158,7 +173,6 @@ MachineType AtomicOpType(Operator const* op) {
   V(Word32Xor, Operator::kAssociative | Operator::kCommutative, 2, 0, 1)   \
   V(Word32Shl, Operator::kNoProperties, 2, 0, 1)                           \
   V(Word32Shr, Operator::kNoProperties, 2, 0, 1)                           \
-  V(Word32Sar, Operator::kNoProperties, 2, 0, 1)                           \
   V(Word32Ror, Operator::kNoProperties, 2, 0, 1)                           \
   V(Word32Equal, Operator::kCommutative, 2, 0, 1)                          \
   V(Int32Add, Operator::kAssociative | Operator::kCommutative, 2, 0, 1)    \
@@ -183,7 +197,6 @@ MachineType AtomicOpType(Operator const* op) {
   V(Word64Xor, Operator::kAssociative | Operator::kCommutative, 2, 0, 1)   \
   V(Word64Shl, Operator::kNoProperties, 2, 0, 1)                           \
   V(Word64Shr, Operator::kNoProperties, 2, 0, 1)                           \
-  V(Word64Sar, Operator::kNoProperties, 2, 0, 1)                           \
   V(Word64Ror, Operator::kNoProperties, 2, 0, 1)                           \
   V(Word64Equal, Operator::kCommutative, 2, 0, 1)                          \
   V(Int64Add, Operator::kAssociative | Operator::kCommutative, 2, 0, 1)    \
@@ -652,6 +665,38 @@ PURE_OPTIONAL_OP_LIST(PURE)
 OVERFLOW_OP_LIST(OVERFLOW_OP)
 #undef OVERFLOW_OP
 
+template <ShiftKind kind>
+struct Word32SarOperator : Operator1<ShiftKind> {
+  Word32SarOperator()
+      : Operator1(IrOpcode::kWord32Sar, Operator::kPure, "Word32Sar", 2, 0, 0,
+                  1, 0, 0, kind) {}
+};
+
+const Operator* MachineOperatorBuilder::Word32Sar(ShiftKind kind) {
+  switch (kind) {
+    case ShiftKind::kNormal:
+      return GetCachedOperator<Word32SarOperator<ShiftKind::kNormal>>();
+    case ShiftKind::kShiftOutZeros:
+      return GetCachedOperator<Word32SarOperator<ShiftKind::kShiftOutZeros>>();
+  }
+}
+
+template <ShiftKind kind>
+struct Word64SarOperator : Operator1<ShiftKind> {
+  Word64SarOperator()
+      : Operator1(IrOpcode::kWord64Sar, Operator::kPure, "Word64Sar", 2, 0, 0,
+                  1, 0, 0, kind) {}
+};
+
+const Operator* MachineOperatorBuilder::Word64Sar(ShiftKind kind) {
+  switch (kind) {
+    case ShiftKind::kNormal:
+      return GetCachedOperator<Word64SarOperator<ShiftKind::kNormal>>();
+    case ShiftKind::kShiftOutZeros:
+      return GetCachedOperator<Word64SarOperator<ShiftKind::kShiftOutZeros>>();
+  }
+}
+
 template <MachineRepresentation rep, MachineSemantic sem>
 struct LoadOperator : public Operator1<LoadRepresentation> {
   LoadOperator()
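The Word32Sar/Word64Sar builders above hand out one cached, parameterized operator instance per ShiftKind, which keeps operators pointer-comparable — the reducer matches `mleft.op() == machine()->Word32SarShiftOutZeros()` by identity. A minimal sketch of that caching pattern (hypothetical simplified types, not the real Operator1/GetCachedOperator machinery):

```cpp
#include <cassert>

enum class ShiftKind { kNormal, kShiftOutZeros };

struct Operator {
  const char* mnemonic;
  ShiftKind kind;
};

// One function-local static per template instantiation: every request for the
// same template argument yields the same Operator*, so identity comparison of
// operators is valid.
template <typename OpType>
const Operator* GetCachedOperator() {
  static const OpType instance;
  return &instance;
}

template <ShiftKind kind>
struct Word32SarOperator : Operator {
  Word32SarOperator() : Operator{"Word32Sar", kind} {}
};

const Operator* Word32Sar(ShiftKind kind) {
  switch (kind) {
    case ShiftKind::kNormal:
      return GetCachedOperator<Word32SarOperator<ShiftKind::kNormal>>();
    case ShiftKind::kShiftOutZeros:
      return GetCachedOperator<Word32SarOperator<ShiftKind::kShiftOutZeros>>();
  }
  return nullptr;
}

int main() {
  // Same kind -> same cached instance; different kinds -> distinct operators.
  assert(Word32Sar(ShiftKind::kNormal) == Word32Sar(ShiftKind::kNormal));
  assert(Word32Sar(ShiftKind::kNormal) !=
         Word32Sar(ShiftKind::kShiftOutZeros));
}
```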
diff --git a/src/compiler/machine-operator.h b/src/compiler/machine-operator.h
index 6702906881..2424ec4db8 100644
--- a/src/compiler/machine-operator.h
+++ b/src/compiler/machine-operator.h
@@ -180,6 +180,16 @@ V8_EXPORT_PRIVATE S8x16ShuffleParameter const& S8x16ShuffleParameterOf(
 
 StackCheckKind StackCheckKindOf(Operator const* op) V8_WARN_UNUSED_RESULT;
 
+// ShiftKind::kShiftOutZeros means that it is guaranteed that the bits shifted
+// out of the left operand are all zeros. If this is not the case, undefined
+// behavior (i.e., incorrect optimizations) will happen.
+// This is mostly useful for Smi untagging.
+enum class ShiftKind { kNormal, kShiftOutZeros };
+
+size_t hash_value(ShiftKind);
+V8_EXPORT_PRIVATE std::ostream& operator<<(std::ostream&, ShiftKind);
+ShiftKind ShiftKindOf(Operator const*) V8_WARN_UNUSED_RESULT;
+
 // Interface for building machine-level operators. These operators are
 // machine-level but machine-independent and thus define a language suitable
 // for generating code to run on architectures such as ia32, x64, arm, etc.
@@ -293,7 +303,11 @@ class V8_EXPORT_PRIVATE MachineOperatorBuilder final
   const Operator* Word32Xor();
   const Operator* Word32Shl();
   const Operator* Word32Shr();
-  const Operator* Word32Sar();
+  const Operator* Word32Sar(ShiftKind kind);
+  const Operator* Word32Sar() { return Word32Sar(ShiftKind::kNormal); }
+  const Operator* Word32SarShiftOutZeros() {
+    return Word32Sar(ShiftKind::kShiftOutZeros);
+  }
   const Operator* Word32Ror();
   const Operator* Word32Equal();
   const Operator* Word32Clz();
@@ -318,7 +332,11 @@ class V8_EXPORT_PRIVATE MachineOperatorBuilder final
   const Operator* Word64Xor();
   const Operator* Word64Shl();
   const Operator* Word64Shr();
-  const Operator* Word64Sar();
+  const Operator* Word64Sar(ShiftKind kind);
+  const Operator* Word64Sar() { return Word64Sar(ShiftKind::kNormal); }
+  const Operator* Word64SarShiftOutZeros() {
+    return Word64Sar(ShiftKind::kShiftOutZeros);
+  }
   const Operator* Word64Ror();
   const Operator* Word64Clz();
   const OptionalOperator Word64Ctz();
@@ -841,7 +859,6 @@ class V8_EXPORT_PRIVATE MachineOperatorBuilder final
   V(Word, Xor)                  \
   V(Word, Shl)                  \
   V(Word, Shr)                  \
-  V(Word, Sar)                  \
   V(Word, Ror)                  \
   V(Word, Clz)                  \
   V(Word, Equal)                \
@@ -864,6 +881,13 @@ class V8_EXPORT_PRIVATE MachineOperatorBuilder final
 #undef PSEUDO_OP
 #undef PSEUDO_OP_LIST
 
+  const Operator* WordSar(ShiftKind kind = ShiftKind::kNormal) {
+    return Is32() ? Word32Sar(kind) : Word64Sar(kind);
+  }
+  const Operator* WordSarShiftOutZeros() {
+    return WordSar(ShiftKind::kShiftOutZeros);
+  }
+
  private:
   Zone* zone_;
   MachineRepresentation const word_;
diff --git a/src/compiler/node.cc b/src/compiler/node.cc
index 1bff71495c..5e7c9fcc39 100644
--- a/src/compiler/node.cc
+++ b/src/compiler/node.cc
@@ -314,22 +314,32 @@ bool Node::OwnedBy(Node const* owner1, Node const* owner2) const {
   return mask == 3;
 }
 
-void Node::Print() const {
+void Node::Print(int depth) const {
   StdoutStream os;
-  Print(os);
+  Print(os, depth);
 }
 
-void Node::Print(std::ostream& os) const {
-  os << *this << std::endl;
-  for (Node* input : this->inputs()) {
+namespace {
+void PrintNode(const Node* node, std::ostream& os, int depth,
+               int indentation = 0) {
+  for (int i = 0; i < indentation; ++i) {
     os << "  ";
-    if (input) {
-      os << *input;
-    } else {
-      os << "(NULL)";
-    }
-    os << std::endl;
   }
+  if (node) {
+    os << *node;
+  } else {
+    os << "(NULL)";
+  }
+  os << std::endl;
+  if (depth <= 0) return;
+  for (Node* input : node->inputs()) {
+    PrintNode(input, os, depth - 1, indentation + 1);
+  }
+}
+}  // namespace
+
+void Node::Print(std::ostream& os, int depth) const {
+  PrintNode(this, os, depth);
 }
 
 std::ostream& operator<<(std::ostream& os, const Node& n) {
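The refactored Node::Print above turns the old flat one-level dump into a depth-limited recursive tree dump; VisitStaticAssert now calls it with depth 2 so a failing assert shows its operands' operands. A standalone sketch of the same traversal shape (hypothetical mini node type, not V8's Node):

```cpp
#include <iostream>
#include <string>
#include <vector>

struct MiniNode {
  std::string label;
  std::vector<const MiniNode*> inputs;
};

// Mirrors the PrintNode helper above: print the node at the current
// indentation, then recurse into its inputs until the depth budget runs out.
void PrintNode(const MiniNode* node, std::ostream& os, int depth,
               int indentation = 0) {
  for (int i = 0; i < indentation; ++i) os << "  ";
  os << (node ? node->label : "(NULL)") << '\n';
  if (depth <= 0 || node == nullptr) return;
  for (const MiniNode* input : node->inputs) {
    PrintNode(input, os, depth - 1, indentation + 1);
  }
}

int main() {
  MiniNode a{"Parameter[0]", {}}, b{"Int32Constant[1]", {}};
  MiniNode add{"Int32Add", {&a, &b}};
  MiniNode eq{"Word32Equal", {&add, &b}};
  PrintNode(&eq, std::cout, 2);  // two levels of inputs, like Print(2)
}
```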
diff --git a/src/compiler/node.h b/src/compiler/node.h
index 41dca441f2..4ba7077896 100644
--- a/src/compiler/node.h
+++ b/src/compiler/node.h
@@ -144,8 +144,8 @@ class V8_EXPORT_PRIVATE Node final {
   // Returns true if {owner1} and {owner2} are the only users of {this} node.
   bool OwnedBy(Node const* owner1, Node const* owner2) const;
 
-  void Print() const;
-  void Print(std::ostream&) const;
+  void Print(int depth = 1) const;
+  void Print(std::ostream&, int depth = 1) const;
 
  private:
   struct Use;
diff --git a/src/compiler/pipeline.cc b/src/compiler/pipeline.cc
index 95d8fffd29..77603a9dc4 100644
--- a/src/compiler/pipeline.cc
+++ b/src/compiler/pipeline.cc
@@ -1955,10 +1955,12 @@ struct CsaOptimizationPhase {
     CommonOperatorReducer common_reducer(&graph_reducer, data->graph(),
                                          data->broker(), data->common(),
                                          data->machine(), temp_zone);
+    ValueNumberingReducer value_numbering(temp_zone, data->graph()->zone());
     AddReducer(data, &graph_reducer, &branch_condition_elimination);
     AddReducer(data, &graph_reducer, &dead_code_elimination);
     AddReducer(data, &graph_reducer, &machine_reducer);
     AddReducer(data, &graph_reducer, &common_reducer);
+    AddReducer(data, &graph_reducer, &value_numbering);
     graph_reducer.ReduceGraph();
   }
 };
diff --git a/src/compiler/raw-machine-assembler.h b/src/compiler/raw-machine-assembler.h
index 6298513f55..cbd7722dd2 100644
--- a/src/compiler/raw-machine-assembler.h
+++ b/src/compiler/raw-machine-assembler.h
@@ -311,6 +311,9 @@ class V8_EXPORT_PRIVATE RawMachineAssembler {
   Node* WordSar(Node* a, Node* b) {
     return AddNode(machine()->WordSar(), a, b);
   }
+  Node* WordSarShiftOutZeros(Node* a, Node* b) {
+    return AddNode(machine()->WordSarShiftOutZeros(), a, b);
+  }
   Node* WordRor(Node* a, Node* b) {
     return AddNode(machine()->WordRor(), a, b);
   }
@@ -346,6 +349,9 @@ class V8_EXPORT_PRIVATE RawMachineAssembler {
   Node* Word32Sar(Node* a, Node* b) {
     return AddNode(machine()->Word32Sar(), a, b);
   }
+  Node* Word32SarShiftOutZeros(Node* a, Node* b) {
+    return AddNode(machine()->Word32SarShiftOutZeros(), a, b);
+  }
   Node* Word32Ror(Node* a, Node* b) {
     return AddNode(machine()->Word32Ror(), a, b);
  }
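CsaOptimizationPhase now runs value numbering after the machine reducer, which is what lets the StaticAssert tests below work: once the reducer rewrites the "unoptimized" expression into the same shape as the hand-"optimized" one, value numbering collapses the two subgraphs into a single node and the WordEqual folds to true. A minimal sketch of the hash-consing idea behind a value-numbering reducer (hypothetical simplified IR, not TurboFan's ValueNumberingReducer):

```cpp
#include <cassert>
#include <deque>
#include <map>
#include <string>
#include <utility>
#include <vector>

struct Node {
  std::string op;
  std::vector<const Node*> inputs;
};

// Canonicalize by (operator, input identities): structurally identical
// expressions map to the same Node*.
class ValueNumbering {
 public:
  const Node* GetOrCreate(const std::string& op,
                          std::vector<const Node*> inputs) {
    auto key = std::make_pair(op, inputs);
    auto it = cache_.find(key);
    if (it != cache_.end()) return it->second;
    nodes_.push_back(Node{op, std::move(inputs)});
    return cache_[key] = &nodes_.back();
  }

 private:
  std::map<std::pair<std::string, std::vector<const Node*>>, const Node*>
      cache_;
  std::deque<Node> nodes_;  // deque keeps addresses stable across push_back
};

int main() {
  ValueNumbering vn;
  const Node* p = vn.GetOrCreate("Parameter", {});
  // Two equivalent expressions share one node, so an equality check on them
  // becomes Equal(x, x) and folds trivially.
  assert(vn.GetOrCreate("SmiUntag", {p}) == vn.GetOrCreate("SmiUntag", {p}));
}
```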
diff --git a/test/cctest/test-code-stub-assembler.cc b/test/cctest/test-code-stub-assembler.cc
index 2b89220743..40bb478c4d 100644
--- a/test/cctest/test-code-stub-assembler.cc
+++ b/test/cctest/test-code-stub-assembler.cc
@@ -3940,6 +3940,54 @@ TEST(WasmTaggedToFloat64) {
   }
 }
 
+TEST(SmiUntagLeftShiftOptimization) {
+  Isolate* isolate(CcTest::InitIsolateOnce());
+  const int kNumParams = 1;
+  CodeAssemblerTester asm_tester(isolate, kNumParams);
+  CodeStubAssembler m(asm_tester.state());
+
+  {
+    TNode<TaggedIndex> param =
+        TNode<TaggedIndex>::UncheckedCast(m.Parameter(0));
+    TNode<WordT> unoptimized =
+        m.IntPtrMul(m.TaggedIndexToIntPtr(param), m.IntPtrConstant(8));
+    TNode<WordT> optimized = m.WordShl(
+        m.BitcastTaggedToWordForTagAndSmiBits(param), 3 - kSmiTagSize);
+    m.StaticAssert(m.WordEqual(unoptimized, optimized));
+    m.Return(m.UndefinedConstant());
+  }
+
+  AssemblerOptions options = AssemblerOptions::Default(isolate);
+  FunctionTester ft(asm_tester.GenerateCode(options), kNumParams);
+}
+
+TEST(SmiUntagComparisonOptimization) {
+  Isolate* isolate(CcTest::InitIsolateOnce());
+  const int kNumParams = 2;
+  CodeAssemblerTester asm_tester(isolate, kNumParams);
+  CodeStubAssembler m(asm_tester.state());
+
+  {
+    TNode<Smi> a = TNode<Smi>::UncheckedCast(m.Parameter(0));
+    TNode<Smi> b = TNode<Smi>::UncheckedCast(m.Parameter(1));
+    TNode<BoolT> unoptimized = m.UintPtrLessThan(m.SmiUntag(a), m.SmiUntag(b));
+#ifdef V8_COMPRESS_POINTERS
+    TNode<BoolT> optimized = m.Uint32LessThan(
+        m.TruncateIntPtrToInt32(m.BitcastTaggedToWordForTagAndSmiBits(a)),
+        m.TruncateIntPtrToInt32(m.BitcastTaggedToWordForTagAndSmiBits(b)));
+#else
+    TNode<BoolT> optimized =
+        m.UintPtrLessThan(m.BitcastTaggedToWordForTagAndSmiBits(a),
+                          m.BitcastTaggedToWordForTagAndSmiBits(b));
+#endif
+    m.StaticAssert(m.Word32Equal(unoptimized, optimized));
+    m.Return(m.UndefinedConstant());
+  }
+
+  AssemblerOptions options = AssemblerOptions::Default(isolate);
+  FunctionTester ft(asm_tester.GenerateCode(options), kNumParams);
+}
+
 }  // namespace compiler
 }  // namespace internal
 }  // namespace v8
diff --git a/test/unittests/compiler/machine-operator-reducer-unittest.cc b/test/unittests/compiler/machine-operator-reducer-unittest.cc
index eb06d94d6d..2c2d3403f3 100644
--- a/test/unittests/compiler/machine-operator-reducer-unittest.cc
+++ b/test/unittests/compiler/machine-operator-reducer-unittest.cc
@@ -3,6 +3,7 @@
 // found in the LICENSE file.
 
 #include "src/compiler/machine-operator-reducer.h"
+#include <limits>
 
 #include "src/base/bits.h"
 #include "src/base/division-by-constant.h"
 #include "src/base/ieee754.h"
@@ -317,6 +318,7 @@ const ComparisonBinaryOperator kComparisonBinaryOperators[] = {
 
 // Avoid undefined behavior on signed integer overflow.
 int32_t Shl(int32_t x, int32_t y) { return static_cast<uint32_t>(x) << y; }
+int64_t Shl(int64_t x, int64_t y) { return static_cast<uint64_t>(x) << y; }
 
 }  // namespace
@@ -1091,11 +1093,123 @@ TEST_F(MachineOperatorReducerTest, Word32ShlWithWord32Shr) {
                                  Int32Constant(x));
     Reduction r = Reduce(node);
     ASSERT_TRUE(r.Changed());
-    int32_t m = static_cast<int32_t>(~((1U << x) - 1U));
+    int32_t m = static_cast<int32_t>(std::numeric_limits<uint32_t>::max() << x);
     EXPECT_THAT(r.replacement(), IsWord32And(p0, IsInt32Constant(m)));
   }
 }
 
+TEST_F(MachineOperatorReducerTest, Word32ShlWithWord32SarShiftOutZeros) {
+  Node* p = Parameter(0);
+  TRACED_FORRANGE(int32_t, x, 1, 31) {
+    TRACED_FORRANGE(int32_t, y, 0, 31) {
+      Node* node = graph()->NewNode(
+          machine()->Word32Shl(),
+          graph()->NewNode(machine()->Word32Sar(ShiftKind::kShiftOutZeros), p,
+                           Int32Constant(x)),
+          Int32Constant(y));
+      Reduction r = Reduce(node);
+      ASSERT_TRUE(r.Changed());
+      if (x == y) {
+        // (p >> x) << y => p
+        EXPECT_THAT(r.replacement(), p);
+      } else if (x < y) {
+        // (p >> x) << y => p << (y - x)
+        EXPECT_THAT(r.replacement(), IsWord32Shl(p, IsInt32Constant(y - x)));
+      } else {
+        // (p >> x) << y => p >> (x - y)
+        EXPECT_THAT(r.replacement(), IsWord32Sar(p, IsInt32Constant(x - y)));
+      }
+    }
+  }
+}
+
+// -----------------------------------------------------------------------------
+// Word64Shl
+
+TEST_F(MachineOperatorReducerTest, Word64ShlWithZeroShift) {
+  Node* p0 = Parameter(0);
+  Node* node = graph()->NewNode(machine()->Word64Shl(), p0, Int64Constant(0));
+  Reduction r = Reduce(node);
+  ASSERT_TRUE(r.Changed());
+  EXPECT_EQ(p0, r.replacement());
+}
+
+TEST_F(MachineOperatorReducerTest, Word64ShlWithWord64Sar) {
+  Node* p0 = Parameter(0);
+  TRACED_FORRANGE(int64_t, x, 1, 63) {
+    Node* node = graph()->NewNode(
+        machine()->Word64Shl(),
+        graph()->NewNode(machine()->Word64Sar(), p0, Int64Constant(x)),
+        Int64Constant(x));
+    Reduction r = Reduce(node);
+    ASSERT_TRUE(r.Changed());
+    int64_t m = static_cast<int64_t>(~(Shl(int64_t{1}, x) - 1));
+    EXPECT_THAT(r.replacement(), IsWord64And(p0, IsInt64Constant(m)));
+  }
+}
+
+TEST_F(MachineOperatorReducerTest,
+       Word64ShlWithWord64SarAndInt64AddAndConstant) {
+  Node* const p0 = Parameter(0);
+  TRACED_FOREACH(int64_t, k, kInt64Values) {
+    TRACED_FORRANGE(int64_t, l, 1, 63) {
+      if (Shl(k, l) == 0) continue;
+      // (x + (K << L)) >> L << L => (x & (-1 << L)) + (K << L)
+      Reduction const r = Reduce(graph()->NewNode(
+          machine()->Word64Shl(),
+          graph()->NewNode(machine()->Word64Sar(),
+                           graph()->NewNode(machine()->Int64Add(), p0,
+                                            Int64Constant(Shl(k, l))),
+                           Int64Constant(l)),
+          Int64Constant(l)));
+      ASSERT_TRUE(r.Changed());
+      EXPECT_THAT(
+          r.replacement(),
+          IsInt64Add(IsWord64And(p0, IsInt64Constant(Shl(int64_t{-1}, l))),
+                     IsInt64Constant(Shl(k, l))));
+    }
+  }
+}
+
+TEST_F(MachineOperatorReducerTest, Word64ShlWithWord64Shr) {
+  Node* p0 = Parameter(0);
+  TRACED_FORRANGE(int64_t, x, 1, 63) {
+    Node* node = graph()->NewNode(
+        machine()->Word64Shl(),
+        graph()->NewNode(machine()->Word64Shr(), p0, Int64Constant(x)),
+        Int64Constant(x));
+    Reduction r = Reduce(node);
+    ASSERT_TRUE(r.Changed());
+    int64_t m = static_cast<int64_t>(std::numeric_limits<uint64_t>::max() << x);
+    EXPECT_THAT(r.replacement(), IsWord64And(p0, IsInt64Constant(m)));
+  }
+}
+
+TEST_F(MachineOperatorReducerTest, Word64ShlWithWord64SarShiftOutZeros) {
+  Node* p = Parameter(0);
+  TRACED_FORRANGE(int64_t, x, 1, 63) {
+    TRACED_FORRANGE(int64_t, y, 0, 63) {
+      Node* node = graph()->NewNode(
+          machine()->Word64Shl(),
+          graph()->NewNode(machine()->Word64Sar(ShiftKind::kShiftOutZeros), p,
+                           Int64Constant(x)),
+          Int64Constant(y));
+      Reduction r = Reduce(node);
+      ASSERT_TRUE(r.Changed());
+      if (x == y) {
+        // (p >> x) << y => p
+        EXPECT_THAT(r.replacement(), p);
+      } else if (x < y) {
+        // (p >> x) << y => p << (y - x)
+        EXPECT_THAT(r.replacement(), IsWord64Shl(p, IsInt64Constant(y - x)));
+      } else {
+        // (p >> x) << y => p >> (x - y)
+        EXPECT_THAT(r.replacement(), IsWord64Sar(p, IsInt64Constant(x - y)));
+      }
+    }
+  }
+}
+
 // -----------------------------------------------------------------------------
 // Word32Equal
@@ -1891,6 +2005,29 @@ TEST_F(MachineOperatorReducerTest, Int32LessThanWithWord32Or) {
   }
 }
 
+TEST_F(MachineOperatorReducerTest, Int32LessThanWithWord32SarShiftOutZeros) {
+  Node* const p0 = Parameter(0);
+  Node* const p1 = Parameter(1);
+  TRACED_FORRANGE(int32_t, shift0, 1, 3) {
+    TRACED_FORRANGE(int32_t, shift1, 1, 3) {
+      Node* const node =
+          graph()->NewNode(machine()->Int32LessThan(),
+                           graph()->NewNode(machine()->Word32SarShiftOutZeros(),
+                                            p0, Int32Constant(shift0)),
+                           graph()->NewNode(machine()->Word32SarShiftOutZeros(),
+                                            p1, Int32Constant(shift1)));
+
+      Reduction r = Reduce(node);
+      if (shift0 == shift1) {
+        ASSERT_TRUE(r.Changed());
+        EXPECT_THAT(r.replacement(), IsInt32LessThan(p0, p1));
+      } else {
+        ASSERT_FALSE(r.Changed());
+      }
+    }
+  }
+}
+
 // -----------------------------------------------------------------------------
 // Uint32LessThan
@@ -1911,6 +2048,80 @@ TEST_F(MachineOperatorReducerTest, Uint32LessThanWithWord32Sar) {
   }
 }
 
+TEST_F(MachineOperatorReducerTest, Uint32LessThanWithWord32SarShiftOutZeros) {
+  Node* const p0 = Parameter(0);
+  Node* const p1 = Parameter(1);
+  TRACED_FORRANGE(int32_t, shift0, 1, 3) {
+    TRACED_FORRANGE(int32_t, shift1, 1, 3) {
+      Node* const node =
+          graph()->NewNode(machine()->Uint32LessThan(),
+                           graph()->NewNode(machine()->Word32SarShiftOutZeros(),
+                                            p0, Int32Constant(shift0)),
+                           graph()->NewNode(machine()->Word32SarShiftOutZeros(),
+                                            p1, Int32Constant(shift1)));
+
+      Reduction r = Reduce(node);
+      if (shift0 == shift1) {
+        ASSERT_TRUE(r.Changed());
+        EXPECT_THAT(r.replacement(), IsUint32LessThan(p0, p1));
+      } else {
+        ASSERT_FALSE(r.Changed());
+      }
+    }
+  }
+}
+
+// -----------------------------------------------------------------------------
+// Uint64LessThan
+
+TEST_F(MachineOperatorReducerTest, Uint64LessThanWithWord64SarShiftOutZeros) {
+  Node* const p0 = Parameter(0);
+  Node* const p1 = Parameter(1);
+  TRACED_FORRANGE(int64_t, shift0, 1, 3) {
+    TRACED_FORRANGE(int64_t, shift1, 1, 3) {
+      Node* const node =
+          graph()->NewNode(machine()->Uint64LessThan(),
+                           graph()->NewNode(machine()->Word64SarShiftOutZeros(),
+                                            p0, Int64Constant(shift0)),
+                           graph()->NewNode(machine()->Word64SarShiftOutZeros(),
+                                            p1, Int64Constant(shift1)));
+
+      Reduction r = Reduce(node);
+      if (shift0 == shift1) {
+        ASSERT_TRUE(r.Changed());
+        EXPECT_THAT(r.replacement(), IsUint64LessThan(p0, p1));
+      } else {
+        ASSERT_FALSE(r.Changed());
+      }
+    }
+  }
+}
+
+// -----------------------------------------------------------------------------
+// Int64LessThan
+
+TEST_F(MachineOperatorReducerTest, Int64LessThanWithWord64SarShiftOutZeros) {
+  Node* const p0 = Parameter(0);
+  Node* const p1 = Parameter(1);
+  TRACED_FORRANGE(int64_t, shift0, 1, 3) {
+    TRACED_FORRANGE(int64_t, shift1, 1, 3) {
+      Node* const node =
+          graph()->NewNode(machine()->Int64LessThan(),
+                           graph()->NewNode(machine()->Word64SarShiftOutZeros(),
+                                            p0, Int64Constant(shift0)),
+                           graph()->NewNode(machine()->Word64SarShiftOutZeros(),
+                                            p1, Int64Constant(shift1)));
+
+      Reduction r = Reduce(node);
+      if (shift0 == shift1) {
+        ASSERT_TRUE(r.Changed());
+        EXPECT_THAT(r.replacement(), IsInt64LessThan(p0, p1));
+      } else {
+        ASSERT_FALSE(r.Changed());
+      }
+    }
+  }
+}
+
 // -----------------------------------------------------------------------------
 // Float64Mul
diff --git a/test/unittests/compiler/node-test-utils.cc b/test/unittests/compiler/node-test-utils.cc
index 3869f90a87..aeceabeffa 100644
--- a/test/unittests/compiler/node-test-utils.cc
+++ b/test/unittests/compiler/node-test-utils.cc
@@ -2187,6 +2187,8 @@ IS_BINOP_MATCHER(Int64Add)
 IS_BINOP_MATCHER(Int64Div)
 IS_BINOP_MATCHER(Int64Sub)
 IS_BINOP_MATCHER(Int64Mul)
+IS_BINOP_MATCHER(Int64LessThan)
+IS_BINOP_MATCHER(Uint64LessThan)
 IS_BINOP_MATCHER(JSAdd)
 IS_BINOP_MATCHER(JSParseInt)
 IS_BINOP_MATCHER(Float32Equal)
diff --git a/test/unittests/compiler/node-test-utils.h b/test/unittests/compiler/node-test-utils.h
index 645ce54544..42d6db82cf 100644
--- a/test/unittests/compiler/node-test-utils.h
+++ b/test/unittests/compiler/node-test-utils.h
@@ -415,6 +415,10 @@ Matcher<Node*> IsInt64Mul(const Matcher<Node*>& lhs_matcher,
                           const Matcher<Node*>& rhs_matcher);
 Matcher<Node*> IsInt64Div(const Matcher<Node*>& lhs_matcher,
                           const Matcher<Node*>& rhs_matcher);
+Matcher<Node*> IsInt64LessThan(const Matcher<Node*>& lhs_matcher,
+                               const Matcher<Node*>& rhs_matcher);
+Matcher<Node*> IsUint64LessThan(const Matcher<Node*>& lhs_matcher,
+                                const Matcher<Node*>& rhs_matcher);
 Matcher<Node*> IsJSAdd(const Matcher<Node*>& lhs_matcher,
                        const Matcher<Node*>& rhs_matcher);
 Matcher<Node*> IsJSParseInt(const Matcher<Node*>& lhs_matcher,