[turbofan][csa] optimize Smi untagging better

- Introduce new operator variants for signed right-shifts with the
  additional information that they always shift out zeros.
- Use these new operators for Smi untagging.
- Merge left-shifts with a preceding Smi-untagging shift.
- Optimize comparisons of Smi-untagging shifts to operate on the
  unshifted word.
- Optimize 64bit comparisons of values expanded from 32bit to use
  a 32bit comparison instead.
- Change CodeStubAssembler::UntagSmi to first sign-extend and then
  right-shift to enable better address computations for Smi indices.

Bug: v8:9962
Change-Id: If91300f365e8f01457aebf0bd43bdf88b305c460
Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/2135734
Commit-Queue: Tobias Tebbi <tebbi@chromium.org>
Reviewed-by: Georg Neis <neis@chromium.org>
Cr-Commit-Position: refs/heads/master@{#67378}
Author: Tobias Tebbi <tebbi@chromium.org>
Date: 2020-04-24 19:29:07 +02:00 (committed by Commit Bot)
Commit: ff22ae80e2, parent: 961e99d320
17 changed files with 550 additions and 39 deletions
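
Background for the identities the diffs below rely on: a Smi ("small integer") is stored directly in a tagged word, with the low tag bit(s) known to be zero, and untagging is an arithmetic right shift that only ever shifts out those zero bits. That guarantee is exactly what the new ShiftOutZeros operator variants encode. A minimal standalone sketch of the invariant and of the two identities the reducer exploits; plain C++ with an assumed 1-bit tag, not V8 code:

#include <cassert>
#include <cstdint>
#include <initializer_list>

constexpr int kTagSize = 1;  // assumption: 64-bit build with 31-bit Smis

// Left-shift without signed-overflow UB (same trick as V8's own unit tests).
int32_t Shl(int32_t x, int32_t y) {
  return static_cast<int32_t>(static_cast<uint32_t>(x) << y);
}

int32_t Tag(int32_t value) { return Shl(value, kTagSize); }  // tag bit is 0
int32_t Untag(int32_t smi) { return smi >> kTagSize; }       // arithmetic shift

int main() {
  for (int32_t v : {-5, 0, 7, 123456}) {
    int32_t smi = Tag(v);
    assert((smi & ((1 << kTagSize) - 1)) == 0);  // only zeros get shifted out
    assert(Untag(smi) == v);
    // Shift merging: (smi >> 1) << 3 == smi << 2, because >> 1 dropped a zero.
    assert(Shl(Untag(smi), 3) == Shl(smi, 3 - kTagSize));
    // Comparing untagged values gives the same answer on the unshifted words.
    assert((Untag(smi) < Untag(Tag(100))) == (smi < Tag(100)));
  }
  return 0;
}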

src/codegen/code-stub-assembler.cc

@@ -763,8 +763,8 @@ TNode<BoolT> CodeStubAssembler::IsValidSmiIndex(TNode<Smi> smi) {
 TNode<IntPtrT> CodeStubAssembler::TaggedIndexToIntPtr(
     TNode<TaggedIndex> value) {
-  return Signed(WordSar(BitcastTaggedToWordForTagAndSmiBits(value),
-                        IntPtrConstant(kSmiTagSize)));
+  return Signed(WordSarShiftOutZeros(BitcastTaggedToWordForTagAndSmiBits(value),
+                                     IntPtrConstant(kSmiTagSize)));
 }
TNode<TaggedIndex> CodeStubAssembler::IntPtrToTaggedIndex(
@@ -859,16 +859,17 @@ TNode<IntPtrT> CodeStubAssembler::SmiUntag(SloppyTNode<Smi> value) {
   if (ToIntPtrConstant(value, &constant_value)) {
     return IntPtrConstant(constant_value >> (kSmiShiftSize + kSmiTagSize));
   }
+  TNode<IntPtrT> raw_bits = BitcastTaggedToWordForTagAndSmiBits(value);
   if (COMPRESS_POINTERS_BOOL) {
-    return ChangeInt32ToIntPtr(SmiToInt32(value));
+    // Clear the upper half using sign-extension.
+    raw_bits = ChangeInt32ToIntPtr(TruncateIntPtrToInt32(raw_bits));
   }
-  return Signed(WordSar(BitcastTaggedToWordForTagAndSmiBits(value),
-                        SmiShiftBitsConstant()));
+  return Signed(WordSarShiftOutZeros(raw_bits, SmiShiftBitsConstant()));
 }
}
 TNode<Int32T> CodeStubAssembler::SmiToInt32(SloppyTNode<Smi> value) {
   if (COMPRESS_POINTERS_BOOL) {
-    return Signed(Word32Sar(
+    return Signed(Word32SarShiftOutZeros(
         TruncateIntPtrToInt32(BitcastTaggedToWordForTagAndSmiBits(value)),
         SmiShiftBitsConstant32()));
   }
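
Why the operand order in SmiUntag changed: once the value is sign-extended to the full word first, the untag shift and a later element-offset shift act on the same 64-bit word, so the new shift-merging reduction can fold them. A sketch of the effect, assuming a 1-bit tag and 8-byte array elements; plain C++, not CSA code:

#include <cassert>
#include <cstdint>
#include <initializer_list>

int64_t Shl64(int64_t x, int64_t y) {
  return static_cast<int64_t>(static_cast<uint64_t>(x) << y);
}

// Old order: 32-bit untag, then widen, then scale. The untag shift and the
// scale shift act on different widths and cannot be merged.
int64_t ElementOffsetOld(int32_t tagged) {
  int32_t index = tagged >> 1;             // Word32Sar
  return static_cast<int64_t>(index) * 8;  // ChangeInt32ToInt64, then << 3
}

// New order: sign-extend first, then shift out the zero tag bit. Untag (>> 1)
// and scale (<< 3) now both act on the 64-bit word, so the reducer folds them
// into a single shift.
int64_t ElementOffsetNew(int32_t tagged) {
  int64_t wide = tagged;       // sign-extension, as in the new SmiUntag
  return Shl64(wide >> 1, 3);  // shift-out-zeros + shl merges to: wide << 2
}

int main() {
  for (int32_t v : {-3, 0, 5, 1 << 20}) {
    int32_t tagged = static_cast<int32_t>(static_cast<uint32_t>(v) << 1);
    assert(ElementOffsetOld(tagged) == ElementOffsetNew(tagged));
    assert(ElementOffsetNew(tagged) == Shl64(tagged, 2));  // the merged form
  }
  return 0;
}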

src/compiler/backend/instruction-selector.cc

@@ -12,6 +12,7 @@
#include "src/compiler/backend/instruction-selector-impl.h"
#include "src/compiler/compiler-source-position-table.h"
#include "src/compiler/node-matchers.h"
#include "src/compiler/node-properties.h"
#include "src/compiler/pipeline.h"
#include "src/compiler/schedule.h"
#include "src/compiler/state-values-utils.h"
@@ -3014,7 +3015,8 @@ void InstructionSelector::VisitUnreachable(Node* node) {
 }
 
 void InstructionSelector::VisitStaticAssert(Node* node) {
-  node->InputAt(0)->Print();
+  Node* asserted = node->InputAt(0);
+  asserted->Print(2);
   FATAL("Expected turbofan static assert to hold, but got non-true input!\n");
 }

src/codegen/code-assembler.h

@@ -273,6 +273,7 @@ class CodeAssemblerParameterizedLabel;
V(WordShl, WordT, WordT, IntegralT) \
V(WordShr, WordT, WordT, IntegralT) \
V(WordSar, WordT, WordT, IntegralT) \
V(WordSarShiftOutZeros, WordT, WordT, IntegralT) \
V(Word32Or, Word32T, Word32T, Word32T) \
V(Word32And, Word32T, Word32T, Word32T) \
V(Word32Xor, Word32T, Word32T, Word32T) \
@@ -280,6 +281,7 @@ class CodeAssemblerParameterizedLabel;
V(Word32Shl, Word32T, Word32T, Word32T) \
V(Word32Shr, Word32T, Word32T, Word32T) \
V(Word32Sar, Word32T, Word32T, Word32T) \
V(Word32SarShiftOutZeros, Word32T, Word32T, Word32T) \
V(Word64And, Word64T, Word64T, Word64T) \
V(Word64Or, Word64T, Word64T, Word64T) \
V(Word64Xor, Word64T, Word64T, Word64T) \

src/compiler/effect-control-linearizer.cc

@@ -4552,18 +4552,20 @@ Node* EffectControlLinearizer::ChangeUint32ToSmi(Node* value) {
}
 Node* EffectControlLinearizer::ChangeSmiToIntPtr(Node* value) {
   // Do shift on 32bit values if Smis are stored in the lower word.
   if (machine()->Is64() && SmiValuesAre31Bits()) {
-    return __ ChangeInt32ToInt64(
-        __ Word32Sar(__ TruncateInt64ToInt32(value), SmiShiftBitsConstant()));
+    // First sign-extend the upper half, then shift away the Smi tag.
+    return __ WordSarShiftOutZeros(
+        __ ChangeInt32ToInt64(__ TruncateInt64ToInt32(value)),
+        SmiShiftBitsConstant());
   }
-  return __ WordSar(value, SmiShiftBitsConstant());
+  return __ WordSarShiftOutZeros(value, SmiShiftBitsConstant());
 }
 
 Node* EffectControlLinearizer::ChangeSmiToInt32(Node* value) {
   // Do shift on 32bit values if Smis are stored in the lower word.
   if (machine()->Is64() && SmiValuesAre31Bits()) {
-    return __ Word32Sar(__ TruncateInt64ToInt32(value), SmiShiftBitsConstant());
+    return __ Word32SarShiftOutZeros(__ TruncateInt64ToInt32(value),
+                                     SmiShiftBitsConstant());
   }
   if (machine()->Is64()) {
     return __ TruncateInt64ToInt32(ChangeSmiToIntPtr(value));

src/compiler/graph-assembler.h

@@ -82,6 +82,7 @@
V(Word32Equal) \
V(Word32Or) \
V(Word32Sar) \
V(Word32SarShiftOutZeros) \
V(Word32Shl) \
V(Word32Shr) \
V(Word32Xor) \
@@ -91,6 +92,7 @@ class BasicBlock;
V(WordAnd) \
V(WordEqual) \
V(WordSar) \
V(WordSarShiftOutZeros) \
V(WordShl)
#define CHECKED_ASSEMBLER_MACH_BINOP_LIST(V) \

src/compiler/machine-operator-reducer.cc

@@ -4,6 +4,7 @@
#include "src/compiler/machine-operator-reducer.h"
#include <cmath>
#include <limits>
#include "src/base/bits.h"
#include "src/base/division-by-constant.h"
@@ -14,6 +15,7 @@
#include "src/compiler/machine-graph.h"
#include "src/compiler/node-matchers.h"
#include "src/compiler/node-properties.h"
#include "src/compiler/opcodes.h"
#include "src/numbers/conversions-inl.h"
namespace v8 {
@@ -423,7 +425,7 @@ Reduction MachineOperatorReducer::Reduce(Node* node) {
           return ReplaceBool(true);
         }
       }
-      break;
+      return ReduceWord32Comparisons(node);
}
case IrOpcode::kInt32LessThanOrEqual: {
Int32BinopMatcher m(node);
@@ -431,7 +433,7 @@
         return ReplaceBool(m.left().Value() <= m.right().Value());
       }
       if (m.LeftEqualsRight()) return ReplaceBool(true);  // x <= x => true
-      break;
+      return ReduceWord32Comparisons(node);
}
case IrOpcode::kUint32LessThan: {
Uint32BinopMatcher m(node);
@@ -456,7 +458,7 @@
           // TODO(turbofan): else the comparison is always true.
         }
       }
-      break;
+      return ReduceWord32Comparisons(node);
}
case IrOpcode::kUint32LessThanOrEqual: {
Uint32BinopMatcher m(node);
@@ -466,7 +468,7 @@
         return ReplaceBool(m.left().Value() <= m.right().Value());
       }
       if (m.LeftEqualsRight()) return ReplaceBool(true);  // x <= x => true
-      break;
+      return ReduceWord32Comparisons(node);
}
case IrOpcode::kFloat32Sub: {
Float32BinopMatcher m(node);
@@ -873,6 +875,11 @@ Reduction MachineOperatorReducer::Reduce(Node* node) {
case IrOpcode::kTrapIf:
case IrOpcode::kTrapUnless:
return ReduceConditional(node);
case IrOpcode::kInt64LessThan:
case IrOpcode::kInt64LessThanOrEqual:
case IrOpcode::kUint64LessThan:
case IrOpcode::kUint64LessThanOrEqual:
return ReduceWord64Comparisons(node);
default:
break;
}
@@ -1247,6 +1254,78 @@ Reduction MachineOperatorReducer::ReduceProjection(size_t index, Node* node) {
return NoChange();
}
Reduction MachineOperatorReducer::ReduceWord32Comparisons(Node* node) {
DCHECK(node->opcode() == IrOpcode::kInt32LessThan ||
node->opcode() == IrOpcode::kInt32LessThanOrEqual ||
node->opcode() == IrOpcode::kUint32LessThan ||
node->opcode() == IrOpcode::kUint32LessThanOrEqual);
Int32BinopMatcher m(node);
// (x >>> K) < (y >>> K) => x < y if only zeros shifted out
if (m.left().op() == machine()->Word32SarShiftOutZeros() &&
m.right().op() == machine()->Word32SarShiftOutZeros()) {
Int32BinopMatcher mleft(m.left().node());
Int32BinopMatcher mright(m.right().node());
if (mleft.right().HasValue() && mright.right().Is(mleft.right().Value())) {
node->ReplaceInput(0, mleft.left().node());
node->ReplaceInput(1, mright.left().node());
return Changed(node);
}
}
return NoChange();
}
const Operator* MachineOperatorReducer::Map64To32Comparison(
const Operator* op, bool sign_extended) {
switch (op->opcode()) {
case IrOpcode::kInt64LessThan:
return sign_extended ? machine()->Int32LessThan()
: machine()->Uint32LessThan();
case IrOpcode::kInt64LessThanOrEqual:
return sign_extended ? machine()->Int32LessThanOrEqual()
: machine()->Uint32LessThanOrEqual();
case IrOpcode::kUint64LessThan:
return machine()->Uint32LessThan();
case IrOpcode::kUint64LessThanOrEqual:
return machine()->Uint32LessThanOrEqual();
default:
UNREACHABLE();
}
}
Reduction MachineOperatorReducer::ReduceWord64Comparisons(Node* node) {
DCHECK(node->opcode() == IrOpcode::kInt64LessThan ||
node->opcode() == IrOpcode::kInt64LessThanOrEqual ||
node->opcode() == IrOpcode::kUint64LessThan ||
node->opcode() == IrOpcode::kUint64LessThanOrEqual);
Int64BinopMatcher m(node);
bool sign_extended =
m.left().IsChangeInt32ToInt64() && m.right().IsChangeInt32ToInt64();
if (sign_extended || (m.left().IsChangeUint32ToUint64() &&
m.right().IsChangeUint32ToUint64())) {
node->ReplaceInput(0, NodeProperties::GetValueInput(m.left().node(), 0));
node->ReplaceInput(1, NodeProperties::GetValueInput(m.right().node(), 0));
NodeProperties::ChangeOp(node,
Map64To32Comparison(node->op(), sign_extended));
return Changed(node).FollowedBy(Reduce(node));
}
// (x >>> K) < (y >>> K) => x < y if only zeros shifted out
// This is useful for Smi untagging, which results in such a shift.
if (m.left().op() == machine()->Word64SarShiftOutZeros() &&
m.right().op() == machine()->Word64SarShiftOutZeros()) {
Int64BinopMatcher mleft(m.left().node());
Int64BinopMatcher mright(m.right().node());
if (mleft.right().HasValue() && mright.right().Is(mleft.right().Value())) {
node->ReplaceInput(0, mleft.left().node());
node->ReplaceInput(1, mright.left().node());
return Changed(node);
}
}
return NoChange();
}
Reduction MachineOperatorReducer::ReduceWord32Shifts(Node* node) {
DCHECK((node->opcode() == IrOpcode::kWord32Shl) ||
(node->opcode() == IrOpcode::kWord32Shr) ||
@@ -1275,14 +1354,42 @@ Reduction MachineOperatorReducer::ReduceWord32Shl(Node* node) {
         base::ShlWithWraparound(m.left().Value(), m.right().Value()));
   }
   if (m.right().IsInRange(1, 31)) {
-    // (x >>> K) << K => x & ~(2^K - 1)
-    // (x >> K) << K => x & ~(2^K - 1)
     if (m.left().IsWord32Sar() || m.left().IsWord32Shr()) {
       Int32BinopMatcher mleft(m.left().node());
+      // If x >> K only shifted out zeros:
+      // (x >> K) << L => x          if K == L
+      // (x >> K) << L => x >> (K-L) if K > L
+      // (x >> K) << L => x << (L-K) if K < L
+      // Since this is used for Smi untagging, we currently only need it for
+      // signed shifts.
+      if (mleft.op() == machine()->Word32SarShiftOutZeros() &&
+          mleft.right().IsInRange(1, 31)) {
+        Node* x = mleft.left().node();
+        int k = mleft.right().Value();
+        int l = m.right().Value();
+        if (k == l) {
+          return Replace(x);
+        } else if (k > l) {
+          node->ReplaceInput(0, x);
+          node->ReplaceInput(1, Uint32Constant(k - l));
+          NodeProperties::ChangeOp(node, machine()->Word32Sar());
+          return Changed(node).FollowedBy(ReduceWord32Sar(node));
+        } else {
+          DCHECK(k < l);
+          node->ReplaceInput(0, x);
+          node->ReplaceInput(1, Uint32Constant(l - k));
+          return Changed(node);
+        }
+      }
+      // (x >>> K) << K => x & ~(2^K - 1)
+      // (x >> K) << K => x & ~(2^K - 1)
       if (mleft.right().Is(m.right().Value())) {
         node->ReplaceInput(0, mleft.left().node());
         node->ReplaceInput(1,
-                           Uint32Constant(~((1U << m.right().Value()) - 1U)));
+                           Uint32Constant(std::numeric_limits<uint32_t>::max()
+                                          << m.right().Value()));
         NodeProperties::ChangeOp(node, machine()->Word32And());
         return Changed(node).FollowedBy(ReduceWord32And(node));
       }
@@ -1299,6 +1406,46 @@ Reduction MachineOperatorReducer::ReduceWord64Shl(Node* node) {
return ReplaceInt64(
base::ShlWithWraparound(m.left().Value(), m.right().Value()));
}
if (m.right().IsInRange(1, 63) &&
(m.left().IsWord64Sar() || m.left().IsWord64Shr())) {
Int64BinopMatcher mleft(m.left().node());
// If x >> K only shifted out zeros:
// (x >> K) << L => x if K == L
// (x >> K) << L => x >> (K-L) if K > L
// (x >> K) << L => x << (L-K) if K < L
// Since this is used for Smi untagging, we currently only need it for
// signed shifts.
if (mleft.op() == machine()->Word64SarShiftOutZeros() &&
mleft.right().IsInRange(1, 63)) {
Node* x = mleft.left().node();
int64_t k = mleft.right().Value();
int64_t l = m.right().Value();
if (k == l) {
return Replace(x);
} else if (k > l) {
node->ReplaceInput(0, x);
node->ReplaceInput(1, Uint64Constant(k - l));
NodeProperties::ChangeOp(node, machine()->Word64Sar());
return Changed(node).FollowedBy(ReduceWord64Sar(node));
} else {
DCHECK(k < l);
node->ReplaceInput(0, x);
node->ReplaceInput(1, Uint64Constant(l - k));
return Changed(node);
}
}
// (x >>> K) << K => x & ~(2^K - 1)
// (x >> K) << K => x & ~(2^K - 1)
if (mleft.right().Is(m.right().Value())) {
node->ReplaceInput(0, mleft.left().node());
node->ReplaceInput(1, Uint64Constant(std::numeric_limits<uint64_t>::max()
<< m.right().Value()));
NodeProperties::ChangeOp(node, machine()->Word64And());
return Changed(node).FollowedBy(ReduceWord64And(node));
}
}
return NoChange();
}
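
Both new reduction families rest on elementary facts about shifts whose discarded bits are known to be zero. A standalone brute-force check of the shl-of-sar merging rules and of the comparison strength reduction; plain C++, independent of the reducer, with the helper name and value ranges chosen just for this sketch:

#include <cassert>
#include <cstdint>

// "x >> k shifted out only zeros" means the low k bits of x are zero.
uint32_t SarShiftOutZeros(uint32_t x, int k) {
  assert((x & ((1u << k) - 1u)) == 0);  // the ShiftOutZeros precondition
  return static_cast<uint32_t>(static_cast<int32_t>(x) >> k);
}

int main() {
  for (int32_t v = -50000; v <= 50000; v += 7) {
    for (int k = 1; k <= 3; ++k) {
      uint32_t x = static_cast<uint32_t>(v) << k;  // low k bits are zero
      uint32_t sx = SarShiftOutZeros(x, k);
      // Merging rules, as in ReduceWord32Shl/ReduceWord64Shl:
      //   (x >> K) << L == x            if K == L
      //   (x >> K) << L == x >> (K-L)   if K > L
      //   (x >> K) << L == x << (L-K)   if K < L
      for (int l = 0; l <= 3; ++l) {
        uint32_t merged = sx << l;
        if (k == l) assert(merged == x);
        if (k > l) assert(merged == SarShiftOutZeros(x, k - l));
        if (k < l) assert(merged == x << (l - k));
      }
      // Comparison rule, as in ReduceWord32Comparisons: comparing the shifted
      // values equals comparing the unshifted words, signed and unsigned.
      uint32_t y = static_cast<uint32_t>(40000 - v) << k;
      uint32_t sy = SarShiftOutZeros(y, k);
      assert((static_cast<int32_t>(sx) < static_cast<int32_t>(sy)) ==
             (static_cast<int32_t>(x) < static_cast<int32_t>(y)));
      assert((sx < sy) == (x < y));
    }
  }
  return 0;
}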

src/compiler/machine-operator-reducer.h

@@ -92,6 +92,9 @@ class V8_EXPORT_PRIVATE MachineOperatorReducer final
Reduction ReduceUint32Mod(Node* node);
Reduction ReduceStore(Node* node);
Reduction ReduceProjection(size_t index, Node* node);
const Operator* Map64To32Comparison(const Operator* op, bool sign_extended);
Reduction ReduceWord32Comparisons(Node* node);
Reduction ReduceWord64Comparisons(Node* node);
Reduction ReduceWord32Shifts(Node* node);
Reduction ReduceWord32Shl(Node* node);
Reduction ReduceWord64Shl(Node* node);

src/compiler/machine-operator.cc

@@ -106,7 +106,6 @@ LoadRepresentation LoadRepresentationOf(Operator const* op) {
return OpParameter<LoadRepresentation>(op);
}
StoreRepresentation const& StoreRepresentationOf(Operator const* op) {
DCHECK(IrOpcode::kStore == op->opcode() ||
IrOpcode::kProtectedStore == op->opcode());
@@ -150,6 +149,22 @@ MachineType AtomicOpType(Operator const* op) {
return OpParameter<MachineType>(op);
}
size_t hash_value(ShiftKind kind) { return static_cast<size_t>(kind); }
V8_EXPORT_PRIVATE std::ostream& operator<<(std::ostream& os, ShiftKind kind) {
switch (kind) {
case ShiftKind::kNormal:
return os << "Normal";
case ShiftKind::kShiftOutZeros:
return os << "ShiftOutZeros";
}
}
ShiftKind ShiftKindOf(Operator const* op) {
DCHECK(IrOpcode::kWord32Sar == op->opcode() ||
IrOpcode::kWord64Sar == op->opcode());
return OpParameter<ShiftKind>(op);
}
// The format is:
// V(Name, properties, value_input_count, control_input_count, output_count)
#define PURE_BINARY_OP_LIST_32(V) \
@@ -158,7 +173,6 @@ MachineType AtomicOpType(Operator const* op) {
   V(Word32Xor, Operator::kAssociative | Operator::kCommutative, 2, 0, 1) \
   V(Word32Shl, Operator::kNoProperties, 2, 0, 1) \
   V(Word32Shr, Operator::kNoProperties, 2, 0, 1) \
-  V(Word32Sar, Operator::kNoProperties, 2, 0, 1) \
V(Word32Ror, Operator::kNoProperties, 2, 0, 1) \
V(Word32Equal, Operator::kCommutative, 2, 0, 1) \
V(Int32Add, Operator::kAssociative | Operator::kCommutative, 2, 0, 1) \
@@ -183,7 +197,6 @@ MachineType AtomicOpType(Operator const* op) {
   V(Word64Xor, Operator::kAssociative | Operator::kCommutative, 2, 0, 1) \
   V(Word64Shl, Operator::kNoProperties, 2, 0, 1) \
   V(Word64Shr, Operator::kNoProperties, 2, 0, 1) \
-  V(Word64Sar, Operator::kNoProperties, 2, 0, 1) \
V(Word64Ror, Operator::kNoProperties, 2, 0, 1) \
V(Word64Equal, Operator::kCommutative, 2, 0, 1) \
V(Int64Add, Operator::kAssociative | Operator::kCommutative, 2, 0, 1) \
@@ -652,6 +665,38 @@ PURE_OPTIONAL_OP_LIST(PURE)
OVERFLOW_OP_LIST(OVERFLOW_OP)
#undef OVERFLOW_OP
template <ShiftKind kind>
struct Word32SarOperator : Operator1<ShiftKind> {
Word32SarOperator()
: Operator1(IrOpcode::kWord32Sar, Operator::kPure, "Word32Sar", 2, 0, 0,
1, 0, 0, kind) {}
};
const Operator* MachineOperatorBuilder::Word32Sar(ShiftKind kind) {
switch (kind) {
case ShiftKind::kNormal:
return GetCachedOperator<Word32SarOperator<ShiftKind::kNormal>>();
case ShiftKind::kShiftOutZeros:
return GetCachedOperator<Word32SarOperator<ShiftKind::kShiftOutZeros>>();
}
}
template <ShiftKind kind>
struct Word64SarOperator : Operator1<ShiftKind> {
Word64SarOperator()
: Operator1(IrOpcode::kWord64Sar, Operator::kPure, "Word64Sar", 2, 0, 0,
1, 0, 0, kind) {}
};
const Operator* MachineOperatorBuilder::Word64Sar(ShiftKind kind) {
switch (kind) {
case ShiftKind::kNormal:
return GetCachedOperator<Word64SarOperator<ShiftKind::kNormal>>();
case ShiftKind::kShiftOutZeros:
return GetCachedOperator<Word64SarOperator<ShiftKind::kShiftOutZeros>>();
}
}
template <MachineRepresentation rep, MachineSemantic sem>
struct LoadOperator : public Operator1<LoadRepresentation> {
LoadOperator()
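
A note on the caching above: GetCachedOperator hands out one canonical Operator instance per opcode-and-parameter combination, which is what lets the reducer test for a parameterized operator with a plain pointer comparison (mleft.op() == machine()->Word32SarShiftOutZeros()). A simplified sketch of that pattern, using a function-local static instead of V8's cache table; the struct layout and names here are invented for illustration:

#include <cassert>

enum class ShiftKind { kNormal, kShiftOutZeros };

struct Operator {
  const char* mnemonic;
  ShiftKind kind;
};

template <ShiftKind kind>
const Operator* CachedWord32Sar() {
  static const Operator op{"Word32Sar", kind};  // one instance per kind
  return &op;
}

const Operator* Word32Sar(ShiftKind kind) {
  switch (kind) {
    case ShiftKind::kNormal:
      return CachedWord32Sar<ShiftKind::kNormal>();
    case ShiftKind::kShiftOutZeros:
      return CachedWord32Sar<ShiftKind::kShiftOutZeros>();
  }
  return nullptr;  // unreachable for valid enum values
}

int main() {
  // Same kind yields the same pointer; different kinds stay distinct, so
  // operator identity doubles as parameter identity.
  assert(Word32Sar(ShiftKind::kNormal) == Word32Sar(ShiftKind::kNormal));
  assert(Word32Sar(ShiftKind::kNormal) != Word32Sar(ShiftKind::kShiftOutZeros));
  return 0;
}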

src/compiler/machine-operator.h

@@ -180,6 +180,16 @@ V8_EXPORT_PRIVATE S8x16ShuffleParameter const& S8x16ShuffleParameterOf(
StackCheckKind StackCheckKindOf(Operator const* op) V8_WARN_UNUSED_RESULT;
// ShiftKind::kShiftOutZeros means that it is guaranteed that the bits shifted
// out of the left operand are all zeros. If this is not the case, undefined
// behavior (i.e., incorrect optimizations) will happen.
// This is mostly useful for Smi untagging.
enum class ShiftKind { kNormal, kShiftOutZeros };
size_t hash_value(ShiftKind);
V8_EXPORT_PRIVATE std::ostream& operator<<(std::ostream&, ShiftKind);
ShiftKind ShiftKindOf(Operator const*) V8_WARN_UNUSED_RESULT;
// Interface for building machine-level operators. These operators are
// machine-level but machine-independent and thus define a language suitable
// for generating code to run on architectures such as ia32, x64, arm, etc.
@@ -293,7 +303,11 @@ class V8_EXPORT_PRIVATE MachineOperatorBuilder final
   const Operator* Word32Xor();
   const Operator* Word32Shl();
   const Operator* Word32Shr();
-  const Operator* Word32Sar();
+  const Operator* Word32Sar(ShiftKind kind);
+  const Operator* Word32Sar() { return Word32Sar(ShiftKind::kNormal); }
+  const Operator* Word32SarShiftOutZeros() {
+    return Word32Sar(ShiftKind::kShiftOutZeros);
+  }
const Operator* Word32Ror();
const Operator* Word32Equal();
const Operator* Word32Clz();
@@ -318,7 +332,11 @@ class V8_EXPORT_PRIVATE MachineOperatorBuilder final
   const Operator* Word64Xor();
   const Operator* Word64Shl();
   const Operator* Word64Shr();
-  const Operator* Word64Sar();
+  const Operator* Word64Sar(ShiftKind kind);
+  const Operator* Word64Sar() { return Word64Sar(ShiftKind::kNormal); }
+  const Operator* Word64SarShiftOutZeros() {
+    return Word64Sar(ShiftKind::kShiftOutZeros);
+  }
const Operator* Word64Ror();
const Operator* Word64Clz();
const OptionalOperator Word64Ctz();
@@ -841,7 +859,6 @@ class V8_EXPORT_PRIVATE MachineOperatorBuilder final
   V(Word, Xor) \
   V(Word, Shl) \
   V(Word, Shr) \
-  V(Word, Sar) \
V(Word, Ror) \
V(Word, Clz) \
V(Word, Equal) \
@@ -864,6 +881,13 @@ class V8_EXPORT_PRIVATE MachineOperatorBuilder final
#undef PSEUDO_OP
#undef PSEUDO_OP_LIST
const Operator* WordSar(ShiftKind kind = ShiftKind::kNormal) {
return Is32() ? Word32Sar(kind) : Word64Sar(kind);
}
const Operator* WordSarShiftOutZeros() {
return WordSar(ShiftKind::kShiftOutZeros);
}
private:
Zone* zone_;
MachineRepresentation const word_;

src/compiler/node.cc

@@ -314,22 +314,32 @@ bool Node::OwnedBy(Node const* owner1, Node const* owner2) const {
return mask == 3;
}
-void Node::Print() const {
+void Node::Print(int depth) const {
   StdoutStream os;
-  Print(os);
+  Print(os, depth);
 }
 
-void Node::Print(std::ostream& os) const {
-  os << *this << std::endl;
-  for (Node* input : this->inputs()) {
+namespace {
+void PrintNode(const Node* node, std::ostream& os, int depth,
+               int indentation = 0) {
+  for (int i = 0; i < indentation; ++i) {
     os << "  ";
-    if (input) {
-      os << *input;
-    } else {
-      os << "(NULL)";
-    }
-    os << std::endl;
   }
+  if (node) {
+    os << *node;
+  } else {
+    os << "(NULL)";
+  }
+  os << std::endl;
+  if (depth <= 0) return;
+  for (Node* input : node->inputs()) {
+    PrintNode(input, os, depth - 1, indentation + 1);
+  }
+}
+}  // namespace
 
+void Node::Print(std::ostream& os, int depth) const {
+  PrintNode(this, os, depth);
 }
std::ostream& operator<<(std::ostream& os, const Node& n) {

src/compiler/node.h

@@ -144,8 +144,8 @@ class V8_EXPORT_PRIVATE Node final {
// Returns true if {owner1} and {owner2} are the only users of {this} node.
bool OwnedBy(Node const* owner1, Node const* owner2) const;
-  void Print() const;
-  void Print(std::ostream&) const;
+  void Print(int depth = 1) const;
+  void Print(std::ostream&, int depth = 1) const;
private:
struct Use;

src/compiler/pipeline.cc

@@ -1955,10 +1955,12 @@ struct CsaOptimizationPhase {
CommonOperatorReducer common_reducer(&graph_reducer, data->graph(),
data->broker(), data->common(),
data->machine(), temp_zone);
ValueNumberingReducer value_numbering(temp_zone, data->graph()->zone());
AddReducer(data, &graph_reducer, &branch_condition_elimination);
AddReducer(data, &graph_reducer, &dead_code_elimination);
AddReducer(data, &graph_reducer, &machine_reducer);
AddReducer(data, &graph_reducer, &common_reducer);
AddReducer(data, &graph_reducer, &value_numbering);
graph_reducer.ReduceGraph();
}
};

src/compiler/raw-machine-assembler.h

@@ -311,6 +311,9 @@ class V8_EXPORT_PRIVATE RawMachineAssembler {
Node* WordSar(Node* a, Node* b) {
return AddNode(machine()->WordSar(), a, b);
}
Node* WordSarShiftOutZeros(Node* a, Node* b) {
return AddNode(machine()->WordSarShiftOutZeros(), a, b);
}
Node* WordRor(Node* a, Node* b) {
return AddNode(machine()->WordRor(), a, b);
}
@@ -346,6 +349,9 @@ class V8_EXPORT_PRIVATE RawMachineAssembler {
Node* Word32Sar(Node* a, Node* b) {
return AddNode(machine()->Word32Sar(), a, b);
}
Node* Word32SarShiftOutZeros(Node* a, Node* b) {
return AddNode(machine()->Word32SarShiftOutZeros(), a, b);
}
Node* Word32Ror(Node* a, Node* b) {
return AddNode(machine()->Word32Ror(), a, b);
}

test/cctest/test-code-stub-assembler.cc

@@ -3940,6 +3940,54 @@ TEST(WasmTaggedToFloat64) {
}
}
TEST(SmiUntagLeftShiftOptimization) {
Isolate* isolate(CcTest::InitIsolateOnce());
const int kNumParams = 1;
CodeAssemblerTester asm_tester(isolate, kNumParams);
CodeStubAssembler m(asm_tester.state());
{
TNode<TaggedIndex> param =
TNode<TaggedIndex>::UncheckedCast(m.Parameter(0));
TNode<WordT> unoptimized =
m.IntPtrMul(m.TaggedIndexToIntPtr(param), m.IntPtrConstant(8));
TNode<WordT> optimized = m.WordShl(
m.BitcastTaggedToWordForTagAndSmiBits(param), 3 - kSmiTagSize);
m.StaticAssert(m.WordEqual(unoptimized, optimized));
m.Return(m.UndefinedConstant());
}
AssemblerOptions options = AssemblerOptions::Default(isolate);
FunctionTester ft(asm_tester.GenerateCode(options), kNumParams);
}
TEST(SmiUntagComparisonOptimization) {
Isolate* isolate(CcTest::InitIsolateOnce());
const int kNumParams = 2;
CodeAssemblerTester asm_tester(isolate, kNumParams);
CodeStubAssembler m(asm_tester.state());
{
TNode<Smi> a = TNode<Smi>::UncheckedCast(m.Parameter(0));
TNode<Smi> b = TNode<Smi>::UncheckedCast(m.Parameter(1));
TNode<BoolT> unoptimized = m.UintPtrLessThan(m.SmiUntag(a), m.SmiUntag(b));
#ifdef V8_COMPRESS_POINTERS
TNode<BoolT> optimized = m.Uint32LessThan(
m.TruncateIntPtrToInt32(m.BitcastTaggedToWordForTagAndSmiBits(a)),
m.TruncateIntPtrToInt32(m.BitcastTaggedToWordForTagAndSmiBits(b)));
#else
TNode<BoolT> optimized =
m.UintPtrLessThan(m.BitcastTaggedToWordForTagAndSmiBits(a),
m.BitcastTaggedToWordForTagAndSmiBits(b));
#endif
m.StaticAssert(m.Word32Equal(unoptimized, optimized));
m.Return(m.UndefinedConstant());
}
AssemblerOptions options = AssemblerOptions::Default(isolate);
FunctionTester ft(asm_tester.GenerateCode(options), kNumParams);
}
} // namespace compiler
} // namespace internal
} // namespace v8
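
These tests hinge on a pattern that is easy to miss: each one builds the same value twice, naively and pre-optimized, and StaticAssert(WordEqual(a, b)) can only fold to true if the reducers plus the ValueNumberingReducer newly added to CsaOptimizationPhase collapse both subgraphs into a single node, making the equality structural rather than observational. A toy hash-consing sketch of why value numbering turns structurally equal expressions into pointer-equal ones; plain C++, not the V8 implementation:

#include <cassert>
#include <map>
#include <tuple>

struct Expr {
  int op;
  const Expr* left;
  const Expr* right;
};

// Returns the one canonical node for (op, left, right), creating it on first
// use. Rebuilding an expression therefore yields the same pointer.
const Expr* Make(int op, const Expr* left, const Expr* right) {
  static std::map<std::tuple<int, const Expr*, const Expr*>, Expr> cache;
  auto it = cache.try_emplace({op, left, right}, Expr{op, left, right}).first;
  return &it->second;
}

int main() {
  const Expr* x = Make(0, nullptr, nullptr);
  // Two independently built but structurally identical expressions become
  // pointer-identical, which is how WordEqual(unoptimized, optimized) can
  // constant-fold to true before instruction selection checks the assert.
  assert(Make(1, x, x) == Make(1, x, x));
  assert(Make(1, x, x) != Make(2, x, x));
  return 0;
}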

test/unittests/compiler/machine-operator-reducer-unittest.cc

@@ -3,6 +3,7 @@
// found in the LICENSE file.
#include "src/compiler/machine-operator-reducer.h"
#include <limits>
#include "src/base/bits.h"
#include "src/base/division-by-constant.h"
#include "src/base/ieee754.h"
@@ -317,6 +318,7 @@ const ComparisonBinaryOperator kComparisonBinaryOperators[] = {
 
 // Avoid undefined behavior on signed integer overflow.
 int32_t Shl(int32_t x, int32_t y) { return static_cast<uint32_t>(x) << y; }
 int64_t Shl(int64_t x, int64_t y) { return static_cast<uint64_t>(x) << y; }
} // namespace
@@ -1091,11 +1093,123 @@ TEST_F(MachineOperatorReducerTest, Word32ShlWithWord32Shr) {
                                   Int32Constant(x));
     Reduction r = Reduce(node);
     ASSERT_TRUE(r.Changed());
-    int32_t m = static_cast<int32_t>(~((1U << x) - 1U));
+    int32_t m = static_cast<int32_t>(std::numeric_limits<uint32_t>::max() << x);
EXPECT_THAT(r.replacement(), IsWord32And(p0, IsInt32Constant(m)));
}
}
TEST_F(MachineOperatorReducerTest, Word32ShlWithWord32SarShiftOutZeros) {
Node* p = Parameter(0);
TRACED_FORRANGE(int32_t, x, 1, 31) {
TRACED_FORRANGE(int32_t, y, 0, 31) {
Node* node = graph()->NewNode(
machine()->Word32Shl(),
graph()->NewNode(machine()->Word32Sar(ShiftKind::kShiftOutZeros), p,
Int32Constant(x)),
Int32Constant(y));
Reduction r = Reduce(node);
ASSERT_TRUE(r.Changed());
if (x == y) {
// (p >> x) << y => p
EXPECT_THAT(r.replacement(), p);
} else if (x < y) {
// (p >> x) << y => p << (y - x)
EXPECT_THAT(r.replacement(), IsWord32Shl(p, IsInt32Constant(y - x)));
} else {
// (p >> x) << y => p >> (x - y)
EXPECT_THAT(r.replacement(), IsWord32Sar(p, IsInt32Constant(x - y)));
}
}
}
}
// -----------------------------------------------------------------------------
// Word64Shl
TEST_F(MachineOperatorReducerTest, Word64ShlWithZeroShift) {
Node* p0 = Parameter(0);
Node* node = graph()->NewNode(machine()->Word64Shl(), p0, Int64Constant(0));
Reduction r = Reduce(node);
ASSERT_TRUE(r.Changed());
EXPECT_EQ(p0, r.replacement());
}
TEST_F(MachineOperatorReducerTest, Word64ShlWithWord64Sar) {
Node* p0 = Parameter(0);
TRACED_FORRANGE(int64_t, x, 1, 63) {
Node* node = graph()->NewNode(
machine()->Word64Shl(),
graph()->NewNode(machine()->Word64Sar(), p0, Int64Constant(x)),
Int64Constant(x));
Reduction r = Reduce(node);
ASSERT_TRUE(r.Changed());
int64_t m = static_cast<int64_t>(~(Shl(int64_t{1}, x) - 1));
EXPECT_THAT(r.replacement(), IsWord64And(p0, IsInt64Constant(m)));
}
}
TEST_F(MachineOperatorReducerTest,
Word64ShlWithWord64SarAndInt64AddAndConstant) {
Node* const p0 = Parameter(0);
TRACED_FOREACH(int64_t, k, kInt64Values) {
TRACED_FORRANGE(int64_t, l, 1, 63) {
if (Shl(k, l) == 0) continue;
// (x + (K << L)) >> L << L => (x & (-1 << L)) + (K << L)
Reduction const r = Reduce(graph()->NewNode(
machine()->Word64Shl(),
graph()->NewNode(machine()->Word64Sar(),
graph()->NewNode(machine()->Int64Add(), p0,
Int64Constant(Shl(k, l))),
Int64Constant(l)),
Int64Constant(l)));
ASSERT_TRUE(r.Changed());
EXPECT_THAT(
r.replacement(),
IsInt64Add(IsWord64And(p0, IsInt64Constant(Shl(int64_t{-1}, l))),
IsInt64Constant(Shl(k, l))));
}
}
}
TEST_F(MachineOperatorReducerTest, Word64ShlWithWord64Shr) {
Node* p0 = Parameter(0);
TRACED_FORRANGE(int64_t, x, 1, 63) {
Node* node = graph()->NewNode(
machine()->Word64Shl(),
graph()->NewNode(machine()->Word64Shr(), p0, Int64Constant(x)),
Int64Constant(x));
Reduction r = Reduce(node);
ASSERT_TRUE(r.Changed());
int64_t m = static_cast<int64_t>(std::numeric_limits<uint64_t>::max() << x);
EXPECT_THAT(r.replacement(), IsWord64And(p0, IsInt64Constant(m)));
}
}
TEST_F(MachineOperatorReducerTest, Word64ShlWithWord64SarShiftOutZeros) {
Node* p = Parameter(0);
TRACED_FORRANGE(int64_t, x, 1, 63) {
TRACED_FORRANGE(int64_t, y, 0, 63) {
Node* node = graph()->NewNode(
machine()->Word64Shl(),
graph()->NewNode(machine()->Word64Sar(ShiftKind::kShiftOutZeros), p,
Int64Constant(x)),
Int64Constant(y));
Reduction r = Reduce(node);
ASSERT_TRUE(r.Changed());
if (x == y) {
// (p >> x) << y => p
EXPECT_THAT(r.replacement(), p);
} else if (x < y) {
// (p >> x) << y => p << (y - x)
EXPECT_THAT(r.replacement(), IsWord64Shl(p, IsInt64Constant(y - x)));
} else {
// (p >> x) << y => p >> (x - y)
EXPECT_THAT(r.replacement(), IsWord64Sar(p, IsInt64Constant(x - y)));
}
}
}
}
// -----------------------------------------------------------------------------
// Word32Equal
@@ -1891,6 +2005,29 @@ TEST_F(MachineOperatorReducerTest, Int32LessThanWithWord32Or) {
}
}
TEST_F(MachineOperatorReducerTest, Int32LessThanWithWord32SarShiftOutZeros) {
Node* const p0 = Parameter(0);
Node* const p1 = Parameter(1);
TRACED_FORRANGE(int32_t, shift0, 1, 3) {
TRACED_FORRANGE(int32_t, shift1, 1, 3) {
Node* const node =
graph()->NewNode(machine()->Int32LessThan(),
graph()->NewNode(machine()->Word32SarShiftOutZeros(),
p0, Int32Constant(shift0)),
graph()->NewNode(machine()->Word32SarShiftOutZeros(),
p1, Int32Constant(shift1)));
Reduction r = Reduce(node);
if (shift0 == shift1) {
ASSERT_TRUE(r.Changed());
EXPECT_THAT(r.replacement(), IsInt32LessThan(p0, p1));
} else {
ASSERT_FALSE(r.Changed());
}
}
}
}
// -----------------------------------------------------------------------------
// Uint32LessThan
@@ -1911,6 +2048,80 @@ TEST_F(MachineOperatorReducerTest, Uint32LessThanWithWord32Sar) {
}
}
TEST_F(MachineOperatorReducerTest, Uint32LessThanWithWord32SarShiftOutZeros) {
Node* const p0 = Parameter(0);
Node* const p1 = Parameter(1);
TRACED_FORRANGE(int32_t, shift0, 1, 3) {
TRACED_FORRANGE(int32_t, shift1, 1, 3) {
Node* const node =
graph()->NewNode(machine()->Uint32LessThan(),
graph()->NewNode(machine()->Word32SarShiftOutZeros(),
p0, Int32Constant(shift0)),
graph()->NewNode(machine()->Word32SarShiftOutZeros(),
p1, Int32Constant(shift1)));
Reduction r = Reduce(node);
if (shift0 == shift1) {
ASSERT_TRUE(r.Changed());
EXPECT_THAT(r.replacement(), IsUint32LessThan(p0, p1));
} else {
ASSERT_FALSE(r.Changed());
}
}
}
}
// -----------------------------------------------------------------------------
// Uint64LessThan
TEST_F(MachineOperatorReducerTest, Uint64LessThanWithWord64SarShiftOutZeros) {
Node* const p0 = Parameter(0);
Node* const p1 = Parameter(1);
TRACED_FORRANGE(int64_t, shift0, 1, 3) {
TRACED_FORRANGE(int64_t, shift1, 1, 3) {
Node* const node =
graph()->NewNode(machine()->Uint64LessThan(),
graph()->NewNode(machine()->Word64SarShiftOutZeros(),
p0, Int64Constant(shift0)),
graph()->NewNode(machine()->Word64SarShiftOutZeros(),
p1, Int64Constant(shift1)));
Reduction r = Reduce(node);
if (shift0 == shift1) {
ASSERT_TRUE(r.Changed());
EXPECT_THAT(r.replacement(), IsUint64LessThan(p0, p1));
} else {
ASSERT_FALSE(r.Changed());
}
}
}
}
// -----------------------------------------------------------------------------
// Int64LessThan
TEST_F(MachineOperatorReducerTest, Int64LessThanWithWord64SarShiftOutZeros) {
Node* const p0 = Parameter(0);
Node* const p1 = Parameter(1);
TRACED_FORRANGE(int64_t, shift0, 1, 3) {
TRACED_FORRANGE(int64_t, shift1, 1, 3) {
Node* const node =
graph()->NewNode(machine()->Int64LessThan(),
graph()->NewNode(machine()->Word64SarShiftOutZeros(),
p0, Int64Constant(shift0)),
graph()->NewNode(machine()->Word64SarShiftOutZeros(),
p1, Int64Constant(shift1)));
Reduction r = Reduce(node);
if (shift0 == shift1) {
ASSERT_TRUE(r.Changed());
EXPECT_THAT(r.replacement(), IsInt64LessThan(p0, p1));
} else {
ASSERT_FALSE(r.Changed());
}
}
}
}
// -----------------------------------------------------------------------------
// Float64Mul

test/unittests/compiler/node-test-utils.cc

@@ -2187,6 +2187,8 @@ IS_BINOP_MATCHER(Int64Add)
IS_BINOP_MATCHER(Int64Div)
IS_BINOP_MATCHER(Int64Sub)
IS_BINOP_MATCHER(Int64Mul)
IS_BINOP_MATCHER(Int64LessThan)
IS_BINOP_MATCHER(Uint64LessThan)
IS_BINOP_MATCHER(JSAdd)
IS_BINOP_MATCHER(JSParseInt)
IS_BINOP_MATCHER(Float32Equal)

test/unittests/compiler/node-test-utils.h

@@ -415,6 +415,10 @@ Matcher<Node*> IsInt64Mul(const Matcher<Node*>& lhs_matcher,
const Matcher<Node*>& rhs_matcher);
Matcher<Node*> IsInt64Div(const Matcher<Node*>& lhs_matcher,
const Matcher<Node*>& rhs_matcher);
Matcher<Node*> IsInt64LessThan(const Matcher<Node*>& lhs_matcher,
const Matcher<Node*>& rhs_matcher);
Matcher<Node*> IsUint64LessThan(const Matcher<Node*>& lhs_matcher,
const Matcher<Node*>& rhs_matcher);
Matcher<Node*> IsJSAdd(const Matcher<Node*>& lhs_matcher,
const Matcher<Node*>& rhs_matcher);
Matcher<Node*> IsJSParseInt(const Matcher<Node*>& lhs_matcher,