Revert "[turbofan][csa] optimize Smi untagging better"
This reverts commit ff22ae80e2.
Reason for revert: new test fails on UBSAN
https://ci.chromium.org/p/v8/builders/ci/V8%20Linux64%20UBSan/10831
Original change's description:
> [turbofan][csa] optimize Smi untagging better
>
> - Introduce new operator variants for signed right-shifts with the
> additional information that they always shift out zeros.
> - Use these new operators for Smi untagging.
> - Merge left-shifts with a preceding Smi-untagging shift.
> - Optimize comparisons of Smi-untagging shifts to operate on the
> unshifted word.
> - Optimize 64bit comparisons of values expanded from 32bit to use
> a 32bit comparison instead.
> - Change CodeStubAssembler::UntagSmi to first sign-extend and then
> right-shift to enable better address computations for Smi indices.
>
> Bug: v8:9962
> Change-Id: If91300f365e8f01457aebf0bd43bdf88b305c460
> Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/2135734
> Commit-Queue: Tobias Tebbi <tebbi@chromium.org>
> Reviewed-by: Georg Neis <neis@chromium.org>
> Cr-Commit-Position: refs/heads/master@{#67378}
TBR=neis@chromium.org,tebbi@chromium.org
Change-Id: I2617d7a44e5ae33fd79322d37c8b722c00162d22
No-Presubmit: true
No-Tree-Checks: true
No-Try: true
Bug: v8:9962
Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/2165873
Reviewed-by: Bill Budge <bbudge@chromium.org>
Commit-Queue: Bill Budge <bbudge@chromium.org>
Cr-Commit-Position: refs/heads/master@{#67380}
Parent: f753ca5f6b
Commit: cdea7999a8
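For context on the reverted optimization (this sketch is not part of the commit or of V8): the original change relied on the fact that a signed right shift which only shifts out zero bits, as Smi untagging does, can be merged with a following left shift and can be dropped from comparisons. A minimal standalone C++ illustration, assuming a hypothetical 1-bit tag whose value is always zero:

// Standalone illustration only; not V8 code. Assumes a 1-bit tag that is
// always zero, mirroring the kSmiTagSize == 1 case described above.
#include <cassert>
#include <cstdint>

int64_t UntagSmi(int64_t tagged) {
  // The bit shifted out is the tag bit, which is known to be zero.
  return tagged >> 1;
}

int main() {
  int64_t a = int64_t{42} << 1;   // tagged 42
  int64_t b = int64_t{100} << 1;  // tagged 100

  // Merging shifts: (x >> 1) << 3 == x << 2 when the shifted-out bit is zero.
  assert((UntagSmi(a) << 3) == (a << 2));

  // Comparing two untagged Smis gives the same result as comparing the
  // tagged words directly, so the untagging shifts can be dropped.
  assert((UntagSmi(a) < UntagSmi(b)) == (a < b));
  return 0;
}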
@@ -763,8 +763,8 @@ TNode<BoolT> CodeStubAssembler::IsValidSmiIndex(TNode<Smi> smi) {
 TNode<IntPtrT> CodeStubAssembler::TaggedIndexToIntPtr(
     TNode<TaggedIndex> value) {
-  return Signed(WordSarShiftOutZeros(BitcastTaggedToWordForTagAndSmiBits(value),
-                                     IntPtrConstant(kSmiTagSize)));
+  return Signed(WordSar(BitcastTaggedToWordForTagAndSmiBits(value),
+                        IntPtrConstant(kSmiTagSize)));
 }

 TNode<TaggedIndex> CodeStubAssembler::IntPtrToTaggedIndex(
@@ -859,17 +859,16 @@ TNode<IntPtrT> CodeStubAssembler::SmiUntag(SloppyTNode<Smi> value) {
   if (ToIntPtrConstant(value, &constant_value)) {
     return IntPtrConstant(constant_value >> (kSmiShiftSize + kSmiTagSize));
   }
-  TNode<IntPtrT> raw_bits = BitcastTaggedToWordForTagAndSmiBits(value);
   if (COMPRESS_POINTERS_BOOL) {
-    // Clear the upper half using sign-extension.
-    raw_bits = ChangeInt32ToIntPtr(TruncateIntPtrToInt32(raw_bits));
+    return ChangeInt32ToIntPtr(SmiToInt32(value));
   }
-  return Signed(WordSarShiftOutZeros(raw_bits, SmiShiftBitsConstant()));
+  return Signed(WordSar(BitcastTaggedToWordForTagAndSmiBits(value),
+                        SmiShiftBitsConstant()));
 }

 TNode<Int32T> CodeStubAssembler::SmiToInt32(SloppyTNode<Smi> value) {
   if (COMPRESS_POINTERS_BOOL) {
-    return Signed(Word32SarShiftOutZeros(
+    return Signed(Word32Sar(
         TruncateIntPtrToInt32(BitcastTaggedToWordForTagAndSmiBits(value)),
         SmiShiftBitsConstant32()));
   }
@@ -12,7 +12,6 @@
 #include "src/compiler/backend/instruction-selector-impl.h"
 #include "src/compiler/compiler-source-position-table.h"
 #include "src/compiler/node-matchers.h"
-#include "src/compiler/node-properties.h"
 #include "src/compiler/pipeline.h"
 #include "src/compiler/schedule.h"
 #include "src/compiler/state-values-utils.h"
@@ -3015,8 +3014,7 @@ void InstructionSelector::VisitUnreachable(Node* node) {
 }

 void InstructionSelector::VisitStaticAssert(Node* node) {
-  Node* asserted = node->InputAt(0);
-  asserted->Print(2);
+  node->InputAt(0)->Print();
   FATAL("Expected turbofan static assert to hold, but got non-true input!\n");
 }

@@ -273,7 +273,6 @@ class CodeAssemblerParameterizedLabel;
   V(WordShl, WordT, WordT, IntegralT) \
   V(WordShr, WordT, WordT, IntegralT) \
   V(WordSar, WordT, WordT, IntegralT) \
-  V(WordSarShiftOutZeros, WordT, WordT, IntegralT) \
   V(Word32Or, Word32T, Word32T, Word32T) \
   V(Word32And, Word32T, Word32T, Word32T) \
   V(Word32Xor, Word32T, Word32T, Word32T) \
@@ -281,7 +280,6 @@ class CodeAssemblerParameterizedLabel;
   V(Word32Shl, Word32T, Word32T, Word32T) \
   V(Word32Shr, Word32T, Word32T, Word32T) \
   V(Word32Sar, Word32T, Word32T, Word32T) \
-  V(Word32SarShiftOutZeros, Word32T, Word32T, Word32T) \
   V(Word64And, Word64T, Word64T, Word64T) \
   V(Word64Or, Word64T, Word64T, Word64T) \
   V(Word64Xor, Word64T, Word64T, Word64T) \
@@ -4552,20 +4552,18 @@ Node* EffectControlLinearizer::ChangeUint32ToSmi(Node* value) {
 }

 Node* EffectControlLinearizer::ChangeSmiToIntPtr(Node* value) {
   // Do shift on 32bit values if Smis are stored in the lower word.
   if (machine()->Is64() && SmiValuesAre31Bits()) {
-    // First sign-extend the upper half, then shift away the Smi tag.
-    return __ WordSarShiftOutZeros(
-        __ ChangeInt32ToInt64(__ TruncateInt64ToInt32(value)),
-        SmiShiftBitsConstant());
+    return __ ChangeInt32ToInt64(
+        __ Word32Sar(__ TruncateInt64ToInt32(value), SmiShiftBitsConstant()));
   }
-  return __ WordSarShiftOutZeros(value, SmiShiftBitsConstant());
+  return __ WordSar(value, SmiShiftBitsConstant());
 }

 Node* EffectControlLinearizer::ChangeSmiToInt32(Node* value) {
   // Do shift on 32bit values if Smis are stored in the lower word.
   if (machine()->Is64() && SmiValuesAre31Bits()) {
-    return __ Word32SarShiftOutZeros(__ TruncateInt64ToInt32(value),
-                                     SmiShiftBitsConstant());
+    return __ Word32Sar(__ TruncateInt64ToInt32(value), SmiShiftBitsConstant());
   }
   if (machine()->Is64()) {
     return __ TruncateInt64ToInt32(ChangeSmiToIntPtr(value));
@@ -82,7 +82,6 @@ class BasicBlock;
   V(Word32Equal) \
   V(Word32Or) \
   V(Word32Sar) \
-  V(Word32SarShiftOutZeros) \
   V(Word32Shl) \
   V(Word32Shr) \
   V(Word32Xor) \
@@ -92,7 +91,6 @@ class BasicBlock;
   V(WordAnd) \
   V(WordEqual) \
   V(WordSar) \
-  V(WordSarShiftOutZeros) \
   V(WordShl)

 #define CHECKED_ASSEMBLER_MACH_BINOP_LIST(V) \
@@ -4,7 +4,6 @@

 #include "src/compiler/machine-operator-reducer.h"
 #include <cmath>
-#include <limits>

 #include "src/base/bits.h"
 #include "src/base/division-by-constant.h"
@@ -15,7 +14,6 @@
 #include "src/compiler/machine-graph.h"
 #include "src/compiler/node-matchers.h"
 #include "src/compiler/node-properties.h"
-#include "src/compiler/opcodes.h"
 #include "src/numbers/conversions-inl.h"

 namespace v8 {
@@ -425,7 +423,7 @@ Reduction MachineOperatorReducer::Reduce(Node* node) {
           return ReplaceBool(true);
         }
       }
-      return ReduceWord32Comparisons(node);
+      break;
     }
     case IrOpcode::kInt32LessThanOrEqual: {
       Int32BinopMatcher m(node);
@@ -433,7 +431,7 @@ Reduction MachineOperatorReducer::Reduce(Node* node) {
         return ReplaceBool(m.left().Value() <= m.right().Value());
       }
       if (m.LeftEqualsRight()) return ReplaceBool(true);  // x <= x => true
-      return ReduceWord32Comparisons(node);
+      break;
     }
     case IrOpcode::kUint32LessThan: {
       Uint32BinopMatcher m(node);
@@ -458,7 +456,7 @@ Reduction MachineOperatorReducer::Reduce(Node* node) {
           // TODO(turbofan): else the comparison is always true.
         }
       }
-      return ReduceWord32Comparisons(node);
+      break;
     }
     case IrOpcode::kUint32LessThanOrEqual: {
       Uint32BinopMatcher m(node);
@@ -468,7 +466,7 @@ Reduction MachineOperatorReducer::Reduce(Node* node) {
         return ReplaceBool(m.left().Value() <= m.right().Value());
       }
       if (m.LeftEqualsRight()) return ReplaceBool(true);  // x <= x => true
-      return ReduceWord32Comparisons(node);
+      break;
    }
     case IrOpcode::kFloat32Sub: {
       Float32BinopMatcher m(node);
@@ -875,11 +873,6 @@ Reduction MachineOperatorReducer::Reduce(Node* node) {
     case IrOpcode::kTrapIf:
     case IrOpcode::kTrapUnless:
       return ReduceConditional(node);
-    case IrOpcode::kInt64LessThan:
-    case IrOpcode::kInt64LessThanOrEqual:
-    case IrOpcode::kUint64LessThan:
-    case IrOpcode::kUint64LessThanOrEqual:
-      return ReduceWord64Comparisons(node);
     default:
       break;
   }
@@ -1254,78 +1247,6 @@ Reduction MachineOperatorReducer::ReduceProjection(size_t index, Node* node) {
   return NoChange();
 }

-Reduction MachineOperatorReducer::ReduceWord32Comparisons(Node* node) {
-  DCHECK(node->opcode() == IrOpcode::kInt32LessThan ||
-         node->opcode() == IrOpcode::kInt32LessThanOrEqual ||
-         node->opcode() == IrOpcode::kUint32LessThan ||
-         node->opcode() == IrOpcode::kUint32LessThanOrEqual);
-  Int32BinopMatcher m(node);
-  // (x >>> K) < (y >>> K) => x < y if only zeros shifted out
-  if (m.left().op() == machine()->Word32SarShiftOutZeros() &&
-      m.right().op() == machine()->Word32SarShiftOutZeros()) {
-    Int32BinopMatcher mleft(m.left().node());
-    Int32BinopMatcher mright(m.right().node());
-    if (mleft.right().HasValue() && mright.right().Is(mleft.right().Value())) {
-      node->ReplaceInput(0, mleft.left().node());
-      node->ReplaceInput(1, mright.left().node());
-      return Changed(node);
-    }
-  }
-  return NoChange();
-}
-
-const Operator* MachineOperatorReducer::Map64To32Comparison(
-    const Operator* op, bool sign_extended) {
-  switch (op->opcode()) {
-    case IrOpcode::kInt64LessThan:
-      return sign_extended ? machine()->Int32LessThan()
-                           : machine()->Uint32LessThan();
-    case IrOpcode::kInt64LessThanOrEqual:
-      return sign_extended ? machine()->Int32LessThanOrEqual()
-                           : machine()->Uint32LessThanOrEqual();
-    case IrOpcode::kUint64LessThan:
-      return machine()->Uint32LessThan();
-    case IrOpcode::kUint64LessThanOrEqual:
-      return machine()->Uint32LessThanOrEqual();
-    default:
-      UNREACHABLE();
-  }
-}
-
-Reduction MachineOperatorReducer::ReduceWord64Comparisons(Node* node) {
-  DCHECK(node->opcode() == IrOpcode::kInt64LessThan ||
-         node->opcode() == IrOpcode::kInt64LessThanOrEqual ||
-         node->opcode() == IrOpcode::kUint64LessThan ||
-         node->opcode() == IrOpcode::kUint64LessThanOrEqual);
-  Int64BinopMatcher m(node);
-
-  bool sign_extended =
-      m.left().IsChangeInt32ToInt64() && m.right().IsChangeInt32ToInt64();
-  if (sign_extended || (m.left().IsChangeUint32ToUint64() &&
-                        m.right().IsChangeUint32ToUint64())) {
-    node->ReplaceInput(0, NodeProperties::GetValueInput(m.left().node(), 0));
-    node->ReplaceInput(1, NodeProperties::GetValueInput(m.right().node(), 0));
-    NodeProperties::ChangeOp(node,
-                             Map64To32Comparison(node->op(), sign_extended));
-    return Changed(node).FollowedBy(Reduce(node));
-  }
-
-  // (x >>> K) < (y >>> K) => x < y if only zeros shifted out
-  // This is useful for Smi untagging, which results in such a shift.
-  if (m.left().op() == machine()->Word64SarShiftOutZeros() &&
-      m.right().op() == machine()->Word64SarShiftOutZeros()) {
-    Int64BinopMatcher mleft(m.left().node());
-    Int64BinopMatcher mright(m.right().node());
-    if (mleft.right().HasValue() && mright.right().Is(mleft.right().Value())) {
-      node->ReplaceInput(0, mleft.left().node());
-      node->ReplaceInput(1, mright.left().node());
-      return Changed(node);
-    }
-  }
-
-  return NoChange();
-}
-
 Reduction MachineOperatorReducer::ReduceWord32Shifts(Node* node) {
   DCHECK((node->opcode() == IrOpcode::kWord32Shl) ||
          (node->opcode() == IrOpcode::kWord32Shr) ||
@@ -1354,42 +1275,14 @@ Reduction MachineOperatorReducer::ReduceWord32Shl(Node* node) {
         base::ShlWithWraparound(m.left().Value(), m.right().Value()));
   }
   if (m.right().IsInRange(1, 31)) {
+    // (x >>> K) << K => x & ~(2^K - 1)
+    // (x >> K) << K => x & ~(2^K - 1)
     if (m.left().IsWord32Sar() || m.left().IsWord32Shr()) {
       Int32BinopMatcher mleft(m.left().node());
-
-      // If x >> K only shifted out zeros:
-      // (x >> K) << L => x if K == L
-      // (x >> K) << L => x >> (K-L) if K > L
-      // (x >> K) << L => x << (L-K) if K < L
-      // Since this is used for Smi untagging, we currently only need it for
-      // signed shifts.
-      if (mleft.op() == machine()->Word32SarShiftOutZeros() &&
-          mleft.right().IsInRange(1, 31)) {
-        Node* x = mleft.left().node();
-        int k = mleft.right().Value();
-        int l = m.right().Value();
-        if (k == l) {
-          return Replace(x);
-        } else if (k > l) {
-          node->ReplaceInput(0, x);
-          node->ReplaceInput(1, Uint32Constant(k - l));
-          NodeProperties::ChangeOp(node, machine()->Word32Sar());
-          return Changed(node).FollowedBy(ReduceWord32Sar(node));
-        } else {
-          DCHECK(k < l);
-          node->ReplaceInput(0, x);
-          node->ReplaceInput(1, Uint32Constant(l - k));
-          return Changed(node);
-        }
-      }
-
-      // (x >>> K) << K => x & ~(2^K - 1)
-      // (x >> K) << K => x & ~(2^K - 1)
       if (mleft.right().Is(m.right().Value())) {
         node->ReplaceInput(0, mleft.left().node());
         node->ReplaceInput(1,
-                           Uint32Constant(std::numeric_limits<uint32_t>::max()
-                                          << m.right().Value()));
+                           Uint32Constant(~((1U << m.right().Value()) - 1U)));
         NodeProperties::ChangeOp(node, machine()->Word32And());
         return Changed(node).FollowedBy(ReduceWord32And(node));
       }
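Side note (illustration only, not part of the revert): the removed expression std::numeric_limits<uint32_t>::max() << K and the restored expression ~((1U << K) - 1U) in the hunk above compute the same mask for shift amounts in [1, 31]. A quick standalone check:

// Standalone check of the mask equivalence; not V8 code.
#include <cassert>
#include <cstdint>
#include <limits>

int main() {
  for (uint32_t k = 1; k <= 31; ++k) {
    uint32_t restored = ~((1u << k) - 1u);
    uint32_t reverted = std::numeric_limits<uint32_t>::max() << k;
    assert(restored == reverted);
  }
  return 0;
}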
@@ -1406,46 +1299,6 @@ Reduction MachineOperatorReducer::ReduceWord64Shl(Node* node) {
     return ReplaceInt64(
         base::ShlWithWraparound(m.left().Value(), m.right().Value()));
   }
-  if (m.right().IsInRange(1, 63) &&
-      (m.left().IsWord64Sar() || m.left().IsWord64Shr())) {
-    Int64BinopMatcher mleft(m.left().node());
-
-    // If x >> K only shifted out zeros:
-    // (x >> K) << L => x if K == L
-    // (x >> K) << L => x >> (K-L) if K > L
-    // (x >> K) << L => x << (L-K) if K < L
-    // Since this is used for Smi untagging, we currently only need it for
-    // signed shifts.
-    if (mleft.op() == machine()->Word64SarShiftOutZeros() &&
-        mleft.right().IsInRange(1, 63)) {
-      Node* x = mleft.left().node();
-      int64_t k = mleft.right().Value();
-      int64_t l = m.right().Value();
-      if (k == l) {
-        return Replace(x);
-      } else if (k > l) {
-        node->ReplaceInput(0, x);
-        node->ReplaceInput(1, Uint64Constant(k - l));
-        NodeProperties::ChangeOp(node, machine()->Word64Sar());
-        return Changed(node).FollowedBy(ReduceWord64Sar(node));
-      } else {
-        DCHECK(k < l);
-        node->ReplaceInput(0, x);
-        node->ReplaceInput(1, Uint64Constant(l - k));
-        return Changed(node);
-      }
-    }
-
-    // (x >>> K) << K => x & ~(2^K - 1)
-    // (x >> K) << K => x & ~(2^K - 1)
-    if (mleft.right().Is(m.right().Value())) {
-      node->ReplaceInput(0, mleft.left().node());
-      node->ReplaceInput(1, Uint64Constant(std::numeric_limits<uint64_t>::max()
-                                           << m.right().Value()));
-      NodeProperties::ChangeOp(node, machine()->Word64And());
-      return Changed(node).FollowedBy(ReduceWord64And(node));
-    }
-  }
   return NoChange();
 }

@@ -92,9 +92,6 @@ class V8_EXPORT_PRIVATE MachineOperatorReducer final
   Reduction ReduceUint32Mod(Node* node);
   Reduction ReduceStore(Node* node);
   Reduction ReduceProjection(size_t index, Node* node);
-  const Operator* Map64To32Comparison(const Operator* op, bool sign_extended);
-  Reduction ReduceWord32Comparisons(Node* node);
-  Reduction ReduceWord64Comparisons(Node* node);
   Reduction ReduceWord32Shifts(Node* node);
   Reduction ReduceWord32Shl(Node* node);
   Reduction ReduceWord64Shl(Node* node);
@@ -106,6 +106,7 @@ LoadRepresentation LoadRepresentationOf(Operator const* op) {
   return OpParameter<LoadRepresentation>(op);
 }

+
 StoreRepresentation const& StoreRepresentationOf(Operator const* op) {
   DCHECK(IrOpcode::kStore == op->opcode() ||
          IrOpcode::kProtectedStore == op->opcode());
@@ -149,22 +150,6 @@ MachineType AtomicOpType(Operator const* op) {
   return OpParameter<MachineType>(op);
 }

-size_t hash_value(ShiftKind kind) { return static_cast<size_t>(kind); }
-V8_EXPORT_PRIVATE std::ostream& operator<<(std::ostream& os, ShiftKind kind) {
-  switch (kind) {
-    case ShiftKind::kNormal:
-      return os << "Normal";
-    case ShiftKind::kShiftOutZeros:
-      return os << "ShiftOutZeros";
-  }
-}
-
-ShiftKind ShiftKindOf(Operator const* op) {
-  DCHECK(IrOpcode::kWord32Sar == op->opcode() ||
-         IrOpcode::kWord64Sar == op->opcode());
-  return OpParameter<ShiftKind>(op);
-}
-
 // The format is:
 // V(Name, properties, value_input_count, control_input_count, output_count)
 #define PURE_BINARY_OP_LIST_32(V) \
@@ -173,6 +158,7 @@ ShiftKind ShiftKindOf(Operator const* op) {
   V(Word32Xor, Operator::kAssociative | Operator::kCommutative, 2, 0, 1) \
   V(Word32Shl, Operator::kNoProperties, 2, 0, 1) \
   V(Word32Shr, Operator::kNoProperties, 2, 0, 1) \
+  V(Word32Sar, Operator::kNoProperties, 2, 0, 1) \
   V(Word32Ror, Operator::kNoProperties, 2, 0, 1) \
   V(Word32Equal, Operator::kCommutative, 2, 0, 1) \
   V(Int32Add, Operator::kAssociative | Operator::kCommutative, 2, 0, 1) \
@@ -197,6 +183,7 @@ ShiftKind ShiftKindOf(Operator const* op) {
   V(Word64Xor, Operator::kAssociative | Operator::kCommutative, 2, 0, 1) \
   V(Word64Shl, Operator::kNoProperties, 2, 0, 1) \
   V(Word64Shr, Operator::kNoProperties, 2, 0, 1) \
+  V(Word64Sar, Operator::kNoProperties, 2, 0, 1) \
   V(Word64Ror, Operator::kNoProperties, 2, 0, 1) \
   V(Word64Equal, Operator::kCommutative, 2, 0, 1) \
   V(Int64Add, Operator::kAssociative | Operator::kCommutative, 2, 0, 1) \
@@ -665,38 +652,6 @@ PURE_OPTIONAL_OP_LIST(PURE)
 OVERFLOW_OP_LIST(OVERFLOW_OP)
 #undef OVERFLOW_OP

-template <ShiftKind kind>
-struct Word32SarOperator : Operator1<ShiftKind> {
-  Word32SarOperator()
-      : Operator1(IrOpcode::kWord32Sar, Operator::kPure, "Word32Sar", 2, 0, 0,
-                  1, 0, 0, kind) {}
-};
-
-const Operator* MachineOperatorBuilder::Word32Sar(ShiftKind kind) {
-  switch (kind) {
-    case ShiftKind::kNormal:
-      return GetCachedOperator<Word32SarOperator<ShiftKind::kNormal>>();
-    case ShiftKind::kShiftOutZeros:
-      return GetCachedOperator<Word32SarOperator<ShiftKind::kShiftOutZeros>>();
-  }
-}
-
-template <ShiftKind kind>
-struct Word64SarOperator : Operator1<ShiftKind> {
-  Word64SarOperator()
-      : Operator1(IrOpcode::kWord64Sar, Operator::kPure, "Word64Sar", 2, 0, 0,
-                  1, 0, 0, kind) {}
-};
-
-const Operator* MachineOperatorBuilder::Word64Sar(ShiftKind kind) {
-  switch (kind) {
-    case ShiftKind::kNormal:
-      return GetCachedOperator<Word64SarOperator<ShiftKind::kNormal>>();
-    case ShiftKind::kShiftOutZeros:
-      return GetCachedOperator<Word64SarOperator<ShiftKind::kShiftOutZeros>>();
-  }
-}
-
 template <MachineRepresentation rep, MachineSemantic sem>
 struct LoadOperator : public Operator1<LoadRepresentation> {
   LoadOperator()
@@ -180,16 +180,6 @@ V8_EXPORT_PRIVATE S8x16ShuffleParameter const& S8x16ShuffleParameterOf(

 StackCheckKind StackCheckKindOf(Operator const* op) V8_WARN_UNUSED_RESULT;

-// ShiftKind::kShiftOutZeros means that it is guaranteed that the bits shifted
-// out of the left operand are all zeros. If this is not the case, undefined
-// behavior (i.e., incorrect optimizations) will happen.
-// This is mostly useful for Smi untagging.
-enum class ShiftKind { kNormal, kShiftOutZeros };
-
-size_t hash_value(ShiftKind);
-V8_EXPORT_PRIVATE std::ostream& operator<<(std::ostream&, ShiftKind);
-ShiftKind ShiftKindOf(Operator const*) V8_WARN_UNUSED_RESULT;
-
 // Interface for building machine-level operators. These operators are
 // machine-level but machine-independent and thus define a language suitable
 // for generating code to run on architectures such as ia32, x64, arm, etc.
@@ -303,11 +293,7 @@ class V8_EXPORT_PRIVATE MachineOperatorBuilder final
   const Operator* Word32Xor();
   const Operator* Word32Shl();
   const Operator* Word32Shr();
-  const Operator* Word32Sar(ShiftKind kind);
-  const Operator* Word32Sar() { return Word32Sar(ShiftKind::kNormal); }
-  const Operator* Word32SarShiftOutZeros() {
-    return Word32Sar(ShiftKind::kShiftOutZeros);
-  }
+  const Operator* Word32Sar();
   const Operator* Word32Ror();
   const Operator* Word32Equal();
   const Operator* Word32Clz();
@@ -332,11 +318,7 @@ class V8_EXPORT_PRIVATE MachineOperatorBuilder final
   const Operator* Word64Xor();
   const Operator* Word64Shl();
   const Operator* Word64Shr();
-  const Operator* Word64Sar(ShiftKind kind);
-  const Operator* Word64Sar() { return Word64Sar(ShiftKind::kNormal); }
-  const Operator* Word64SarShiftOutZeros() {
-    return Word64Sar(ShiftKind::kShiftOutZeros);
-  }
+  const Operator* Word64Sar();
   const Operator* Word64Ror();
   const Operator* Word64Clz();
   const OptionalOperator Word64Ctz();
@@ -859,6 +841,7 @@ class V8_EXPORT_PRIVATE MachineOperatorBuilder final
   V(Word, Xor) \
   V(Word, Shl) \
   V(Word, Shr) \
+  V(Word, Sar) \
   V(Word, Ror) \
   V(Word, Clz) \
   V(Word, Equal) \
@@ -881,13 +864,6 @@ class V8_EXPORT_PRIVATE MachineOperatorBuilder final
 #undef PSEUDO_OP
 #undef PSEUDO_OP_LIST

-  const Operator* WordSar(ShiftKind kind = ShiftKind::kNormal) {
-    return Is32() ? Word32Sar(kind) : Word64Sar(kind);
-  }
-  const Operator* WordSarShiftOutZeros() {
-    return WordSar(ShiftKind::kShiftOutZeros);
-  }
-
  private:
   Zone* zone_;
   MachineRepresentation const word_;
@@ -314,32 +314,22 @@ bool Node::OwnedBy(Node const* owner1, Node const* owner2) const {
   return mask == 3;
 }

-void Node::Print(int depth) const {
+void Node::Print() const {
   StdoutStream os;
-  Print(os, depth);
+  Print(os);
 }

-namespace {
-void PrintNode(const Node* node, std::ostream& os, int depth,
-               int indentation = 0) {
-  for (int i = 0; i < indentation; ++i) {
+void Node::Print(std::ostream& os) const {
+  os << *this << std::endl;
+  for (Node* input : this->inputs()) {
     os << "  ";
+    if (input) {
+      os << *input;
+    } else {
+      os << "(NULL)";
+    }
+    os << std::endl;
   }
-  if (node) {
-    os << *node;
-  } else {
-    os << "(NULL)";
-  }
-  os << std::endl;
-  if (depth <= 0) return;
-  for (Node* input : node->inputs()) {
-    PrintNode(input, os, depth - 1, indentation + 1);
-  }
-}
-}  // namespace
-
-void Node::Print(std::ostream& os, int depth) const {
-  PrintNode(this, os, depth);
 }

 std::ostream& operator<<(std::ostream& os, const Node& n) {
@@ -144,8 +144,8 @@ class V8_EXPORT_PRIVATE Node final {
   // Returns true if {owner1} and {owner2} are the only users of {this} node.
   bool OwnedBy(Node const* owner1, Node const* owner2) const;

-  void Print(int depth = 1) const;
-  void Print(std::ostream&, int depth = 1) const;
+  void Print() const;
+  void Print(std::ostream&) const;

  private:
   struct Use;
@@ -1955,12 +1955,10 @@ struct CsaOptimizationPhase {
     CommonOperatorReducer common_reducer(&graph_reducer, data->graph(),
                                          data->broker(), data->common(),
                                          data->machine(), temp_zone);
-    ValueNumberingReducer value_numbering(temp_zone, data->graph()->zone());
     AddReducer(data, &graph_reducer, &branch_condition_elimination);
     AddReducer(data, &graph_reducer, &dead_code_elimination);
     AddReducer(data, &graph_reducer, &machine_reducer);
     AddReducer(data, &graph_reducer, &common_reducer);
-    AddReducer(data, &graph_reducer, &value_numbering);
     graph_reducer.ReduceGraph();
   }
 };
@@ -311,9 +311,6 @@ class V8_EXPORT_PRIVATE RawMachineAssembler {
   Node* WordSar(Node* a, Node* b) {
     return AddNode(machine()->WordSar(), a, b);
   }
-  Node* WordSarShiftOutZeros(Node* a, Node* b) {
-    return AddNode(machine()->WordSarShiftOutZeros(), a, b);
-  }
   Node* WordRor(Node* a, Node* b) {
     return AddNode(machine()->WordRor(), a, b);
   }
@@ -349,9 +346,6 @@ class V8_EXPORT_PRIVATE RawMachineAssembler {
   Node* Word32Sar(Node* a, Node* b) {
     return AddNode(machine()->Word32Sar(), a, b);
   }
-  Node* Word32SarShiftOutZeros(Node* a, Node* b) {
-    return AddNode(machine()->Word32SarShiftOutZeros(), a, b);
-  }
   Node* Word32Ror(Node* a, Node* b) {
     return AddNode(machine()->Word32Ror(), a, b);
   }
|
@ -3940,54 +3940,6 @@ TEST(WasmTaggedToFloat64) {
|
||||
}
|
||||
}
|
||||
|
||||
TEST(SmiUntagLeftShiftOptimization) {
|
||||
Isolate* isolate(CcTest::InitIsolateOnce());
|
||||
const int kNumParams = 1;
|
||||
CodeAssemblerTester asm_tester(isolate, kNumParams);
|
||||
CodeStubAssembler m(asm_tester.state());
|
||||
|
||||
{
|
||||
TNode<TaggedIndex> param =
|
||||
TNode<TaggedIndex>::UncheckedCast(m.Parameter(0));
|
||||
TNode<WordT> unoptimized =
|
||||
m.IntPtrMul(m.TaggedIndexToIntPtr(param), m.IntPtrConstant(8));
|
||||
TNode<WordT> optimized = m.WordShl(
|
||||
m.BitcastTaggedToWordForTagAndSmiBits(param), 3 - kSmiTagSize);
|
||||
m.StaticAssert(m.WordEqual(unoptimized, optimized));
|
||||
m.Return(m.UndefinedConstant());
|
||||
}
|
||||
|
||||
AssemblerOptions options = AssemblerOptions::Default(isolate);
|
||||
FunctionTester ft(asm_tester.GenerateCode(options), kNumParams);
|
||||
}
|
||||
|
||||
TEST(SmiUntagComparisonOptimization) {
|
||||
Isolate* isolate(CcTest::InitIsolateOnce());
|
||||
const int kNumParams = 2;
|
||||
CodeAssemblerTester asm_tester(isolate, kNumParams);
|
||||
CodeStubAssembler m(asm_tester.state());
|
||||
|
||||
{
|
||||
TNode<Smi> a = TNode<Smi>::UncheckedCast(m.Parameter(0));
|
||||
TNode<Smi> b = TNode<Smi>::UncheckedCast(m.Parameter(1));
|
||||
TNode<BoolT> unoptimized = m.UintPtrLessThan(m.SmiUntag(a), m.SmiUntag(b));
|
||||
#ifdef V8_COMPRESS_POINTERS
|
||||
TNode<BoolT> optimized = m.Uint32LessThan(
|
||||
m.TruncateIntPtrToInt32(m.BitcastTaggedToWordForTagAndSmiBits(a)),
|
||||
m.TruncateIntPtrToInt32(m.BitcastTaggedToWordForTagAndSmiBits(b)));
|
||||
#else
|
||||
TNode<BoolT> optimized =
|
||||
m.UintPtrLessThan(m.BitcastTaggedToWordForTagAndSmiBits(a),
|
||||
m.BitcastTaggedToWordForTagAndSmiBits(b));
|
||||
#endif
|
||||
m.StaticAssert(m.Word32Equal(unoptimized, optimized));
|
||||
m.Return(m.UndefinedConstant());
|
||||
}
|
||||
|
||||
AssemblerOptions options = AssemblerOptions::Default(isolate);
|
||||
FunctionTester ft(asm_tester.GenerateCode(options), kNumParams);
|
||||
}
|
||||
|
||||
} // namespace compiler
|
||||
} // namespace internal
|
||||
} // namespace v8
|
||||
|
@@ -3,7 +3,6 @@
 // found in the LICENSE file.

 #include "src/compiler/machine-operator-reducer.h"
-#include <limits>
 #include "src/base/bits.h"
 #include "src/base/division-by-constant.h"
 #include "src/base/ieee754.h"
@@ -318,7 +317,6 @@ const ComparisonBinaryOperator kComparisonBinaryOperators[] = {

 // Avoid undefined behavior on signed integer overflow.
 int32_t Shl(int32_t x, int32_t y) { return static_cast<uint32_t>(x) << y; }
-int64_t Shl(int64_t x, int64_t y) { return static_cast<uint64_t>(x) << y; }

 }  // namespace

@@ -1093,123 +1091,11 @@ TEST_F(MachineOperatorReducerTest, Word32ShlWithWord32Shr) {
         Int32Constant(x));
     Reduction r = Reduce(node);
     ASSERT_TRUE(r.Changed());
-    int32_t m = static_cast<int32_t>(std::numeric_limits<uint32_t>::max() << x);
+    int32_t m = static_cast<int32_t>(~((1U << x) - 1U));
     EXPECT_THAT(r.replacement(), IsWord32And(p0, IsInt32Constant(m)));
   }
 }

-TEST_F(MachineOperatorReducerTest, Word32ShlWithWord32SarShiftOutZeros) {
-  Node* p = Parameter(0);
-  TRACED_FORRANGE(int32_t, x, 1, 31) {
-    TRACED_FORRANGE(int32_t, y, 0, 31) {
-      Node* node = graph()->NewNode(
-          machine()->Word32Shl(),
-          graph()->NewNode(machine()->Word32Sar(ShiftKind::kShiftOutZeros), p,
-                           Int32Constant(x)),
-          Int32Constant(y));
-      Reduction r = Reduce(node);
-      ASSERT_TRUE(r.Changed());
-      if (x == y) {
-        // (p >> x) << y => p
-        EXPECT_THAT(r.replacement(), p);
-      } else if (x < y) {
-        // (p >> x) << y => p << (y - x)
-        EXPECT_THAT(r.replacement(), IsWord32Shl(p, IsInt32Constant(y - x)));
-      } else {
-        // (p >> x) << y => p >> (x - y)
-        EXPECT_THAT(r.replacement(), IsWord32Sar(p, IsInt32Constant(x - y)));
-      }
-    }
-  }
-}
-
-// -----------------------------------------------------------------------------
-// Word64Shl
-
-TEST_F(MachineOperatorReducerTest, Word64ShlWithZeroShift) {
-  Node* p0 = Parameter(0);
-  Node* node = graph()->NewNode(machine()->Word64Shl(), p0, Int64Constant(0));
-  Reduction r = Reduce(node);
-  ASSERT_TRUE(r.Changed());
-  EXPECT_EQ(p0, r.replacement());
-}
-
-TEST_F(MachineOperatorReducerTest, Word64ShlWithWord64Sar) {
-  Node* p0 = Parameter(0);
-  TRACED_FORRANGE(int64_t, x, 1, 63) {
-    Node* node = graph()->NewNode(
-        machine()->Word64Shl(),
-        graph()->NewNode(machine()->Word64Sar(), p0, Int64Constant(x)),
-        Int64Constant(x));
-    Reduction r = Reduce(node);
-    ASSERT_TRUE(r.Changed());
-    int64_t m = static_cast<int64_t>(~(Shl(int64_t{1}, x) - 1));
-    EXPECT_THAT(r.replacement(), IsWord64And(p0, IsInt64Constant(m)));
-  }
-}
-
-TEST_F(MachineOperatorReducerTest,
-       Word64ShlWithWord64SarAndInt64AddAndConstant) {
-  Node* const p0 = Parameter(0);
-  TRACED_FOREACH(int64_t, k, kInt64Values) {
-    TRACED_FORRANGE(int64_t, l, 1, 63) {
-      if (Shl(k, l) == 0) continue;
-      // (x + (K << L)) >> L << L => (x & (-1 << L)) + (K << L)
-      Reduction const r = Reduce(graph()->NewNode(
-          machine()->Word64Shl(),
-          graph()->NewNode(machine()->Word64Sar(),
-                           graph()->NewNode(machine()->Int64Add(), p0,
-                                            Int64Constant(Shl(k, l))),
-                           Int64Constant(l)),
-          Int64Constant(l)));
-      ASSERT_TRUE(r.Changed());
-      EXPECT_THAT(
-          r.replacement(),
-          IsInt64Add(IsWord64And(p0, IsInt64Constant(Shl(int64_t{-1}, l))),
-                     IsInt64Constant(Shl(k, l))));
-    }
-  }
-}
-
-TEST_F(MachineOperatorReducerTest, Word64ShlWithWord64Shr) {
-  Node* p0 = Parameter(0);
-  TRACED_FORRANGE(int64_t, x, 1, 63) {
-    Node* node = graph()->NewNode(
-        machine()->Word64Shl(),
-        graph()->NewNode(machine()->Word64Shr(), p0, Int64Constant(x)),
-        Int64Constant(x));
-    Reduction r = Reduce(node);
-    ASSERT_TRUE(r.Changed());
-    int64_t m = static_cast<int64_t>(std::numeric_limits<uint64_t>::max() << x);
-    EXPECT_THAT(r.replacement(), IsWord64And(p0, IsInt64Constant(m)));
-  }
-}
-
-TEST_F(MachineOperatorReducerTest, Word64ShlWithWord64SarShiftOutZeros) {
-  Node* p = Parameter(0);
-  TRACED_FORRANGE(int64_t, x, 1, 63) {
-    TRACED_FORRANGE(int64_t, y, 0, 63) {
-      Node* node = graph()->NewNode(
-          machine()->Word64Shl(),
-          graph()->NewNode(machine()->Word64Sar(ShiftKind::kShiftOutZeros), p,
-                           Int64Constant(x)),
-          Int64Constant(y));
-      Reduction r = Reduce(node);
-      ASSERT_TRUE(r.Changed());
-      if (x == y) {
-        // (p >> x) << y => p
-        EXPECT_THAT(r.replacement(), p);
-      } else if (x < y) {
-        // (p >> x) << y => p << (y - x)
-        EXPECT_THAT(r.replacement(), IsWord64Shl(p, IsInt64Constant(y - x)));
-      } else {
-        // (p >> x) << y => p >> (x - y)
-        EXPECT_THAT(r.replacement(), IsWord64Sar(p, IsInt64Constant(x - y)));
-      }
-    }
-  }
-}
-
 // -----------------------------------------------------------------------------
 // Word32Equal

@@ -2005,29 +1891,6 @@ TEST_F(MachineOperatorReducerTest, Int32LessThanWithWord32Or) {
   }
 }

-TEST_F(MachineOperatorReducerTest, Int32LessThanWithWord32SarShiftOutZeros) {
-  Node* const p0 = Parameter(0);
-  Node* const p1 = Parameter(1);
-  TRACED_FORRANGE(int32_t, shift0, 1, 3) {
-    TRACED_FORRANGE(int32_t, shift1, 1, 3) {
-      Node* const node =
-          graph()->NewNode(machine()->Int32LessThan(),
-                           graph()->NewNode(machine()->Word32SarShiftOutZeros(),
-                                            p0, Int32Constant(shift0)),
-                           graph()->NewNode(machine()->Word32SarShiftOutZeros(),
-                                            p1, Int32Constant(shift1)));
-
-      Reduction r = Reduce(node);
-      if (shift0 == shift1) {
-        ASSERT_TRUE(r.Changed());
-        EXPECT_THAT(r.replacement(), IsInt32LessThan(p0, p1));
-      } else {
-        ASSERT_FALSE(r.Changed());
-      }
-    }
-  }
-}
-
 // -----------------------------------------------------------------------------
 // Uint32LessThan

@@ -2048,80 +1911,6 @@ TEST_F(MachineOperatorReducerTest, Uint32LessThanWithWord32Sar) {
   }
 }

-TEST_F(MachineOperatorReducerTest, Uint32LessThanWithWord32SarShiftOutZeros) {
-  Node* const p0 = Parameter(0);
-  Node* const p1 = Parameter(1);
-  TRACED_FORRANGE(int32_t, shift0, 1, 3) {
-    TRACED_FORRANGE(int32_t, shift1, 1, 3) {
-      Node* const node =
-          graph()->NewNode(machine()->Uint32LessThan(),
-                           graph()->NewNode(machine()->Word32SarShiftOutZeros(),
-                                            p0, Int32Constant(shift0)),
-                           graph()->NewNode(machine()->Word32SarShiftOutZeros(),
-                                            p1, Int32Constant(shift1)));
-
-      Reduction r = Reduce(node);
-      if (shift0 == shift1) {
-        ASSERT_TRUE(r.Changed());
-        EXPECT_THAT(r.replacement(), IsUint32LessThan(p0, p1));
-      } else {
-        ASSERT_FALSE(r.Changed());
-      }
-    }
-  }
-}
-
-// -----------------------------------------------------------------------------
-// Uint64LessThan
-
-TEST_F(MachineOperatorReducerTest, Uint64LessThanWithWord64SarShiftOutZeros) {
-  Node* const p0 = Parameter(0);
-  Node* const p1 = Parameter(1);
-  TRACED_FORRANGE(int64_t, shift0, 1, 3) {
-    TRACED_FORRANGE(int64_t, shift1, 1, 3) {
-      Node* const node =
-          graph()->NewNode(machine()->Uint64LessThan(),
-                           graph()->NewNode(machine()->Word64SarShiftOutZeros(),
-                                            p0, Int64Constant(shift0)),
-                           graph()->NewNode(machine()->Word64SarShiftOutZeros(),
-                                            p1, Int64Constant(shift1)));
-
-      Reduction r = Reduce(node);
-      if (shift0 == shift1) {
-        ASSERT_TRUE(r.Changed());
-        EXPECT_THAT(r.replacement(), IsUint64LessThan(p0, p1));
-      } else {
-        ASSERT_FALSE(r.Changed());
-      }
-    }
-  }
-}
-
-// -----------------------------------------------------------------------------
-// Int64LessThan
-
-TEST_F(MachineOperatorReducerTest, Int64LessThanWithWord64SarShiftOutZeros) {
-  Node* const p0 = Parameter(0);
-  Node* const p1 = Parameter(1);
-  TRACED_FORRANGE(int64_t, shift0, 1, 3) {
-    TRACED_FORRANGE(int64_t, shift1, 1, 3) {
-      Node* const node =
-          graph()->NewNode(machine()->Int64LessThan(),
-                           graph()->NewNode(machine()->Word64SarShiftOutZeros(),
-                                            p0, Int64Constant(shift0)),
-                           graph()->NewNode(machine()->Word64SarShiftOutZeros(),
-                                            p1, Int64Constant(shift1)));
-
-      Reduction r = Reduce(node);
-      if (shift0 == shift1) {
-        ASSERT_TRUE(r.Changed());
-        EXPECT_THAT(r.replacement(), IsInt64LessThan(p0, p1));
-      } else {
-        ASSERT_FALSE(r.Changed());
-      }
-    }
-  }
-}
-
 // -----------------------------------------------------------------------------
 // Float64Mul

@@ -2187,8 +2187,6 @@ IS_BINOP_MATCHER(Int64Add)
 IS_BINOP_MATCHER(Int64Div)
 IS_BINOP_MATCHER(Int64Sub)
 IS_BINOP_MATCHER(Int64Mul)
-IS_BINOP_MATCHER(Int64LessThan)
-IS_BINOP_MATCHER(Uint64LessThan)
 IS_BINOP_MATCHER(JSAdd)
 IS_BINOP_MATCHER(JSParseInt)
 IS_BINOP_MATCHER(Float32Equal)
@@ -415,10 +415,6 @@ Matcher<Node*> IsInt64Mul(const Matcher<Node*>& lhs_matcher,
                           const Matcher<Node*>& rhs_matcher);
 Matcher<Node*> IsInt64Div(const Matcher<Node*>& lhs_matcher,
                           const Matcher<Node*>& rhs_matcher);
-Matcher<Node*> IsInt64LessThan(const Matcher<Node*>& lhs_matcher,
-                               const Matcher<Node*>& rhs_matcher);
-Matcher<Node*> IsUint64LessThan(const Matcher<Node*>& lhs_matcher,
-                                const Matcher<Node*>& rhs_matcher);
 Matcher<Node*> IsJSAdd(const Matcher<Node*>& lhs_matcher,
                        const Matcher<Node*>& rhs_matcher);
 Matcher<Node*> IsJSParseInt(const Matcher<Node*>& lhs_matcher,