s390: remove RotLeftAndMask32 on s390

RotLeftAndMask32 is not efficient on s390

R=bjaideep@ca.ibm.com, joransiu@ca.ibm.com
BUG=

Review-Url: https://codereview.chromium.org/2638813002
Cr-Commit-Position: refs/heads/master@{#42387}
This commit is contained in:
jyan 2017-01-16 12:02:23 -08:00 committed by Commit bot
parent 310a899773
commit 738cb6a759
4 changed files with 2 additions and 90 deletions

View File

@ -1233,25 +1233,6 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
case kS390_Not64:
__ Not64(i.OutputRegister(), i.InputRegister(0));
break;
case kS390_RotLeftAndMask32:
if (CpuFeatures::IsSupported(GENERAL_INSTR_EXT)) {
int shiftAmount = i.InputInt32(1);
int endBit = 63 - i.InputInt32(3);
int startBit = 63 - i.InputInt32(2);
__ rll(i.OutputRegister(), i.InputRegister(0), Operand(shiftAmount));
__ risbg(i.OutputRegister(), i.OutputRegister(), Operand(startBit),
Operand(endBit), Operand::Zero(), true);
} else {
int shiftAmount = i.InputInt32(1);
int clearBitLeft = 63 - i.InputInt32(2);
int clearBitRight = i.InputInt32(3);
__ rll(i.OutputRegister(), i.InputRegister(0), Operand(shiftAmount));
__ sllg(i.OutputRegister(), i.OutputRegister(), Operand(clearBitLeft));
__ srlg(i.OutputRegister(), i.OutputRegister(),
Operand((clearBitLeft + clearBitRight)));
__ sllg(i.OutputRegister(), i.OutputRegister(), Operand(clearBitRight));
}
break;
#if V8_TARGET_ARCH_S390X
case kS390_RotLeftAndClear64:
if (CpuFeatures::IsSupported(GENERAL_INSTR_EXT)) {

View File

@ -31,7 +31,6 @@ namespace compiler {
V(S390_RotRight64) \
V(S390_Not32) \
V(S390_Not64) \
V(S390_RotLeftAndMask32) \
V(S390_RotLeftAndClear64) \
V(S390_RotLeftAndClearLeft64) \
V(S390_RotLeftAndClearRight64) \

View File

@ -32,7 +32,6 @@ int InstructionScheduler::GetTargetInstructionFlags(
case kS390_RotRight64:
case kS390_Not32:
case kS390_Not64:
case kS390_RotLeftAndMask32:
case kS390_RotLeftAndClear64:
case kS390_RotLeftAndClearLeft64:
case kS390_RotLeftAndClearRight64:

View File

@ -544,6 +544,7 @@ void InstructionSelector::VisitCheckedStore(Node* node) {
g.UseOperand(length, kUint32Imm), g.UseRegister(value));
}
#if 0
static inline bool IsContiguousMask32(uint32_t value, int* mb, int* me) {
int mask_width = base::bits::CountPopulation32(value);
int mask_msb = base::bits::CountLeadingZeros32(value);
@ -554,6 +555,7 @@ static inline bool IsContiguousMask32(uint32_t value, int* mb, int* me) {
*me = mask_lsb;
return true;
}
#endif
#if V8_TARGET_ARCH_S390X
static inline bool IsContiguousMask64(uint64_t value, int* mb, int* me) {
@ -569,36 +571,6 @@ static inline bool IsContiguousMask64(uint64_t value, int* mb, int* me) {
#endif
// Instruction selection for a 32-bit bitwise AND.
// If the right operand is a constant whose set bits form one contiguous run,
// the AND (optionally fused with a feeding 32-bit shift) is lowered to a
// single rotate-then-mask instruction (kS390_RotLeftAndMask32); otherwise it
// falls back to the generic binop path (kS390_And32).
void InstructionSelector::VisitWord32And(Node* node) {
S390OperandGenerator g(this);
Int32BinopMatcher m(node);
// mb/me are the boundaries of the contiguous mask, filled in by
// IsContiguousMask32 (the visible part of that helper sets *me = mask_lsb;
// NOTE(review): mb is presumably the mask's most-significant set bit —
// confirm against the helper's full body, which is cut off in this view).
int mb = 0;
int me = 0;
if (m.right().HasValue() && IsContiguousMask32(m.right().Value(), &mb, &me)) {
int sh = 0;
Node* left = m.left().node();
// Try to fold a left/right shift that feeds this AND into the rotate
// amount, but only when this node covers the shift (CanCover), i.e. the
// shift's value is not needed elsewhere.
if ((m.left().IsWord32Shr() || m.left().IsWord32Shl()) &&
CanCover(node, left)) {
Int32BinopMatcher mleft(m.left().node());
// Only constant shift amounts in [0, 31] can be folded.
if (mleft.right().IsInRange(0, 31)) {
left = mleft.left().node();
sh = mleft.right().Value();
if (m.left().IsWord32Shr()) {
// Adjust the mask such that it doesn't include any rotated bits.
if (mb > 31 - sh) mb = 31 - sh;
// A logical right shift by sh becomes a left rotate by (32 - sh)
// once the wrapped-around bits are masked off.
sh = (32 - sh) & 0x1f;
} else {
// Adjust the mask such that it doesn't include any rotated bits.
if (me < sh) me = sh;
}
}
}
// mb >= me means the mask is still non-empty after the adjustments above.
if (mb >= me) {
Emit(kS390_RotLeftAndMask32, g.DefineAsRegister(node),
g.UseRegister(left), g.TempImmediate(sh), g.TempImmediate(mb),
g.TempImmediate(me));
return;
}
}
// Generic path: 32-bit AND, allowing an unsigned 32-bit immediate operand.
VisitBinop<Int32BinopMatcher>(this, node, kS390_And32, kUint32Imm);
}
@ -690,25 +662,6 @@ void InstructionSelector::VisitWord64Xor(Node* node) {
#endif
// Instruction selection for a 32-bit left shift.
// Recognizes the pattern (x & mask) << sh where the shifted mask is still
// one contiguous run of bits, and lowers it to a single rotate-then-mask
// (kS390_RotLeftAndMask32). Otherwise emits a plain shift (kS390_ShiftLeft32).
void InstructionSelector::VisitWord32Shl(Node* node) {
S390OperandGenerator g(this);
Int32BinopMatcher m(node);
// Only fold when the left input is an AND and the shift amount is a
// constant in [0, 31].
if (m.left().IsWord32And() && m.right().IsInRange(0, 31)) {
Int32BinopMatcher mleft(m.left().node());
int sh = m.right().Value();
// mb/me: contiguous-mask boundaries computed from the mask as it appears
// AFTER the shift (mask << sh). NOTE(review): per the visible part of
// IsContiguousMask32, me is the mask's least-significant set bit; mb is
// presumably the most-significant — confirm against the full helper.
int mb;
int me;
if (mleft.right().HasValue() &&
IsContiguousMask32(mleft.right().Value() << sh, &mb, &me)) {
// Adjust the mask such that it doesn't include any rotated bits.
if (me < sh) me = sh;
// mb >= me means the mask survived the adjustment (non-empty).
if (mb >= me) {
Emit(kS390_RotLeftAndMask32, g.DefineAsRegister(node),
g.UseRegister(mleft.left().node()), g.TempImmediate(sh),
g.TempImmediate(mb), g.TempImmediate(me));
return;
}
}
}
// Generic path: left shift with an optional immediate shift amount.
VisitRRO(this, kS390_ShiftLeft32, node, kShift32Imm);
}
@ -757,26 +710,6 @@ void InstructionSelector::VisitWord64Shl(Node* node) {
#endif
// Instruction selection for a 32-bit logical right shift.
// Recognizes the pattern (x & mask) >> sh where the shifted mask is still
// one contiguous run of bits, and lowers it to a single rotate-then-mask
// (kS390_RotLeftAndMask32); the right shift is expressed as a left rotate
// by (32 - sh). Otherwise emits a plain shift (kS390_ShiftRight32).
void InstructionSelector::VisitWord32Shr(Node* node) {
S390OperandGenerator g(this);
Int32BinopMatcher m(node);
// Only fold when the left input is an AND and the shift amount is a
// constant in [0, 31].
if (m.left().IsWord32And() && m.right().IsInRange(0, 31)) {
Int32BinopMatcher mleft(m.left().node());
int sh = m.right().Value();
// mb/me: contiguous-mask boundaries computed from the mask as it appears
// AFTER the shift; the cast forces a logical (unsigned) shift of the
// mask constant. NOTE(review): per the visible part of IsContiguousMask32,
// me is the least-significant set bit; mb presumably the most-significant.
int mb;
int me;
if (mleft.right().HasValue() &&
IsContiguousMask32((uint32_t)(mleft.right().Value()) >> sh, &mb, &me)) {
// Adjust the mask such that it doesn't include any rotated bits.
if (mb > 31 - sh) mb = 31 - sh;
// Convert the right shift into the equivalent left-rotate amount.
sh = (32 - sh) & 0x1f;
// mb >= me means the mask survived the adjustment (non-empty).
if (mb >= me) {
Emit(kS390_RotLeftAndMask32, g.DefineAsRegister(node),
g.UseRegister(mleft.left().node()), g.TempImmediate(sh),
g.TempImmediate(mb), g.TempImmediate(me));
return;
}
}
}
// Generic path: logical right shift with an optional immediate amount.
VisitRRO(this, kS390_ShiftRight32, node, kShift32Imm);
}