s390: TF Codegen Optimization

List of items:
  1. Avoid zero-extending before subsequent 32-bit operations if the current operation does not change the upper 32 bits or already zero-extends them.
  2. Match complex address mode for binary operation where possible (eg. use Add R,MEM).
  3. Detect instruction forms in selector. Eg. kAllowRRR, kAllowRM
  4. Optimize sequence for Int32MulWithOverflow, Int32Div, etc.
  5. Remove Not32/Not64, which are the same as XOR.

R=bjaideep@ca.ibm.com, joransiu@ca.ibm.com
BUG=

Review-Url: https://codereview.chromium.org/2649113007
Cr-Commit-Position: refs/heads/master@{#42669}
This commit is contained in:
jyan 2017-01-25 15:42:14 -08:00 committed by Commit bot
parent c8691efb38
commit f7a3ede082
10 changed files with 892 additions and 297 deletions

View File

@ -302,8 +302,176 @@ Condition FlagsConditionToCondition(FlagsCondition condition, ArchOpcode op) {
return kNoCondition;
}
typedef void (MacroAssembler::*RRTypeInstr)(Register, Register);
typedef void (MacroAssembler::*RMTypeInstr)(Register, const MemOperand&);
typedef void (MacroAssembler::*RITypeInstr)(Register, const Operand&);
typedef void (MacroAssembler::*RRRTypeInstr)(Register, Register, Register);
typedef void (MacroAssembler::*RRMTypeInstr)(Register, Register,
const MemOperand&);
typedef void (MacroAssembler::*RRITypeInstr)(Register, Register,
const Operand&);
// Reads immediate input |num| — a 0/1 flag emitted by the instruction
// selector — and, if it is non-zero, zero-extends the low 32 bits of the
// output register into the full register (LoadlW of the register onto
// itself). CHECKs that input |num| really is an immediate.
#define CHECK_AND_ZERO_EXT_OUTPUT(num)                                \
  {                                                                   \
    CHECK(HasImmediateInput(instr, (num)));                           \
    int doZeroExt = i.InputInt32(num);                                \
    if (doZeroExt) masm->LoadlW(i.OutputRegister(), i.OutputRegister()); \
  }
// Assembles a 32-bit binary operation restricted to two-operand encodings,
// so the output register must alias input 0. Dispatches on the shape of
// input 1: memory operand or stack slot (rm_instr), register (rr_instr),
// or immediate (ri_instr). The trailing immediate input is a 0/1 flag that
// requests zero-extension of the 32-bit result to the full register.
void AssembleBinOp(S390OperandConverter& i, MacroAssembler* masm,
                   Instruction* instr, RRTypeInstr rr_instr,
                   RMTypeInstr rm_instr, RITypeInstr ri_instr) {
  CHECK(i.OutputRegister().is(i.InputRegister(0)));
  AddressingMode mode = AddressingModeField::decode(instr->opcode());
  int zeroExtIndex = 2;
  if (mode != kMode_None) {
    size_t first_index = 1;
    MemOperand operand = i.MemoryOperand(&mode, &first_index);
    // MemoryOperand consumed inputs [1, first_index); the zero-extend
    // flag is the next input.
    zeroExtIndex = static_cast<int>(first_index);
    CHECK(rm_instr != nullptr);
    (masm->*rm_instr)(i.OutputRegister(), operand);
  } else if (HasRegisterInput(instr, 1)) {
    (masm->*rr_instr)(i.OutputRegister(), i.InputRegister(1));
  } else if (HasImmediateInput(instr, 1)) {
    (masm->*ri_instr)(i.OutputRegister(), i.InputImmediate(1));
  } else if (HasStackSlotInput(instr, 1)) {
    (masm->*rm_instr)(i.OutputRegister(), i.InputStackSlot32(1));
  } else {
    UNREACHABLE();
  }
  CHECK_AND_ZERO_EXT_OUTPUT(zeroExtIndex);
}
// Assembles a 32-bit binary operation that may use the three-operand
// (distinct-ops) register form rrr_instr. The memory, immediate and stack
// slot forms are two-operand encodings, so those paths CHECK that the
// output aliases input 0. The trailing immediate input is the 0/1
// zero-extend flag.
void AssembleBinOp(S390OperandConverter& i, MacroAssembler* masm,
                   Instruction* instr, RRRTypeInstr rrr_instr,
                   RMTypeInstr rm_instr, RITypeInstr ri_instr) {
  AddressingMode mode = AddressingModeField::decode(instr->opcode());
  int zeroExtIndex = 2;
  if (mode != kMode_None) {
    CHECK(i.OutputRegister().is(i.InputRegister(0)));
    size_t first_index = 1;
    MemOperand operand = i.MemoryOperand(&mode, &first_index);
    // MemoryOperand consumed inputs [1, first_index); the zero-extend
    // flag is the next input.
    zeroExtIndex = static_cast<int>(first_index);
    CHECK(rm_instr != nullptr);
    (masm->*rm_instr)(i.OutputRegister(), operand);
  } else if (HasRegisterInput(instr, 1)) {
    (masm->*rrr_instr)(i.OutputRegister(), i.InputRegister(0),
                       i.InputRegister(1));
  } else if (HasImmediateInput(instr, 1)) {
    CHECK(i.OutputRegister().is(i.InputRegister(0)));
    (masm->*ri_instr)(i.OutputRegister(), i.InputImmediate(1));
  } else if (HasStackSlotInput(instr, 1)) {
    CHECK(i.OutputRegister().is(i.InputRegister(0)));
    (masm->*rm_instr)(i.OutputRegister(), i.InputStackSlot32(1));
  } else {
    UNREACHABLE();
  }
  CHECK_AND_ZERO_EXT_OUTPUT(zeroExtIndex);
}
// Assembles a 32-bit binary operation with distinct-ops register and
// immediate forms (rrr_instr / rri_instr) but only a two-operand memory
// form (rm_instr); memory and stack slot paths therefore CHECK that the
// output aliases input 0. The trailing immediate input is the 0/1
// zero-extend flag.
void AssembleBinOp(S390OperandConverter& i, MacroAssembler* masm,
                   Instruction* instr, RRRTypeInstr rrr_instr,
                   RMTypeInstr rm_instr, RRITypeInstr rri_instr) {
  AddressingMode mode = AddressingModeField::decode(instr->opcode());
  int zeroExtIndex = 2;
  if (mode != kMode_None) {
    CHECK(i.OutputRegister().is(i.InputRegister(0)));
    size_t first_index = 1;
    MemOperand operand = i.MemoryOperand(&mode, &first_index);
    // MemoryOperand consumed inputs [1, first_index); the zero-extend
    // flag is the next input.
    zeroExtIndex = static_cast<int>(first_index);
    CHECK(rm_instr != nullptr);
    (masm->*rm_instr)(i.OutputRegister(), operand);
  } else if (HasRegisterInput(instr, 1)) {
    (masm->*rrr_instr)(i.OutputRegister(), i.InputRegister(0),
                       i.InputRegister(1));
  } else if (HasImmediateInput(instr, 1)) {
    (masm->*rri_instr)(i.OutputRegister(), i.InputRegister(0),
                       i.InputImmediate(1));
  } else if (HasStackSlotInput(instr, 1)) {
    CHECK(i.OutputRegister().is(i.InputRegister(0)));
    (masm->*rm_instr)(i.OutputRegister(), i.InputStackSlot32(1));
  } else {
    UNREACHABLE();
  }
  CHECK_AND_ZERO_EXT_OUTPUT(zeroExtIndex);
}
// Assembles a 32-bit binary operation where every form is a distinct-ops
// (three-operand) encoding — register (rrr_instr), memory/stack slot
// (rrm_instr) and immediate (rri_instr) — so the output register never
// needs to alias input 0. The trailing immediate input is the 0/1
// zero-extend flag.
void AssembleBinOp(S390OperandConverter& i, MacroAssembler* masm,
                   Instruction* instr, RRRTypeInstr rrr_instr,
                   RRMTypeInstr rrm_instr, RRITypeInstr rri_instr) {
  AddressingMode mode = AddressingModeField::decode(instr->opcode());
  int zeroExtIndex = 2;
  if (mode != kMode_None) {
    size_t first_index = 1;
    MemOperand operand = i.MemoryOperand(&mode, &first_index);
    // MemoryOperand consumed inputs [1, first_index); the zero-extend
    // flag is the next input.
    zeroExtIndex = static_cast<int>(first_index);
    CHECK(rrm_instr != nullptr);
    (masm->*rrm_instr)(i.OutputRegister(), i.InputRegister(0), operand);
  } else if (HasRegisterInput(instr, 1)) {
    (masm->*rrr_instr)(i.OutputRegister(), i.InputRegister(0),
                       i.InputRegister(1));
  } else if (HasImmediateInput(instr, 1)) {
    (masm->*rri_instr)(i.OutputRegister(), i.InputRegister(0),
                       i.InputImmediate(1));
  } else if (HasStackSlotInput(instr, 1)) {
    (masm->*rrm_instr)(i.OutputRegister(), i.InputRegister(0),
                       i.InputStackSlot32(1));
  } else {
    UNREACHABLE();
  }
  CHECK_AND_ZERO_EXT_OUTPUT(zeroExtIndex);
}
// Emits a 32-bit binary op for which only the distinct-ops register
// (rrr_instr) and immediate (rri_instr) encodings exist; the instruction
// must therefore carry no addressing mode. The final immediate input is a
// 0/1 flag requesting zero-extension of the result.
void AssembleBinOp(S390OperandConverter& i, MacroAssembler* masm,
                   Instruction* instr, RRRTypeInstr rrr_instr,
                   RRITypeInstr rri_instr) {
  AddressingMode addr_mode = AddressingModeField::decode(instr->opcode());
  CHECK(addr_mode == kMode_None);
  // Without a memory operand the zero-extend flag is always input #2.
  const int zero_ext_index = 2;
  if (HasRegisterInput(instr, 1)) {
    (masm->*rrr_instr)(i.OutputRegister(), i.InputRegister(0),
                       i.InputRegister(1));
  } else if (HasImmediateInput(instr, 1)) {
    (masm->*rri_instr)(i.OutputRegister(), i.InputRegister(0),
                       i.InputImmediate(1));
  } else {
    UNREACHABLE();
  }
  CHECK_AND_ZERO_EXT_OUTPUT(zero_ext_index);
}
// Emits a 32-bit binary op restricted to the two-operand encodings
// (rr_instr for register input, ri_instr for immediate input); the output
// must alias input 0 and no addressing mode may be encoded. The final
// immediate input is a 0/1 flag requesting zero-extension of the result.
void AssembleBinOp(S390OperandConverter& i, MacroAssembler* masm,
                   Instruction* instr, RRTypeInstr rr_instr,
                   RITypeInstr ri_instr) {
  AddressingMode addr_mode = AddressingModeField::decode(instr->opcode());
  CHECK(addr_mode == kMode_None);
  CHECK(i.OutputRegister().is(i.InputRegister(0)));
  // Without a memory operand the zero-extend flag is always input #2.
  const int zero_ext_index = 2;
  if (HasRegisterInput(instr, 1)) {
    (masm->*rr_instr)(i.OutputRegister(), i.InputRegister(1));
  } else if (HasImmediateInput(instr, 1)) {
    (masm->*ri_instr)(i.OutputRegister(), i.InputImmediate(1));
  } else {
    UNREACHABLE();
  }
  CHECK_AND_ZERO_EXT_OUTPUT(zero_ext_index);
}
// Shorthand used in AssembleArchInstruction: forwards three MacroAssembler
// member names to the matching AssembleBinOp overload (overload resolution
// picks the variant from the pointer-to-member types).
#define ASSEMBLE_BIN_OP(instr1, instr2, instr3)            \
  AssembleBinOp(i, masm(), instr, &MacroAssembler::instr1, \
                &MacroAssembler::instr2, &MacroAssembler::instr3)
#undef CHECK_AND_ZERO_EXT_OUTPUT
} // namespace
// Same contract as the anonymous-namespace copy above, but usable inside
// AssembleArchInstruction where the assembler is reached via the __ macro:
// if immediate input |num| (a 0/1 flag) is set, zero-extend the low 32
// bits of the output register.
#define CHECK_AND_ZERO_EXT_OUTPUT(num)                                \
  {                                                                   \
    CHECK(HasImmediateInput(instr, (num)));                           \
    int doZeroExt = i.InputInt32(num);                                \
    if (doZeroExt) __ LoadlW(i.OutputRegister(), i.OutputRegister()); \
  }
#define ASSEMBLE_FLOAT_UNOP(asm_instr) \
do { \
__ asm_instr(i.OutputDoubleRegister(), i.InputDoubleRegister(0)); \
@ -359,7 +527,7 @@ Condition FlagsConditionToCondition(FlagsCondition condition, ArchOpcode op) {
__ LoadRR(r0, i.InputRegister(0)); \
__ shift_instr(r0, Operand(32)); \
__ div_instr(r0, i.InputRegister(1)); \
__ ltr(i.OutputRegister(), r0); \
__ LoadlW(i.OutputRegister(), r0); \
} while (0)
#define ASSEMBLE_FLOAT_MODULO() \
@ -1058,35 +1226,43 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
break;
}
case kS390_And32:
ASSEMBLE_BINOP(And);
if (CpuFeatures::IsSupported(DISTINCT_OPS)) {
ASSEMBLE_BIN_OP(nrk, And, nilf);
} else {
ASSEMBLE_BIN_OP(nr, And, nilf);
}
break;
case kS390_And64:
ASSEMBLE_BINOP(AndP);
break;
case kS390_Or32:
ASSEMBLE_BINOP(Or);
if (CpuFeatures::IsSupported(DISTINCT_OPS)) {
ASSEMBLE_BIN_OP(ork, Or, oilf);
} else {
ASSEMBLE_BIN_OP(or_z, Or, oilf);
}
break;
case kS390_Or64:
ASSEMBLE_BINOP(OrP);
break;
case kS390_Xor32:
ASSEMBLE_BINOP(Xor);
if (CpuFeatures::IsSupported(DISTINCT_OPS)) {
ASSEMBLE_BIN_OP(xrk, Xor, xilf);
} else {
ASSEMBLE_BIN_OP(xr, Xor, xilf);
}
break;
case kS390_Xor64:
ASSEMBLE_BINOP(XorP);
break;
case kS390_ShiftLeft32:
if (HasRegisterInput(instr, 1)) {
if (i.OutputRegister().is(i.InputRegister(1)) &&
!CpuFeatures::IsSupported(DISTINCT_OPS)) {
__ LoadRR(kScratchReg, i.InputRegister(1));
__ ShiftLeft(i.OutputRegister(), i.InputRegister(0), kScratchReg);
if (CpuFeatures::IsSupported(DISTINCT_OPS)) {
AssembleBinOp(i, masm(), instr, &MacroAssembler::sllk,
&MacroAssembler::sllk);
} else {
ASSEMBLE_BINOP(ShiftLeft);
AssembleBinOp(i, masm(), instr, &MacroAssembler::sll,
&MacroAssembler::sll);
}
} else {
ASSEMBLE_BINOP(ShiftLeft);
}
__ LoadlW(i.OutputRegister(0), i.OutputRegister(0));
break;
#if V8_TARGET_ARCH_S390X
case kS390_ShiftLeft64:
@ -1094,18 +1270,13 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
break;
#endif
case kS390_ShiftRight32:
if (HasRegisterInput(instr, 1)) {
if (i.OutputRegister().is(i.InputRegister(1)) &&
!CpuFeatures::IsSupported(DISTINCT_OPS)) {
__ LoadRR(kScratchReg, i.InputRegister(1));
__ ShiftRight(i.OutputRegister(), i.InputRegister(0), kScratchReg);
if (CpuFeatures::IsSupported(DISTINCT_OPS)) {
AssembleBinOp(i, masm(), instr, &MacroAssembler::srlk,
&MacroAssembler::srlk);
} else {
ASSEMBLE_BINOP(ShiftRight);
AssembleBinOp(i, masm(), instr, &MacroAssembler::srl,
&MacroAssembler::srl);
}
} else {
ASSEMBLE_BINOP(ShiftRight);
}
__ LoadlW(i.OutputRegister(0), i.OutputRegister(0));
break;
#if V8_TARGET_ARCH_S390X
case kS390_ShiftRight64:
@ -1113,19 +1284,13 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
break;
#endif
case kS390_ShiftRightArith32:
if (HasRegisterInput(instr, 1)) {
if (i.OutputRegister().is(i.InputRegister(1)) &&
!CpuFeatures::IsSupported(DISTINCT_OPS)) {
__ LoadRR(kScratchReg, i.InputRegister(1));
__ ShiftRightArith(i.OutputRegister(), i.InputRegister(0),
kScratchReg);
if (CpuFeatures::IsSupported(DISTINCT_OPS)) {
AssembleBinOp(i, masm(), instr, &MacroAssembler::srak,
&MacroAssembler::srak);
} else {
ASSEMBLE_BINOP(ShiftRightArith);
AssembleBinOp(i, masm(), instr, &MacroAssembler::sra,
&MacroAssembler::sra);
}
} else {
ASSEMBLE_BINOP(ShiftRightArith);
}
__ LoadlW(i.OutputRegister(), i.OutputRegister());
break;
#if V8_TARGET_ARCH_S390X
case kS390_ShiftRightArith64:
@ -1207,7 +1372,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
break;
}
#endif
case kS390_RotRight32:
case kS390_RotRight32: {
if (HasRegisterInput(instr, 1)) {
__ LoadComplementRR(kScratchReg, i.InputRegister(1));
__ rll(i.OutputRegister(), i.InputRegister(0), kScratchReg);
@ -1215,7 +1380,9 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
__ rll(i.OutputRegister(), i.InputRegister(0),
Operand(32 - i.InputInt32(1)));
}
CHECK_AND_ZERO_EXT_OUTPUT(2);
break;
}
#if V8_TARGET_ARCH_S390X
case kS390_RotRight64:
if (HasRegisterInput(instr, 1)) {
@ -1226,14 +1393,6 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
Operand(64 - i.InputInt32(1)));
}
break;
#endif
case kS390_Not32:
__ Not32(i.OutputRegister(), i.InputRegister(0));
break;
case kS390_Not64:
__ Not64(i.OutputRegister(), i.InputRegister(0));
break;
#if V8_TARGET_ARCH_S390X
case kS390_RotLeftAndClear64:
if (CpuFeatures::IsSupported(GENERAL_INSTR_EXT)) {
int shiftAmount = i.InputInt32(1);
@ -1282,10 +1441,14 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
}
break;
#endif
case kS390_Add32:
ASSEMBLE_BINOP(Add32);
__ LoadW(i.OutputRegister(), i.OutputRegister());
case kS390_Add32: {
if (CpuFeatures::IsSupported(DISTINCT_OPS)) {
ASSEMBLE_BIN_OP(ark, Add32, Add32_RRI);
} else {
ASSEMBLE_BIN_OP(ar, Add32, Add32_RI);
}
break;
}
case kS390_Add64:
ASSEMBLE_BINOP(AddP);
break;
@ -1310,8 +1473,11 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
}
break;
case kS390_Sub32:
ASSEMBLE_BINOP(Sub32);
__ LoadW(i.OutputRegister(), i.OutputRegister());
if (CpuFeatures::IsSupported(DISTINCT_OPS)) {
ASSEMBLE_BIN_OP(srk, Sub32, Sub32_RRI);
} else {
ASSEMBLE_BIN_OP(sr, Sub32, Sub32_RI);
}
break;
case kS390_Sub64:
ASSEMBLE_BINOP(SubP);
@ -1343,17 +1509,15 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
}
break;
case kS390_Mul32:
if (HasRegisterInput(instr, 1)) {
__ Mul32(i.InputRegister(0), i.InputRegister(1));
} else if (HasImmediateInput(instr, 1)) {
__ Mul32(i.InputRegister(0), i.InputImmediate(1));
} else if (HasStackSlotInput(instr, 1)) {
__ Mul32(i.InputRegister(0), i.InputStackSlot32(1));
} else {
UNIMPLEMENTED();
}
ASSEMBLE_BIN_OP(Mul32, Mul32, Mul32);
break;
case kS390_Mul32WithOverflow:
ASSEMBLE_BIN_OP(Mul32WithOverflowIfCCUnequal,
Mul32WithOverflowIfCCUnequal,
Mul32WithOverflowIfCCUnequal);
break;
case kS390_Mul64:
CHECK(i.OutputRegister().is(i.InputRegister(0)));
if (HasRegisterInput(instr, 1)) {
__ Mul64(i.InputRegister(0), i.InputRegister(1));
} else if (HasImmediateInput(instr, 1)) {
@ -1365,15 +1529,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
}
break;
case kS390_MulHigh32:
__ LoadRR(r1, i.InputRegister(0));
if (HasRegisterInput(instr, 1)) {
__ mr_z(r0, i.InputRegister(1));
} else if (HasStackSlotInput(instr, 1)) {
__ mfy(r0, i.InputStackSlot32(1));
} else {
UNIMPLEMENTED();
}
__ LoadW(i.OutputRegister(), r0);
ASSEMBLE_BIN_OP(MulHigh32, MulHigh32, MulHigh32);
break;
case kS390_Mul32WithHigh32:
__ LoadRR(r1, i.InputRegister(0));
@ -1419,13 +1575,23 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
__ ltgr(i.OutputRegister(), r1); // Copy R1: Quotient to output
break;
#endif
case kS390_Div32:
__ LoadRR(r0, i.InputRegister(0));
__ srda(r0, Operand(32));
__ dr(r0, i.InputRegister(1));
__ LoadAndTestP_ExtendSrc(i.OutputRegister(),
r1); // Copy R1: Quotient to output
case kS390_Div32: {
AddressingMode mode = AddressingModeField::decode(instr->opcode());
__ lgfr(r1, i.InputRegister(0));
if (mode != kMode_None) {
size_t first_index = 1;
MemOperand operand = i.MemoryOperand(&mode, &first_index);
__ dsgf(r0, operand);
} else if (HasRegisterInput(instr, 1)) {
__ dsgfr(r0, i.InputRegister(1));
} else if (HasStackSlotInput(instr, 1)) {
__ dsgf(r0, i.InputStackSlot32(1));
} else {
UNREACHABLE();
}
__ LoadlW(i.OutputRegister(), r1);
break;
}
#if V8_TARGET_ARCH_S390X
case kS390_DivU64:
__ LoadRR(r1, i.InputRegister(0));
@ -1434,14 +1600,24 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
__ ltgr(i.OutputRegister(), r1); // Copy R1: Quotient to output
break;
#endif
case kS390_DivU32:
__ LoadRR(r0, i.InputRegister(0));
case kS390_DivU32: {
__ lr(r0, i.InputRegister(0));
__ srdl(r0, Operand(32));
__ dlr(r0, i.InputRegister(1)); // R0:R1: Dividend
__ LoadlW(i.OutputRegister(), r1); // Copy R1: Quotient to output
__ LoadAndTestP_ExtendSrc(r1, r1);
AddressingMode mode = AddressingModeField::decode(instr->opcode());
if (mode != kMode_None) {
size_t first_index = 1;
MemOperand operand = i.MemoryOperand(&mode, &first_index);
__ dl(r0, operand);
} else if (HasRegisterInput(instr, 1)) {
__ dlr(r0, i.InputRegister(1));
} else if (HasStackSlotInput(instr, 1)) {
__ dl(r0, i.InputStackSlot32(1));
} else {
UNREACHABLE();
}
__ LoadlW(i.OutputRegister(), r1);
break;
}
case kS390_DivFloat:
// InputDoubleRegister(1)=InputDoubleRegister(0)/InputDoubleRegister(1)
if (i.OutputDoubleRegister().is(i.InputDoubleRegister(1))) {
@ -1575,7 +1751,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
}
case kS390_Neg32:
__ lcr(i.OutputRegister(), i.InputRegister(0));
__ LoadW(i.OutputRegister(), i.OutputRegister());
CHECK_AND_ZERO_EXT_OUTPUT(1);
break;
case kS390_Neg64:
__ lcgr(i.OutputRegister(), i.InputRegister(0));
@ -1731,18 +1907,12 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
break;
}
case kS390_ExtendSignWord8:
#if V8_TARGET_ARCH_S390X
__ lgbr(i.OutputRegister(), i.InputRegister(0));
#else
__ lbr(i.OutputRegister(), i.InputRegister(0));
#endif
CHECK_AND_ZERO_EXT_OUTPUT(1);
break;
case kS390_ExtendSignWord16:
#if V8_TARGET_ARCH_S390X
__ lghr(i.OutputRegister(), i.InputRegister(0));
#else
__ lhr(i.OutputRegister(), i.InputRegister(0));
#endif
CHECK_AND_ZERO_EXT_OUTPUT(1);
break;
#if V8_TARGET_ARCH_S390X
case kS390_ExtendSignWord32:
@ -2013,6 +2183,9 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
case kS390_StoreDouble:
ASSEMBLE_STORE_DOUBLE();
break;
case kS390_Lay:
__ lay(i.OutputRegister(), i.MemoryOperand());
break;
case kCheckedLoadInt8:
ASSEMBLE_CHECKED_LOAD_INTEGER(LoadlB);
#if V8_TARGET_ARCH_S390X
@ -2361,7 +2534,7 @@ void CodeGenerator::AssembleMove(InstructionOperand* source,
#endif
__ mov(dst, Operand(src.ToInt32(), src.rmode()));
} else {
__ mov(dst, Operand(src.ToInt32()));
__ Load(dst, Operand(src.ToInt32()));
}
break;
case Constant::kInt64:
@ -2370,7 +2543,7 @@ void CodeGenerator::AssembleMove(InstructionOperand* source,
__ mov(dst, Operand(src.ToInt64(), src.rmode()));
} else {
DCHECK(!RelocInfo::IsWasmSizeReference(src.rmode()));
__ mov(dst, Operand(src.ToInt64()));
__ Load(dst, Operand(src.ToInt64()));
}
#else
__ mov(dst, Operand(src.ToInt64()));

View File

@ -34,6 +34,7 @@ namespace compiler {
V(S390_RotLeftAndClear64) \
V(S390_RotLeftAndClearLeft64) \
V(S390_RotLeftAndClearRight64) \
V(S390_Lay) \
V(S390_Add32) \
V(S390_Add64) \
V(S390_AddPair) \
@ -46,6 +47,7 @@ namespace compiler {
V(S390_SubPair) \
V(S390_MulPair) \
V(S390_Mul32) \
V(S390_Mul32WithOverflow) \
V(S390_Mul32WithHigh32) \
V(S390_Mul64) \
V(S390_MulHigh32) \

View File

@ -35,6 +35,7 @@ int InstructionScheduler::GetTargetInstructionFlags(
case kS390_RotLeftAndClear64:
case kS390_RotLeftAndClearLeft64:
case kS390_RotLeftAndClearRight64:
case kS390_Lay:
case kS390_Add32:
case kS390_Add64:
case kS390_AddPair:
@ -47,6 +48,7 @@ int InstructionScheduler::GetTargetInstructionFlags(
case kS390_SubFloat:
case kS390_SubDouble:
case kS390_Mul32:
case kS390_Mul32WithOverflow:
case kS390_Mul32WithHigh32:
case kS390_Mul64:
case kS390_MulHigh32:

View File

@ -12,23 +12,61 @@ namespace v8 {
namespace internal {
namespace compiler {
enum ImmediateMode {
kShift32Imm,
kShift64Imm,
kInt32Imm,
kInt32Imm_Negate,
kUint32Imm,
kInt20Imm,
kNoImmediate
enum class OperandMode : uint32_t {
kNone = 0u,
// Immediate mode
kShift32Imm = 1u << 0,
kShift64Imm = 1u << 1,
kInt32Imm = 1u << 2,
kInt32Imm_Negate = 1u << 3,
kUint32Imm = 1u << 4,
kInt20Imm = 1u << 5,
// Instr format
kAllowRRR = 1u << 7,
kAllowRM = 1u << 8,
kAllowRI = 1u << 9,
kAllowRRI = 1u << 10,
kAllowRRM = 1u << 11,
// Useful combination
kAllowImmediate = kAllowRI | kAllowRRI,
kAllowMemoryOperand = kAllowRM | kAllowRRM,
kAllowDistinctOps = kAllowRRR | kAllowRRI | kAllowRRM,
kBitWiseCommonMode = kAllowRI | kUint32Imm,
kArithmeticCommonMode = kAllowRM | kAllowRI
};
typedef base::Flags<OperandMode, uint32_t> OperandModes;
DEFINE_OPERATORS_FOR_FLAGS(OperandModes);
OperandModes immediateModeMask =
OperandMode::kShift32Imm | OperandMode::kShift64Imm |
OperandMode::kInt32Imm | OperandMode::kInt32Imm_Negate |
OperandMode::kUint32Imm | OperandMode::kInt20Imm;
#define BitWiseOperandMode \
((OperandMode::kBitWiseCommonMode | \
(CpuFeatures::IsSupported(DISTINCT_OPS) \
? OperandMode::kAllowRRR \
: OperandMode::kBitWiseCommonMode)))
#define AddOperandMode \
((OperandMode::kArithmeticCommonMode | OperandMode::kInt32Imm | \
(CpuFeatures::IsSupported(DISTINCT_OPS) \
? (OperandMode::kAllowRRR | OperandMode::kAllowRRI) \
: OperandMode::kArithmeticCommonMode)))
#define SubOperandMode \
((OperandMode::kArithmeticCommonMode | OperandMode::kInt32Imm_Negate | \
(CpuFeatures::IsSupported(DISTINCT_OPS) \
? (OperandMode::kAllowRRR | OperandMode::kAllowRRI) \
: OperandMode::kArithmeticCommonMode)))
#define MulOperandMode \
(OperandMode::kArithmeticCommonMode | OperandMode::kInt32Imm)
// Adds S390-specific methods for generating operands.
class S390OperandGenerator final : public OperandGenerator {
public:
explicit S390OperandGenerator(InstructionSelector* selector)
: OperandGenerator(selector) {}
InstructionOperand UseOperand(Node* node, ImmediateMode mode) {
InstructionOperand UseOperand(Node* node, OperandModes mode) {
if (CanBeImmediate(node, mode)) {
return UseImmediate(node);
}
@ -45,7 +83,7 @@ class S390OperandGenerator final : public OperandGenerator {
return 0L;
}
bool CanBeImmediate(Node* node, ImmediateMode mode) {
bool CanBeImmediate(Node* node, OperandModes mode) {
int64_t value;
if (node->opcode() == IrOpcode::kInt32Constant)
value = OpParameter<int32_t>(node);
@ -56,23 +94,20 @@ class S390OperandGenerator final : public OperandGenerator {
return CanBeImmediate(value, mode);
}
bool CanBeImmediate(int64_t value, ImmediateMode mode) {
switch (mode) {
case kShift32Imm:
bool CanBeImmediate(int64_t value, OperandModes mode) {
if (mode & OperandMode::kShift32Imm)
return 0 <= value && value < 32;
case kShift64Imm:
else if (mode & OperandMode::kShift64Imm)
return 0 <= value && value < 64;
case kInt32Imm:
else if (mode & OperandMode::kInt32Imm)
return is_int32(value);
case kInt32Imm_Negate:
else if (mode & OperandMode::kInt32Imm_Negate)
return is_int32(-value);
case kUint32Imm:
else if (mode & OperandMode::kUint32Imm)
return is_uint32(value);
case kInt20Imm:
else if (mode & OperandMode::kInt20Imm)
return is_int20(value);
case kNoImmediate:
return false;
}
else
return false;
}
@ -131,7 +166,7 @@ class S390OperandGenerator final : public OperandGenerator {
#endif
DCHECK(m.matches());
if ((m.displacement() == nullptr ||
CanBeImmediate(m.displacement(), kInt20Imm))) {
CanBeImmediate(m.displacement(), OperandMode::kInt20Imm))) {
DCHECK(m.scale() == 0);
return GenerateMemoryOperandInputs(m.index(), m.base(), m.displacement(),
m.displacement_mode(), inputs,
@ -158,6 +193,127 @@ class S390OperandGenerator final : public OperandGenerator {
namespace {
// Maps a Load node's machine representation to the matching s390 load
// opcode. The node must satisfy NodeMatcher::IsLoad (DCHECKed below);
// unsupported representations (SIMD, kNone, and 64-bit words on 31-bit
// builds) hit UNREACHABLE.
ArchOpcode SelectLoadOpcode(Node* node) {
  NodeMatcher m(node);
  DCHECK(m.IsLoad());
  LoadRepresentation load_rep = LoadRepresentationOf(node->op());
  ArchOpcode opcode = kArchNop;
  switch (load_rep.representation()) {
    case MachineRepresentation::kFloat32:
      opcode = kS390_LoadFloat32;
      break;
    case MachineRepresentation::kFloat64:
      opcode = kS390_LoadDouble;
      break;
    case MachineRepresentation::kBit:  // Fall through.
    case MachineRepresentation::kWord8:
      opcode = load_rep.IsSigned() ? kS390_LoadWordS8 : kS390_LoadWordU8;
      break;
    case MachineRepresentation::kWord16:
      opcode = load_rep.IsSigned() ? kS390_LoadWordS16 : kS390_LoadWordU16;
      break;
#if !V8_TARGET_ARCH_S390X
    // Tagged values are word-sized (32-bit) on the 31-bit port.
    case MachineRepresentation::kTaggedSigned:   // Fall through.
    case MachineRepresentation::kTaggedPointer:  // Fall through.
    case MachineRepresentation::kTagged:         // Fall through.
#endif
    case MachineRepresentation::kWord32:
      opcode = kS390_LoadWordU32;
      break;
#if V8_TARGET_ARCH_S390X
    // Tagged values are 64-bit on the 64-bit port.
    case MachineRepresentation::kTaggedSigned:   // Fall through.
    case MachineRepresentation::kTaggedPointer:  // Fall through.
    case MachineRepresentation::kTagged:         // Fall through.
    case MachineRepresentation::kWord64:
      opcode = kS390_LoadWord64;
      break;
#else
    case MachineRepresentation::kWord64:  // Fall through.
#endif
    case MachineRepresentation::kSimd128:  // Fall through.
    case MachineRepresentation::kNone:
    default:
      UNREACHABLE();
  }
  return opcode;
}
// Returns true if the code emitted for |node| leaves the upper 32 bits of
// the 64-bit result register zeroed as a side effect, so the code
// generator never needs an explicit zero-extension for it. On the 31-bit
// port there is no upper word, so every node qualifies.
bool AutoZeroExtendsWord32ToWord64(Node* node) {
#if !V8_TARGET_ARCH_S390X
  return true;
#else
  switch (node->opcode()) {
    case IrOpcode::kInt32Div:
    case IrOpcode::kUint32Div:
    case IrOpcode::kInt32MulHigh:
    case IrOpcode::kInt32Mod:
    case IrOpcode::kUint32Mod:
      return true;
    default:
      return false;
  }
  // Note: the trailing `return false;` present before was unreachable —
  // every switch path above returns.
#endif
}
// Returns true if |node| is a 32-bit operation whose code generation in
// this backend guarantees (via the zero-extend flag machinery, see
// CHECK_AND_ZERO_EXT_OUTPUT) that the upper 32 bits of the output are
// zero, making an explicit zero-extension of its result redundant. On the
// 31-bit port everything trivially qualifies.
bool ZeroExtendsWord32ToWord64(Node* node) {
#if !V8_TARGET_ARCH_S390X
  return true;
#else
  switch (node->opcode()) {
    case IrOpcode::kInt32Add:
    case IrOpcode::kInt32Sub:
    case IrOpcode::kWord32And:
    case IrOpcode::kWord32Or:
    case IrOpcode::kWord32Xor:
    case IrOpcode::kWord32Shl:
    case IrOpcode::kWord32Shr:
    case IrOpcode::kWord32Sar:
    case IrOpcode::kInt32Mul:
    case IrOpcode::kWord32Ror:
    case IrOpcode::kInt32Div:
    case IrOpcode::kUint32Div:
    case IrOpcode::kInt32MulHigh:
    case IrOpcode::kInt32Mod:
    case IrOpcode::kUint32Mod:
      return true;
    // TODO(john.yan): consider the following cases to be valid as well.
    // (Comment block below was carried over from the x64 selector —
    // hence the mention of x64; verify the claims for s390 before
    // enabling.)
    // case IrOpcode::kWord32Equal:
    // case IrOpcode::kInt32LessThan:
    // case IrOpcode::kInt32LessThanOrEqual:
    // case IrOpcode::kUint32LessThan:
    // case IrOpcode::kUint32LessThanOrEqual:
    // case IrOpcode::kUint32MulHigh:
    //   // These 32-bit operations implicitly zero-extend to 64-bit on
    //   // x64, so the zero-extension is a no-op.
    //   return true;
    // case IrOpcode::kProjection: {
    //   Node* const value = node->InputAt(0);
    //   switch (value->opcode()) {
    //     case IrOpcode::kInt32AddWithOverflow:
    //     case IrOpcode::kInt32SubWithOverflow:
    //     case IrOpcode::kInt32MulWithOverflow:
    //       return true;
    //     default:
    //       return false;
    //   }
    // }
    case IrOpcode::kLoad: {
      // Word32 loads are emitted as kS390_LoadWordU32 (see
      // SelectLoadOpcode), which zero-extends.
      LoadRepresentation load_rep = LoadRepresentationOf(node->op());
      switch (load_rep.representation()) {
        case MachineRepresentation::kWord32:
          return true;
        default:
          return false;
      }
    }
    default:
      return false;
  }
#endif
}
void VisitRR(InstructionSelector* selector, ArchOpcode opcode, Node* node) {
S390OperandGenerator g(selector);
selector->Emit(opcode, g.DefineAsRegister(node),
@ -171,15 +327,15 @@ void VisitRRR(InstructionSelector* selector, ArchOpcode opcode, Node* node) {
g.UseRegister(node->InputAt(1)));
}
#if V8_TARGET_ARCH_S390X
void VisitRRO(InstructionSelector* selector, ArchOpcode opcode, Node* node,
ImmediateMode operand_mode) {
OperandModes operand_mode) {
S390OperandGenerator g(selector);
selector->Emit(opcode, g.DefineAsRegister(node),
g.UseRegister(node->InputAt(0)),
g.UseOperand(node->InputAt(1), operand_mode));
}
#if V8_TARGET_ARCH_S390X
void VisitTryTruncateDouble(InstructionSelector* selector, ArchOpcode opcode,
Node* node) {
S390OperandGenerator g(selector);
@ -200,7 +356,7 @@ void VisitTryTruncateDouble(InstructionSelector* selector, ArchOpcode opcode,
// Shared routine for multiple binary operations.
template <typename Matcher>
void VisitBinop(InstructionSelector* selector, Node* node,
InstructionCode opcode, ImmediateMode operand_mode,
InstructionCode opcode, OperandModes operand_mode,
FlagsContinuation* cont) {
S390OperandGenerator g(selector);
Matcher m(node);
@ -269,54 +425,149 @@ void VisitBinop(InstructionSelector* selector, Node* node,
// Shared routine for multiple binary operations.
template <typename Matcher>
void VisitBinop(InstructionSelector* selector, Node* node, ArchOpcode opcode,
ImmediateMode operand_mode) {
OperandModes operand_mode) {
FlagsContinuation cont;
VisitBinop<Matcher>(selector, node, opcode, operand_mode, &cont);
}
// Shared instruction-selection routine for 32-bit binary operations.
// |operand_mode| describes which instruction encodings (RR/RM/RI and the
// distinct-ops RRR/RRM/RRI forms) and immediate kinds are legal for
// |opcode|; it is progressively narrowed as operands are chosen. Appends a
// trailing 0/1 immediate input telling the code generator whether to
// zero-extend the 32-bit result (consumed by CHECK_AND_ZERO_EXT_OUTPUT).
void VisitBin32op(InstructionSelector* selector, Node* node,
                  InstructionCode opcode, OperandModes operand_mode,
                  FlagsContinuation* cont) {
  S390OperandGenerator g(selector);
  Int32BinopMatcher m(node);
  Node* left = m.left().node();
  Node* right = m.right().node();
  InstructionOperand inputs[8];
  size_t input_count = 0;
  InstructionOperand outputs[2];
  size_t output_count = 0;

  // Look through TruncateInt64ToInt32 on the left operand when this node
  // is its only effective user.
  if (m.left().IsTruncateInt64ToInt32() && selector->CanCover(node, left)) {
    left = left->InputAt(0);
  }
  // Same for the right operand.
  if (m.right().IsTruncateInt64ToInt32() && selector->CanCover(node, right)) {
    right = right->InputAt(0);
  }

  // For commutative ops, swap the operands when the right one cannot be an
  // immediate and looks like the better left (register) operand.
#if V8_TARGET_ARCH_S390X
  if ((ZeroExtendsWord32ToWord64(right) || g.CanBeBetterLeftOperand(right)) &&
      node->op()->HasProperty(Operator::kCommutative) &&
      !g.CanBeImmediate(right, operand_mode)) {
    std::swap(left, right);
  }
#else
  if (node->op()->HasProperty(Operator::kCommutative) &&
      !g.CanBeImmediate(right, operand_mode) &&
      (g.CanBeBetterLeftOperand(right))) {
    std::swap(left, right);
  }
#endif

  // left is always register
  InstructionOperand const left_input = g.UseRegister(left);
  inputs[input_count++] = left_input;

  // TODO(turbofan): match complex addressing modes.
  if (left == right) {
    // If both inputs refer to the same operand, enforce allocating a register
    // for both of them to ensure that we don't end up generating code like
    // this:
    //
    //   mov rax, [rbp-0x10]
    //   add rax, [rbp-0x10]
    //   jo label
    inputs[input_count++] = left_input;
    // Can only be RR or RRR
    operand_mode &= OperandMode::kAllowRRR;
  } else if ((operand_mode & OperandMode::kAllowImmediate) &&
             g.CanBeImmediate(right, operand_mode)) {
    inputs[input_count++] = g.UseImmediate(right);
    // Can only be RI or RRI
    operand_mode &= OperandMode::kAllowImmediate;
  } else if (operand_mode & OperandMode::kAllowMemoryOperand) {
    NodeMatcher mright(right);
    // Fold a covered zero-extending Word32 load directly into the binop's
    // memory operand.
    if (mright.IsLoad() && selector->CanCover(node, right) &&
        SelectLoadOpcode(right) == kS390_LoadWordU32) {
      AddressingMode mode =
          g.GetEffectiveAddressMemoryOperand(right, inputs, &input_count);
      opcode |= AddressingModeField::encode(mode);
      operand_mode &= ~OperandMode::kAllowImmediate;
      if (operand_mode & OperandMode::kAllowRM)
        operand_mode &= ~OperandMode::kAllowDistinctOps;
    } else if (operand_mode & OperandMode::kAllowRM) {
      DCHECK(!(operand_mode & OperandMode::kAllowRRM));
      inputs[input_count++] = g.Use(right);
      // Can not be Immediate
      operand_mode &=
          ~OperandMode::kAllowImmediate & ~OperandMode::kAllowDistinctOps;
    } else if (operand_mode & OperandMode::kAllowRRM) {
      DCHECK(!(operand_mode & OperandMode::kAllowRM));
      inputs[input_count++] = g.Use(right);
      // Can not be Immediate
      operand_mode &= ~OperandMode::kAllowImmediate;
    } else {
      UNREACHABLE();
    }
  } else {
    inputs[input_count++] = g.UseRegister(right);
    // Can only be RR or RRR
    operand_mode &= OperandMode::kAllowRRR;
  }

  // The result needs an explicit zero-extension unless the instruction
  // zero-extends on its own or the (possibly swapped) left operand is
  // already known to be zero-extended.
  bool doZeroExt =
      AutoZeroExtendsWord32ToWord64(node) || !ZeroExtendsWord32ToWord64(left);

  // Trailing flag input consumed by CHECK_AND_ZERO_EXT_OUTPUT in the code
  // generator; false when the instruction auto-zero-extends anyway.
  inputs[input_count++] =
      g.TempImmediate(doZeroExt && (!AutoZeroExtendsWord32ToWord64(node)));

  if (cont->IsBranch()) {
    inputs[input_count++] = g.Label(cont->true_block());
    inputs[input_count++] = g.Label(cont->false_block());
  }

  if (doZeroExt && (operand_mode & OperandMode::kAllowDistinctOps) &&
      // If we can deoptimize as a result of the binop, we need to make sure
      // that the deopt inputs are not overwritten by the binop result. One
      // way to achieve that is to declare the output register as
      // same-as-first.
      !cont->IsDeoptimize()) {
    outputs[output_count++] = g.DefineAsRegister(node);
  } else {
    outputs[output_count++] = g.DefineSameAsFirst(node);
  }

  if (cont->IsSet()) {
    outputs[output_count++] = g.DefineAsRegister(cont->result());
  }

  DCHECK_NE(0u, input_count);
  DCHECK_NE(0u, output_count);
  DCHECK_GE(arraysize(inputs), input_count);
  DCHECK_GE(arraysize(outputs), output_count);

  opcode = cont->Encode(opcode);
  if (cont->IsDeoptimize()) {
    selector->EmitDeoptimize(opcode, output_count, outputs, input_count, inputs,
                             cont->reason(), cont->frame_state());
  } else {
    selector->Emit(opcode, output_count, outputs, input_count, inputs);
  }
}
// Convenience overload: selects the 32-bit binop with an empty flags
// continuation (no branch, set or deoptimization attached).
void VisitBin32op(InstructionSelector* selector, Node* node, ArchOpcode opcode,
                  OperandModes operand_mode) {
  FlagsContinuation cont;
  VisitBin32op(selector, node, opcode, operand_mode, &cont);
}
} // namespace
void InstructionSelector::VisitLoad(Node* node) {
LoadRepresentation load_rep = LoadRepresentationOf(node->op());
S390OperandGenerator g(this);
ArchOpcode opcode = kArchNop;
switch (load_rep.representation()) {
case MachineRepresentation::kFloat32:
opcode = kS390_LoadFloat32;
break;
case MachineRepresentation::kFloat64:
opcode = kS390_LoadDouble;
break;
case MachineRepresentation::kBit: // Fall through.
case MachineRepresentation::kWord8:
opcode = load_rep.IsSigned() ? kS390_LoadWordS8 : kS390_LoadWordU8;
break;
case MachineRepresentation::kWord16:
opcode = load_rep.IsSigned() ? kS390_LoadWordS16 : kS390_LoadWordU16;
break;
#if !V8_TARGET_ARCH_S390X
case MachineRepresentation::kTaggedSigned: // Fall through.
case MachineRepresentation::kTaggedPointer: // Fall through.
case MachineRepresentation::kTagged: // Fall through.
#endif
case MachineRepresentation::kWord32:
opcode = kS390_LoadWordU32;
break;
#if V8_TARGET_ARCH_S390X
case MachineRepresentation::kTaggedSigned: // Fall through.
case MachineRepresentation::kTaggedPointer: // Fall through.
case MachineRepresentation::kTagged: // Fall through.
case MachineRepresentation::kWord64:
opcode = kS390_LoadWord64;
break;
#else
case MachineRepresentation::kWord64: // Fall through.
#endif
case MachineRepresentation::kSimd128: // Fall through.
case MachineRepresentation::kNone:
UNREACHABLE();
return;
}
ArchOpcode opcode = SelectLoadOpcode(node);
InstructionOperand outputs[1];
outputs[0] = g.DefineAsRegister(node);
InstructionOperand inputs[3];
@ -350,7 +601,7 @@ void InstructionSelector::VisitStore(Node* node) {
inputs[input_count++] = g.UseUniqueRegister(base);
// OutOfLineRecordWrite uses the offset in an 'AddP' instruction as well as
// for the store itself, so we must check compatibility with both.
if (g.CanBeImmediate(offset, kInt20Imm)) {
if (g.CanBeImmediate(offset, OperandMode::kInt20Imm)) {
inputs[input_count++] = g.UseImmediate(offset);
addressing_mode = kMode_MRI;
} else {
@ -494,7 +745,7 @@ void InstructionSelector::VisitCheckedLoad(Node* node) {
AddressingMode addressingMode = kMode_MRR;
Emit(opcode | AddressingModeField::encode(addressingMode),
g.DefineAsRegister(node), g.UseRegister(base), g.UseRegister(offset),
g.UseOperand(length, kUint32Imm));
g.UseOperand(length, OperandMode::kUint32Imm));
}
void InstructionSelector::VisitCheckedStore(Node* node) {
@ -541,7 +792,7 @@ void InstructionSelector::VisitCheckedStore(Node* node) {
AddressingMode addressingMode = kMode_MRR;
Emit(opcode | AddressingModeField::encode(addressingMode), g.NoOutput(),
g.UseRegister(base), g.UseRegister(offset),
g.UseOperand(length, kUint32Imm), g.UseRegister(value));
g.UseOperand(length, OperandMode::kUint32Imm), g.UseRegister(value));
}
#if 0
@ -571,7 +822,8 @@ static inline bool IsContiguousMask64(uint64_t value, int* mb, int* me) {
#endif
// 32-bit bitwise AND. Merged-diff residue resolved: the superseded
// VisitBinop<Int32BinopMatcher> call (pre-change line) is dropped; the
// VisitBin32op path additionally matches a memory operand (kAllowRM).
void InstructionSelector::VisitWord32And(Node* node) {
  VisitBin32op(this, node, kS390_And32,
               BitWiseOperandMode | OperandMode::kAllowRM);
}
#if V8_TARGET_ARCH_S390X
@ -623,46 +875,38 @@ void InstructionSelector::VisitWord64And(Node* node) {
}
}
}
VisitBinop<Int64BinopMatcher>(this, node, kS390_And64, kUint32Imm);
VisitBinop<Int64BinopMatcher>(this, node, kS390_And64,
OperandMode::kUint32Imm);
}
#endif
// 32-bit bitwise OR. Merged-diff residue resolved: the old
// VisitBinop<Int32BinopMatcher> call and its unused Int32BinopMatcher local
// are dropped in favor of the VisitBin32op path (register/immediate/memory).
void InstructionSelector::VisitWord32Or(Node* node) {
  VisitBin32op(this, node, kS390_Or32,
               BitWiseOperandMode | OperandMode::kAllowRM);
}
#if V8_TARGET_ARCH_S390X
// 64-bit bitwise OR. Merged-diff residue resolved: keep only the
// OperandMode::kUint32Imm call; the unscoped kUint32Imm duplicate and the
// unused Int64BinopMatcher local are removed.
void InstructionSelector::VisitWord64Or(Node* node) {
  VisitBinop<Int64BinopMatcher>(this, node, kS390_Or64,
                                OperandMode::kUint32Imm);
}
#endif
// 32-bit bitwise XOR. Merged-diff residue resolved: the old kS390_Not32
// special case for x ^ -1 is removed (HEAD item 5: Not32 is the same as XOR),
// along with the now-unused operand generator and matcher locals.
void InstructionSelector::VisitWord32Xor(Node* node) {
  VisitBin32op(this, node, kS390_Xor32,
               BitWiseOperandMode | OperandMode::kAllowRM);
}
#if V8_TARGET_ARCH_S390X
// 64-bit bitwise XOR. Merged-diff residue resolved: the old kS390_Not64
// special case for x ^ -1 is removed (HEAD item 5: Not64 is the same as XOR),
// along with the now-unused operand generator and matcher locals.
void InstructionSelector::VisitWord64Xor(Node* node) {
  VisitBinop<Int64BinopMatcher>(this, node, kS390_Xor64,
                                OperandMode::kUint32Imm);
}
#endif
// 32-bit shift left. Merged-diff residue resolved: the superseded VisitRRO
// call is dropped; VisitBin32op selects the form (and handles the
// zero-extension bookkeeping described in HEAD item 1).
void InstructionSelector::VisitWord32Shl(Node* node) {
  VisitBin32op(this, node, kS390_ShiftLeft32, BitWiseOperandMode);
}
#if V8_TARGET_ARCH_S390X
@ -705,12 +949,12 @@ void InstructionSelector::VisitWord64Shl(Node* node) {
}
}
}
VisitRRO(this, kS390_ShiftLeft64, node, kShift64Imm);
VisitRRO(this, kS390_ShiftLeft64, node, OperandMode::kShift64Imm);
}
#endif
// 32-bit logical shift right. Merged-diff residue resolved: the superseded
// VisitRRO call is dropped in favor of the VisitBin32op path.
void InstructionSelector::VisitWord32Shr(Node* node) {
  VisitBin32op(this, node, kS390_ShiftRight32, BitWiseOperandMode);
}
#if V8_TARGET_ARCH_S390X
@ -749,7 +993,7 @@ void InstructionSelector::VisitWord64Shr(Node* node) {
}
}
}
VisitRRO(this, kS390_ShiftRight64, node, kShift64Imm);
VisitRRO(this, kS390_ShiftRight64, node, OperandMode::kShift64Imm);
}
#endif
@ -760,16 +1004,20 @@ void InstructionSelector::VisitWord32Sar(Node* node) {
if (CanCover(node, m.left().node()) && m.left().IsWord32Shl()) {
Int32BinopMatcher mleft(m.left().node());
if (mleft.right().Is(16) && m.right().Is(16)) {
Emit(kS390_ExtendSignWord16, g.DefineAsRegister(node),
g.UseRegister(mleft.left().node()));
bool doZeroExt = !ZeroExtendsWord32ToWord64(mleft.left().node());
Emit(kS390_ExtendSignWord16,
doZeroExt ? g.DefineAsRegister(node) : g.DefineSameAsFirst(node),
g.UseRegister(mleft.left().node()), g.TempImmediate(doZeroExt));
return;
} else if (mleft.right().Is(24) && m.right().Is(24)) {
Emit(kS390_ExtendSignWord8, g.DefineAsRegister(node),
g.UseRegister(mleft.left().node()));
bool doZeroExt = !ZeroExtendsWord32ToWord64(mleft.left().node());
Emit(kS390_ExtendSignWord8,
doZeroExt ? g.DefineAsRegister(node) : g.DefineSameAsFirst(node),
g.UseRegister(mleft.left().node()), g.TempImmediate(doZeroExt));
return;
}
}
VisitRRO(this, kS390_ShiftRightArith32, node, kShift32Imm);
VisitBin32op(this, node, kS390_ShiftRightArith32, BitWiseOperandMode);
}
#if !V8_TARGET_ARCH_S390X
@ -795,7 +1043,7 @@ void VisitPairBinop(InstructionSelector* selector, InstructionCode opcode,
// instruction.
selector->Emit(opcode2, g.DefineSameAsFirst(node),
g.UseRegister(node->InputAt(0)),
g.UseRegister(node->InputAt(2)));
g.UseRegister(node->InputAt(2)), g.TempImmediate(0));
}
}
@ -825,7 +1073,8 @@ void InstructionSelector::VisitInt32PairMul(Node* node) {
// The high word of the result is not used, so we emit the standard 32 bit
// instruction.
Emit(kS390_Mul32, g.DefineSameAsFirst(node),
g.UseRegister(node->InputAt(0)), g.UseRegister(node->InputAt(2)));
g.UseRegister(node->InputAt(0)), g.Use(node->InputAt(2)),
g.TempImmediate(0));
}
}
@ -881,17 +1130,20 @@ void InstructionSelector::VisitWord32PairSar(Node* node) {
#if V8_TARGET_ARCH_S390X
void InstructionSelector::VisitWord64Sar(Node* node) {
VisitRRO(this, kS390_ShiftRightArith64, node, kShift64Imm);
VisitRRO(this, kS390_ShiftRightArith64, node, OperandMode::kShift64Imm);
}
#endif
// 32-bit rotate right. Merged-diff residue resolved: the superseded VisitRRO
// call is dropped. Note kAllowRM/kAllowRRM are deliberately absent — rotate
// has no memory-operand form here.
void InstructionSelector::VisitWord32Ror(Node* node) {
  // TODO(john): match dst = ror(src1, src2 + imm)
  VisitBin32op(this, node, kS390_RotRight32,
               OperandMode::kAllowRI | OperandMode::kAllowRRR |
                   OperandMode::kAllowRRI | OperandMode::kShift32Imm);
}
#if V8_TARGET_ARCH_S390X
void InstructionSelector::VisitWord64Ror(Node* node) {
VisitRRO(this, kS390_RotRight64, node, kShift64Imm);
VisitRRO(this, kS390_RotRight64, node, OperandMode::kShift64Imm);
}
#endif
@ -961,12 +1213,13 @@ void InstructionSelector::VisitWord32ReverseBytes(Node* node) {
}
// 32-bit integer add. Merged-diff residue resolved: the superseded
// VisitBinop call is dropped; AddOperandMode enables RM/RI/RRR forms.
void InstructionSelector::VisitInt32Add(Node* node) {
  VisitBin32op(this, node, kS390_Add32, AddOperandMode);
}
#if V8_TARGET_ARCH_S390X
// 64-bit integer add. Merged-diff residue resolved: keep only the call using
// the scoped OperandMode::kInt32Imm enumerator.
void InstructionSelector::VisitInt64Add(Node* node) {
  VisitBinop<Int64BinopMatcher>(this, node, kS390_Add64,
                                OperandMode::kInt32Imm);
}
#endif
@ -974,10 +1227,12 @@ void InstructionSelector::VisitInt32Sub(Node* node) {
S390OperandGenerator g(this);
Int32BinopMatcher m(node);
if (m.left().Is(0)) {
Emit(kS390_Neg32, g.DefineAsRegister(node),
g.UseRegister(m.right().node()));
Node* right = m.right().node();
bool doZeroExt = ZeroExtendsWord32ToWord64(right);
Emit(kS390_Neg32, g.DefineAsRegister(node), g.UseRegister(right),
g.TempImmediate(doZeroExt));
} else {
VisitBinop<Int32BinopMatcher>(this, node, kS390_Sub32, kInt32Imm_Negate);
VisitBin32op(this, node, kS390_Sub32, SubOperandMode);
}
}
@ -989,7 +1244,8 @@ void InstructionSelector::VisitInt64Sub(Node* node) {
Emit(kS390_Neg64, g.DefineAsRegister(node),
g.UseRegister(m.right().node()));
} else {
VisitBinop<Int64BinopMatcher>(this, node, kS390_Sub64, kInt32Imm_Negate);
VisitBinop<Int64BinopMatcher>(this, node, kS390_Sub64,
OperandMode::kInt32Imm_Negate);
}
}
#endif
@ -999,35 +1255,14 @@ namespace {
void VisitCompare(InstructionSelector* selector, InstructionCode opcode,
InstructionOperand left, InstructionOperand right,
FlagsContinuation* cont);
// Emits a 32x32->64 multiply and derives the overflow condition manually:
// kS390_Mul32WithHigh32 yields the low word (result) and the high word of the
// full product; the low word is then arithmetically shifted right by 31 to
// produce its sign-fill. The product fits in 32 bits iff the high word equals
// that sign-fill, so the final kS390_Cmp32 lets the continuation test
// overflow as "high32 != low32 >> 31".
// NOTE(review): superseded by the kS390_Mul32WithOverflow path taken in
// VisitInt32MulWithOverflow — candidate for removal once no longer referenced.
void EmitInt32MulWithOverflow(InstructionSelector* selector, Node* node,
                              FlagsContinuation* cont) {
  S390OperandGenerator g(selector);
  Int32BinopMatcher m(node);
  InstructionOperand result_operand = g.DefineAsRegister(node);
  InstructionOperand high32_operand = g.TempRegister();
  InstructionOperand temp_operand = g.TempRegister();
  {
    // Full product: low word defines the node's value, high word is a temp.
    InstructionOperand outputs[] = {result_operand, high32_operand};
    InstructionOperand inputs[] = {g.UseRegister(m.left().node()),
                                   g.UseRegister(m.right().node())};
    selector->Emit(kS390_Mul32WithHigh32, 2, outputs, 2, inputs);
  }
  {
    // Sign-fill of the low word: result >> 31 (arithmetic).
    InstructionOperand shift_31 = g.UseImmediate(31);
    InstructionOperand outputs[] = {temp_operand};
    InstructionOperand inputs[] = {result_operand, shift_31};
    selector->Emit(kS390_ShiftRightArith32, 1, outputs, 2, inputs);
  }
  VisitCompare(selector, kS390_Cmp32, high32_operand, temp_operand, cont);
}
#if V8_TARGET_ARCH_S390X
void VisitMul(InstructionSelector* selector, Node* node, ArchOpcode opcode) {
S390OperandGenerator g(selector);
Int32BinopMatcher m(node);
Node* left = m.left().node();
Node* right = m.right().node();
if (g.CanBeImmediate(right, kInt32Imm)) {
if (g.CanBeImmediate(right, OperandMode::kInt32Imm)) {
selector->Emit(opcode, g.DefineSameAsFirst(node), g.UseRegister(left),
g.UseImmediate(right));
} else {
@ -1038,17 +1273,18 @@ void VisitMul(InstructionSelector* selector, Node* node, ArchOpcode opcode) {
g.Use(right));
}
}
#endif
} // namespace
// 32-bit multiply with overflow projection. Merged-diff residue resolved:
// the old EmitInt32MulWithOverflow/VisitMul path and its commented-out
// remnants are dropped; kS390_Mul32WithOverflow sets CC directly.
void InstructionSelector::VisitInt32MulWithOverflow(Node* node) {
  if (Node* ovf = NodeProperties::FindProjection(node, 1)) {
    // Overflow is observed: use the CC-setting multiply and test kNotEqual.
    FlagsContinuation cont = FlagsContinuation::ForSet(kNotEqual, ovf);
    return VisitBin32op(this, node, kS390_Mul32WithOverflow,
                        OperandMode::kInt32Imm | OperandMode::kAllowDistinctOps,
                        &cont);
  }
  // Overflow is unobserved: plain 32-bit multiply suffices.
  VisitBin32op(this, node, kS390_Mul32, MulOperandMode);
}
void InstructionSelector::VisitInt32Mul(Node* node) {
@ -1056,14 +1292,20 @@ void InstructionSelector::VisitInt32Mul(Node* node) {
Int32BinopMatcher m(node);
Node* left = m.left().node();
Node* right = m.right().node();
if (g.CanBeImmediate(right, kInt32Imm) &&
if (g.CanBeImmediate(right, OperandMode::kInt32Imm) &&
base::bits::IsPowerOfTwo32(g.GetImmediate(right))) {
int power = 31 - base::bits::CountLeadingZeros32(g.GetImmediate(right));
Emit(kS390_ShiftLeft32, g.DefineSameAsFirst(node), g.UseRegister(left),
g.UseImmediate(power));
bool doZeroExt = !ZeroExtendsWord32ToWord64(left);
InstructionOperand dst =
(doZeroExt && CpuFeatures::IsSupported(DISTINCT_OPS))
? g.DefineAsRegister(node)
: g.DefineSameAsFirst(node);
Emit(kS390_ShiftLeft32, dst, g.UseRegister(left), g.UseImmediate(power),
g.TempImmediate(doZeroExt));
return;
}
VisitMul(this, node, kS390_Mul32);
VisitBin32op(this, node, kS390_Mul32, MulOperandMode);
}
#if V8_TARGET_ARCH_S390X
@ -1072,7 +1314,7 @@ void InstructionSelector::VisitInt64Mul(Node* node) {
Int64BinopMatcher m(node);
Node* left = m.left().node();
Node* right = m.right().node();
if (g.CanBeImmediate(right, kInt32Imm) &&
if (g.CanBeImmediate(right, OperandMode::kInt32Imm) &&
base::bits::IsPowerOfTwo64(g.GetImmediate(right))) {
int power = 63 - base::bits::CountLeadingZeros64(g.GetImmediate(right));
Emit(kS390_ShiftLeft64, g.DefineSameAsFirst(node), g.UseRegister(left),
@ -1084,15 +1326,8 @@ void InstructionSelector::VisitInt64Mul(Node* node) {
#endif
// High word of a signed 32x32 multiply. Merged-diff residue resolved: the
// old manual operand-swapping body and its locals are dropped; VisitBin32op
// handles operand canonicalization.
void InstructionSelector::VisitInt32MulHigh(Node* node) {
  VisitBin32op(this, node, kS390_MulHigh32,
               OperandMode::kInt32Imm | OperandMode::kAllowDistinctOps);
}
void InstructionSelector::VisitUint32MulHigh(Node* node) {
@ -1108,7 +1343,8 @@ void InstructionSelector::VisitUint32MulHigh(Node* node) {
}
// 32-bit signed division. Merged-diff residue resolved: the superseded
// VisitRRR call is dropped; RRM allows a memory divisor (HEAD item 4).
void InstructionSelector::VisitInt32Div(Node* node) {
  VisitBin32op(this, node, kS390_Div32,
               OperandMode::kAllowRRM | OperandMode::kAllowRRR);
}
#if V8_TARGET_ARCH_S390X
@ -1118,7 +1354,8 @@ void InstructionSelector::VisitInt64Div(Node* node) {
#endif
// 32-bit unsigned division. Merged-diff residue resolved: the superseded
// VisitRRR call is dropped; RRM allows a memory divisor (HEAD item 4).
void InstructionSelector::VisitUint32Div(Node* node) {
  VisitBin32op(this, node, kS390_DivU32,
               OperandMode::kAllowRRM | OperandMode::kAllowRRR);
}
#if V8_TARGET_ARCH_S390X
@ -1202,7 +1439,13 @@ void InstructionSelector::VisitChangeInt32ToInt64(Node* node) {
}
// uint32 -> uint64 conversion. Merged-diff residue resolved: the removed TODO
// line and the unused S390OperandGenerator local are dropped; the stale
// "on x64" wording (copied from the x64 backend) is corrected.
void InstructionSelector::VisitChangeUint32ToUint64(Node* node) {
  Node* value = node->InputAt(0);
  if (ZeroExtendsWord32ToWord64(value)) {
    // The producing 32-bit operation already zero-extends its result to
    // 64 bits on this port, so the explicit zero-extension is a no-op.
    return EmitIdentity(node);
  }
  VisitRR(this, kS390_Uint32ToUint64, node);
}
#endif
@ -1408,46 +1651,46 @@ void InstructionSelector::VisitFloat64Neg(Node* node) {
}
// 32-bit add with overflow projection. Merged-diff residue resolved: the
// superseded VisitBinop calls are dropped in favor of VisitBin32op.
void InstructionSelector::VisitInt32AddWithOverflow(Node* node) {
  OperandModes mode = AddOperandMode;
  if (Node* ovf = NodeProperties::FindProjection(node, 1)) {
    FlagsContinuation cont = FlagsContinuation::ForSet(kOverflow, ovf);
    return VisitBin32op(this, node, kS390_Add32, mode, &cont);
  }
  FlagsContinuation cont;
  VisitBin32op(this, node, kS390_Add32, mode, &cont);
}
// 32-bit subtract with overflow projection. Merged-diff residue resolved:
// the superseded VisitBinop calls are dropped in favor of VisitBin32op.
void InstructionSelector::VisitInt32SubWithOverflow(Node* node) {
  OperandModes mode = SubOperandMode;
  if (Node* ovf = NodeProperties::FindProjection(node, 1)) {
    FlagsContinuation cont = FlagsContinuation::ForSet(kOverflow, ovf);
    return VisitBin32op(this, node, kS390_Sub32, mode, &cont);
  }
  FlagsContinuation cont;
  VisitBin32op(this, node, kS390_Sub32, mode, &cont);
}
#if V8_TARGET_ARCH_S390X
// 64-bit add with overflow projection. Merged-diff residue resolved: keep
// only the calls using the scoped OperandMode::kInt32Imm enumerator.
void InstructionSelector::VisitInt64AddWithOverflow(Node* node) {
  if (Node* ovf = NodeProperties::FindProjection(node, 1)) {
    FlagsContinuation cont = FlagsContinuation::ForSet(kOverflow, ovf);
    return VisitBinop<Int64BinopMatcher>(this, node, kS390_Add64,
                                         OperandMode::kInt32Imm, &cont);
  }
  FlagsContinuation cont;
  VisitBinop<Int64BinopMatcher>(this, node, kS390_Add64, OperandMode::kInt32Imm,
                                &cont);
}
// 64-bit subtract with overflow projection. Merged-diff residue resolved:
// keep only the calls using the scoped OperandMode::kInt32Imm_Negate.
void InstructionSelector::VisitInt64SubWithOverflow(Node* node) {
  if (Node* ovf = NodeProperties::FindProjection(node, 1)) {
    FlagsContinuation cont = FlagsContinuation::ForSet(kOverflow, ovf);
    return VisitBinop<Int64BinopMatcher>(this, node, kS390_Sub64,
                                         OperandMode::kInt32Imm_Negate, &cont);
  }
  FlagsContinuation cont;
  VisitBinop<Int64BinopMatcher>(this, node, kS390_Sub64,
                                OperandMode::kInt32Imm_Negate, &cont);
}
#endif
@ -1488,7 +1731,7 @@ void VisitCompare(InstructionSelector* selector, InstructionCode opcode,
// Shared routine for multiple word compare operations.
void VisitWordCompare(InstructionSelector* selector, Node* node,
InstructionCode opcode, FlagsContinuation* cont,
bool commutative, ImmediateMode immediate_mode) {
bool commutative, OperandModes immediate_mode) {
S390OperandGenerator g(selector);
Node* left = node->InputAt(0);
Node* right = node->InputAt(1);
@ -1509,14 +1752,16 @@ void VisitWordCompare(InstructionSelector* selector, Node* node,
// 32-bit compare: unsigned-immediate mode for logical (unsigned) compares,
// signed-immediate mode otherwise. Merged-diff residue resolved: the
// superseded ImmediateMode declaration (which would shadow/conflict) is gone.
void VisitWord32Compare(InstructionSelector* selector, Node* node,
                        FlagsContinuation* cont) {
  OperandModes mode =
      (CompareLogical(cont) ? OperandMode::kUint32Imm : OperandMode::kInt32Imm);
  VisitWordCompare(selector, node, kS390_Cmp32, cont, false, mode);
}
#if V8_TARGET_ARCH_S390X
// 64-bit compare. Merged-diff residue resolved; note this also fixes the old
// line's oddity (kUint32Imm selected for both branches) — signed compares now
// use the signed immediate mode.
void VisitWord64Compare(InstructionSelector* selector, Node* node,
                        FlagsContinuation* cont) {
  OperandModes mode =
      (CompareLogical(cont) ? OperandMode::kUint32Imm : OperandMode::kInt32Imm);
  VisitWordCompare(selector, node, kS390_Cmp64, cont, false, mode);
}
#endif
@ -1571,7 +1816,7 @@ void VisitWordCompareZero(InstructionSelector* selector, Node* user,
return VisitWord32Compare(selector, value, cont);
case IrOpcode::kWord32And:
return VisitWordCompare(selector, value, kS390_Tst64, cont,
true, kUint32Imm);
true, OperandMode::kUint32Imm);
default:
break;
}
@ -1605,7 +1850,7 @@ void VisitWordCompareZero(InstructionSelector* selector, Node* user,
return VisitWord64Compare(selector, value, cont);
case IrOpcode::kWord64And:
return VisitWordCompare(selector, value, kS390_Tst64, cont,
true, kUint32Imm);
true, OperandMode::kUint32Imm);
default:
break;
}
@ -1659,24 +1904,28 @@ void VisitWordCompareZero(InstructionSelector* selector, Node* user,
switch (node->opcode()) {
case IrOpcode::kInt32AddWithOverflow:
cont->OverwriteAndNegateIfEqual(kOverflow);
return VisitBinop<Int32BinopMatcher>(
selector, node, kS390_Add32, kInt32Imm, cont);
return VisitBin32op(selector, node, kS390_Add32, AddOperandMode,
cont);
case IrOpcode::kInt32SubWithOverflow:
cont->OverwriteAndNegateIfEqual(kOverflow);
return VisitBinop<Int32BinopMatcher>(
selector, node, kS390_Sub32, kInt32Imm_Negate, cont);
return VisitBin32op(selector, node, kS390_Sub32, SubOperandMode,
cont);
case IrOpcode::kInt32MulWithOverflow:
cont->OverwriteAndNegateIfEqual(kNotEqual);
return EmitInt32MulWithOverflow(selector, node, cont);
return VisitBin32op(
selector, node, kS390_Mul32WithOverflow,
OperandMode::kInt32Imm | OperandMode::kAllowDistinctOps,
cont);
#if V8_TARGET_ARCH_S390X
case IrOpcode::kInt64AddWithOverflow:
cont->OverwriteAndNegateIfEqual(kOverflow);
return VisitBinop<Int64BinopMatcher>(
selector, node, kS390_Add64, kInt32Imm, cont);
selector, node, kS390_Add64, OperandMode::kInt32Imm, cont);
case IrOpcode::kInt64SubWithOverflow:
cont->OverwriteAndNegateIfEqual(kOverflow);
return VisitBinop<Int64BinopMatcher>(
selector, node, kS390_Sub64, kInt32Imm_Negate, cont);
selector, node, kS390_Sub64, OperandMode::kInt32Imm_Negate,
cont);
#endif
default:
break;
@ -1688,7 +1937,7 @@ void VisitWordCompareZero(InstructionSelector* selector, Node* user,
return VisitWord32Compare(selector, value, cont);
case IrOpcode::kWord32And:
return VisitWordCompare(selector, value, kS390_Tst32, cont, true,
kUint32Imm);
OperandMode::kUint32Imm);
// TODO(mbrandy): Handle?
// case IrOpcode::kInt32Add:
// case IrOpcode::kWord32Or:
@ -1702,7 +1951,7 @@ void VisitWordCompareZero(InstructionSelector* selector, Node* user,
return VisitWord64Compare(selector, value, cont);
case IrOpcode::kWord64And:
return VisitWordCompare(selector, value, kS390_Tst64, cont, true,
kUint32Imm);
OperandMode::kUint32Imm);
// TODO(mbrandy): Handle?
// case IrOpcode::kInt64Add:
// case IrOpcode::kWord64Or:
@ -1780,9 +2029,14 @@ void InstructionSelector::VisitSwitch(Node* node, const SwitchInfo& sw) {
InstructionOperand index_operand = value_operand;
if (sw.min_value) {
index_operand = g.TempRegister();
Emit(kS390_Sub32, index_operand, value_operand,
g.TempImmediate(sw.min_value));
Emit(kS390_Lay | AddressingModeField::encode(kMode_MRI), index_operand,
value_operand, g.TempImmediate(-sw.min_value));
}
#if V8_TARGET_ARCH_S390X
InstructionOperand index_operand_zero_ext = g.TempRegister();
Emit(kS390_Uint32ToUint64, index_operand_zero_ext, index_operand);
index_operand = index_operand_zero_ext;
#endif
// Generate a table lookup.
return EmitTableSwitch(sw, index_operand);
}

View File

@ -1892,6 +1892,10 @@ void Assembler::msg(Register r1, const MemOperand& opnd) {
rxy_form(MSG, r1, opnd.rx(), opnd.rb(), opnd.offset());
}
// Multiply Single (64 <- 64 * sign-extended 32-bit storage operand):
// emits MSGF in RXY form (see the simulator's EVALUATE(MSGF) for semantics).
void Assembler::msgf(Register r1, const MemOperand& opnd) {
  rxy_form(MSGF, r1, opnd.rx(), opnd.rb(), opnd.offset());
}
// --------------------------
// 32-bit Divide Instructions
// --------------------------
@ -1902,7 +1906,15 @@ void Assembler::d(Register r1, const MemOperand& opnd) {
// Divide Logical Register-Storage (32<-64)
void Assembler::dl(Register r1, const MemOperand& opnd) {
  // Merged-diff residue resolved: DL was being emitted twice (once via the
  // old rx_form, once via rxy_form). DL takes the long-displacement RXY
  // encoding, so only the rxy_form emission is kept.
  rxy_form(DL, r1, opnd.rx(), opnd.rb(), opnd.offset());
}
// Divide Single (64): emits DSG in RXY form; r1 must designate an even/odd
// register pair (see the simulator's EVALUATE(DSGF)/DSGR for pair semantics).
void Assembler::dsg(Register r1, const MemOperand& opnd) {
  rxy_form(DSG, r1, opnd.rx(), opnd.rb(), opnd.offset());
}
// Divide Single (64 <- 64 / sign-extended 32-bit storage operand): emits DSGF
// in RXY form; remainder -> r1, quotient -> r1+1 (see EVALUATE(DSGF)).
void Assembler::dsgf(Register r1, const MemOperand& opnd) {
  rxy_form(DSGF, r1, opnd.rx(), opnd.rb(), opnd.offset());
}
// --------------------

View File

@ -988,6 +988,9 @@ class Assembler : public AssemblerBase {
RI1_FORM(nill);
RI1_FORM(oill);
RXY_FORM(pfd);
RXY_FORM(dsgf);
RXY_FORM(msgf);
RXY_FORM(dsg);
RXE_FORM(sdb);
RXY_FORM(slgf);
RS1_FORM(srdl);

View File

@ -712,6 +712,9 @@ bool Decoder::DecodeFourByte(Instruction* instr) {
case XGRK:
Format(instr, "xgrk\t'r5,'r6,'r3");
break;
case CGFR:
Format(instr, "cgfr\t'r5,'r6");
break;
case CGR:
Format(instr, "cgr\t'r5,'r6");
break;
@ -775,6 +778,12 @@ bool Decoder::DecodeFourByte(Instruction* instr) {
case DSGR:
Format(instr, "dsgr\t'r5,'r6");
break;
case DSGFR:
Format(instr, "dsgfr\t'r5,'r6");
break;
case MSGFR:
Format(instr, "msgfr\t'r5,'r6");
break;
case LZDR:
Format(instr, "lzdr\t'f5");
break;
@ -1391,6 +1400,15 @@ bool Decoder::DecodeSixByte(Instruction* instr) {
case MSG:
Format(instr, "msg\t'r1,'d2('r2d,'r3)");
break;
case DSG:
Format(instr, "dsg\t'r1,'d2('r2d,'r3)");
break;
case DSGF:
Format(instr, "dsgf\t'r1,'d2('r2d,'r3)");
break;
case MSGF:
Format(instr, "msgf\t'r1,'d2('r2d,'r3)");
break;
case MSY:
Format(instr, "msy\t'r1,'d2('r2d,'r3)");
break;

View File

@ -3270,6 +3270,53 @@ void MacroAssembler::Mul32(Register dst, const Operand& src1) {
msfi(dst, src1);
}
// High 32 bits of the signed 32x32->64 product of src1 and the 32-bit
// storage operand src2; result ends up in the low word of dst.
// NOTE(review): lgfr clobbers dst before src2 is read — assumes dst does not
// alias src2's base/index registers; confirm at call sites.
void MacroAssembler::MulHigh32(Register dst, Register src1,
                               const MemOperand& src2) {
  lgfr(dst, src1);              // sign-extend src1 into 64-bit dst
  msgf(dst, src2);              // 64-bit product with sign-extended src2
  srlg(dst, dst, Operand(32));  // bring the product's high word into bits 0-31
}
// High 32 bits of the signed 32x32->64 product of src1 and src2; result in
// the low word of dst.
void MacroAssembler::MulHigh32(Register dst, Register src1, Register src2) {
  if (dst.is(src2)) {
    // lgfr below clobbers dst; multiplication commutes, so swap the operands
    // to keep src2 readable. (If dst aliases src1 instead, lgfr is a no-op
    // widening of dst itself and no swap is needed.)
    std::swap(src1, src2);
  }
  lgfr(dst, src1);              // sign-extend src1 into 64-bit dst
  msgfr(dst, src2);             // 64-bit product with sign-extended src2
  srlg(dst, dst, Operand(32));  // bring the product's high word into bits 0-31
}
// High 32 bits of the signed 32x32->64 product of src1 and an immediate;
// result in the low word of dst.
void MacroAssembler::MulHigh32(Register dst, Register src1,
                               const Operand& src2) {
  lgfr(dst, src1);              // sign-extend src1 into 64-bit dst
  msgfi(dst, src2);             // 64-bit product with the immediate
  srlg(dst, dst, Operand(32));  // bring the product's high word into bits 0-31
}
// 32x32 multiply whose condition code signals overflow: the full 64-bit
// product is computed, then cgfr compares it against the sign-extension of
// its own low 32 bits (see EVALUATE(CGFR)) — CC is "equal" iff the product
// fits in 32 bits, so CC != equal means overflow.
// NOTE(review): lgfr clobbers dst before src2 is read — assumes dst does not
// alias src2's base/index registers; confirm at call sites.
void MacroAssembler::Mul32WithOverflowIfCCUnequal(Register dst, Register src1,
                                                  const MemOperand& src2) {
  lgfr(dst, src1);
  msgf(dst, src2);
  cgfr(dst, dst);
}
// Register-register variant: 32x32 multiply; afterwards CC != equal iff the
// 64-bit product does not fit in 32 bits (cgfr compares the product with the
// sign-extension of its own low word — see EVALUATE(CGFR)).
void MacroAssembler::Mul32WithOverflowIfCCUnequal(Register dst, Register src1,
                                                  Register src2) {
  if (dst.is(src2)) {
    // lgfr below clobbers dst; swap so src2 stays readable (commutative).
    std::swap(src1, src2);
  }
  lgfr(dst, src1);
  msgfr(dst, src2);
  cgfr(dst, dst);
}
// Immediate variant: 32 x imm multiply; afterwards CC != equal iff the
// 64-bit product does not fit in 32 bits.
void MacroAssembler::Mul32WithOverflowIfCCUnequal(Register dst, Register src1,
                                                  const Operand& src2) {
  lgfr(dst, src1);
  msgfi(dst, src2);
  cgfr(dst, dst);
}
void MacroAssembler::Mul64(Register dst, const MemOperand& src1) {
if (is_int20(src1.offset())) {
msg(dst, src1);
@ -3362,6 +3409,12 @@ void MacroAssembler::Add32(Register dst, const Operand& opnd) {
afi(dst, opnd);
}
// Add 32-bit (Register dst = Register dst + Immediate opnd)
// Distinctly named wrapper so an unambiguous member-function pointer
// (RITypeInstr — see AssembleBinOp in the code generator) can select this
// overload; &MacroAssembler::Add32 alone would be ambiguous.
void MacroAssembler::Add32_RI(Register dst, const Operand& opnd) {
  // Just a wrapper for above
  Add32(dst, opnd);
}
// Add Pointer Size (Register dst = Register dst + Immediate opnd)
void MacroAssembler::AddP(Register dst, const Operand& opnd) {
#if V8_TARGET_ARCH_S390X
@ -3386,6 +3439,13 @@ void MacroAssembler::Add32(Register dst, Register src, const Operand& opnd) {
Add32(dst, opnd);
}
// Add 32-bit (Register dst = Register src + Immediate opnd)
// Distinctly named wrapper so an unambiguous member-function pointer
// (RRITypeInstr — see AssembleBinOp in the code generator) can select this
// overload; &MacroAssembler::Add32 alone would be ambiguous.
void MacroAssembler::Add32_RRI(Register dst, Register src,
                               const Operand& opnd) {
  // Just a wrapper for above
  Add32(dst, src, opnd);
}
// Add Pointer Size (Register dst = Register src + Immediate opnd)
void MacroAssembler::AddP(Register dst, Register src, const Operand& opnd) {
if (!dst.is(src)) {
@ -4134,12 +4194,24 @@ void MacroAssembler::Load(Register dst, const Operand& opnd) {
#else
lhi(dst, opnd);
#endif
} else {
} else if (is_int32(value)) {
#if V8_TARGET_ARCH_S390X
lgfi(dst, opnd);
#else
iilf(dst, opnd);
#endif
} else if (is_uint32(value)) {
#if V8_TARGET_ARCH_S390X
llilf(dst, opnd);
#else
iilf(dst, opnd);
#endif
} else {
int32_t hi_32 = static_cast<int64_t>(value) >> 32;
int32_t lo_32 = static_cast<int32_t>(value);
iihf(dst, Operand(hi_32));
iilf(dst, Operand(lo_32));
}
}

View File

@ -245,8 +245,10 @@ class MacroAssembler : public Assembler {
// Add (Register - Immediate)
void Add32(Register dst, const Operand& imm);
void Add32_RI(Register dst, const Operand& imm);
void AddP(Register dst, const Operand& imm);
void Add32(Register dst, Register src, const Operand& imm);
void Add32_RRI(Register dst, Register src, const Operand& imm);
void AddP(Register dst, Register src, const Operand& imm);
// Add (Register - Register)
@ -282,8 +284,12 @@ class MacroAssembler : public Assembler {
// Subtract (Register - Immediate)
void Sub32(Register dst, const Operand& imm);
void Sub32_RI(Register dst, const Operand& imm) { Sub32(dst, imm); }
void SubP(Register dst, const Operand& imm);
void Sub32(Register dst, Register src, const Operand& imm);
void Sub32_RRI(Register dst, Register src, const Operand& imm) {
Sub32(dst, src, imm);
}
void SubP(Register dst, Register src, const Operand& imm);
// Subtract (Register - Register)
@ -316,6 +322,14 @@ class MacroAssembler : public Assembler {
void Mul32(Register dst, const MemOperand& src1);
void Mul32(Register dst, Register src1);
void Mul32(Register dst, const Operand& src1);
void MulHigh32(Register dst, Register src1, const MemOperand& src2);
void MulHigh32(Register dst, Register src1, Register src2);
void MulHigh32(Register dst, Register src1, const Operand& src2);
void Mul32WithOverflowIfCCUnequal(Register dst, Register src1,
const MemOperand& src2);
void Mul32WithOverflowIfCCUnequal(Register dst, Register src1, Register src2);
void Mul32WithOverflowIfCCUnequal(Register dst, Register src1,
const Operand& src2);
void Mul64(Register dst, const MemOperand& src1);
void Mul64(Register dst, Register src1);
void Mul64(Register dst, const Operand& src1);

View File

@ -10117,15 +10117,26 @@ EVALUATE(SLGFR) {
}
EVALUATE(MSGFR) {
  // MULTIPLY SINGLE (64 <- 64 * sign-extended low 32 bits of r2).
  // Merged-diff residue resolved: the stale "UNIMPLEMENTED(); USE(instr);
  // return 0;" stub made the implementation below unreachable — removed.
  DCHECK_OPCODE(MSGFR);
  DECODE_RRE_INSTRUCTION(r1, r2);
  int64_t r1_val = get_register(r1);
  int64_t r2_val = static_cast<int64_t>(get_low_register<int32_t>(r2));
  int64_t product = r1_val * r2_val;
  set_register(r1, product);
  return length;
}
EVALUATE(DSGFR) {
  // DIVIDE SINGLE (64): dividend in r1+1, divisor is the sign-extended low
  // 32 bits of r2; remainder -> r1, quotient -> r1+1.
  // Merged-diff residue resolved: unreachable UNIMPLEMENTED stub removed.
  DCHECK_OPCODE(DSGFR);
  DECODE_RRE_INSTRUCTION(r1, r2);
  DCHECK(r1 % 2 == 0);  // r1 must designate an even/odd register pair
  int64_t r1_val = get_register(r1 + 1);
  int64_t r2_val = static_cast<int64_t>(get_low_register<int32_t>(r2));
  int64_t quotient = r1_val / r2_val;
  int64_t remainder = r1_val % r2_val;
  set_register(r1, remainder);
  set_register(r1 + 1, quotient);
  return length;
}
EVALUATE(KMAC) {
@ -10201,9 +10212,13 @@ EVALUATE(KMC) {
}
EVALUATE(CGFR) {
  // COMPARE (64): compares 64-bit r1 with the sign-extended low 32 bits of
  // r2 and sets the condition code.
  // Merged-diff residue resolved: unreachable UNIMPLEMENTED stub removed.
  DCHECK_OPCODE(CGFR);
  DECODE_RRE_INSTRUCTION(r1, r2);
  int64_t r1_val = get_register(r1);
  int64_t r2_val = static_cast<int64_t>(get_low_register<int32_t>(r2));
  SetS390ConditionCode<int64_t>(r1_val, r2_val);
  return length;
}
EVALUATE(KIMD) {
@ -11031,15 +11046,34 @@ EVALUATE(SLGF) {
}
EVALUATE(MSGF) {
  // MULTIPLY SINGLE (64 <- 64 * sign-extended 32-bit storage operand).
  // Merged-diff residue resolved: unreachable UNIMPLEMENTED stub removed.
  DCHECK_OPCODE(MSGF);
  DECODE_RXY_A_INSTRUCTION(r1, x2, b2, d2);
  int64_t b2_val = (b2 == 0) ? 0 : get_register(b2);
  int64_t x2_val = (x2 == 0) ? 0 : get_register(x2);
  intptr_t d2_val = d2;
  // ReadW returns the 32-bit word; the cast sign-extends it to 64 bits.
  int64_t mem_val =
      static_cast<int64_t>(ReadW(b2_val + d2_val + x2_val, instr));
  int64_t r1_val = get_register(r1);
  int64_t product = r1_val * mem_val;
  set_register(r1, product);
  return length;
}
EVALUATE(DSGF) {
  // DIVIDE SINGLE (64): dividend in r1+1, divisor is the sign-extended
  // 32-bit storage operand; remainder -> r1, quotient -> r1+1.
  // Merged-diff residue resolved: unreachable UNIMPLEMENTED stub removed.
  DCHECK_OPCODE(DSGF);
  DECODE_RXY_A_INSTRUCTION(r1, x2, b2, d2);
  DCHECK(r1 % 2 == 0);  // r1 must designate an even/odd register pair
  int64_t b2_val = (b2 == 0) ? 0 : get_register(b2);
  int64_t x2_val = (x2 == 0) ? 0 : get_register(x2);
  intptr_t d2_val = d2;
  int64_t mem_val =
      static_cast<int64_t>(ReadW(b2_val + d2_val + x2_val, instr));
  int64_t r1_val = get_register(r1 + 1);
  int64_t quotient = r1_val / mem_val;
  int64_t remainder = r1_val % mem_val;
  set_register(r1, remainder);
  set_register(r1 + 1, quotient);
  return length;
}
EVALUATE(LRVG) {
@ -11598,9 +11632,20 @@ EVALUATE(ML) {
}
EVALUATE(DL) {
  // DIVIDE LOGICAL (32 <- 64): unsigned divide by a 32-bit storage operand;
  // remainder -> low word of r1, quotient -> low word of r1+1.
  // Fixes: DCHECK_OPCODE(ML) was a copy/paste from EVALUATE(ML) — this
  // handler decodes DL, so it must check DL. Also resolves merged-diff
  // residue (unreachable UNIMPLEMENTED stub removed).
  DCHECK_OPCODE(DL);
  DECODE_RXY_A_INSTRUCTION(r1, x2, b2, d2);
  int64_t x2_val = (x2 == 0) ? 0 : get_register(x2);
  int64_t b2_val = (b2 == 0) ? 0 : get_register(b2);
  DCHECK(r1 % 2 == 0);  // r1 must designate an even/odd register pair
  uint32_t mem_val = ReadWU(b2_val + x2_val + d2, instr);
  // NOTE(review): only the low word (r1+1) of the 64-bit dividend pair is
  // used; this assumes the high word in r1 is zero at this point — confirm
  // against the code generator's use of DL.
  uint32_t r1_val = get_low_register<uint32_t>(r1 + 1);
  uint64_t quotient =
      static_cast<uint64_t>(r1_val) / static_cast<uint64_t>(mem_val);
  uint64_t remainder =
      static_cast<uint64_t>(r1_val) % static_cast<uint64_t>(mem_val);
  set_low_register(r1, remainder);
  set_low_register(r1 + 1, quotient);
  return length;
}
EVALUATE(ALC) {