[arm] Implement I64Atomic operations on Arm
- Implement all the I64Atomic operations on ARM
- Change assembler methods to use Registers instead of memory operands
- Move atomics64 test up to be tested on all archs, disable tests on MIPS

BUG:v8:6532
Change-Id: I91bd42fa819f194be15c719266c36230f9c65db8
Reviewed-on: https://chromium-review.googlesource.com/1180211
Commit-Queue: Deepti Gandluri <gdeepti@chromium.org>
Reviewed-by: Bill Budge <bbudge@chromium.org>
Reviewed-by: Ben Smith <binji@chromium.org>
Cr-Commit-Position: refs/heads/master@{#55416}
parent 4ac19c3882
commit 8fe01fea4e
@@ -2240,29 +2240,27 @@ void Assembler::strexh(Register src1, Register src2, Register dst,
0xF9 * B4 | src2.code());
}

void Assembler::ldrexd(Register dst1, Register dst2, const MemOperand& src,
void Assembler::ldrexd(Register dst1, Register dst2, Register src,
Condition cond) {
// cond(31-28) | 00011011(27-20) | Rn(19-16) | Rt(15-12) | 111110011111(11-0)
DCHECK(src.rm() == no_reg);
DCHECK(dst1 != lr); // r14.
// The pair of destination registers is restricted to being an even-numbered
// register and the odd-numbered register that immediately follows it.
DCHECK_EQ(0, dst1.code() % 2);
DCHECK_EQ(dst1.code() + 1, dst2.code());
emit(cond | B24 | B23 | B21 | B20 | src.rn_.code() * B16 | dst1.code() * B12 |
emit(cond | B24 | B23 | B21 | B20 | src.code() * B16 | dst1.code() * B12 |
0xF9F);
}

void Assembler::strexd(Register res, Register src1, Register src2,
const MemOperand& dst, Condition cond) {
void Assembler::strexd(Register res, Register src1, Register src2, Register dst,
Condition cond) {
// cond(31-28) | 00011010(27-20) | Rn(19-16) | Rt(15-12) | 111110011111(11-0)
DCHECK(dst.rm() == no_reg);
DCHECK(src1 != lr); // r14.
// The pair of source registers is restricted to being an even-numbered
// register and the odd-numbered register that immediately follows it.
DCHECK_EQ(0, src1.code() % 2);
DCHECK_EQ(src1.code() + 1, src2.code());
emit(cond | B24 | B23 | B21 | dst.rn_.code() * B16 | res.code() * B12 |
emit(cond | B24 | B23 | B21 | dst.code() * B16 | res.code() * B12 |
0xF9 * B4 | src1.code());
}
@@ -930,9 +930,8 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase {
void strexb(Register src1, Register src2, Register dst, Condition cond = al);
void ldrexh(Register dst, Register src, Condition cond = al);
void strexh(Register src1, Register src2, Register dst, Condition cond = al);
void ldrexd(Register dst1, Register dst2, const MemOperand& src,
Condition cond = al);
void strexd(Register res, Register src1, Register src2, const MemOperand& dst,
void ldrexd(Register dst1, Register dst2, Register src, Condition cond = al);
void strexd(Register res, Register src1, Register src2, Register dst,
Condition cond = al);

// Preload instructions
@@ -416,6 +416,48 @@ void ComputePoisonedAddressForLoad(CodeGenerator* codegen,
__ dmb(ISH); \
} while (0)

#define ASSEMBLE_ATOMIC64_ARITH_BINOP(instr1, instr2) \
do { \
Label binop; \
__ add(i.TempRegister(0), i.InputRegister(2), i.InputRegister(3)); \
__ dmb(ISH); \
__ bind(&binop); \
__ ldrexd(i.OutputRegister(0), i.OutputRegister(1), i.TempRegister(0)); \
__ instr1(i.TempRegister(1), i.OutputRegister(0), i.InputRegister(0), \
SBit::SetCC); \
__ instr2(i.TempRegister(2), i.OutputRegister(1), \
Operand(i.InputRegister(1))); \
DCHECK_EQ(LeaveCC, i.OutputSBit()); \
__ strexd(i.TempRegister(3), i.TempRegister(1), i.TempRegister(2), \
i.TempRegister(0)); \
__ teq(i.TempRegister(3), Operand(0)); \
__ b(ne, &binop); \
__ dmb(ISH); \
} while (0)

#define ASSEMBLE_ATOMIC64_LOGIC_BINOP(instr) \
do { \
Label binop; \
__ add(i.TempRegister(0), i.InputRegister(2), i.InputRegister(3)); \
__ dmb(ISH); \
__ bind(&binop); \
__ ldrexd(i.OutputRegister(0), i.OutputRegister(1), i.TempRegister(0)); \
__ instr(i.TempRegister(1), i.OutputRegister(0), \
Operand(i.InputRegister(0))); \
__ instr(i.TempRegister(2), i.OutputRegister(1), \
Operand(i.InputRegister(1))); \
__ strexd(i.TempRegister(3), i.TempRegister(1), i.TempRegister(2), \
i.TempRegister(0)); \
__ teq(i.TempRegister(3), Operand(0)); \
__ b(ne, &binop); \
__ dmb(ISH); \
} while (0)

#define ATOMIC_NARROW_OP_CLEAR_HIGH_WORD(op) \
if (arch_opcode == kArmWord64AtomicNarrow##op) { \
__ mov(i.OutputRegister(1), Operand(0)); \
}

#define ASSEMBLE_IEEE754_BINOP(name) \
do { \
/* TODO(bmeurer): We should really get rid of this special instruction, */ \
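The ASSEMBLE_ATOMIC64_* macros above emit the ARMv7 load-exclusive/store-exclusive retry loop for a 64-bit read-modify-write, with dmb barriers on both sides. A rough C++ analogue of what such a loop computes, written with compiler builtins instead of raw ldrexd/strexd (an illustrative sketch, not V8 code; the helper name is made up):

#include <cstdint>

// Returns the old value, as i64.atomic.rmw.add does. On ARMv7 a compiler
// lowers this compare-exchange loop to dmb + ldrexd/strexd + dmb, the same
// shape the macro emits directly (adds/adc for the low/high words).
uint64_t AtomicAdd64(uint64_t* addr, uint64_t value) {
  uint64_t expected = __atomic_load_n(addr, __ATOMIC_RELAXED);
  while (!__atomic_compare_exchange_n(addr, &expected, expected + value,
                                      /*weak=*/false, __ATOMIC_SEQ_CST,
                                      __ATOMIC_SEQ_CST)) {
    // On failure `expected` is reloaded with the current value; retry.
  }
  return expected;
}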
@@ -2628,7 +2670,6 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
case kWord32AtomicLoadWord32:
ASSEMBLE_ATOMIC_LOAD_INTEGER(ldr);
break;

case kWord32AtomicStoreWord8:
ASSEMBLE_ATOMIC_STORE_INTEGER(strb);
break;
@@ -2643,17 +2684,23 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
__ sxtb(i.OutputRegister(0), i.OutputRegister(0));
break;
case kWord32AtomicExchangeUint8:
case kArmWord64AtomicNarrowExchangeUint8:
ASSEMBLE_ATOMIC_EXCHANGE_INTEGER(ldrexb, strexb);
ATOMIC_NARROW_OP_CLEAR_HIGH_WORD(ExchangeUint8);
break;
case kWord32AtomicExchangeInt16:
ASSEMBLE_ATOMIC_EXCHANGE_INTEGER(ldrexh, strexh);
__ sxth(i.OutputRegister(0), i.OutputRegister(0));
break;
case kWord32AtomicExchangeUint16:
case kArmWord64AtomicNarrowExchangeUint16:
ASSEMBLE_ATOMIC_EXCHANGE_INTEGER(ldrexh, strexh);
ATOMIC_NARROW_OP_CLEAR_HIGH_WORD(ExchangeUint16);
break;
case kWord32AtomicExchangeWord32:
case kArmWord64AtomicNarrowExchangeUint32:
ASSEMBLE_ATOMIC_EXCHANGE_INTEGER(ldrex, strex);
ATOMIC_NARROW_OP_CLEAR_HIGH_WORD(ExchangeUint32);
break;
case kWord32AtomicCompareExchangeInt8:
__ add(i.TempRegister(1), i.InputRegister(0), i.InputRegister(1));
@@ -2663,10 +2710,12 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
__ sxtb(i.OutputRegister(0), i.OutputRegister(0));
break;
case kWord32AtomicCompareExchangeUint8:
case kArmWord64AtomicNarrowCompareExchangeUint8:
__ add(i.TempRegister(1), i.InputRegister(0), i.InputRegister(1));
__ uxtb(i.TempRegister(2), i.InputRegister(2));
ASSEMBLE_ATOMIC_COMPARE_EXCHANGE_INTEGER(ldrexb, strexb,
i.TempRegister(2));
ATOMIC_NARROW_OP_CLEAR_HIGH_WORD(CompareExchangeUint8);
break;
case kWord32AtomicCompareExchangeInt16:
__ add(i.TempRegister(1), i.InputRegister(0), i.InputRegister(1));
@@ -2676,15 +2725,19 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
__ sxth(i.OutputRegister(0), i.OutputRegister(0));
break;
case kWord32AtomicCompareExchangeUint16:
case kArmWord64AtomicNarrowCompareExchangeUint16:
__ add(i.TempRegister(1), i.InputRegister(0), i.InputRegister(1));
__ uxth(i.TempRegister(2), i.InputRegister(2));
ASSEMBLE_ATOMIC_COMPARE_EXCHANGE_INTEGER(ldrexh, strexh,
i.TempRegister(2));
ATOMIC_NARROW_OP_CLEAR_HIGH_WORD(CompareExchangeUint16);
break;
case kWord32AtomicCompareExchangeWord32:
case kArmWord64AtomicNarrowCompareExchangeUint32:
__ add(i.TempRegister(1), i.InputRegister(0), i.InputRegister(1));
ASSEMBLE_ATOMIC_COMPARE_EXCHANGE_INTEGER(ldrex, strex,
i.InputRegister(2));
ATOMIC_NARROW_OP_CLEAR_HIGH_WORD(CompareExchangeUint32);
break;
#define ATOMIC_BINOP_CASE(op, inst) \
case kWord32Atomic##op##Int8: \
@@ -2692,17 +2745,23 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
__ sxtb(i.OutputRegister(0), i.OutputRegister(0)); \
break; \
case kWord32Atomic##op##Uint8: \
case kArmWord64AtomicNarrow##op##Uint8: \
ASSEMBLE_ATOMIC_BINOP(ldrexb, strexb, inst); \
ATOMIC_NARROW_OP_CLEAR_HIGH_WORD(op##Uint8); \
break; \
case kWord32Atomic##op##Int16: \
ASSEMBLE_ATOMIC_BINOP(ldrexh, strexh, inst); \
__ sxth(i.OutputRegister(0), i.OutputRegister(0)); \
break; \
case kWord32Atomic##op##Uint16: \
case kArmWord64AtomicNarrow##op##Uint16: \
ASSEMBLE_ATOMIC_BINOP(ldrexh, strexh, inst); \
ATOMIC_NARROW_OP_CLEAR_HIGH_WORD(op##Uint16); \
break; \
case kWord32Atomic##op##Word32: \
case kArmWord64AtomicNarrow##op##Uint32: \
ASSEMBLE_ATOMIC_BINOP(ldrex, strex, inst); \
ATOMIC_NARROW_OP_CLEAR_HIGH_WORD(op##Uint32); \
break;
ATOMIC_BINOP_CASE(Add, add)
ATOMIC_BINOP_CASE(Sub, sub)
@@ -2710,11 +2769,81 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
ATOMIC_BINOP_CASE(Or, orr)
ATOMIC_BINOP_CASE(Xor, eor)
#undef ATOMIC_BINOP_CASE
case kArmWord32AtomicPairLoad:
__ add(i.TempRegister(0), i.InputRegister(0), i.InputRegister(1));
__ ldrexd(i.OutputRegister(0), i.OutputRegister(1), i.TempRegister(0));
__ dmb(ISH);
break;
case kArmWord32AtomicPairStore: {
Label store;
__ add(i.TempRegister(0), i.InputRegister(0), i.InputRegister(1));
__ dmb(ISH);
__ bind(&store);
__ ldrexd(i.TempRegister(1), i.TempRegister(2), i.TempRegister(0));
__ strexd(i.TempRegister(1), i.InputRegister(2), i.InputRegister(3),
i.TempRegister(0));
__ teq(i.TempRegister(1), Operand(0));
__ b(ne, &store);
__ dmb(ISH);
break;
}
#define ATOMIC_ARITH_BINOP_CASE(op, instr1, instr2) \
case kArmWord32AtomicPair##op: { \
ASSEMBLE_ATOMIC64_ARITH_BINOP(instr1, instr2); \
break; \
}
ATOMIC_ARITH_BINOP_CASE(Add, add, adc)
ATOMIC_ARITH_BINOP_CASE(Sub, sub, sbc)
#undef ATOMIC_ARITH_BINOP_CASE
#define ATOMIC_LOGIC_BINOP_CASE(op, instr) \
case kArmWord32AtomicPair##op: { \
ASSEMBLE_ATOMIC64_LOGIC_BINOP(instr); \
break; \
}
ATOMIC_LOGIC_BINOP_CASE(And, and_)
ATOMIC_LOGIC_BINOP_CASE(Or, orr)
ATOMIC_LOGIC_BINOP_CASE(Xor, eor)
case kArmWord32AtomicPairExchange: {
Label exchange;
__ add(i.TempRegister(0), i.InputRegister(2), i.InputRegister(3));
__ dmb(ISH);
__ bind(&exchange);
__ ldrexd(i.OutputRegister(0), i.OutputRegister(1), i.TempRegister(0));
__ strexd(i.TempRegister(1), i.InputRegister(0), i.InputRegister(1),
i.TempRegister(0));
__ teq(i.TempRegister(1), Operand(0));
__ b(ne, &exchange);
__ dmb(ISH);
break;
}
case kArmWord32AtomicPairCompareExchange: {
__ add(i.TempRegister(0), i.InputRegister(4), i.InputRegister(5));
Label compareExchange;
Label exit;
__ dmb(ISH);
__ bind(&compareExchange);
__ ldrexd(i.OutputRegister(0), i.OutputRegister(1), i.TempRegister(0));
__ teq(i.InputRegister(0), Operand(i.OutputRegister(0)));
__ b(ne, &exit);
__ teq(i.InputRegister(1), Operand(i.OutputRegister(1)));
__ b(ne, &exit);
__ strexd(i.TempRegister(1), i.InputRegister(2), i.InputRegister(3),
i.TempRegister(0));
__ teq(i.TempRegister(1), Operand(0));
__ b(ne, &compareExchange);
__ bind(&exit);
__ dmb(ISH);
break;
}
#undef ATOMIC_LOGIC_BINOP_CASE
#undef ATOMIC_NARROW_OP_CLEAR_HIGH_WORD
#undef ASSEMBLE_ATOMIC_LOAD_INTEGER
#undef ASSEMBLE_ATOMIC_STORE_INTEGER
#undef ASSEMBLE_ATOMIC_EXCHANGE_INTEGER
#undef ASSEMBLE_ATOMIC_COMPARE_EXCHANGE_INTEGER
#undef ASSEMBLE_ATOMIC_BINOP
#undef ASSEMBLE_ATOMIC64_ARITH_BINOP
#undef ASSEMBLE_ATOMIC64_LOGIC_BINOP
#undef ASSEMBLE_IEEE754_BINOP
#undef ASSEMBLE_IEEE754_UNOP
#undef ASSEMBLE_NEON_NARROWING_OP
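The kArmWord32AtomicPairCompareExchange sequence above attempts the exclusive store only when both halves of the expected value match the loaded pair, the loaded pair is always the result, and the loop repeats only if the exclusive store itself fails. A rough C++ analogue using compiler builtins (an illustrative sketch, not V8 code; the helper name is made up):

#include <cstdint>

// Mirrors i64.atomic.rmw.cmpxchg: returns the value observed in memory,
// which equals `expected` exactly when the swap took place.
uint64_t AtomicCompareExchange64(uint64_t* addr, uint64_t expected,
                                 uint64_t desired) {
  __atomic_compare_exchange_n(addr, &expected, desired, /*weak=*/false,
                              __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST);
  return expected;  // on failure, updated to the value currently in memory
}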
|
@ -11,265 +11,295 @@ namespace compiler {
|
||||
|
||||
// ARM-specific opcodes that specify which assembly sequence to emit.
|
||||
// Most opcodes specify a single instruction.
|
||||
#define TARGET_ARCH_OPCODE_LIST(V) \
|
||||
V(ArmAdd) \
|
||||
V(ArmAnd) \
|
||||
V(ArmBic) \
|
||||
V(ArmClz) \
|
||||
V(ArmCmp) \
|
||||
V(ArmCmn) \
|
||||
V(ArmTst) \
|
||||
V(ArmTeq) \
|
||||
V(ArmOrr) \
|
||||
V(ArmEor) \
|
||||
V(ArmSub) \
|
||||
V(ArmRsb) \
|
||||
V(ArmMul) \
|
||||
V(ArmMla) \
|
||||
V(ArmMls) \
|
||||
V(ArmSmull) \
|
||||
V(ArmSmmul) \
|
||||
V(ArmSmmla) \
|
||||
V(ArmUmull) \
|
||||
V(ArmSdiv) \
|
||||
V(ArmUdiv) \
|
||||
V(ArmMov) \
|
||||
V(ArmMvn) \
|
||||
V(ArmBfc) \
|
||||
V(ArmUbfx) \
|
||||
V(ArmSbfx) \
|
||||
V(ArmSxtb) \
|
||||
V(ArmSxth) \
|
||||
V(ArmSxtab) \
|
||||
V(ArmSxtah) \
|
||||
V(ArmUxtb) \
|
||||
V(ArmUxth) \
|
||||
V(ArmUxtab) \
|
||||
V(ArmRbit) \
|
||||
V(ArmRev) \
|
||||
V(ArmUxtah) \
|
||||
V(ArmAddPair) \
|
||||
V(ArmSubPair) \
|
||||
V(ArmMulPair) \
|
||||
V(ArmLslPair) \
|
||||
V(ArmLsrPair) \
|
||||
V(ArmAsrPair) \
|
||||
V(ArmVcmpF32) \
|
||||
V(ArmVaddF32) \
|
||||
V(ArmVsubF32) \
|
||||
V(ArmVmulF32) \
|
||||
V(ArmVmlaF32) \
|
||||
V(ArmVmlsF32) \
|
||||
V(ArmVdivF32) \
|
||||
V(ArmVabsF32) \
|
||||
V(ArmVnegF32) \
|
||||
V(ArmVsqrtF32) \
|
||||
V(ArmVcmpF64) \
|
||||
V(ArmVaddF64) \
|
||||
V(ArmVsubF64) \
|
||||
V(ArmVmulF64) \
|
||||
V(ArmVmlaF64) \
|
||||
V(ArmVmlsF64) \
|
||||
V(ArmVdivF64) \
|
||||
V(ArmVmodF64) \
|
||||
V(ArmVabsF64) \
|
||||
V(ArmVnegF64) \
|
||||
V(ArmVsqrtF64) \
|
||||
V(ArmVrintmF32) \
|
||||
V(ArmVrintmF64) \
|
||||
V(ArmVrintpF32) \
|
||||
V(ArmVrintpF64) \
|
||||
V(ArmVrintzF32) \
|
||||
V(ArmVrintzF64) \
|
||||
V(ArmVrintaF64) \
|
||||
V(ArmVrintnF32) \
|
||||
V(ArmVrintnF64) \
|
||||
V(ArmVcvtF32F64) \
|
||||
V(ArmVcvtF64F32) \
|
||||
V(ArmVcvtF32S32) \
|
||||
V(ArmVcvtF32U32) \
|
||||
V(ArmVcvtF64S32) \
|
||||
V(ArmVcvtF64U32) \
|
||||
V(ArmVcvtS32F32) \
|
||||
V(ArmVcvtU32F32) \
|
||||
V(ArmVcvtS32F64) \
|
||||
V(ArmVcvtU32F64) \
|
||||
V(ArmVmovU32F32) \
|
||||
V(ArmVmovF32U32) \
|
||||
V(ArmVmovLowU32F64) \
|
||||
V(ArmVmovLowF64U32) \
|
||||
V(ArmVmovHighU32F64) \
|
||||
V(ArmVmovHighF64U32) \
|
||||
V(ArmVmovF64U32U32) \
|
||||
V(ArmVmovU32U32F64) \
|
||||
V(ArmVldrF32) \
|
||||
V(ArmVstrF32) \
|
||||
V(ArmVldrF64) \
|
||||
V(ArmVld1F64) \
|
||||
V(ArmVstrF64) \
|
||||
V(ArmVst1F64) \
|
||||
V(ArmVld1S128) \
|
||||
V(ArmVst1S128) \
|
||||
V(ArmFloat32Max) \
|
||||
V(ArmFloat64Max) \
|
||||
V(ArmFloat32Min) \
|
||||
V(ArmFloat64Min) \
|
||||
V(ArmFloat64SilenceNaN) \
|
||||
V(ArmLdrb) \
|
||||
V(ArmLdrsb) \
|
||||
V(ArmStrb) \
|
||||
V(ArmLdrh) \
|
||||
V(ArmLdrsh) \
|
||||
V(ArmStrh) \
|
||||
V(ArmLdr) \
|
||||
V(ArmStr) \
|
||||
V(ArmPush) \
|
||||
V(ArmPoke) \
|
||||
V(ArmPeek) \
|
||||
V(ArmDsbIsb) \
|
||||
V(ArmF32x4Splat) \
|
||||
V(ArmF32x4ExtractLane) \
|
||||
V(ArmF32x4ReplaceLane) \
|
||||
V(ArmF32x4SConvertI32x4) \
|
||||
V(ArmF32x4UConvertI32x4) \
|
||||
V(ArmF32x4Abs) \
|
||||
V(ArmF32x4Neg) \
|
||||
V(ArmF32x4RecipApprox) \
|
||||
V(ArmF32x4RecipSqrtApprox) \
|
||||
V(ArmF32x4Add) \
|
||||
V(ArmF32x4AddHoriz) \
|
||||
V(ArmF32x4Sub) \
|
||||
V(ArmF32x4Mul) \
|
||||
V(ArmF32x4Min) \
|
||||
V(ArmF32x4Max) \
|
||||
V(ArmF32x4Eq) \
|
||||
V(ArmF32x4Ne) \
|
||||
V(ArmF32x4Lt) \
|
||||
V(ArmF32x4Le) \
|
||||
V(ArmI32x4Splat) \
|
||||
V(ArmI32x4ExtractLane) \
|
||||
V(ArmI32x4ReplaceLane) \
|
||||
V(ArmI32x4SConvertF32x4) \
|
||||
V(ArmI32x4SConvertI16x8Low) \
|
||||
V(ArmI32x4SConvertI16x8High) \
|
||||
V(ArmI32x4Neg) \
|
||||
V(ArmI32x4Shl) \
|
||||
V(ArmI32x4ShrS) \
|
||||
V(ArmI32x4Add) \
|
||||
V(ArmI32x4AddHoriz) \
|
||||
V(ArmI32x4Sub) \
|
||||
V(ArmI32x4Mul) \
|
||||
V(ArmI32x4MinS) \
|
||||
V(ArmI32x4MaxS) \
|
||||
V(ArmI32x4Eq) \
|
||||
V(ArmI32x4Ne) \
|
||||
V(ArmI32x4GtS) \
|
||||
V(ArmI32x4GeS) \
|
||||
V(ArmI32x4UConvertF32x4) \
|
||||
V(ArmI32x4UConvertI16x8Low) \
|
||||
V(ArmI32x4UConvertI16x8High) \
|
||||
V(ArmI32x4ShrU) \
|
||||
V(ArmI32x4MinU) \
|
||||
V(ArmI32x4MaxU) \
|
||||
V(ArmI32x4GtU) \
|
||||
V(ArmI32x4GeU) \
|
||||
V(ArmI16x8Splat) \
|
||||
V(ArmI16x8ExtractLane) \
|
||||
V(ArmI16x8ReplaceLane) \
|
||||
V(ArmI16x8SConvertI8x16Low) \
|
||||
V(ArmI16x8SConvertI8x16High) \
|
||||
V(ArmI16x8Neg) \
|
||||
V(ArmI16x8Shl) \
|
||||
V(ArmI16x8ShrS) \
|
||||
V(ArmI16x8SConvertI32x4) \
|
||||
V(ArmI16x8Add) \
|
||||
V(ArmI16x8AddSaturateS) \
|
||||
V(ArmI16x8AddHoriz) \
|
||||
V(ArmI16x8Sub) \
|
||||
V(ArmI16x8SubSaturateS) \
|
||||
V(ArmI16x8Mul) \
|
||||
V(ArmI16x8MinS) \
|
||||
V(ArmI16x8MaxS) \
|
||||
V(ArmI16x8Eq) \
|
||||
V(ArmI16x8Ne) \
|
||||
V(ArmI16x8GtS) \
|
||||
V(ArmI16x8GeS) \
|
||||
V(ArmI16x8UConvertI8x16Low) \
|
||||
V(ArmI16x8UConvertI8x16High) \
|
||||
V(ArmI16x8ShrU) \
|
||||
V(ArmI16x8UConvertI32x4) \
|
||||
V(ArmI16x8AddSaturateU) \
|
||||
V(ArmI16x8SubSaturateU) \
|
||||
V(ArmI16x8MinU) \
|
||||
V(ArmI16x8MaxU) \
|
||||
V(ArmI16x8GtU) \
|
||||
V(ArmI16x8GeU) \
|
||||
V(ArmI8x16Splat) \
|
||||
V(ArmI8x16ExtractLane) \
|
||||
V(ArmI8x16ReplaceLane) \
|
||||
V(ArmI8x16Neg) \
|
||||
V(ArmI8x16Shl) \
|
||||
V(ArmI8x16ShrS) \
|
||||
V(ArmI8x16SConvertI16x8) \
|
||||
V(ArmI8x16Add) \
|
||||
V(ArmI8x16AddSaturateS) \
|
||||
V(ArmI8x16Sub) \
|
||||
V(ArmI8x16SubSaturateS) \
|
||||
V(ArmI8x16Mul) \
|
||||
V(ArmI8x16MinS) \
|
||||
V(ArmI8x16MaxS) \
|
||||
V(ArmI8x16Eq) \
|
||||
V(ArmI8x16Ne) \
|
||||
V(ArmI8x16GtS) \
|
||||
V(ArmI8x16GeS) \
|
||||
V(ArmI8x16ShrU) \
|
||||
V(ArmI8x16UConvertI16x8) \
|
||||
V(ArmI8x16AddSaturateU) \
|
||||
V(ArmI8x16SubSaturateU) \
|
||||
V(ArmI8x16MinU) \
|
||||
V(ArmI8x16MaxU) \
|
||||
V(ArmI8x16GtU) \
|
||||
V(ArmI8x16GeU) \
|
||||
V(ArmS128Zero) \
|
||||
V(ArmS128Dup) \
|
||||
V(ArmS128And) \
|
||||
V(ArmS128Or) \
|
||||
V(ArmS128Xor) \
|
||||
V(ArmS128Not) \
|
||||
V(ArmS128Select) \
|
||||
V(ArmS32x4ZipLeft) \
|
||||
V(ArmS32x4ZipRight) \
|
||||
V(ArmS32x4UnzipLeft) \
|
||||
V(ArmS32x4UnzipRight) \
|
||||
V(ArmS32x4TransposeLeft) \
|
||||
V(ArmS32x4TransposeRight) \
|
||||
V(ArmS32x4Shuffle) \
|
||||
V(ArmS16x8ZipLeft) \
|
||||
V(ArmS16x8ZipRight) \
|
||||
V(ArmS16x8UnzipLeft) \
|
||||
V(ArmS16x8UnzipRight) \
|
||||
V(ArmS16x8TransposeLeft) \
|
||||
V(ArmS16x8TransposeRight) \
|
||||
V(ArmS8x16ZipLeft) \
|
||||
V(ArmS8x16ZipRight) \
|
||||
V(ArmS8x16UnzipLeft) \
|
||||
V(ArmS8x16UnzipRight) \
|
||||
V(ArmS8x16TransposeLeft) \
|
||||
V(ArmS8x16TransposeRight) \
|
||||
V(ArmS8x16Concat) \
|
||||
V(ArmS8x16Shuffle) \
|
||||
V(ArmS32x2Reverse) \
|
||||
V(ArmS16x4Reverse) \
|
||||
V(ArmS16x2Reverse) \
|
||||
V(ArmS8x8Reverse) \
|
||||
V(ArmS8x4Reverse) \
|
||||
V(ArmS8x2Reverse) \
|
||||
V(ArmS1x4AnyTrue) \
|
||||
V(ArmS1x4AllTrue) \
|
||||
V(ArmS1x8AnyTrue) \
|
||||
V(ArmS1x8AllTrue) \
|
||||
V(ArmS1x16AnyTrue) \
|
||||
V(ArmS1x16AllTrue)
|
||||
#define TARGET_ARCH_OPCODE_LIST(V) \
|
||||
V(ArmAdd) \
|
||||
V(ArmAnd) \
|
||||
V(ArmBic) \
|
||||
V(ArmClz) \
|
||||
V(ArmCmp) \
|
||||
V(ArmCmn) \
|
||||
V(ArmTst) \
|
||||
V(ArmTeq) \
|
||||
V(ArmOrr) \
|
||||
V(ArmEor) \
|
||||
V(ArmSub) \
|
||||
V(ArmRsb) \
|
||||
V(ArmMul) \
|
||||
V(ArmMla) \
|
||||
V(ArmMls) \
|
||||
V(ArmSmull) \
|
||||
V(ArmSmmul) \
|
||||
V(ArmSmmla) \
|
||||
V(ArmUmull) \
|
||||
V(ArmSdiv) \
|
||||
V(ArmUdiv) \
|
||||
V(ArmMov) \
|
||||
V(ArmMvn) \
|
||||
V(ArmBfc) \
|
||||
V(ArmUbfx) \
|
||||
V(ArmSbfx) \
|
||||
V(ArmSxtb) \
|
||||
V(ArmSxth) \
|
||||
V(ArmSxtab) \
|
||||
V(ArmSxtah) \
|
||||
V(ArmUxtb) \
|
||||
V(ArmUxth) \
|
||||
V(ArmUxtab) \
|
||||
V(ArmRbit) \
|
||||
V(ArmRev) \
|
||||
V(ArmUxtah) \
|
||||
V(ArmAddPair) \
|
||||
V(ArmSubPair) \
|
||||
V(ArmMulPair) \
|
||||
V(ArmLslPair) \
|
||||
V(ArmLsrPair) \
|
||||
V(ArmAsrPair) \
|
||||
V(ArmVcmpF32) \
|
||||
V(ArmVaddF32) \
|
||||
V(ArmVsubF32) \
|
||||
V(ArmVmulF32) \
|
||||
V(ArmVmlaF32) \
|
||||
V(ArmVmlsF32) \
|
||||
V(ArmVdivF32) \
|
||||
V(ArmVabsF32) \
|
||||
V(ArmVnegF32) \
|
||||
V(ArmVsqrtF32) \
|
||||
V(ArmVcmpF64) \
|
||||
V(ArmVaddF64) \
|
||||
V(ArmVsubF64) \
|
||||
V(ArmVmulF64) \
|
||||
V(ArmVmlaF64) \
|
||||
V(ArmVmlsF64) \
|
||||
V(ArmVdivF64) \
|
||||
V(ArmVmodF64) \
|
||||
V(ArmVabsF64) \
|
||||
V(ArmVnegF64) \
|
||||
V(ArmVsqrtF64) \
|
||||
V(ArmVrintmF32) \
|
||||
V(ArmVrintmF64) \
|
||||
V(ArmVrintpF32) \
|
||||
V(ArmVrintpF64) \
|
||||
V(ArmVrintzF32) \
|
||||
V(ArmVrintzF64) \
|
||||
V(ArmVrintaF64) \
|
||||
V(ArmVrintnF32) \
|
||||
V(ArmVrintnF64) \
|
||||
V(ArmVcvtF32F64) \
|
||||
V(ArmVcvtF64F32) \
|
||||
V(ArmVcvtF32S32) \
|
||||
V(ArmVcvtF32U32) \
|
||||
V(ArmVcvtF64S32) \
|
||||
V(ArmVcvtF64U32) \
|
||||
V(ArmVcvtS32F32) \
|
||||
V(ArmVcvtU32F32) \
|
||||
V(ArmVcvtS32F64) \
|
||||
V(ArmVcvtU32F64) \
|
||||
V(ArmVmovU32F32) \
|
||||
V(ArmVmovF32U32) \
|
||||
V(ArmVmovLowU32F64) \
|
||||
V(ArmVmovLowF64U32) \
|
||||
V(ArmVmovHighU32F64) \
|
||||
V(ArmVmovHighF64U32) \
|
||||
V(ArmVmovF64U32U32) \
|
||||
V(ArmVmovU32U32F64) \
|
||||
V(ArmVldrF32) \
|
||||
V(ArmVstrF32) \
|
||||
V(ArmVldrF64) \
|
||||
V(ArmVld1F64) \
|
||||
V(ArmVstrF64) \
|
||||
V(ArmVst1F64) \
|
||||
V(ArmVld1S128) \
|
||||
V(ArmVst1S128) \
|
||||
V(ArmFloat32Max) \
|
||||
V(ArmFloat64Max) \
|
||||
V(ArmFloat32Min) \
|
||||
V(ArmFloat64Min) \
|
||||
V(ArmFloat64SilenceNaN) \
|
||||
V(ArmLdrb) \
|
||||
V(ArmLdrsb) \
|
||||
V(ArmStrb) \
|
||||
V(ArmLdrh) \
|
||||
V(ArmLdrsh) \
|
||||
V(ArmStrh) \
|
||||
V(ArmLdr) \
|
||||
V(ArmStr) \
|
||||
V(ArmPush) \
|
||||
V(ArmPoke) \
|
||||
V(ArmPeek) \
|
||||
V(ArmDsbIsb) \
|
||||
V(ArmF32x4Splat) \
|
||||
V(ArmF32x4ExtractLane) \
|
||||
V(ArmF32x4ReplaceLane) \
|
||||
V(ArmF32x4SConvertI32x4) \
|
||||
V(ArmF32x4UConvertI32x4) \
|
||||
V(ArmF32x4Abs) \
|
||||
V(ArmF32x4Neg) \
|
||||
V(ArmF32x4RecipApprox) \
|
||||
V(ArmF32x4RecipSqrtApprox) \
|
||||
V(ArmF32x4Add) \
|
||||
V(ArmF32x4AddHoriz) \
|
||||
V(ArmF32x4Sub) \
|
||||
V(ArmF32x4Mul) \
|
||||
V(ArmF32x4Min) \
|
||||
V(ArmF32x4Max) \
|
||||
V(ArmF32x4Eq) \
|
||||
V(ArmF32x4Ne) \
|
||||
V(ArmF32x4Lt) \
|
||||
V(ArmF32x4Le) \
|
||||
V(ArmI32x4Splat) \
|
||||
V(ArmI32x4ExtractLane) \
|
||||
V(ArmI32x4ReplaceLane) \
|
||||
V(ArmI32x4SConvertF32x4) \
|
||||
V(ArmI32x4SConvertI16x8Low) \
|
||||
V(ArmI32x4SConvertI16x8High) \
|
||||
V(ArmI32x4Neg) \
|
||||
V(ArmI32x4Shl) \
|
||||
V(ArmI32x4ShrS) \
|
||||
V(ArmI32x4Add) \
|
||||
V(ArmI32x4AddHoriz) \
|
||||
V(ArmI32x4Sub) \
|
||||
V(ArmI32x4Mul) \
|
||||
V(ArmI32x4MinS) \
|
||||
V(ArmI32x4MaxS) \
|
||||
V(ArmI32x4Eq) \
|
||||
V(ArmI32x4Ne) \
|
||||
V(ArmI32x4GtS) \
|
||||
V(ArmI32x4GeS) \
|
||||
V(ArmI32x4UConvertF32x4) \
|
||||
V(ArmI32x4UConvertI16x8Low) \
|
||||
V(ArmI32x4UConvertI16x8High) \
|
||||
V(ArmI32x4ShrU) \
|
||||
V(ArmI32x4MinU) \
|
||||
V(ArmI32x4MaxU) \
|
||||
V(ArmI32x4GtU) \
|
||||
V(ArmI32x4GeU) \
|
||||
V(ArmI16x8Splat) \
|
||||
V(ArmI16x8ExtractLane) \
|
||||
V(ArmI16x8ReplaceLane) \
|
||||
V(ArmI16x8SConvertI8x16Low) \
|
||||
V(ArmI16x8SConvertI8x16High) \
|
||||
V(ArmI16x8Neg) \
|
||||
V(ArmI16x8Shl) \
|
||||
V(ArmI16x8ShrS) \
|
||||
V(ArmI16x8SConvertI32x4) \
|
||||
V(ArmI16x8Add) \
|
||||
V(ArmI16x8AddSaturateS) \
|
||||
V(ArmI16x8AddHoriz) \
|
||||
V(ArmI16x8Sub) \
|
||||
V(ArmI16x8SubSaturateS) \
|
||||
V(ArmI16x8Mul) \
|
||||
V(ArmI16x8MinS) \
|
||||
V(ArmI16x8MaxS) \
|
||||
V(ArmI16x8Eq) \
|
||||
V(ArmI16x8Ne) \
|
||||
V(ArmI16x8GtS) \
|
||||
V(ArmI16x8GeS) \
|
||||
V(ArmI16x8UConvertI8x16Low) \
|
||||
V(ArmI16x8UConvertI8x16High) \
|
||||
V(ArmI16x8ShrU) \
|
||||
V(ArmI16x8UConvertI32x4) \
|
||||
V(ArmI16x8AddSaturateU) \
|
||||
V(ArmI16x8SubSaturateU) \
|
||||
V(ArmI16x8MinU) \
|
||||
V(ArmI16x8MaxU) \
|
||||
V(ArmI16x8GtU) \
|
||||
V(ArmI16x8GeU) \
|
||||
V(ArmI8x16Splat) \
|
||||
V(ArmI8x16ExtractLane) \
|
||||
V(ArmI8x16ReplaceLane) \
|
||||
V(ArmI8x16Neg) \
|
||||
V(ArmI8x16Shl) \
|
||||
V(ArmI8x16ShrS) \
|
||||
V(ArmI8x16SConvertI16x8) \
|
||||
V(ArmI8x16Add) \
|
||||
V(ArmI8x16AddSaturateS) \
|
||||
V(ArmI8x16Sub) \
|
||||
V(ArmI8x16SubSaturateS) \
|
||||
V(ArmI8x16Mul) \
|
||||
V(ArmI8x16MinS) \
|
||||
V(ArmI8x16MaxS) \
|
||||
V(ArmI8x16Eq) \
|
||||
V(ArmI8x16Ne) \
|
||||
V(ArmI8x16GtS) \
|
||||
V(ArmI8x16GeS) \
|
||||
V(ArmI8x16ShrU) \
|
||||
V(ArmI8x16UConvertI16x8) \
|
||||
V(ArmI8x16AddSaturateU) \
|
||||
V(ArmI8x16SubSaturateU) \
|
||||
V(ArmI8x16MinU) \
|
||||
V(ArmI8x16MaxU) \
|
||||
V(ArmI8x16GtU) \
|
||||
V(ArmI8x16GeU) \
|
||||
V(ArmS128Zero) \
|
||||
V(ArmS128Dup) \
|
||||
V(ArmS128And) \
|
||||
V(ArmS128Or) \
|
||||
V(ArmS128Xor) \
|
||||
V(ArmS128Not) \
|
||||
V(ArmS128Select) \
|
||||
V(ArmS32x4ZipLeft) \
|
||||
V(ArmS32x4ZipRight) \
|
||||
V(ArmS32x4UnzipLeft) \
|
||||
V(ArmS32x4UnzipRight) \
|
||||
V(ArmS32x4TransposeLeft) \
|
||||
V(ArmS32x4TransposeRight) \
|
||||
V(ArmS32x4Shuffle) \
|
||||
V(ArmS16x8ZipLeft) \
|
||||
V(ArmS16x8ZipRight) \
|
||||
V(ArmS16x8UnzipLeft) \
|
||||
V(ArmS16x8UnzipRight) \
|
||||
V(ArmS16x8TransposeLeft) \
|
||||
V(ArmS16x8TransposeRight) \
|
||||
V(ArmS8x16ZipLeft) \
|
||||
V(ArmS8x16ZipRight) \
|
||||
V(ArmS8x16UnzipLeft) \
|
||||
V(ArmS8x16UnzipRight) \
|
||||
V(ArmS8x16TransposeLeft) \
|
||||
V(ArmS8x16TransposeRight) \
|
||||
V(ArmS8x16Concat) \
|
||||
V(ArmS8x16Shuffle) \
|
||||
V(ArmS32x2Reverse) \
|
||||
V(ArmS16x4Reverse) \
|
||||
V(ArmS16x2Reverse) \
|
||||
V(ArmS8x8Reverse) \
|
||||
V(ArmS8x4Reverse) \
|
||||
V(ArmS8x2Reverse) \
|
||||
V(ArmS1x4AnyTrue) \
|
||||
V(ArmS1x4AllTrue) \
|
||||
V(ArmS1x8AnyTrue) \
|
||||
V(ArmS1x8AllTrue) \
|
||||
V(ArmS1x16AnyTrue) \
|
||||
V(ArmS1x16AllTrue) \
|
||||
V(ArmWord32AtomicPairLoad) \
|
||||
V(ArmWord32AtomicPairStore) \
|
||||
V(ArmWord32AtomicPairAdd) \
|
||||
V(ArmWord32AtomicPairSub) \
|
||||
V(ArmWord32AtomicPairAnd) \
|
||||
V(ArmWord32AtomicPairOr) \
|
||||
V(ArmWord32AtomicPairXor) \
|
||||
V(ArmWord32AtomicPairExchange) \
|
||||
V(ArmWord32AtomicPairCompareExchange) \
|
||||
V(ArmWord64AtomicNarrowAddUint8) \
|
||||
V(ArmWord64AtomicNarrowAddUint16) \
|
||||
V(ArmWord64AtomicNarrowAddUint32) \
|
||||
V(ArmWord64AtomicNarrowSubUint8) \
|
||||
V(ArmWord64AtomicNarrowSubUint16) \
|
||||
V(ArmWord64AtomicNarrowSubUint32) \
|
||||
V(ArmWord64AtomicNarrowAndUint8) \
|
||||
V(ArmWord64AtomicNarrowAndUint16) \
|
||||
V(ArmWord64AtomicNarrowAndUint32) \
|
||||
V(ArmWord64AtomicNarrowOrUint8) \
|
||||
V(ArmWord64AtomicNarrowOrUint16) \
|
||||
V(ArmWord64AtomicNarrowOrUint32) \
|
||||
V(ArmWord64AtomicNarrowXorUint8) \
|
||||
V(ArmWord64AtomicNarrowXorUint16) \
|
||||
V(ArmWord64AtomicNarrowXorUint32) \
|
||||
V(ArmWord64AtomicNarrowExchangeUint8) \
|
||||
V(ArmWord64AtomicNarrowExchangeUint16) \
|
||||
V(ArmWord64AtomicNarrowExchangeUint32) \
|
||||
V(ArmWord64AtomicNarrowCompareExchangeUint8) \
|
||||
V(ArmWord64AtomicNarrowCompareExchangeUint16) \
|
||||
V(ArmWord64AtomicNarrowCompareExchangeUint32)
|
||||
|
||||
// Addressing modes represent the "shape" of inputs to an instruction.
|
||||
// Many instructions support multiple addressing modes. Addressing modes
|
||||
|
@@ -264,6 +264,7 @@ int InstructionScheduler::GetTargetInstructionFlags(
case kArmLdrsh:
case kArmLdr:
case kArmPeek:
case kArmWord32AtomicPairLoad:
return kIsLoadOperation;

case kArmVstrF32:
@@ -276,6 +277,35 @@ int InstructionScheduler::GetTargetInstructionFlags(
case kArmPush:
case kArmPoke:
case kArmDsbIsb:
case kArmWord32AtomicPairStore:
case kArmWord32AtomicPairAdd:
case kArmWord32AtomicPairSub:
case kArmWord32AtomicPairAnd:
case kArmWord32AtomicPairOr:
case kArmWord32AtomicPairXor:
case kArmWord32AtomicPairExchange:
case kArmWord32AtomicPairCompareExchange:
case kArmWord64AtomicNarrowAddUint8:
case kArmWord64AtomicNarrowAddUint16:
case kArmWord64AtomicNarrowAddUint32:
case kArmWord64AtomicNarrowSubUint8:
case kArmWord64AtomicNarrowSubUint16:
case kArmWord64AtomicNarrowSubUint32:
case kArmWord64AtomicNarrowAndUint8:
case kArmWord64AtomicNarrowAndUint16:
case kArmWord64AtomicNarrowAndUint32:
case kArmWord64AtomicNarrowOrUint8:
case kArmWord64AtomicNarrowOrUint16:
case kArmWord64AtomicNarrowOrUint32:
case kArmWord64AtomicNarrowXorUint8:
case kArmWord64AtomicNarrowXorUint16:
case kArmWord64AtomicNarrowXorUint32:
case kArmWord64AtomicNarrowExchangeUint8:
case kArmWord64AtomicNarrowExchangeUint16:
case kArmWord64AtomicNarrowExchangeUint32:
case kArmWord64AtomicNarrowCompareExchangeUint8:
case kArmWord64AtomicNarrowCompareExchangeUint16:
case kArmWord64AtomicNarrowCompareExchangeUint32:
return kHasSideEffect;

#define CASE(Name) case k##Name:
@@ -403,6 +403,46 @@ void EmitStore(InstructionSelector* selector, InstructionCode opcode,
selector->Emit(opcode, 0, nullptr, input_count, inputs);
}

void VisitPairAtomicBinOp(InstructionSelector* selector, Node* node,
ArchOpcode opcode) {
ArmOperandGenerator g(selector);
Node* base = node->InputAt(0);
Node* index = node->InputAt(1);
Node* value = node->InputAt(2);
Node* value_high = node->InputAt(3);
AddressingMode addressing_mode = kMode_Offset_RR;
InstructionOperand inputs[] = {g.UseUniqueRegister(value),
g.UseUniqueRegister(value_high),
g.UseRegister(base), g.UseRegister(index)};
InstructionOperand outputs[] = {
g.DefineAsFixed(NodeProperties::FindProjection(node, 0), r2),
g.DefineAsFixed(NodeProperties::FindProjection(node, 1), r3)};
InstructionOperand temps[] = {g.TempRegister(), g.TempRegister(r6),
g.TempRegister(r7), g.TempRegister()};
InstructionCode code = opcode | AddressingModeField::encode(addressing_mode);
selector->Emit(code, arraysize(outputs), outputs, arraysize(inputs), inputs,
arraysize(temps), temps);
}

void VisitNarrowAtomicBinOp(InstructionSelector* selector, Node* node,
ArchOpcode opcode) {
ArmOperandGenerator g(selector);
Node* base = node->InputAt(0);
Node* index = node->InputAt(1);
Node* value = node->InputAt(2);
AddressingMode addressing_mode = kMode_Offset_RR;
InstructionOperand inputs[3] = {g.UseRegister(base), g.UseRegister(index),
g.UseUniqueRegister(value)};
InstructionOperand outputs[] = {
g.DefineAsFixed(NodeProperties::FindProjection(node, 0), r4),
g.DefineAsFixed(NodeProperties::FindProjection(node, 1), r5)};
InstructionOperand temps[] = {g.TempRegister(), g.TempRegister(),
g.TempRegister()};
InstructionCode code = opcode | AddressingModeField::encode(addressing_mode);
selector->Emit(code, arraysize(outputs), outputs, arraysize(inputs), inputs,
arraysize(temps), temps);
}

} // namespace

void InstructionSelector::VisitStackSlot(Node* node) {
@ -2221,6 +2261,190 @@ VISIT_ATOMIC_BINOP(Or)
|
||||
VISIT_ATOMIC_BINOP(Xor)
|
||||
#undef VISIT_ATOMIC_BINOP
|
||||
|
||||
void InstructionSelector::VisitWord32AtomicPairLoad(Node* node) {
|
||||
ArmOperandGenerator g(this);
|
||||
Node* base = node->InputAt(0);
|
||||
Node* index = node->InputAt(1);
|
||||
InstructionOperand inputs[] = {g.UseRegister(base), g.UseRegister(index)};
|
||||
InstructionOperand outputs[] = {
|
||||
g.DefineAsFixed(NodeProperties::FindProjection(node, 0), r0),
|
||||
g.DefineAsFixed(NodeProperties::FindProjection(node, 1), r1)};
|
||||
InstructionOperand temps[] = {g.TempRegister()};
|
||||
AddressingMode addressing_mode = kMode_Offset_RR;
|
||||
InstructionCode code =
|
||||
kArmWord32AtomicPairLoad | AddressingModeField::encode(addressing_mode);
|
||||
Emit(code, arraysize(outputs), outputs, arraysize(inputs), inputs,
|
||||
arraysize(temps), temps);
|
||||
}
|
||||
|
||||
void InstructionSelector::VisitWord32AtomicPairStore(Node* node) {
|
||||
ArmOperandGenerator g(this);
|
||||
Node* base = node->InputAt(0);
|
||||
Node* index = node->InputAt(1);
|
||||
Node* value_low = node->InputAt(2);
|
||||
Node* value_high = node->InputAt(3);
|
||||
AddressingMode addressing_mode = kMode_Offset_RR;
|
||||
InstructionOperand inputs[] = {
|
||||
g.UseUniqueRegister(base), g.UseUniqueRegister(index),
|
||||
g.UseFixed(value_low, r2), g.UseFixed(value_high, r3)};
|
||||
InstructionOperand temps[] = {g.TempRegister(), g.TempRegister(r0),
|
||||
g.TempRegister(r1)};
|
||||
InstructionCode code =
|
||||
kArmWord32AtomicPairStore | AddressingModeField::encode(addressing_mode);
|
||||
Emit(code, 0, nullptr, arraysize(inputs), inputs, arraysize(temps), temps);
|
||||
}
|
||||
|
||||
void InstructionSelector::VisitWord32AtomicPairAdd(Node* node) {
|
||||
VisitPairAtomicBinOp(this, node, kArmWord32AtomicPairAdd);
|
||||
}
|
||||
|
||||
void InstructionSelector::VisitWord32AtomicPairSub(Node* node) {
|
||||
VisitPairAtomicBinOp(this, node, kArmWord32AtomicPairSub);
|
||||
}
|
||||
|
||||
void InstructionSelector::VisitWord32AtomicPairAnd(Node* node) {
|
||||
VisitPairAtomicBinOp(this, node, kArmWord32AtomicPairAnd);
|
||||
}
|
||||
|
||||
void InstructionSelector::VisitWord32AtomicPairOr(Node* node) {
|
||||
VisitPairAtomicBinOp(this, node, kArmWord32AtomicPairOr);
|
||||
}
|
||||
|
||||
void InstructionSelector::VisitWord32AtomicPairXor(Node* node) {
|
||||
VisitPairAtomicBinOp(this, node, kArmWord32AtomicPairXor);
|
||||
}
|
||||
|
||||
void InstructionSelector::VisitWord64AtomicNarrowBinop(Node* node,
|
||||
ArchOpcode uint8_op,
|
||||
ArchOpcode uint16_op,
|
||||
ArchOpcode uint32_op) {
|
||||
MachineType type = AtomicOpType(node->op());
|
||||
DCHECK(type != MachineType::Uint64());
|
||||
ArchOpcode opcode = kArchNop;
|
||||
if (type == MachineType::Uint32()) {
|
||||
opcode = uint32_op;
|
||||
} else if (type == MachineType::Uint16()) {
|
||||
opcode = uint16_op;
|
||||
} else if (type == MachineType::Uint8()) {
|
||||
opcode = uint8_op;
|
||||
} else {
|
||||
UNREACHABLE();
|
||||
return;
|
||||
}
|
||||
VisitNarrowAtomicBinOp(this, node, opcode);
|
||||
}
|
||||
|
||||
#define VISIT_ATOMIC_BINOP(op) \
|
||||
void InstructionSelector::VisitWord64AtomicNarrow##op(Node* node) { \
|
||||
VisitWord64AtomicNarrowBinop(node, kArmWord64AtomicNarrow##op##Uint8, \
|
||||
kArmWord64AtomicNarrow##op##Uint16, \
|
||||
kArmWord64AtomicNarrow##op##Uint32); \
|
||||
}
|
||||
VISIT_ATOMIC_BINOP(Add)
|
||||
VISIT_ATOMIC_BINOP(Sub)
|
||||
VISIT_ATOMIC_BINOP(And)
|
||||
VISIT_ATOMIC_BINOP(Or)
|
||||
VISIT_ATOMIC_BINOP(Xor)
|
||||
#undef VISIT_ATOMIC_BINOP
|
||||
|
||||
void InstructionSelector::VisitWord32AtomicPairExchange(Node* node) {
|
||||
ArmOperandGenerator g(this);
|
||||
Node* base = node->InputAt(0);
|
||||
Node* index = node->InputAt(1);
|
||||
Node* value = node->InputAt(2);
|
||||
Node* value_high = node->InputAt(3);
|
||||
AddressingMode addressing_mode = kMode_Offset_RR;
|
||||
InstructionOperand inputs[] = {g.UseFixed(value, r0),
|
||||
g.UseFixed(value_high, r1),
|
||||
g.UseRegister(base), g.UseRegister(index)};
|
||||
InstructionOperand outputs[] = {
|
||||
g.DefineAsFixed(NodeProperties::FindProjection(node, 0), r6),
|
||||
g.DefineAsFixed(NodeProperties::FindProjection(node, 1), r7)};
|
||||
InstructionOperand temps[] = {g.TempRegister(), g.TempRegister()};
|
||||
InstructionCode code = kArmWord32AtomicPairExchange |
|
||||
AddressingModeField::encode(addressing_mode);
|
||||
Emit(code, arraysize(outputs), outputs, arraysize(inputs), inputs,
|
||||
arraysize(temps), temps);
|
||||
}
|
||||
|
||||
void InstructionSelector::VisitWord64AtomicNarrowExchange(Node* node) {
|
||||
ArmOperandGenerator g(this);
|
||||
Node* base = node->InputAt(0);
|
||||
Node* index = node->InputAt(1);
|
||||
Node* value = node->InputAt(2);
|
||||
ArchOpcode opcode = kArchNop;
|
||||
MachineType type = AtomicOpType(node->op());
|
||||
if (type == MachineType::Uint8()) {
|
||||
opcode = kArmWord64AtomicNarrowExchangeUint8;
|
||||
} else if (type == MachineType::Uint16()) {
|
||||
opcode = kArmWord64AtomicNarrowExchangeUint16;
|
||||
} else if (type == MachineType::Uint32()) {
|
||||
opcode = kArmWord64AtomicNarrowExchangeUint32;
|
||||
} else {
|
||||
UNREACHABLE();
|
||||
return;
|
||||
}
|
||||
AddressingMode addressing_mode = kMode_Offset_RR;
|
||||
InstructionOperand inputs[] = {g.UseRegister(base), g.UseRegister(index),
|
||||
g.UseUniqueRegister(value)};
|
||||
InstructionOperand outputs[] = {
|
||||
g.DefineAsRegister(NodeProperties::FindProjection(node, 0)),
|
||||
g.DefineAsRegister(NodeProperties::FindProjection(node, 1))};
|
||||
InstructionOperand temps[] = {g.TempRegister(), g.TempRegister()};
|
||||
InstructionCode code = opcode | AddressingModeField::encode(addressing_mode);
|
||||
Emit(code, arraysize(outputs), outputs, arraysize(inputs), inputs,
|
||||
arraysize(temps), temps);
|
||||
}
|
||||
|
||||
void InstructionSelector::VisitWord32AtomicPairCompareExchange(Node* node) {
|
||||
ArmOperandGenerator g(this);
|
||||
AddressingMode addressing_mode = kMode_Offset_RR;
|
||||
InstructionOperand inputs[] = {
|
||||
g.UseFixed(node->InputAt(2), r4), g.UseFixed(node->InputAt(3), r5),
|
||||
g.UseFixed(node->InputAt(4), r8), g.UseFixed(node->InputAt(5), r9),
|
||||
g.UseRegister(node->InputAt(0)), g.UseRegister(node->InputAt(1))};
|
||||
InstructionOperand outputs[] = {
|
||||
g.DefineAsFixed(NodeProperties::FindProjection(node, 0), r2),
|
||||
g.DefineAsFixed(NodeProperties::FindProjection(node, 1), r3)};
|
||||
InstructionOperand temps[] = {g.TempRegister(), g.TempRegister()};
|
||||
InstructionCode code = kArmWord32AtomicPairCompareExchange |
|
||||
AddressingModeField::encode(addressing_mode);
|
||||
Emit(code, arraysize(outputs), outputs, arraysize(inputs), inputs,
|
||||
arraysize(temps), temps);
|
||||
}
|
||||
|
||||
void InstructionSelector::VisitWord64AtomicNarrowCompareExchange(Node* node) {
|
||||
ArmOperandGenerator g(this);
|
||||
Node* base = node->InputAt(0);
|
||||
Node* index = node->InputAt(1);
|
||||
Node* old_value = node->InputAt(2);
|
||||
Node* new_value = node->InputAt(3);
|
||||
ArchOpcode opcode = kArchNop;
|
||||
MachineType type = AtomicOpType(node->op());
|
||||
if (type == MachineType::Uint8()) {
|
||||
opcode = kArmWord64AtomicNarrowCompareExchangeUint8;
|
||||
} else if (type == MachineType::Uint16()) {
|
||||
opcode = kArmWord64AtomicNarrowCompareExchangeUint16;
|
||||
} else if (type == MachineType::Uint32()) {
|
||||
opcode = kArmWord64AtomicNarrowCompareExchangeUint32;
|
||||
} else {
|
||||
UNREACHABLE();
|
||||
return;
|
||||
}
|
||||
AddressingMode addressing_mode = kMode_Offset_RR;
|
||||
InstructionOperand inputs[] = {g.UseRegister(base), g.UseRegister(index),
|
||||
g.UseUniqueRegister(old_value),
|
||||
g.UseUniqueRegister(new_value)};
|
||||
InstructionOperand outputs[] = {
|
||||
g.DefineAsRegister(NodeProperties::FindProjection(node, 0)),
|
||||
g.DefineAsRegister(NodeProperties::FindProjection(node, 1))};
|
||||
InstructionOperand temps[] = {g.TempRegister(), g.TempRegister(),
|
||||
g.TempRegister()};
|
||||
InstructionCode code = opcode | AddressingModeField::encode(addressing_mode);
|
||||
Emit(code, arraysize(outputs), outputs, arraysize(inputs), inputs,
|
||||
arraysize(temps), temps);
|
||||
}
|
||||
|
||||
#define SIMD_TYPE_LIST(V) \
|
||||
V(F32x4) \
|
||||
V(I32x4) \
|
||||
|
@@ -2389,7 +2389,7 @@ void InstructionSelector::VisitWord32PairShr(Node* node) { UNIMPLEMENTED(); }
void InstructionSelector::VisitWord32PairSar(Node* node) { UNIMPLEMENTED(); }
#endif // V8_TARGET_ARCH_64_BIT

#if !V8_TARGET_ARCH_IA32
#if !V8_TARGET_ARCH_IA32 && !V8_TARGET_ARCH_ARM
void InstructionSelector::VisitWord32AtomicPairLoad(Node* node) {
UNIMPLEMENTED();
}
@@ -2453,7 +2453,7 @@ void InstructionSelector::VisitWord64AtomicNarrowExchange(Node* node) {
void InstructionSelector::VisitWord64AtomicNarrowCompareExchange(Node* node) {
UNIMPLEMENTED();
}
#endif // !V8_TARGET_ARCH_IA32
#endif // !V8_TARGET_ARCH_IA32 && !V8_TARGET_ARCH_ARM

#if !V8_TARGET_ARCH_ARM && !V8_TARGET_ARCH_ARM64 && !V8_TARGET_ARCH_MIPS && \
!V8_TARGET_ARCH_MIPS64 && !V8_TARGET_ARCH_IA32
@@ -243,6 +243,7 @@ v8_source_set("cctest_sources") {
"wasm/test-run-wasm-64.cc",
"wasm/test-run-wasm-asmjs.cc",
"wasm/test-run-wasm-atomics.cc",
"wasm/test-run-wasm-atomics64.cc",
"wasm/test-run-wasm-interpreter.cc",
"wasm/test-run-wasm-js.cc",
"wasm/test-run-wasm-module.cc",
@@ -299,7 +300,6 @@ v8_source_set("cctest_sources") {
"test-sync-primitives-arm64.cc",
"test-utils-arm64.cc",
"test-utils-arm64.h",
"wasm/test-run-wasm-atomics64.cc",
]
} else if (v8_current_cpu == "x86") {
sources += [ ### gcmole(arch:ia32) ###
@@ -309,7 +309,6 @@ v8_source_set("cctest_sources") {
"test-code-stubs.h",
"test-disasm-ia32.cc",
"test-log-stack-tracer.cc",
"wasm/test-run-wasm-atomics64.cc",
]
} else if (v8_current_cpu == "mips") {
sources += [ ### gcmole(arch:mips) ###
@@ -356,7 +355,6 @@ v8_source_set("cctest_sources") {
"test-disasm-x64.cc",
"test-log-stack-tracer.cc",
"test-macro-assembler-x64.cc",
"wasm/test-run-wasm-atomics64.cc",
]
} else if (v8_current_cpu == "ppc" || v8_current_cpu == "ppc64") {
sources += [ ### gcmole(arch:ppc) ###
@@ -344,6 +344,12 @@
'test-run-wasm-simd/RunWasm_ReductionTest16_compiled': [SKIP],
}], # '(arch == mipsel or arch == mips64el or arch == mips or arch == mips64) and not simd_mips'

##############################################################################
['arch == mipsel or arch == mips64el or arch == mips or arch == mips64', {
# TODO(mips-team): Implement I64Atomic operations on MIPS
'test-run-wasm-atomics64/*': [SKIP],
}], # 'arch == mipsel or arch == mips64el or arch == mips or arch == mips64'

##############################################################################
['mips_arch_variant == r6', {
# For MIPS[64] architecture release 6, fusion multiply-accumulate instructions
@@ -1615,8 +1615,8 @@ TEST(LoadStoreExclusive) {
COMPARE(strexh(r0, r1, r2), "e1e20f91 strexh r0, r1, [r2]");
COMPARE(ldrex(r0, r1), "e1910f9f ldrex r0, [r1]");
COMPARE(strex(r0, r1, r2), "e1820f91 strex r0, r1, [r2]");
COMPARE(ldrexd(r0, r1, MemOperand(r2)), "e1b20f9f ldrexd r0, [r2]");
COMPARE(strexd(r0, r2, r3, MemOperand(r4)),
COMPARE(ldrexd(r0, r1, r2), "e1b20f9f ldrexd r0, [r2]");
COMPARE(strexd(r0, r2, r3, r4),
"e1a40f92 strexd r0, r2, [r4]");

VERIFY_RUN();