PPC: Cleanup Atomic64 Ops
R=joransiu@ca.ibm.com

Change-Id: I154b3cac455d44a84f7d6363758841310010d749
Reviewed-on: https://chromium-review.googlesource.com/c/1296683
Commit-Queue: Junliang Yan <jyan@ca.ibm.com>
Reviewed-by: Joran Siu <joransiu@ca.ibm.com>
Cr-Commit-Position: refs/heads/master@{#56969}
Parent: 9352171ec9
Commit: a2415d540b
@@ -526,72 +526,64 @@ void EmitWordLoadPoisoningIfNeeded(CodeGenerator* codegen, Instruction* instr,
    DoubleRegister result = i.OutputDoubleRegister(); \
    AddressingMode mode = kMode_None; \
    MemOperand operand = i.MemoryOperand(&mode); \
    bool is_atomic = i.InputInt32(2); \
    if (mode == kMode_MRI) { \
      __ asm_instr(result, operand); \
    } else { \
      __ asm_instrx(result, operand); \
    } \
    if (is_atomic) __ lwsync(); \
    DCHECK_EQ(LeaveRC, i.OutputRCBit()); \
  } while (0)

#define ASSEMBLE_LOAD_INTEGER(asm_instr, asm_instrx) \
  do { \
    Register result = i.OutputRegister(); \
    AddressingMode mode = kMode_None; \
    MemOperand operand = i.MemoryOperand(&mode); \
    bool is_atomic = i.InputInt32(2); \
    if (mode == kMode_MRI) { \
      __ asm_instr(result, operand); \
    } else { \
      __ asm_instrx(result, operand); \
    } \
    if (is_atomic) __ lwsync(); \
    DCHECK_EQ(LeaveRC, i.OutputRCBit()); \
  } while (0)

#define ASSEMBLE_STORE_FLOAT32() \
#define ASSEMBLE_STORE_FLOAT(asm_instr, asm_instrx) \
  do { \
    size_t index = 0; \
    AddressingMode mode = kMode_None; \
    MemOperand operand = i.MemoryOperand(&mode, &index); \
    DoubleRegister value = i.InputDoubleRegister(index); \
    bool is_atomic = i.InputInt32(3); \
    if (is_atomic) __ lwsync(); \
    /* removed frsp as instruction-selector checked */ \
    /* value to be kFloat32 */ \
    if (mode == kMode_MRI) { \
      __ stfs(value, operand); \
      __ asm_instr(value, operand); \
    } else { \
      __ stfsx(value, operand); \
      __ asm_instrx(value, operand); \
    } \
    if (is_atomic) __ sync(); \
    DCHECK_EQ(LeaveRC, i.OutputRCBit()); \
  } while (0)

#define ASSEMBLE_STORE_DOUBLE() \
  do { \
    size_t index = 0; \
    AddressingMode mode = kMode_None; \
    MemOperand operand = i.MemoryOperand(&mode, &index); \
    DoubleRegister value = i.InputDoubleRegister(index); \
    if (mode == kMode_MRI) { \
      __ stfd(value, operand); \
    } else { \
      __ stfdx(value, operand); \
    } \
    DCHECK_EQ(LeaveRC, i.OutputRCBit()); \
  } while (0)

#define ASSEMBLE_STORE_INTEGER(asm_instr, asm_instrx) \
  do { \
    size_t index = 0; \
    AddressingMode mode = kMode_None; \
    MemOperand operand = i.MemoryOperand(&mode, &index); \
    Register value = i.InputRegister(index); \
    bool is_atomic = i.InputInt32(3); \
    if (is_atomic) __ lwsync(); \
    if (mode == kMode_MRI) { \
      __ asm_instr(value, operand); \
    } else { \
      __ asm_instrx(value, operand); \
    } \
    if (is_atomic) __ sync(); \
    DCHECK_EQ(LeaveRC, i.OutputRCBit()); \
  } while (0)

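The is_atomic paths above bracket an ordinary load or store with PPC fences: a load followed by lwsync behaves as a load-acquire, and a store preceded by lwsync and followed by sync behaves as a sequentially consistent store. A minimal standalone C++ sketch of the same ordering, using std::atomic (illustrative only, not V8 code; the names and the exact mapping are simplifications):

#include <atomic>
#include <cstdint>

std::atomic<uint32_t> cell{0};

uint32_t LoadAcquireSketch() {
  // Roughly what "lwz/lwzx ; lwsync" provides when is_atomic is set.
  return cell.load(std::memory_order_acquire);
}

void StoreSeqCstSketch(uint32_t v) {
  // Roughly what "lwsync ; stw/stwx ; sync" provides when is_atomic is set.
  cell.store(v, std::memory_order_seq_cst);
}
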
@@ -602,54 +594,30 @@ void EmitWordLoadPoisoningIfNeeded(CodeGenerator* codegen, Instruction* instr,
#define CleanUInt32(x)
#endif

#define ASSEMBLE_ATOMIC_LOAD_INTEGER(asm_instr, asm_instrx) \
  do { \
    Label done; \
    Register result = i.OutputRegister(); \
    AddressingMode mode = kMode_None; \
    MemOperand operand = i.MemoryOperand(&mode); \
    if (mode == kMode_MRI) { \
      __ asm_instr(result, operand); \
    } else { \
      __ asm_instrx(result, operand); \
    } \
    __ lwsync(); \
  } while (0)
#define ASSEMBLE_ATOMIC_STORE_INTEGER(asm_instr, asm_instrx) \
  do { \
    size_t index = 0; \
    AddressingMode mode = kMode_None; \
    MemOperand operand = i.MemoryOperand(&mode, &index); \
    Register value = i.InputRegister(index); \
    __ lwsync(); \
    if (mode == kMode_MRI) { \
      __ asm_instr(value, operand); \
    } else { \
      __ asm_instrx(value, operand); \
    } \
    __ sync(); \
    DCHECK_EQ(LeaveRC, i.OutputRCBit()); \
  } while (0)
#define ASSEMBLE_ATOMIC_EXCHANGE_INTEGER(load_instr, store_instr) \
  do { \
    Label exchange; \
    __ lwsync(); \
    __ bind(&exchange); \
    __ load_instr(i.OutputRegister(0), \
                  MemOperand(i.InputRegister(0), i.InputRegister(1))); \
    __ store_instr(i.InputRegister(2), \
                   MemOperand(i.InputRegister(0), i.InputRegister(1))); \
    __ bne(&exchange, cr0); \
    __ sync(); \
  } while (0)

#define ASSEMBLE_ATOMIC_BINOP(bin_inst, load_inst, store_inst) \
  do { \
    MemOperand operand = MemOperand(i.InputRegister(0), i.InputRegister(1)); \
    Label binop; \
    __ lwsync(); \
    __ bind(&binop); \
    __ load_inst(i.OutputRegister(), operand); \
    __ bin_inst(kScratchReg, i.OutputRegister(), i.InputRegister(2)); \
    __ store_inst(kScratchReg, operand); \
    __ bne(&binop, cr0); \
    __ sync(); \
  } while (false)

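ASSEMBLE_ATOMIC_BINOP above is a classic load-reserve/store-conditional retry loop: lwsync, a larx load, the operation, a stcx. store attempt, and a branch back if the reservation was lost, ending with sync. A portable C++ sketch of the same read-modify-write shape, with compare_exchange_weak standing in for the reservation (illustration only, not V8 code; names are invented):

#include <atomic>
#include <cstdint>

// Retry loop analogous to: lwsync; loop: lwarx; add; stwcx.; bne loop; sync
uint32_t FetchAddSketch(std::atomic<uint32_t>& cell, uint32_t operand) {
  uint32_t old_value = cell.load(std::memory_order_relaxed);
  while (!cell.compare_exchange_weak(old_value, old_value + operand,
                                     std::memory_order_seq_cst,
                                     std::memory_order_relaxed)) {
    // compare_exchange_weak refreshed old_value; try again.
  }
  return old_value;  // Like the macro, the previous value is the result.
}
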
#define ASSEMBLE_ATOMIC_BINOP_SIGN_EXT(bin_inst, load_inst, store_inst, \
@@ -657,12 +625,14 @@ void EmitWordLoadPoisoningIfNeeded(CodeGenerator* codegen, Instruction* instr,
  do { \
    MemOperand operand = MemOperand(i.InputRegister(0), i.InputRegister(1)); \
    Label binop; \
    __ lwsync(); \
    __ bind(&binop); \
    __ load_inst(i.OutputRegister(), operand); \
    __ ext_instr(i.OutputRegister(), i.OutputRegister()); \
    __ bin_inst(kScratchReg, i.OutputRegister(), i.InputRegister(2)); \
    __ store_inst(kScratchReg, operand); \
    __ bne(&binop, cr0); \
    __ sync(); \
  } while (false)

#define ASSEMBLE_ATOMIC_COMPARE_EXCHANGE(cmp_inst, load_inst, store_inst) \
@@ -670,6 +640,7 @@ void EmitWordLoadPoisoningIfNeeded(CodeGenerator* codegen, Instruction* instr,
    MemOperand operand = MemOperand(i.InputRegister(0), i.InputRegister(1)); \
    Label loop; \
    Label exit; \
    __ lwsync(); \
    __ bind(&loop); \
    __ load_inst(i.OutputRegister(), operand); \
    __ cmp_inst(i.OutputRegister(), i.InputRegister(2), cr0); \
@@ -677,6 +648,7 @@ void EmitWordLoadPoisoningIfNeeded(CodeGenerator* codegen, Instruction* instr,
    __ store_inst(i.InputRegister(3), operand); \
    __ bne(&loop, cr0); \
    __ bind(&exit); \
    __ sync(); \
  } while (false)

#define ASSEMBLE_ATOMIC_COMPARE_EXCHANGE_SIGN_EXT(cmp_inst, load_inst, \
@@ -685,6 +657,7 @@ void EmitWordLoadPoisoningIfNeeded(CodeGenerator* codegen, Instruction* instr,
    MemOperand operand = MemOperand(i.InputRegister(0), i.InputRegister(1)); \
    Label loop; \
    Label exit; \
    __ lwsync(); \
    __ bind(&loop); \
    __ load_inst(i.OutputRegister(), operand); \
    __ ext_instr(i.OutputRegister(), i.OutputRegister()); \
@@ -693,6 +666,7 @@ void EmitWordLoadPoisoningIfNeeded(CodeGenerator* codegen, Instruction* instr,
    __ store_inst(i.InputRegister(3), operand); \
    __ bne(&loop, cr0); \
    __ bind(&exit); \
    __ sync(); \
  } while (false)

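The compare-exchange macros follow the same reservation pattern, but compare the loaded value against the expected operand and branch to the exit label on a mismatch before attempting the conditional store. In portable C++ terms this corresponds to a strong compare-exchange (illustrative sketch, not V8 code):

#include <atomic>
#include <cstdint>

// Analogous to: lwsync; loop: lwarx; cmp; bne exit; stwcx.; bne loop; exit: sync
uint32_t CompareExchangeSketch(std::atomic<uint32_t>& cell, uint32_t expected,
                               uint32_t desired) {
  cell.compare_exchange_strong(expected, desired, std::memory_order_seq_cst);
  return expected;  // Holds the value that was actually observed in memory.
}
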
void CodeGenerator::AssembleDeconstructFrame() {
@@ -1947,41 +1921,22 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
      break;
#endif
    case kPPC_StoreFloat32:
      ASSEMBLE_STORE_FLOAT32();
      ASSEMBLE_STORE_FLOAT(stfs, stfsx);
      break;
    case kPPC_StoreDouble:
      ASSEMBLE_STORE_DOUBLE();
      ASSEMBLE_STORE_FLOAT(stfd, stfdx);
      break;
    case kWord32AtomicLoadInt8:
      ASSEMBLE_ATOMIC_LOAD_INTEGER(lbz, lbzx);
      __ extsb(i.OutputRegister(), i.OutputRegister());
      break;
    case kPPC_AtomicLoadUint8:
      ASSEMBLE_ATOMIC_LOAD_INTEGER(lbz, lbzx);
      break;
    case kWord32AtomicLoadInt16:
      ASSEMBLE_ATOMIC_LOAD_INTEGER(lha, lhax);
      break;
    case kPPC_AtomicLoadUint16:
      ASSEMBLE_ATOMIC_LOAD_INTEGER(lhz, lhzx);
      break;
    case kPPC_AtomicLoadWord32:
      ASSEMBLE_ATOMIC_LOAD_INTEGER(lwz, lwzx);
      break;
    case kPPC_AtomicLoadWord64:
      ASSEMBLE_ATOMIC_LOAD_INTEGER(ld, ldx);
      break;
    case kPPC_AtomicStoreUint8:
      ASSEMBLE_ATOMIC_STORE_INTEGER(stb, stbx);
      break;
    case kPPC_AtomicStoreUint16:
      ASSEMBLE_ATOMIC_STORE_INTEGER(sth, sthx);
      break;
    case kPPC_AtomicStoreWord32:
      ASSEMBLE_ATOMIC_STORE_INTEGER(stw, stwx);
      break;
    case kPPC_AtomicStoreWord64:
      ASSEMBLE_ATOMIC_STORE_INTEGER(std, stdx);
      UNREACHABLE();
      break;
    case kWord32AtomicExchangeInt8:
      ASSEMBLE_ATOMIC_EXCHANGE_INTEGER(lbarx, stbcx);
@@ -2023,22 +1978,26 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
      break;

#define ATOMIC_BINOP_CASE(op, inst) \
  case kWord32Atomic##op##Int8: \
  case kPPC_Atomic##op##Int8: \
    ASSEMBLE_ATOMIC_BINOP_SIGN_EXT(inst, lbarx, stbcx, extsb); \
    break; \
  case kPPC_Atomic##op##Uint8: \
    ASSEMBLE_ATOMIC_BINOP(inst, lbarx, stbcx); \
    break; \
  case kWord32Atomic##op##Int16: \
  case kPPC_Atomic##op##Int16: \
    ASSEMBLE_ATOMIC_BINOP_SIGN_EXT(inst, lharx, sthcx, extsh); \
    break; \
  case kPPC_Atomic##op##Uint16: \
    ASSEMBLE_ATOMIC_BINOP(inst, lharx, sthcx); \
    break; \
  case kPPC_Atomic##op##Word32: \
  case kPPC_Atomic##op##Int32: \
    ASSEMBLE_ATOMIC_BINOP_SIGN_EXT(inst, lwarx, stwcx, extsw); \
    break; \
  case kPPC_Atomic##op##Uint32: \
    ASSEMBLE_ATOMIC_BINOP(inst, lwarx, stwcx); \
    break; \
  case kPPC_Atomic##op##Word64: \
  case kPPC_Atomic##op##Int64: \
  case kPPC_Atomic##op##Uint64: \
    ASSEMBLE_ATOMIC_BINOP(inst, ldarx, stdcx); \
    break;
    ATOMIC_BINOP_CASE(Add, add)

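ATOMIC_BINOP_CASE relies on token pasting to generate one group of case labels per operation, so a single macro now covers the signed, unsigned, 32-bit and 64-bit variants in the code generator's switch. A small self-contained demo of that case-generation pattern (invented opcodes, not V8 code):

#include <cstdio>

enum Opcode {
  kAddInt8, kAddUint8, kAddInt64, kAddUint64,
  kSubInt8, kSubUint8, kSubInt64, kSubUint64
};

const char* Describe(Opcode op) {
  switch (op) {
#define BINOP_CASE(name)      \
  case k##name##Int8:         \
  case k##name##Uint8:        \
    return #name " (byte)";   \
  case k##name##Int64:        \
  case k##name##Uint64:       \
    return #name " (64-bit)";
    BINOP_CASE(Add)
    BINOP_CASE(Sub)
#undef BINOP_CASE
  }
  return "unknown";
}

int main() { std::printf("%s\n", Describe(kAddUint64)); }
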
@@ -145,24 +145,44 @@ namespace compiler {
  V(PPC_AtomicCompareExchangeWord64) \
  V(PPC_AtomicAddUint8) \
  V(PPC_AtomicAddUint16) \
  V(PPC_AtomicAddWord32) \
  V(PPC_AtomicAddWord64) \
  V(PPC_AtomicAddUint32) \
  V(PPC_AtomicAddUint64) \
  V(PPC_AtomicAddInt8) \
  V(PPC_AtomicAddInt16) \
  V(PPC_AtomicAddInt32) \
  V(PPC_AtomicAddInt64) \
  V(PPC_AtomicSubUint8) \
  V(PPC_AtomicSubUint16) \
  V(PPC_AtomicSubWord32) \
  V(PPC_AtomicSubWord64) \
  V(PPC_AtomicSubUint32) \
  V(PPC_AtomicSubUint64) \
  V(PPC_AtomicSubInt8) \
  V(PPC_AtomicSubInt16) \
  V(PPC_AtomicSubInt32) \
  V(PPC_AtomicSubInt64) \
  V(PPC_AtomicAndUint8) \
  V(PPC_AtomicAndUint16) \
  V(PPC_AtomicAndWord32) \
  V(PPC_AtomicAndWord64) \
  V(PPC_AtomicAndUint32) \
  V(PPC_AtomicAndUint64) \
  V(PPC_AtomicAndInt8) \
  V(PPC_AtomicAndInt16) \
  V(PPC_AtomicAndInt32) \
  V(PPC_AtomicAndInt64) \
  V(PPC_AtomicOrUint8) \
  V(PPC_AtomicOrUint16) \
  V(PPC_AtomicOrWord32) \
  V(PPC_AtomicOrWord64) \
  V(PPC_AtomicOrUint32) \
  V(PPC_AtomicOrUint64) \
  V(PPC_AtomicOrInt8) \
  V(PPC_AtomicOrInt16) \
  V(PPC_AtomicOrInt32) \
  V(PPC_AtomicOrInt64) \
  V(PPC_AtomicXorUint8) \
  V(PPC_AtomicXorUint16) \
  V(PPC_AtomicXorWord32) \
  V(PPC_AtomicXorWord64)
  V(PPC_AtomicXorUint32) \
  V(PPC_AtomicXorUint64) \
  V(PPC_AtomicXorInt8) \
  V(PPC_AtomicXorInt16) \
  V(PPC_AtomicXorInt32) \
  V(PPC_AtomicXorInt64)

// Addressing modes represent the "shape" of inputs to an instruction.
// Many instructions support multiple addressing modes. Addressing modes

@@ -155,24 +155,44 @@ int InstructionScheduler::GetTargetInstructionFlags(
    case kPPC_AtomicCompareExchangeWord64:
    case kPPC_AtomicAddUint8:
    case kPPC_AtomicAddUint16:
    case kPPC_AtomicAddWord32:
    case kPPC_AtomicAddWord64:
    case kPPC_AtomicAddUint32:
    case kPPC_AtomicAddUint64:
    case kPPC_AtomicAddInt8:
    case kPPC_AtomicAddInt16:
    case kPPC_AtomicAddInt32:
    case kPPC_AtomicAddInt64:
    case kPPC_AtomicSubUint8:
    case kPPC_AtomicSubUint16:
    case kPPC_AtomicSubWord32:
    case kPPC_AtomicSubWord64:
    case kPPC_AtomicSubUint32:
    case kPPC_AtomicSubUint64:
    case kPPC_AtomicSubInt8:
    case kPPC_AtomicSubInt16:
    case kPPC_AtomicSubInt32:
    case kPPC_AtomicSubInt64:
    case kPPC_AtomicAndUint8:
    case kPPC_AtomicAndUint16:
    case kPPC_AtomicAndWord32:
    case kPPC_AtomicAndWord64:
    case kPPC_AtomicAndUint32:
    case kPPC_AtomicAndUint64:
    case kPPC_AtomicAndInt8:
    case kPPC_AtomicAndInt16:
    case kPPC_AtomicAndInt32:
    case kPPC_AtomicAndInt64:
    case kPPC_AtomicOrUint8:
    case kPPC_AtomicOrUint16:
    case kPPC_AtomicOrWord32:
    case kPPC_AtomicOrWord64:
    case kPPC_AtomicOrUint32:
    case kPPC_AtomicOrUint64:
    case kPPC_AtomicOrInt8:
    case kPPC_AtomicOrInt16:
    case kPPC_AtomicOrInt32:
    case kPPC_AtomicOrInt64:
    case kPPC_AtomicXorUint8:
    case kPPC_AtomicXorUint16:
    case kPPC_AtomicXorWord32:
    case kPPC_AtomicXorWord64:
    case kPPC_AtomicXorUint32:
    case kPPC_AtomicXorUint64:
    case kPPC_AtomicXorInt8:
    case kPPC_AtomicXorInt16:
    case kPPC_AtomicXorInt32:
    case kPPC_AtomicXorInt64:
      return kHasSideEffect;

#define CASE(Name) case k##Name:

@@ -204,15 +204,9 @@ void InstructionSelector::VisitLoad(Node* node) {
    case MachineRepresentation::kWord16:
      opcode = load_rep.IsSigned() ? kPPC_LoadWordS16 : kPPC_LoadWordU16;
      break;
#if !V8_TARGET_ARCH_PPC64
    case MachineRepresentation::kTaggedSigned:   // Fall through.
    case MachineRepresentation::kTaggedPointer:  // Fall through.
    case MachineRepresentation::kTagged:         // Fall through.
#endif
    case MachineRepresentation::kWord32:
      opcode = kPPC_LoadWordU32;
      break;
#if V8_TARGET_ARCH_PPC64
    case MachineRepresentation::kTaggedSigned:   // Fall through.
    case MachineRepresentation::kTaggedPointer:  // Fall through.
    case MachineRepresentation::kTagged:         // Fall through.
@@ -220,9 +214,6 @@ void InstructionSelector::VisitLoad(Node* node) {
      opcode = kPPC_LoadWord64;
      mode = kInt16Imm_4ByteAligned;
      break;
#else
    case MachineRepresentation::kWord64:  // Fall through.
#endif
    case MachineRepresentation::kSimd128:  // Fall through.
    case MachineRepresentation::kNone:
      UNREACHABLE();
@@ -234,15 +225,21 @@ void InstructionSelector::VisitLoad(Node* node) {
    opcode |= MiscField::encode(kMemoryAccessPoisoned);
  }

  bool is_atomic = (node->opcode() == IrOpcode::kWord32AtomicLoad ||
                    node->opcode() == IrOpcode::kWord64AtomicLoad);

  if (g.CanBeImmediate(offset, mode)) {
    Emit(opcode | AddressingModeField::encode(kMode_MRI),
         g.DefineAsRegister(node), g.UseRegister(base), g.UseImmediate(offset));
         g.DefineAsRegister(node), g.UseRegister(base), g.UseImmediate(offset),
         g.UseImmediate(is_atomic));
  } else if (g.CanBeImmediate(base, mode)) {
    Emit(opcode | AddressingModeField::encode(kMode_MRI),
         g.DefineAsRegister(node), g.UseRegister(offset), g.UseImmediate(base));
         g.DefineAsRegister(node), g.UseRegister(offset), g.UseImmediate(base),
         g.UseImmediate(is_atomic));
  } else {
    Emit(opcode | AddressingModeField::encode(kMode_MRR),
         g.DefineAsRegister(node), g.UseRegister(base), g.UseRegister(offset));
         g.DefineAsRegister(node), g.UseRegister(base), g.UseRegister(offset),
         g.UseImmediate(is_atomic));
  }
}

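With this change the regular VisitLoad path also serves atomic loads: it appends one extra immediate operand carrying is_atomic, which the code-generator macros read back (i.InputInt32(2)) to decide whether to emit the trailing lwsync. A toy model of that selector/code-generator handshake (invented types and names, not the real backend API):

#include <cstdio>
#include <vector>

struct ToyInstruction {
  const char* mnemonic;
  std::vector<int> inputs;  // [base_reg, offset, is_atomic]
};

ToyInstruction SelectLoad(int base_reg, int offset, bool is_atomic) {
  // The "selector" encodes the flag as a trailing immediate operand.
  return {"lwz", {base_reg, offset, is_atomic ? 1 : 0}};
}

void GenerateLoad(const ToyInstruction& instr) {
  std::printf("%s r3, %d(r%d)\n", instr.mnemonic, instr.inputs[1],
              instr.inputs[0]);
  if (instr.inputs[2] != 0) std::printf("lwsync\n");  // acquire fence only when atomic
}

int main() { GenerateLoad(SelectLoad(4, 16, /*is_atomic=*/true)); }
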
@@ -259,9 +256,19 @@ void InstructionSelector::VisitStore(Node* node) {
  Node* offset = node->InputAt(1);
  Node* value = node->InputAt(2);

  bool is_atomic = (node->opcode() == IrOpcode::kWord32AtomicStore ||
                    node->opcode() == IrOpcode::kWord64AtomicStore);

  MachineRepresentation rep;
  WriteBarrierKind write_barrier_kind = kNoWriteBarrier;

  if (is_atomic) {
    rep = AtomicStoreRepresentationOf(node->op());
  } else {
    StoreRepresentation store_rep = StoreRepresentationOf(node->op());
    WriteBarrierKind write_barrier_kind = store_rep.write_barrier_kind();
    MachineRepresentation rep = store_rep.representation();
    write_barrier_kind = store_rep.write_barrier_kind();
    rep = store_rep.representation();
  }

  if (write_barrier_kind != kNoWriteBarrier) {
    DCHECK(CanBeTaggedPointer(rep));
@@ -303,6 +310,7 @@ void InstructionSelector::VisitStore(Node* node) {
    InstructionCode code = kArchStoreWithWriteBarrier;
    code |= AddressingModeField::encode(addressing_mode);
    code |= MiscField::encode(static_cast<int>(record_write_mode));
    CHECK_EQ(is_atomic, false);
    Emit(code, 0, nullptr, input_count, inputs, temp_count, temps);
  } else {
    ArchOpcode opcode = kArchNop;
@@ -345,15 +353,19 @@ void InstructionSelector::VisitStore(Node* node) {
        UNREACHABLE();
        return;
    }

    if (g.CanBeImmediate(offset, mode)) {
      Emit(opcode | AddressingModeField::encode(kMode_MRI), g.NoOutput(),
           g.UseRegister(base), g.UseImmediate(offset), g.UseRegister(value));
           g.UseRegister(base), g.UseImmediate(offset), g.UseRegister(value),
           g.UseImmediate(is_atomic));
    } else if (g.CanBeImmediate(base, mode)) {
      Emit(opcode | AddressingModeField::encode(kMode_MRI), g.NoOutput(),
           g.UseRegister(offset), g.UseImmediate(base), g.UseRegister(value));
           g.UseRegister(offset), g.UseImmediate(base), g.UseRegister(value),
           g.UseImmediate(is_atomic));
    } else {
      Emit(opcode | AddressingModeField::encode(kMode_MRR), g.NoOutput(),
           g.UseRegister(base), g.UseRegister(offset), g.UseRegister(value));
           g.UseRegister(base), g.UseRegister(offset), g.UseRegister(value),
           g.UseImmediate(is_atomic));
    }
  }
}

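Note the scoping detail in VisitStore above: rep and write_barrier_kind are now declared before the if/else and only assigned inside it, so their values remain visible for the write-barrier check that follows; declaring them inside the else block would confine them to that block. A minimal illustration of that scoping rule (generic C++, not V8 code):

#include <cstdio>

int main() {
  int kind = 0;  // declared before the branch: still visible afterwards
  bool is_atomic = false;
  if (is_atomic) {
    kind = 1;
  } else {
    // int kind = 2;  // would shadow the outer 'kind' and vanish at the brace
    kind = 2;         // assigning to the hoisted variable keeps the value
  }
  std::printf("kind = %d\n", kind);  // prints "kind = 2"
}
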
@@ -868,7 +880,7 @@ void InstructionSelector::VisitWord64Sar(Node* node) {
      if (g.CanBeImmediate(offset, kInt16Imm_4ByteAligned)) {
        Emit(kPPC_LoadWordS32 | AddressingModeField::encode(kMode_MRI),
             g.DefineAsRegister(node), g.UseRegister(mleft.base()),
             g.TempImmediate(offset));
             g.TempImmediate(offset), g.UseImmediate(0));
        return;
      }
    }

@@ -1931,122 +1943,16 @@ void InstructionSelector::VisitFloat64InsertHighWord32(Node* node) {
       g.UseRegister(left), g.UseRegister(right));
}

void InstructionSelector::VisitWord32AtomicLoad(Node* node) {
  LoadRepresentation load_rep = LoadRepresentationOf(node->op());
  PPCOperandGenerator g(this);
  Node* base = node->InputAt(0);
  Node* index = node->InputAt(1);
  ArchOpcode opcode = kArchNop;
  switch (load_rep.representation()) {
    case MachineRepresentation::kWord8:
      opcode =
          load_rep.IsSigned() ? kWord32AtomicLoadInt8 : kPPC_AtomicLoadUint8;
      break;
    case MachineRepresentation::kWord16:
      opcode =
          load_rep.IsSigned() ? kWord32AtomicLoadInt16 : kPPC_AtomicLoadUint16;
      break;
    case MachineRepresentation::kWord32:
      opcode = kPPC_AtomicLoadWord32;
      break;
    default:
      UNREACHABLE();
      return;
  }
  Emit(opcode | AddressingModeField::encode(kMode_MRR),
       g.DefineAsRegister(node), g.UseRegister(base), g.UseRegister(index));
}
void InstructionSelector::VisitWord32AtomicLoad(Node* node) { VisitLoad(node); }

void InstructionSelector::VisitWord64AtomicLoad(Node* node) {
  LoadRepresentation load_rep = LoadRepresentationOf(node->op());
  PPCOperandGenerator g(this);
  Node* base = node->InputAt(0);
  Node* index = node->InputAt(1);
  ArchOpcode opcode = kArchNop;
  switch (load_rep.representation()) {
    case MachineRepresentation::kWord8:
      opcode = kPPC_AtomicLoadUint8;
      break;
    case MachineRepresentation::kWord16:
      opcode = kPPC_AtomicLoadUint16;
      break;
    case MachineRepresentation::kWord32:
      opcode = kPPC_AtomicLoadWord32;
      break;
    case MachineRepresentation::kWord64:
      opcode = kPPC_AtomicLoadWord64;
      break;
    default:
      UNREACHABLE();
      return;
  }
  Emit(opcode | AddressingModeField::encode(kMode_MRR),
       g.DefineAsRegister(node), g.UseRegister(base), g.UseRegister(index));
}
void InstructionSelector::VisitWord64AtomicLoad(Node* node) { VisitLoad(node); }

void InstructionSelector::VisitWord32AtomicStore(Node* node) {
  MachineRepresentation rep = AtomicStoreRepresentationOf(node->op());
  PPCOperandGenerator g(this);
  Node* base = node->InputAt(0);
  Node* index = node->InputAt(1);
  Node* value = node->InputAt(2);
  ArchOpcode opcode = kArchNop;
  switch (rep) {
    case MachineRepresentation::kWord8:
      opcode = kPPC_AtomicStoreUint8;
      break;
    case MachineRepresentation::kWord16:
      opcode = kPPC_AtomicStoreUint16;
      break;
    case MachineRepresentation::kWord32:
      opcode = kPPC_AtomicStoreWord32;
      break;
    default:
      UNREACHABLE();
      return;
  }

  InstructionOperand inputs[4];
  size_t input_count = 0;
  inputs[input_count++] = g.UseUniqueRegister(base);
  inputs[input_count++] = g.UseUniqueRegister(index);
  inputs[input_count++] = g.UseUniqueRegister(value);
  Emit(opcode | AddressingModeField::encode(kMode_MRR),
       0, nullptr, input_count, inputs);
  VisitStore(node);
}

void InstructionSelector::VisitWord64AtomicStore(Node* node) {
  MachineRepresentation rep = AtomicStoreRepresentationOf(node->op());
  PPCOperandGenerator g(this);
  Node* base = node->InputAt(0);
  Node* index = node->InputAt(1);
  Node* value = node->InputAt(2);
  ArchOpcode opcode = kArchNop;
  switch (rep) {
    case MachineRepresentation::kWord8:
      opcode = kPPC_AtomicStoreUint8;
      break;
    case MachineRepresentation::kWord16:
      opcode = kPPC_AtomicStoreUint16;
      break;
    case MachineRepresentation::kWord32:
      opcode = kPPC_AtomicStoreWord32;
      break;
    case MachineRepresentation::kWord64:
      opcode = kPPC_AtomicStoreWord64;
      break;
    default:
      UNREACHABLE();
      return;
  }

  InstructionOperand inputs[4];
  size_t input_count = 0;
  inputs[input_count++] = g.UseUniqueRegister(base);
  inputs[input_count++] = g.UseUniqueRegister(index);
  inputs[input_count++] = g.UseUniqueRegister(value);
  Emit(opcode | AddressingModeField::encode(kMode_MRR), 0, nullptr, input_count,
       inputs);
  VisitStore(node);
}

void VisitAtomicExchange(InstructionSelector* selector, Node* node,
@@ -2170,11 +2076,38 @@ void InstructionSelector::VisitWord64AtomicCompareExchange(Node* node) {
}

void VisitAtomicBinaryOperation(InstructionSelector* selector, Node* node,
                                ArchOpcode opcode) {
                                ArchOpcode int8_op, ArchOpcode uint8_op,
                                ArchOpcode int16_op, ArchOpcode uint16_op,
                                ArchOpcode int32_op, ArchOpcode uint32_op,
                                ArchOpcode int64_op, ArchOpcode uint64_op) {
  PPCOperandGenerator g(selector);
  Node* base = node->InputAt(0);
  Node* index = node->InputAt(1);
  Node* value = node->InputAt(2);
  MachineType type = AtomicOpType(node->op());

  ArchOpcode opcode = kArchNop;

  if (type == MachineType::Int8()) {
    opcode = int8_op;
  } else if (type == MachineType::Uint8()) {
    opcode = uint8_op;
  } else if (type == MachineType::Int16()) {
    opcode = int16_op;
  } else if (type == MachineType::Uint16()) {
    opcode = uint16_op;
  } else if (type == MachineType::Int32()) {
    opcode = int32_op;
  } else if (type == MachineType::Uint32()) {
    opcode = uint32_op;
  } else if (type == MachineType::Int64()) {
    opcode = int64_op;
  } else if (type == MachineType::Uint64()) {
    opcode = uint64_op;
  } else {
    UNREACHABLE();
    return;
  }

  AddressingMode addressing_mode = kMode_MRR;
  InstructionCode code = opcode | AddressingModeField::encode(addressing_mode);

@@ -2195,32 +2128,31 @@ void VisitAtomicBinaryOperation(InstructionSelector* selector, Node* node,
void InstructionSelector::VisitWord32AtomicBinaryOperation(
    Node* node, ArchOpcode int8_op, ArchOpcode uint8_op, ArchOpcode int16_op,
    ArchOpcode uint16_op, ArchOpcode word32_op) {
  MachineType type = AtomicOpType(node->op());
  ArchOpcode opcode = kArchNop;

  if (type == MachineType::Int8()) {
    opcode = int8_op;
  } else if (type == MachineType::Uint8()) {
    opcode = uint8_op;
  } else if (type == MachineType::Int16()) {
    opcode = int16_op;
  } else if (type == MachineType::Uint16()) {
    opcode = uint16_op;
  } else if (type == MachineType::Int32() || type == MachineType::Uint32()) {
    opcode = word32_op;
  } else {
    // Unused
    UNREACHABLE();
    return;
  }
  VisitAtomicBinaryOperation(this, node, opcode);

void InstructionSelector::VisitWord64AtomicBinaryOperation(
    Node* node, ArchOpcode uint8_op, ArchOpcode uint16_op, ArchOpcode uint32_op,
    ArchOpcode uint64_op) {
  // Unused
  UNREACHABLE();
}

#define VISIT_ATOMIC_BINOP(op) \
  void InstructionSelector::VisitWord32Atomic##op(Node* node) { \
    VisitWord32AtomicBinaryOperation( \
        node, kWord32Atomic##op##Int8, kPPC_Atomic##op##Uint8, \
        kWord32Atomic##op##Int16, kPPC_Atomic##op##Uint16, \
        kPPC_Atomic##op##Word32); \
    VisitAtomicBinaryOperation( \
        this, node, kPPC_Atomic##op##Int8, kPPC_Atomic##op##Uint8, \
        kPPC_Atomic##op##Int16, kPPC_Atomic##op##Uint16, \
        kPPC_Atomic##op##Int32, kPPC_Atomic##op##Uint32, \
        kPPC_Atomic##op##Int64, kPPC_Atomic##op##Uint64); \
  } \
  void InstructionSelector::VisitWord64Atomic##op(Node* node) { \
    VisitAtomicBinaryOperation( \
        this, node, kPPC_Atomic##op##Int8, kPPC_Atomic##op##Uint8, \
        kPPC_Atomic##op##Int16, kPPC_Atomic##op##Uint16, \
        kPPC_Atomic##op##Int32, kPPC_Atomic##op##Uint32, \
        kPPC_Atomic##op##Int64, kPPC_Atomic##op##Uint64); \
  }
VISIT_ATOMIC_BINOP(Add)
VISIT_ATOMIC_BINOP(Sub)
@@ -2229,39 +2161,6 @@ VISIT_ATOMIC_BINOP(Or)
VISIT_ATOMIC_BINOP(Xor)
#undef VISIT_ATOMIC_BINOP

void InstructionSelector::VisitWord64AtomicBinaryOperation(
    Node* node, ArchOpcode uint8_op, ArchOpcode uint16_op, ArchOpcode uint32_op,
    ArchOpcode uint64_op) {
  MachineType type = AtomicOpType(node->op());
  ArchOpcode opcode = kArchNop;
  if (type == MachineType::Uint8()) {
    opcode = uint8_op;
  } else if (type == MachineType::Uint16()) {
    opcode = uint16_op;
  } else if (type == MachineType::Uint32()) {
    opcode = uint32_op;
  } else if (type == MachineType::Uint64()) {
    opcode = uint64_op;
  } else {
    UNREACHABLE();
    return;
  }
  VisitAtomicBinaryOperation(this, node, opcode);
}

#define VISIT_ATOMIC64_BINOP(op) \
  void InstructionSelector::VisitWord64Atomic##op(Node* node) { \
    VisitWord64AtomicBinaryOperation( \
        node, kPPC_Atomic##op##Uint8, kPPC_Atomic##op##Uint16, \
        kPPC_Atomic##op##Word32, kPPC_Atomic##op##Word64); \
  }
VISIT_ATOMIC64_BINOP(Add)
VISIT_ATOMIC64_BINOP(Sub)
VISIT_ATOMIC64_BINOP(And)
VISIT_ATOMIC64_BINOP(Or)
VISIT_ATOMIC64_BINOP(Xor)
#undef VISIT_ATOMIC64_BINOP

void InstructionSelector::VisitInt32AbsWithOverflow(Node* node) {
  UNREACHABLE();
}

@@ -2251,6 +2251,7 @@ void Simulator::ExecuteGeneric(Instruction* instr) {
#endif
    case SYNC: {
      // todo - simulate sync
      __sync_synchronize();
      break;
    }
    case ICBI: {

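The simulator change models the PPC sync instruction with the GCC/Clang builtin __sync_synchronize(), a full memory barrier on the host. The standard C++ equivalent would be a sequentially consistent fence (sketch only; the helper name here is invented):

#include <atomic>

// Host-side stand-in for the simulated PPC "sync" instruction.
inline void SimulateSync() {
  std::atomic_thread_fence(std::memory_order_seq_cst);
}
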