Add {lda,stl}x?r{,b,h} instructions to ARM64 assembler/disassembler
They are not currently implemented by the ARM64 simulator. R=jarin@chromium.org, bmeurer@chromium.org Review-Url: https://codereview.chromium.org/1990073002 Cr-Commit-Position: refs/heads/master@{#36385}
This commit is contained in:
parent
31ac67ee61
commit
d208cdd7f1
@ -1716,6 +1716,83 @@ void Assembler::ldr(const CPURegister& rt, const Immediate& imm) {
|
||||
ldr_pcrel(rt, 0);
|
||||
}
|
||||
|
||||
// Load-acquire a word (W register) or doubleword (X register) from [rn].
void Assembler::ldar(const Register& rt, const Register& rn) {
  // The base address must live in a 64-bit register.
  DCHECK(rn.Is64Bits());
  // Pick the W- or X-sized encoding from the destination register width.
  LoadStoreAcquireReleaseOp op = rt.Is64Bits() ? LDAR_x : LDAR_w;
  // Rs and Rt2 are unused by LDAR and are encoded as 0b11111 (x31).
  Emit(op | Rt(rt) | Rn(rn) | Rt2(x31) | Rs(x31));
}
|
||||
|
||||
// Load-acquire exclusive a word or doubleword from [rn] into rt.
void Assembler::ldaxr(const Register& rt, const Register& rn) {
  // The base address must live in a 64-bit register.
  DCHECK(rn.Is64Bits());
  // Pick the W- or X-sized encoding from the destination register width.
  LoadStoreAcquireReleaseOp op = rt.Is64Bits() ? LDAXR_x : LDAXR_w;
  // Rs and Rt2 are unused by LDAXR and are encoded as 0b11111 (x31).
  Emit(op | Rt(rt) | Rn(rn) | Rt2(x31) | Rs(x31));
}
|
||||
|
||||
// Store-release a word or doubleword from rt to [rn].
void Assembler::stlr(const Register& rt, const Register& rn) {
  // The base address must live in a 64-bit register.
  DCHECK(rn.Is64Bits());
  // Pick the W- or X-sized encoding from the source register width.
  LoadStoreAcquireReleaseOp op = rt.Is64Bits() ? STLR_x : STLR_w;
  // Rs and Rt2 are unused by STLR and are encoded as 0b11111 (x31).
  Emit(op | Rt(rt) | Rn(rn) | Rt2(x31) | Rs(x31));
}
|
||||
|
||||
void Assembler::stlxr(const Register& rs, const Register& rt,
|
||||
const Register& rn) {
|
||||
DCHECK(rs.Is32Bits());
|
||||
DCHECK(rn.Is64Bits());
|
||||
LoadStoreAcquireReleaseOp op = rt.Is32Bits() ? STLXR_w : STLXR_x;
|
||||
Emit(op | Rs(rs) | Rt2(x31) | Rn(rn) | Rt(rt));
|
||||
}
|
||||
|
||||
// Load-acquire a byte from [rn], zero-extended into the W register rt.
void Assembler::ldarb(const Register& rt, const Register& rn) {
  // Byte loads always target a W register; base address is an X register.
  DCHECK(rt.Is32Bits());
  DCHECK(rn.Is64Bits());
  // Rs and Rt2 are unused and are encoded as 0b11111 (x31).
  Emit(LDAR_b | Rt(rt) | Rn(rn) | Rt2(x31) | Rs(x31));
}
|
||||
|
||||
// Load-acquire exclusive a byte from [rn], zero-extended into rt.
void Assembler::ldaxrb(const Register& rt, const Register& rn) {
  // Byte loads always target a W register; base address is an X register.
  DCHECK(rt.Is32Bits());
  DCHECK(rn.Is64Bits());
  // Rs and Rt2 are unused and are encoded as 0b11111 (x31).
  Emit(LDAXR_b | Rt(rt) | Rn(rn) | Rt2(x31) | Rs(x31));
}
|
||||
|
||||
// Store-release the low byte of rt to [rn].
void Assembler::stlrb(const Register& rt, const Register& rn) {
  // Byte stores always come from a W register; base address is an X register.
  DCHECK(rt.Is32Bits());
  DCHECK(rn.Is64Bits());
  // Rs and Rt2 are unused and are encoded as 0b11111 (x31).
  Emit(STLR_b | Rt(rt) | Rn(rn) | Rt2(x31) | Rs(x31));
}
|
||||
|
||||
void Assembler::stlxrb(const Register& rs, const Register& rt,
|
||||
const Register& rn) {
|
||||
DCHECK(rs.Is32Bits());
|
||||
DCHECK(rt.Is32Bits());
|
||||
DCHECK(rn.Is64Bits());
|
||||
Emit(STLXR_b | Rs(rs) | Rt2(x31) | Rn(rn) | Rt(rt));
|
||||
}
|
||||
|
||||
// Load-acquire a half-word from [rn], zero-extended into rt.
void Assembler::ldarh(const Register& rt, const Register& rn) {
  // Half-word loads always target a W register; base is an X register.
  DCHECK(rt.Is32Bits());
  DCHECK(rn.Is64Bits());
  // Rs and Rt2 are unused and are encoded as 0b11111 (x31).
  Emit(LDAR_h | Rt(rt) | Rn(rn) | Rt2(x31) | Rs(x31));
}
|
||||
|
||||
// Load-acquire exclusive a half-word from [rn], zero-extended into rt.
void Assembler::ldaxrh(const Register& rt, const Register& rn) {
  // Half-word loads always target a W register; base is an X register.
  DCHECK(rt.Is32Bits());
  DCHECK(rn.Is64Bits());
  // Rs and Rt2 are unused and are encoded as 0b11111 (x31).
  Emit(LDAXR_h | Rt(rt) | Rn(rn) | Rt2(x31) | Rs(x31));
}
|
||||
|
||||
// Store-release the low half-word of rt to [rn].
void Assembler::stlrh(const Register& rt, const Register& rn) {
  // Half-word stores always come from a W register; base is an X register.
  DCHECK(rt.Is32Bits());
  DCHECK(rn.Is64Bits());
  // Rs and Rt2 are unused and are encoded as 0b11111 (x31).
  Emit(STLR_h | Rt(rt) | Rn(rn) | Rt2(x31) | Rs(x31));
}
|
||||
|
||||
void Assembler::stlxrh(const Register& rs, const Register& rt,
|
||||
const Register& rn) {
|
||||
DCHECK(rs.Is32Bits());
|
||||
DCHECK(rt.Is32Bits());
|
||||
DCHECK(rn.Is64Bits());
|
||||
Emit(STLXR_h | Rs(rs) | Rt2(x31) | Rn(rn) | Rt(rt));
|
||||
}
|
||||
|
||||
void Assembler::mov(const Register& rd, const Register& rm) {
|
||||
// Moves involving the stack pointer are encoded as add immediate with
|
||||
|
@ -1401,6 +1401,42 @@ class Assembler : public AssemblerBase {
|
||||
// Load literal to register.
|
||||
void ldr(const CPURegister& rt, const Immediate& imm);
|
||||
|
||||
// Load-acquire word.
|
||||
void ldar(const Register& rt, const Register& rn);
|
||||
|
||||
// Load-acquire exclusive word.
|
||||
void ldaxr(const Register& rt, const Register& rn);
|
||||
|
||||
// Store-release word.
|
||||
void stlr(const Register& rt, const Register& rn);
|
||||
|
||||
// Store-release exclusive word.
|
||||
void stlxr(const Register& rs, const Register& rt, const Register& rn);
|
||||
|
||||
// Load-acquire byte.
|
||||
void ldarb(const Register& rt, const Register& rn);
|
||||
|
||||
// Load-acquire exclusive byte.
|
||||
void ldaxrb(const Register& rt, const Register& rn);
|
||||
|
||||
// Store-release byte.
|
||||
void stlrb(const Register& rt, const Register& rn);
|
||||
|
||||
// Store-release exclusive byte.
|
||||
void stlxrb(const Register& rs, const Register& rt, const Register& rn);
|
||||
|
||||
// Load-acquire half-word.
|
||||
void ldarh(const Register& rt, const Register& rn);
|
||||
|
||||
// Load-acquire exclusive half-word.
|
||||
void ldaxrh(const Register& rt, const Register& rn);
|
||||
|
||||
// Store-release half-word.
|
||||
void stlrh(const Register& rt, const Register& rn);
|
||||
|
||||
// Store-release exclusive half-word.
|
||||
void stlxrh(const Register& rs, const Register& rt, const Register& rn);
|
||||
|
||||
// Move instructions. The default shift of -1 indicates that the move
|
||||
// instruction will calculate an appropriate 16-bit immediate and left shift
|
||||
// that is equal to the 64-bit immediate argument. If an explicit left shift
|
||||
@ -1695,6 +1731,11 @@ class Assembler : public AssemblerBase {
|
||||
return rt2.code() << Rt2_offset;
|
||||
}
|
||||
|
||||
static Instr Rs(CPURegister rs) {
|
||||
DCHECK(rs.code() != kSPRegInternalCode);
|
||||
return rs.code() << Rs_offset;
|
||||
}
|
||||
|
||||
// These encoding functions allow the stack pointer to be encoded, and
|
||||
// disallow the zero register.
|
||||
static Instr RdSP(Register rd) {
|
||||
|
@ -118,88 +118,88 @@ const unsigned kFloatMantissaBits = 23;
|
||||
const unsigned kFloatExponentBits = 8;
|
||||
|
||||
#define INSTRUCTION_FIELDS_LIST(V_) \
|
||||
/* Register fields */ \
|
||||
V_(Rd, 4, 0, Bits) /* Destination register. */ \
|
||||
V_(Rn, 9, 5, Bits) /* First source register. */ \
|
||||
V_(Rm, 20, 16, Bits) /* Second source register. */ \
|
||||
V_(Ra, 14, 10, Bits) /* Third source register. */ \
|
||||
V_(Rt, 4, 0, Bits) /* Load dest / store source. */ \
|
||||
V_(Rt2, 14, 10, Bits) /* Load second dest / */ \
|
||||
/* Register fields */ \
|
||||
V_(Rd, 4, 0, Bits) /* Destination register. */ \
|
||||
V_(Rn, 9, 5, Bits) /* First source register. */ \
|
||||
V_(Rm, 20, 16, Bits) /* Second source register. */ \
|
||||
V_(Ra, 14, 10, Bits) /* Third source register. */ \
|
||||
V_(Rt, 4, 0, Bits) /* Load dest / store source. */ \
|
||||
V_(Rt2, 14, 10, Bits) /* Load second dest / */ \
|
||||
/* store second source. */ \
|
||||
V_(PrefetchMode, 4, 0, Bits) \
|
||||
V_(Rs, 20, 16, Bits) /* Store-exclusive status */ \
|
||||
V_(PrefetchMode, 4, 0, Bits) \
|
||||
\
|
||||
/* Common bits */ \
|
||||
V_(SixtyFourBits, 31, 31, Bits) \
|
||||
V_(FlagsUpdate, 29, 29, Bits) \
|
||||
/* Common bits */ \
|
||||
V_(SixtyFourBits, 31, 31, Bits) \
|
||||
V_(FlagsUpdate, 29, 29, Bits) \
|
||||
\
|
||||
/* PC relative addressing */ \
|
||||
V_(ImmPCRelHi, 23, 5, SignedBits) \
|
||||
V_(ImmPCRelLo, 30, 29, Bits) \
|
||||
/* PC relative addressing */ \
|
||||
V_(ImmPCRelHi, 23, 5, SignedBits) \
|
||||
V_(ImmPCRelLo, 30, 29, Bits) \
|
||||
\
|
||||
/* Add/subtract/logical shift register */ \
|
||||
V_(ShiftDP, 23, 22, Bits) \
|
||||
V_(ImmDPShift, 15, 10, Bits) \
|
||||
/* Add/subtract/logical shift register */ \
|
||||
V_(ShiftDP, 23, 22, Bits) \
|
||||
V_(ImmDPShift, 15, 10, Bits) \
|
||||
\
|
||||
/* Add/subtract immediate */ \
|
||||
V_(ImmAddSub, 21, 10, Bits) \
|
||||
V_(ShiftAddSub, 23, 22, Bits) \
|
||||
/* Add/subtract immediate */ \
|
||||
V_(ImmAddSub, 21, 10, Bits) \
|
||||
V_(ShiftAddSub, 23, 22, Bits) \
|
||||
\
|
||||
/* Add/substract extend */ \
|
||||
V_(ImmExtendShift, 12, 10, Bits) \
|
||||
V_(ExtendMode, 15, 13, Bits) \
|
||||
/* Add/substract extend */ \
|
||||
V_(ImmExtendShift, 12, 10, Bits) \
|
||||
V_(ExtendMode, 15, 13, Bits) \
|
||||
\
|
||||
/* Move wide */ \
|
||||
V_(ImmMoveWide, 20, 5, Bits) \
|
||||
V_(ShiftMoveWide, 22, 21, Bits) \
|
||||
/* Move wide */ \
|
||||
V_(ImmMoveWide, 20, 5, Bits) \
|
||||
V_(ShiftMoveWide, 22, 21, Bits) \
|
||||
\
|
||||
/* Logical immediate, bitfield and extract */ \
|
||||
V_(BitN, 22, 22, Bits) \
|
||||
V_(ImmRotate, 21, 16, Bits) \
|
||||
V_(ImmSetBits, 15, 10, Bits) \
|
||||
V_(ImmR, 21, 16, Bits) \
|
||||
V_(ImmS, 15, 10, Bits) \
|
||||
/* Logical immediate, bitfield and extract */ \
|
||||
V_(BitN, 22, 22, Bits) \
|
||||
V_(ImmRotate, 21, 16, Bits) \
|
||||
V_(ImmSetBits, 15, 10, Bits) \
|
||||
V_(ImmR, 21, 16, Bits) \
|
||||
V_(ImmS, 15, 10, Bits) \
|
||||
\
|
||||
/* Test and branch immediate */ \
|
||||
V_(ImmTestBranch, 18, 5, SignedBits) \
|
||||
V_(ImmTestBranchBit40, 23, 19, Bits) \
|
||||
V_(ImmTestBranchBit5, 31, 31, Bits) \
|
||||
/* Test and branch immediate */ \
|
||||
V_(ImmTestBranch, 18, 5, SignedBits) \
|
||||
V_(ImmTestBranchBit40, 23, 19, Bits) \
|
||||
V_(ImmTestBranchBit5, 31, 31, Bits) \
|
||||
\
|
||||
/* Conditionals */ \
|
||||
V_(Condition, 15, 12, Bits) \
|
||||
V_(ConditionBranch, 3, 0, Bits) \
|
||||
V_(Nzcv, 3, 0, Bits) \
|
||||
V_(ImmCondCmp, 20, 16, Bits) \
|
||||
V_(ImmCondBranch, 23, 5, SignedBits) \
|
||||
/* Conditionals */ \
|
||||
V_(Condition, 15, 12, Bits) \
|
||||
V_(ConditionBranch, 3, 0, Bits) \
|
||||
V_(Nzcv, 3, 0, Bits) \
|
||||
V_(ImmCondCmp, 20, 16, Bits) \
|
||||
V_(ImmCondBranch, 23, 5, SignedBits) \
|
||||
\
|
||||
/* Floating point */ \
|
||||
V_(FPType, 23, 22, Bits) \
|
||||
V_(ImmFP, 20, 13, Bits) \
|
||||
V_(FPScale, 15, 10, Bits) \
|
||||
/* Floating point */ \
|
||||
V_(FPType, 23, 22, Bits) \
|
||||
V_(ImmFP, 20, 13, Bits) \
|
||||
V_(FPScale, 15, 10, Bits) \
|
||||
\
|
||||
/* Load Store */ \
|
||||
V_(ImmLS, 20, 12, SignedBits) \
|
||||
V_(ImmLSUnsigned, 21, 10, Bits) \
|
||||
V_(ImmLSPair, 21, 15, SignedBits) \
|
||||
V_(SizeLS, 31, 30, Bits) \
|
||||
V_(ImmShiftLS, 12, 12, Bits) \
|
||||
/* Load Store */ \
|
||||
V_(ImmLS, 20, 12, SignedBits) \
|
||||
V_(ImmLSUnsigned, 21, 10, Bits) \
|
||||
V_(ImmLSPair, 21, 15, SignedBits) \
|
||||
V_(SizeLS, 31, 30, Bits) \
|
||||
V_(ImmShiftLS, 12, 12, Bits) \
|
||||
\
|
||||
/* Other immediates */ \
|
||||
V_(ImmUncondBranch, 25, 0, SignedBits) \
|
||||
V_(ImmCmpBranch, 23, 5, SignedBits) \
|
||||
V_(ImmLLiteral, 23, 5, SignedBits) \
|
||||
V_(ImmException, 20, 5, Bits) \
|
||||
V_(ImmHint, 11, 5, Bits) \
|
||||
V_(ImmBarrierDomain, 11, 10, Bits) \
|
||||
V_(ImmBarrierType, 9, 8, Bits) \
|
||||
/* Other immediates */ \
|
||||
V_(ImmUncondBranch, 25, 0, SignedBits) \
|
||||
V_(ImmCmpBranch, 23, 5, SignedBits) \
|
||||
V_(ImmLLiteral, 23, 5, SignedBits) \
|
||||
V_(ImmException, 20, 5, Bits) \
|
||||
V_(ImmHint, 11, 5, Bits) \
|
||||
V_(ImmBarrierDomain, 11, 10, Bits) \
|
||||
V_(ImmBarrierType, 9, 8, Bits) \
|
||||
\
|
||||
/* System (MRS, MSR) */ \
|
||||
V_(ImmSystemRegister, 19, 5, Bits) \
|
||||
V_(SysO0, 19, 19, Bits) \
|
||||
V_(SysOp1, 18, 16, Bits) \
|
||||
V_(SysOp2, 7, 5, Bits) \
|
||||
V_(CRn, 15, 12, Bits) \
|
||||
V_(CRm, 11, 8, Bits) \
|
||||
|
||||
/* System (MRS, MSR) */ \
|
||||
V_(ImmSystemRegister, 19, 5, Bits) \
|
||||
V_(SysO0, 19, 19, Bits) \
|
||||
V_(SysOp1, 18, 16, Bits) \
|
||||
V_(SysOp2, 7, 5, Bits) \
|
||||
V_(CRn, 15, 12, Bits) \
|
||||
V_(CRm, 11, 8, Bits)
|
||||
|
||||
#define SYSTEM_REGISTER_FIELDS_LIST(V_, M_) \
|
||||
/* NZCV */ \
|
||||
@ -857,6 +857,29 @@ enum LoadStoreRegisterOffset {
|
||||
#undef LOAD_STORE_REGISTER_OFFSET
|
||||
};
|
||||
|
||||
// Load/store acquire/release.
// Within this group, bits 31:30 hold the access size (00 byte, 01 half-word,
// 10 word, 11 doubleword), bit 23 distinguishes ordered (LDAR/STLR) from
// exclusive (LDAXR/STLXR) forms, and bit 22 is the load/store direction.
enum LoadStoreAcquireReleaseOp {
  LoadStoreAcquireReleaseFixed = 0x08000000,
  LoadStoreAcquireReleaseFMask = 0x3F000000,
  LoadStoreAcquireReleaseMask = 0xCFC08000,
  STLXR_b = LoadStoreAcquireReleaseFixed | 0x00008000,
  LDAXR_b = LoadStoreAcquireReleaseFixed | 0x00408000,
  STLR_b = LoadStoreAcquireReleaseFixed | 0x00808000,
  LDAR_b = LoadStoreAcquireReleaseFixed | 0x00C08000,
  STLXR_h = LoadStoreAcquireReleaseFixed | 0x40008000,
  LDAXR_h = LoadStoreAcquireReleaseFixed | 0x40408000,
  STLR_h = LoadStoreAcquireReleaseFixed | 0x40808000,
  LDAR_h = LoadStoreAcquireReleaseFixed | 0x40C08000,
  STLXR_w = LoadStoreAcquireReleaseFixed | 0x80008000,
  LDAXR_w = LoadStoreAcquireReleaseFixed | 0x80408000,
  STLR_w = LoadStoreAcquireReleaseFixed | 0x80808000,
  LDAR_w = LoadStoreAcquireReleaseFixed | 0x80C08000,
  STLXR_x = LoadStoreAcquireReleaseFixed | 0xC0008000,
  LDAXR_x = LoadStoreAcquireReleaseFixed | 0xC0408000,
  STLR_x = LoadStoreAcquireReleaseFixed | 0xC0808000,
  LDAR_x = LoadStoreAcquireReleaseFixed | 0xC0C08000,
};
|
||||
|
||||
// Conditional compare.
|
||||
enum ConditionalCompareOp {
|
||||
ConditionalCompareMask = 0x60000000,
|
||||
|
@ -217,8 +217,15 @@ void Decoder<V>::DecodeLoadStore(Instruction* instr) {
|
||||
if (instr->Bit(28) == 0) {
|
||||
if (instr->Bit(29) == 0) {
|
||||
if (instr->Bit(26) == 0) {
|
||||
// TODO(all): VisitLoadStoreExclusive.
|
||||
if (instr->Mask(0xA08000) == 0x800000 ||
|
||||
instr->Mask(0xA00000) == 0xA00000) {
|
||||
V::VisitUnallocated(instr);
|
||||
} else if (instr->Mask(0x808000) == 0) {
|
||||
// Load/Store exclusive without acquire/release are unimplemented.
|
||||
V::VisitUnimplemented(instr);
|
||||
} else {
|
||||
V::VisitLoadStoreAcquireRelease(instr);
|
||||
}
|
||||
} else {
|
||||
DecodeAdvSIMDLoadStore(instr);
|
||||
}
|
||||
|
@ -39,6 +39,7 @@ namespace internal {
|
||||
V(LoadStorePreIndex) \
|
||||
V(LoadStoreRegisterOffset) \
|
||||
V(LoadStoreUnsignedOffset) \
|
||||
V(LoadStoreAcquireRelease) \
|
||||
V(LogicalShifted) \
|
||||
V(AddSubShifted) \
|
||||
V(AddSubExtended) \
|
||||
|
@ -914,6 +914,34 @@ void DisassemblingDecoder::VisitLoadStorePairOffset(Instruction* instr) {
|
||||
Format(instr, mnemonic, form);
|
||||
}
|
||||
|
||||
// Disassemble load-acquire/store-release instructions. The stlx* forms print
// an extra status register; *_x forms print an X-sized data register.
void DisassemblingDecoder::VisitLoadStoreAcquireRelease(Instruction *instr) {
  const char *mnemonic = "unimplemented";
  // Operand templates, selected per encoding below.
  const char *form = "'Wt, ['Xn]";
  const char *form_x = "'Xt, ['Xn]";
  const char *form_stlx = "'Ws, 'Wt, ['Xn]";
  const char *form_stlx_x = "'Ws, 'Xt, ['Xn]";

  switch (instr->Mask(LoadStoreAcquireReleaseMask)) {
    // Byte and half-word forms: W data register.
    case LDAXR_b: mnemonic = "ldaxrb"; break;
    case STLR_b: mnemonic = "stlrb"; break;
    case LDAR_b: mnemonic = "ldarb"; break;
    case LDAXR_h: mnemonic = "ldaxrh"; break;
    case STLR_h: mnemonic = "stlrh"; break;
    case LDAR_h: mnemonic = "ldarh"; break;
    // Word forms: W data register.
    case LDAXR_w: mnemonic = "ldaxr"; break;
    case STLR_w: mnemonic = "stlr"; break;
    case LDAR_w: mnemonic = "ldar"; break;
    // Doubleword forms: X data register.
    case LDAXR_x: mnemonic = "ldaxr"; form = form_x; break;
    case STLR_x: mnemonic = "stlr"; form = form_x; break;
    case LDAR_x: mnemonic = "ldar"; form = form_x; break;
    // Store-exclusive forms: status register first.
    case STLXR_h: mnemonic = "stlxrh"; form = form_stlx; break;
    case STLXR_b: mnemonic = "stlxrb"; form = form_stlx; break;
    case STLXR_w: mnemonic = "stlxr"; form = form_stlx; break;
    case STLXR_x: mnemonic = "stlxr"; form = form_stlx_x; break;
    default: form = "(LoadStoreAcquireReleaseMask)";
  }
  Format(instr, mnemonic, form);
}
|
||||
|
||||
void DisassemblingDecoder::VisitFPCompare(Instruction* instr) {
|
||||
const char *mnemonic = "unimplemented";
|
||||
@ -1295,6 +1323,9 @@ int DisassemblingDecoder::SubstituteRegisterField(Instruction* instr,
|
||||
}
|
||||
break;
|
||||
}
|
||||
case 's':
|
||||
reg_num = instr->Rs();
|
||||
break;
|
||||
default: UNREACHABLE();
|
||||
}
|
||||
|
||||
|
@ -429,6 +429,31 @@ void Instrument::VisitLoadStoreUnsignedOffset(Instruction* instr) {
|
||||
InstrumentLoadStore(instr);
|
||||
}
|
||||
|
||||
// Count acquire/release accesses: all LDAR*/LDAXR* forms bump the load
// counter; all STLR*/STLXR* forms bump the store counter.
void Instrument::VisitLoadStoreAcquireRelease(Instruction* instr) {
  Update();
  static Counter* load_counter = GetCounter("Load Acquire");
  static Counter* store_counter = GetCounter("Store Release");

  switch (instr->Mask(LoadStoreAcquireReleaseMask)) {
    case LDAR_b:   // Fall-through.
    case LDAR_h:   // Fall-through.
    case LDAR_w:   // Fall-through.
    case LDAR_x:   // Fall-through.
    case LDAXR_b:  // Fall-through.
    case LDAXR_h:  // Fall-through.
    case LDAXR_w:  // Fall-through.
    case LDAXR_x:
      load_counter->Increment();
      break;
    case STLR_b:   // Fall-through.
    case STLR_h:   // Fall-through.
    case STLR_w:   // Fall-through.
    case STLR_x:   // Fall-through.
    case STLXR_b:  // Fall-through.
    case STLXR_h:  // Fall-through.
    case STLXR_w:  // Fall-through.
    case STLXR_x:
      store_counter->Increment();
      break;
    default:
      UNREACHABLE();
  }
}
|
||||
|
||||
void Instrument::VisitLogicalShifted(Instruction* instr) {
|
||||
Update();
|
||||
|
@ -309,6 +309,22 @@ LS_MACRO_LIST(DEFINE_FUNCTION)
|
||||
LSPAIR_MACRO_LIST(DEFINE_FUNCTION)
|
||||
#undef DEFINE_FUNCTION
|
||||
|
||||
#define DECLARE_FUNCTION(FN, OP) \
|
||||
void MacroAssembler::FN(const Register& rt, const Register& rn) { \
|
||||
DCHECK(allow_macro_instructions_); \
|
||||
OP(rt, rn); \
|
||||
}
|
||||
LDA_STL_MACRO_LIST(DECLARE_FUNCTION)
|
||||
#undef DECLARE_FUNCTION
|
||||
|
||||
#define DECLARE_FUNCTION(FN, OP) \
|
||||
void MacroAssembler::FN(const Register& rs, const Register& rt, \
|
||||
const Register& rn) { \
|
||||
DCHECK(allow_macro_instructions_); \
|
||||
OP(rs, rt, rn); \
|
||||
}
|
||||
STLX_MACRO_LIST(DECLARE_FUNCTION)
|
||||
#undef DECLARE_FUNCTION
|
||||
|
||||
void MacroAssembler::Asr(const Register& rd,
|
||||
const Register& rn,
|
||||
|
@ -68,6 +68,21 @@ namespace internal {
|
||||
V(Stp, CPURegister&, rt, rt2, StorePairOpFor(rt, rt2)) \
|
||||
V(Ldpsw, CPURegister&, rt, rt2, LDPSW_x)
|
||||
|
||||
// Two-operand acquire/release instructions: V(MacroName, assembler_name).
#define LDA_STL_MACRO_LIST(V) \
  V(Ldarb, ldarb)             \
  V(Ldarh, ldarh)             \
  V(Ldar, ldar)               \
  V(Ldaxrb, ldaxrb)           \
  V(Ldaxrh, ldaxrh)           \
  V(Ldaxr, ldaxr)             \
  V(Stlrb, stlrb)             \
  V(Stlrh, stlrh)             \
  V(Stlr, stlr)

// Three-operand store-release-exclusive instructions (status register first).
#define STLX_MACRO_LIST(V) \
  V(Stlxrb, stlxrb)        \
  V(Stlxrh, stlxrh)        \
  V(Stlxr, stlxr)
|
||||
|
||||
// ----------------------------------------------------------------------------
|
||||
// Static helper functions
|
||||
@ -295,6 +310,17 @@ class MacroAssembler : public Assembler {
|
||||
void LoadStorePairMacro(const CPURegister& rt, const CPURegister& rt2,
|
||||
const MemOperand& addr, LoadStorePairOp op);
|
||||
|
||||
// Load-acquire/store-release macros.
|
||||
#define DECLARE_FUNCTION(FN, OP) \
|
||||
inline void FN(const Register& rt, const Register& rn);
|
||||
LDA_STL_MACRO_LIST(DECLARE_FUNCTION)
|
||||
#undef DECLARE_FUNCTION
|
||||
|
||||
#define DECLARE_FUNCTION(FN, OP) \
|
||||
inline void FN(const Register& rs, const Register& rt, const Register& rn);
|
||||
STLX_MACRO_LIST(DECLARE_FUNCTION)
|
||||
#undef DECLARE_FUNCTION
|
||||
|
||||
// V8-specific load/store helpers.
|
||||
void Load(const Register& rt, const MemOperand& addr, Representation r);
|
||||
void Store(const Register& rt, const MemOperand& addr, Representation r);
|
||||
|
@ -1900,6 +1900,9 @@ void Simulator::LoadStoreWriteBack(unsigned addr_reg,
|
||||
}
|
||||
}
|
||||
|
||||
// Acquire/release instructions are decoded but not yet simulated; this
// visitor is intentionally a no-op for now.
void Simulator::VisitLoadStoreAcquireRelease(Instruction* instr) {
  // TODO(binji): Implement acquire/release semantics in the simulator.
}
|
||||
|
||||
void Simulator::CheckMemoryAccess(uintptr_t address, uintptr_t stack) {
|
||||
if ((address >= stack_limit_) && (address < stack)) {
|
||||
|
@ -1259,6 +1259,24 @@ TEST_(load_store_pair) {
|
||||
CLEANUP();
|
||||
}
|
||||
|
||||
// Verify the disassembly of every acquire/release form, grouped by
// instruction rather than by size.
TEST_(load_store_acquire_release) {
  SET_UP_MASM();

  // Plain load-acquire.
  COMPARE(ldar(w0, x1), "ldar w0, [x1]");
  COMPARE(ldarb(w2, x3), "ldarb w2, [x3]");
  COMPARE(ldarh(w4, x5), "ldarh w4, [x5]");
  // Load-acquire exclusive.
  COMPARE(ldaxr(w6, x7), "ldaxr w6, [x7]");
  COMPARE(ldaxrb(w8, x9), "ldaxrb w8, [x9]");
  COMPARE(ldaxrh(w10, x11), "ldaxrh w10, [x11]");
  // Plain store-release.
  COMPARE(stlr(w12, x13), "stlr w12, [x13]");
  COMPARE(stlrb(w14, x15), "stlrb w14, [x15]");
  COMPARE(stlrh(w16, x17), "stlrh w16, [x17]");
  // Store-release exclusive (status register first).
  COMPARE(stlxr(w18, w19, x20), "stlxr w18, w19, [x20]");
  COMPARE(stlxrb(w21, w22, x23), "stlxrb w21, w22, [x23]");
  COMPARE(stlxrh(w24, w25, x26), "stlxrh w24, w25, [x26]");

  CLEANUP();
}
|
||||
|
||||
#if 0 // TODO(all): enable.
|
||||
TEST_(load_literal) {
|
||||
|
Loading…
Reference in New Issue
Block a user