[arm] Cleanup addrmod1 encoding and Operand class

This cleanup is the result of trying to modify the `Assembler::addrmod1` method
and realising it's very easy to break it. It handles three groups of
instructions with different operands and uses `r0` when a register is not used:

- General case:            rd, rn, (rm|rm shift #imm|rm shift rs)
- Comparison instructions:     rn, (rm|rm shift #imm|rm shift rs)
- Move instructions:       rd,     (rm|rm shift #imm|rm shift rs)

Let's use `no_reg` instead of `r0` with explicit checks and assertions so that
it's clear this method is used with multiple types of instructions.
Additionally, keep the order of operands as "rd", "rn", "rm".

As drive-by fixes, I've taken the opportunity to add a few helper methods to the
`Operand` class.

Bug: 
Change-Id: If8140d804bc90dea1d3c186b3cee54297f91462a
Reviewed-on: https://chromium-review.googlesource.com/531284
Commit-Queue: Georg Neis <neis@chromium.org>
Reviewed-by: Jaroslav Sevcik <jarin@chromium.org>
Cr-Commit-Position: refs/heads/master@{#45949}
This commit is contained in:
Pierre Langlois 2017-06-12 15:50:02 +01:00 committed by Commit Bot
parent 51a6789bed
commit 30a29fa26d
4 changed files with 181 additions and 143 deletions

View File

@ -1045,15 +1045,14 @@ void Assembler::next(Label* L) {
} }
} }
namespace {
// Low-level code emission routines depending on the addressing mode. // Low-level code emission routines depending on the addressing mode.
// If this returns true then you have to use the rotate_imm and immed_8 // If this returns true then you have to use the rotate_imm and immed_8
// that it returns, because it may have already changed the instruction // that it returns, because it may have already changed the instruction
// to match them! // to match them!
static bool fits_shifter(uint32_t imm32, bool FitsShifter(uint32_t imm32, uint32_t* rotate_imm, uint32_t* immed_8,
uint32_t* rotate_imm, Instr* instr) {
uint32_t* immed_8,
Instr* instr) {
// imm32 must be unsigned. // imm32 must be unsigned.
for (int rot = 0; rot < 16; rot++) { for (int rot = 0; rot < 16; rot++) {
uint32_t imm8 = base::bits::RotateLeft32(imm32, 2 * rot); uint32_t imm8 = base::bits::RotateLeft32(imm32, 2 * rot);
@ -1067,7 +1066,7 @@ static bool fits_shifter(uint32_t imm32,
// immediate fits, change the opcode. // immediate fits, change the opcode.
if (instr != NULL) { if (instr != NULL) {
if ((*instr & kMovMvnMask) == kMovMvnPattern) { if ((*instr & kMovMvnMask) == kMovMvnPattern) {
if (fits_shifter(~imm32, rotate_imm, immed_8, NULL)) { if (FitsShifter(~imm32, rotate_imm, immed_8, NULL)) {
*instr ^= kMovMvnFlip; *instr ^= kMovMvnFlip;
return true; return true;
} else if ((*instr & kMovLeaveCCMask) == kMovLeaveCCPattern) { } else if ((*instr & kMovLeaveCCMask) == kMovLeaveCCPattern) {
@ -1081,7 +1080,7 @@ static bool fits_shifter(uint32_t imm32,
} }
} }
} else if ((*instr & kCmpCmnMask) == kCmpCmnPattern) { } else if ((*instr & kCmpCmnMask) == kCmpCmnPattern) {
if (fits_shifter(-static_cast<int>(imm32), rotate_imm, immed_8, NULL)) { if (FitsShifter(-static_cast<int>(imm32), rotate_imm, immed_8, NULL)) {
*instr ^= kCmpCmnFlip; *instr ^= kCmpCmnFlip;
return true; return true;
} }
@ -1089,13 +1088,13 @@ static bool fits_shifter(uint32_t imm32,
Instr alu_insn = (*instr & kALUMask); Instr alu_insn = (*instr & kALUMask);
if (alu_insn == ADD || if (alu_insn == ADD ||
alu_insn == SUB) { alu_insn == SUB) {
if (fits_shifter(-static_cast<int>(imm32), rotate_imm, immed_8, NULL)) { if (FitsShifter(-static_cast<int>(imm32), rotate_imm, immed_8, NULL)) {
*instr ^= kAddSubFlip; *instr ^= kAddSubFlip;
return true; return true;
} }
} else if (alu_insn == AND || } else if (alu_insn == AND ||
alu_insn == BIC) { alu_insn == BIC) {
if (fits_shifter(~imm32, rotate_imm, immed_8, NULL)) { if (FitsShifter(~imm32, rotate_imm, immed_8, NULL)) {
*instr ^= kAndBicFlip; *instr ^= kAndBicFlip;
return true; return true;
} }
@ -1105,13 +1104,11 @@ static bool fits_shifter(uint32_t imm32,
return false; return false;
} }
namespace {
// We have to use the temporary register for things that can be relocated even // We have to use the temporary register for things that can be relocated even
// if they can be encoded in the ARM's 12 bits of immediate-offset instruction // if they can be encoded in the ARM's 12 bits of immediate-offset instruction
// space. There is no guarantee that the relocated location can be similarly // space. There is no guarantee that the relocated location can be similarly
// encoded. // encoded.
bool must_output_reloc_info(RelocInfo::Mode rmode, const Assembler* assembler) { bool MustOutputRelocInfo(RelocInfo::Mode rmode, const Assembler* assembler) {
if (rmode == RelocInfo::EXTERNAL_REFERENCE) { if (rmode == RelocInfo::EXTERNAL_REFERENCE) {
if (assembler != NULL && assembler->predictable_code_size()) return true; if (assembler != NULL && assembler->predictable_code_size()) return true;
return assembler->serializer_enabled(); return assembler->serializer_enabled();
@ -1121,9 +1118,9 @@ bool must_output_reloc_info(RelocInfo::Mode rmode, const Assembler* assembler) {
return true; return true;
} }
bool use_mov_immediate_load(const Operand& x, const Assembler* assembler) { bool UseMovImmediateLoad(const Operand& x, const Assembler* assembler) {
DCHECK(assembler != nullptr); DCHECK(assembler != nullptr);
if (x.must_output_reloc_info(assembler)) { if (x.MustOutputRelocInfo(assembler)) {
// Prefer constant pool if data is likely to be patched. // Prefer constant pool if data is likely to be patched.
return false; return false;
} else { } else {
@ -1134,22 +1131,22 @@ bool use_mov_immediate_load(const Operand& x, const Assembler* assembler) {
} // namespace } // namespace
bool Operand::must_output_reloc_info(const Assembler* assembler) const { bool Operand::MustOutputRelocInfo(const Assembler* assembler) const {
return v8::internal::must_output_reloc_info(rmode_, assembler); return v8::internal::MustOutputRelocInfo(rmode_, assembler);
} }
int Operand::instructions_required(const Assembler* assembler, int Operand::InstructionsRequired(const Assembler* assembler,
Instr instr) const { Instr instr) const {
DCHECK(assembler != nullptr); DCHECK(assembler != nullptr);
if (rm_.is_valid()) return 1; if (rm_.is_valid()) return 1;
uint32_t dummy1, dummy2; uint32_t dummy1, dummy2;
if (must_output_reloc_info(assembler) || if (MustOutputRelocInfo(assembler) ||
!fits_shifter(immediate(), &dummy1, &dummy2, &instr)) { !FitsShifter(immediate(), &dummy1, &dummy2, &instr)) {
// The immediate operand cannot be encoded as a shifter operand, or use of // The immediate operand cannot be encoded as a shifter operand, or use of
// constant pool is required. First account for the instructions required // constant pool is required. First account for the instructions required
// for the constant pool or immediate load // for the constant pool or immediate load
int instructions; int instructions;
if (use_mov_immediate_load(*this, assembler)) { if (UseMovImmediateLoad(*this, assembler)) {
DCHECK(CpuFeatures::IsSupported(ARMv7)); DCHECK(CpuFeatures::IsSupported(ARMv7));
// A movw / movt immediate load. // A movw / movt immediate load.
instructions = 2; instructions = 2;
@ -1171,15 +1168,13 @@ int Operand::instructions_required(const Assembler* assembler,
} }
} }
void Assembler::Move32BitImmediate(Register rd, const Operand& x,
void Assembler::move_32_bit_immediate(Register rd, Condition cond) {
const Operand& x, if (UseMovImmediateLoad(x, this)) {
Condition cond) { // UseMovImmediateLoad should return false when we need to output
if (use_mov_immediate_load(x, this)) {
// use_mov_immediate_load should return false when we need to output
// relocation info, since we prefer the constant pool for values that // relocation info, since we prefer the constant pool for values that
// can be patched. // can be patched.
DCHECK(!x.must_output_reloc_info(this)); DCHECK(!x.MustOutputRelocInfo(this));
Register target = rd.code() == pc.code() ? ip : rd; Register target = rd.code() == pc.code() ? ip : rd;
if (CpuFeatures::IsSupported(ARMv7)) { if (CpuFeatures::IsSupported(ARMv7)) {
uint32_t imm32 = static_cast<uint32_t>(x.immediate()); uint32_t imm32 = static_cast<uint32_t>(x.immediate());
@ -1192,7 +1187,7 @@ void Assembler::move_32_bit_immediate(Register rd,
} }
} else { } else {
int32_t immediate; int32_t immediate;
if (x.is_heap_number()) { if (x.IsHeapNumber()) {
RequestHeapNumber(x.heap_number()); RequestHeapNumber(x.heap_number());
immediate = 0; immediate = 0;
} else { } else {
@ -1203,51 +1198,84 @@ void Assembler::move_32_bit_immediate(Register rd,
} }
} }
void Assembler::AddrMode1(Instr instr, Register rd, Register rn,
void Assembler::addrmod1(Instr instr, const Operand& x) {
Register rn,
Register rd,
const Operand& x) {
CheckBuffer(); CheckBuffer();
uint32_t opcode = instr & kOpCodeMask;
bool set_flags = (instr & S) != 0;
DCHECK((opcode == ADC) || (opcode == ADD) || (opcode == AND) ||
(opcode == BIC) || (opcode == EOR) || (opcode == ORR) ||
(opcode == RSB) || (opcode == RSC) || (opcode == SBC) ||
(opcode == SUB) || (opcode == CMN) || (opcode == CMP) ||
(opcode == TEQ) || (opcode == TST) || (opcode == MOV) ||
(opcode == MVN));
// For comparison instructions, rd is not defined.
DCHECK(rd.is_valid() || (opcode == CMN) || (opcode == CMP) ||
(opcode == TEQ) || (opcode == TST));
// For move instructions, rn is not defined.
DCHECK(rn.is_valid() || (opcode == MOV) || (opcode == MVN));
DCHECK(rd.is_valid() || rn.is_valid());
DCHECK((instr & ~(kCondMask | kOpCodeMask | S)) == 0); DCHECK((instr & ~(kCondMask | kOpCodeMask | S)) == 0);
if (!x.rm_.is_valid()) { if (!AddrMode1TryEncodeOperand(&instr, x)) {
// Immediate. DCHECK(x.IsImmediate());
uint32_t rotate_imm; // Upon failure to encode, the opcode should not have changed.
uint32_t immed_8; DCHECK(opcode == (instr & kOpCodeMask));
if (x.must_output_reloc_info(this) || Condition cond = Instruction::ConditionField(instr);
!fits_shifter(x.immediate(), &rotate_imm, &immed_8, &instr)) { if ((opcode == MOV) && !set_flags) {
// Generate a sequence of mov instructions or a load from the constant
// pool only for a MOV instruction which does not set the flags.
DCHECK(!rn.is_valid());
Move32BitImmediate(rd, x, cond);
} else {
// The immediate operand cannot be encoded as a shifter operand, so load // The immediate operand cannot be encoded as a shifter operand, so load
// it first to register ip and change the original instruction to use ip. // it first to register ip and change the original instruction to use ip.
// However, if the original instruction is a 'mov rd, x' (not setting the CHECK(!rn.is(ip)); // rn should never be ip, or it will be trashed.
// condition code), then replace it with a 'ldr rd, [pc]'. mov(ip, x, LeaveCC, cond);
CHECK(!rn.is(ip)); // rn should never be ip, or will be trashed AddrMode1(instr, rd, rn, Operand(ip));
Condition cond = Instruction::ConditionField(instr);
if ((instr & ~kCondMask) == 13*B21) { // mov, S not set
move_32_bit_immediate(rd, x, cond);
} else {
mov(ip, x, LeaveCC, cond);
addrmod1(instr, rn, rd, Operand(ip));
}
return;
} }
instr |= I | rotate_imm*B8 | immed_8; return;
} else if (!x.rs_.is_valid()) { }
// Immediate shift. if (!rd.is_valid()) {
instr |= x.shift_imm_*B7 | x.shift_op_ | x.rm_.code(); // Emit a comparison instruction.
} else { emit(instr | rn.code() * B16);
// Register shift. } else if (!rn.is_valid()) {
DCHECK(!rn.is(pc) && !rd.is(pc) && !x.rm_.is(pc) && !x.rs_.is(pc)); // Emit a move instruction. If the operand is a register-shifted register,
instr |= x.rs_.code()*B8 | x.shift_op_ | B4 | x.rm_.code(); // then prevent the destination from being PC as this is unpredictable.
DCHECK(!x.IsRegisterShiftedRegister() || !rd.is(pc));
emit(instr | rd.code() * B12);
} else {
emit(instr | rn.code() * B16 | rd.code() * B12);
} }
emit(instr | rn.code()*B16 | rd.code()*B12);
if (rn.is(pc) || x.rm_.is(pc)) { if (rn.is(pc) || x.rm_.is(pc)) {
// Block constant pool emission for one instruction after reading pc. // Block constant pool emission for one instruction after reading pc.
BlockConstPoolFor(1); BlockConstPoolFor(1);
} }
} }
bool Assembler::AddrMode1TryEncodeOperand(Instr* instr, const Operand& x) {
if (x.IsImmediate()) {
// Immediate.
uint32_t rotate_imm;
uint32_t immed_8;
if (x.MustOutputRelocInfo(this) ||
!FitsShifter(x.immediate(), &rotate_imm, &immed_8, instr)) {
// Let the caller handle generating multiple instructions.
return false;
}
*instr |= I | rotate_imm * B8 | immed_8;
} else if (x.IsImmediateShiftedRegister()) {
*instr |= x.shift_imm_ * B7 | x.shift_op_ | x.rm_.code();
} else {
DCHECK(x.IsRegisterShiftedRegister());
// It is unpredictable to use the PC in this case.
DCHECK(!x.rm_.is(pc) && !x.rs_.is(pc));
*instr |= x.rs_.code() * B8 | x.shift_op_ | B4 | x.rm_.code();
}
void Assembler::addrmod2(Instr instr, Register rd, const MemOperand& x) { return true;
}
void Assembler::AddrMode2(Instr instr, Register rd, const MemOperand& x) {
DCHECK((instr & ~(kCondMask | B | L)) == B26); DCHECK((instr & ~(kCondMask | B | L)) == B26);
int am = x.am_; int am = x.am_;
if (!x.rm_.is_valid()) { if (!x.rm_.is_valid()) {
@ -1262,7 +1290,7 @@ void Assembler::addrmod2(Instr instr, Register rd, const MemOperand& x) {
// rn (and rd in a load) should never be ip, or will be trashed. // rn (and rd in a load) should never be ip, or will be trashed.
DCHECK(!x.rn_.is(ip) && ((instr & L) == L || !rd.is(ip))); DCHECK(!x.rn_.is(ip) && ((instr & L) == L || !rd.is(ip)));
mov(ip, Operand(x.offset_), LeaveCC, Instruction::ConditionField(instr)); mov(ip, Operand(x.offset_), LeaveCC, Instruction::ConditionField(instr));
addrmod2(instr, rd, MemOperand(x.rn_, ip, x.am_)); AddrMode2(instr, rd, MemOperand(x.rn_, ip, x.am_));
return; return;
} }
DCHECK(offset_12 >= 0); // no masking needed DCHECK(offset_12 >= 0); // no masking needed
@ -1278,8 +1306,7 @@ void Assembler::addrmod2(Instr instr, Register rd, const MemOperand& x) {
emit(instr | am | x.rn_.code()*B16 | rd.code()*B12); emit(instr | am | x.rn_.code()*B16 | rd.code()*B12);
} }
void Assembler::AddrMode3(Instr instr, Register rd, const MemOperand& x) {
void Assembler::addrmod3(Instr instr, Register rd, const MemOperand& x) {
DCHECK((instr & ~(kCondMask | L | S6 | H)) == (B4 | B7)); DCHECK((instr & ~(kCondMask | L | S6 | H)) == (B4 | B7));
DCHECK(x.rn_.is_valid()); DCHECK(x.rn_.is_valid());
int am = x.am_; int am = x.am_;
@ -1295,7 +1322,7 @@ void Assembler::addrmod3(Instr instr, Register rd, const MemOperand& x) {
// rn (and rd in a load) should never be ip, or will be trashed. // rn (and rd in a load) should never be ip, or will be trashed.
DCHECK(!x.rn_.is(ip) && ((instr & L) == L || !rd.is(ip))); DCHECK(!x.rn_.is(ip) && ((instr & L) == L || !rd.is(ip)));
mov(ip, Operand(x.offset_), LeaveCC, Instruction::ConditionField(instr)); mov(ip, Operand(x.offset_), LeaveCC, Instruction::ConditionField(instr));
addrmod3(instr, rd, MemOperand(x.rn_, ip, x.am_)); AddrMode3(instr, rd, MemOperand(x.rn_, ip, x.am_));
return; return;
} }
DCHECK(offset_8 >= 0); // no masking needed DCHECK(offset_8 >= 0); // no masking needed
@ -1306,7 +1333,7 @@ void Assembler::addrmod3(Instr instr, Register rd, const MemOperand& x) {
DCHECK(!x.rn_.is(ip) && ((instr & L) == L || !rd.is(ip))); DCHECK(!x.rn_.is(ip) && ((instr & L) == L || !rd.is(ip)));
mov(ip, Operand(x.rm_, x.shift_op_, x.shift_imm_), LeaveCC, mov(ip, Operand(x.rm_, x.shift_op_, x.shift_imm_), LeaveCC,
Instruction::ConditionField(instr)); Instruction::ConditionField(instr));
addrmod3(instr, rd, MemOperand(x.rn_, ip, x.am_)); AddrMode3(instr, rd, MemOperand(x.rn_, ip, x.am_));
return; return;
} else { } else {
// Register offset. // Register offset.
@ -1317,16 +1344,14 @@ void Assembler::addrmod3(Instr instr, Register rd, const MemOperand& x) {
emit(instr | am | x.rn_.code()*B16 | rd.code()*B12); emit(instr | am | x.rn_.code()*B16 | rd.code()*B12);
} }
void Assembler::AddrMode4(Instr instr, Register rn, RegList rl) {
void Assembler::addrmod4(Instr instr, Register rn, RegList rl) {
DCHECK((instr & ~(kCondMask | P | U | W | L)) == B27); DCHECK((instr & ~(kCondMask | P | U | W | L)) == B27);
DCHECK(rl != 0); DCHECK(rl != 0);
DCHECK(!rn.is(pc)); DCHECK(!rn.is(pc));
emit(instr | rn.code()*B16 | rl); emit(instr | rn.code()*B16 | rl);
} }
void Assembler::AddrMode5(Instr instr, CRegister crd, const MemOperand& x) {
void Assembler::addrmod5(Instr instr, CRegister crd, const MemOperand& x) {
// Unindexed addressing is not encoded by this function. // Unindexed addressing is not encoded by this function.
DCHECK_EQ((B27 | B26), DCHECK_EQ((B27 | B26),
(instr & ~(kCondMask | kCoprocessorMask | P | U | N | W | L))); (instr & ~(kCondMask | kCoprocessorMask | P | U | N | W | L)));
@ -1342,7 +1367,7 @@ void Assembler::addrmod5(Instr instr, CRegister crd, const MemOperand& x) {
DCHECK(is_uint8(offset_8)); // unsigned word offset must fit in a byte DCHECK(is_uint8(offset_8)); // unsigned word offset must fit in a byte
DCHECK((am & (P|W)) == P || !x.rn_.is(pc)); // no pc base with writeback DCHECK((am & (P|W)) == P || !x.rn_.is(pc)); // no pc base with writeback
// Post-indexed addressing requires W == 1; different than in addrmod2/3. // Post-indexed addressing requires W == 1; different than in AddrMode2/3.
if ((am & P) == 0) if ((am & P) == 0)
am |= W; am |= W;
@ -1436,19 +1461,19 @@ void Assembler::blx(Label* L) {
void Assembler::and_(Register dst, Register src1, const Operand& src2, void Assembler::and_(Register dst, Register src1, const Operand& src2,
SBit s, Condition cond) { SBit s, Condition cond) {
addrmod1(cond | AND | s, src1, dst, src2); AddrMode1(cond | AND | s, dst, src1, src2);
} }
void Assembler::eor(Register dst, Register src1, const Operand& src2, void Assembler::eor(Register dst, Register src1, const Operand& src2,
SBit s, Condition cond) { SBit s, Condition cond) {
addrmod1(cond | EOR | s, src1, dst, src2); AddrMode1(cond | EOR | s, dst, src1, src2);
} }
void Assembler::sub(Register dst, Register src1, const Operand& src2, void Assembler::sub(Register dst, Register src1, const Operand& src2,
SBit s, Condition cond) { SBit s, Condition cond) {
addrmod1(cond | SUB | s, src1, dst, src2); AddrMode1(cond | SUB | s, dst, src1, src2);
} }
void Assembler::sub(Register dst, Register src1, Register src2, SBit s, void Assembler::sub(Register dst, Register src1, Register src2, SBit s,
@ -1458,13 +1483,13 @@ void Assembler::sub(Register dst, Register src1, Register src2, SBit s,
void Assembler::rsb(Register dst, Register src1, const Operand& src2, void Assembler::rsb(Register dst, Register src1, const Operand& src2,
SBit s, Condition cond) { SBit s, Condition cond) {
addrmod1(cond | RSB | s, src1, dst, src2); AddrMode1(cond | RSB | s, dst, src1, src2);
} }
void Assembler::add(Register dst, Register src1, const Operand& src2, void Assembler::add(Register dst, Register src1, const Operand& src2,
SBit s, Condition cond) { SBit s, Condition cond) {
addrmod1(cond | ADD | s, src1, dst, src2); AddrMode1(cond | ADD | s, dst, src1, src2);
} }
void Assembler::add(Register dst, Register src1, Register src2, SBit s, void Assembler::add(Register dst, Register src1, Register src2, SBit s,
@ -1474,24 +1499,24 @@ void Assembler::add(Register dst, Register src1, Register src2, SBit s,
void Assembler::adc(Register dst, Register src1, const Operand& src2, void Assembler::adc(Register dst, Register src1, const Operand& src2,
SBit s, Condition cond) { SBit s, Condition cond) {
addrmod1(cond | ADC | s, src1, dst, src2); AddrMode1(cond | ADC | s, dst, src1, src2);
} }
void Assembler::sbc(Register dst, Register src1, const Operand& src2, void Assembler::sbc(Register dst, Register src1, const Operand& src2,
SBit s, Condition cond) { SBit s, Condition cond) {
addrmod1(cond | SBC | s, src1, dst, src2); AddrMode1(cond | SBC | s, dst, src1, src2);
} }
void Assembler::rsc(Register dst, Register src1, const Operand& src2, void Assembler::rsc(Register dst, Register src1, const Operand& src2,
SBit s, Condition cond) { SBit s, Condition cond) {
addrmod1(cond | RSC | s, src1, dst, src2); AddrMode1(cond | RSC | s, dst, src1, src2);
} }
void Assembler::tst(Register src1, const Operand& src2, Condition cond) { void Assembler::tst(Register src1, const Operand& src2, Condition cond) {
addrmod1(cond | TST | S, src1, r0, src2); AddrMode1(cond | TST | S, no_reg, src1, src2);
} }
void Assembler::tst(Register src1, Register src2, Condition cond) { void Assembler::tst(Register src1, Register src2, Condition cond) {
@ -1499,12 +1524,12 @@ void Assembler::tst(Register src1, Register src2, Condition cond) {
} }
void Assembler::teq(Register src1, const Operand& src2, Condition cond) { void Assembler::teq(Register src1, const Operand& src2, Condition cond) {
addrmod1(cond | TEQ | S, src1, r0, src2); AddrMode1(cond | TEQ | S, no_reg, src1, src2);
} }
void Assembler::cmp(Register src1, const Operand& src2, Condition cond) { void Assembler::cmp(Register src1, const Operand& src2, Condition cond) {
addrmod1(cond | CMP | S, src1, r0, src2); AddrMode1(cond | CMP | S, no_reg, src1, src2);
} }
void Assembler::cmp(Register src1, Register src2, Condition cond) { void Assembler::cmp(Register src1, Register src2, Condition cond) {
@ -1519,13 +1544,13 @@ void Assembler::cmp_raw_immediate(
void Assembler::cmn(Register src1, const Operand& src2, Condition cond) { void Assembler::cmn(Register src1, const Operand& src2, Condition cond) {
addrmod1(cond | CMN | S, src1, r0, src2); AddrMode1(cond | CMN | S, no_reg, src1, src2);
} }
void Assembler::orr(Register dst, Register src1, const Operand& src2, void Assembler::orr(Register dst, Register src1, const Operand& src2,
SBit s, Condition cond) { SBit s, Condition cond) {
addrmod1(cond | ORR | s, src1, dst, src2); AddrMode1(cond | ORR | s, dst, src1, src2);
} }
void Assembler::orr(Register dst, Register src1, Register src2, SBit s, void Assembler::orr(Register dst, Register src1, Register src2, SBit s,
@ -1537,8 +1562,8 @@ void Assembler::mov(Register dst, const Operand& src, SBit s, Condition cond) {
// Don't allow nop instructions in the form mov rn, rn to be generated using // Don't allow nop instructions in the form mov rn, rn to be generated using
// the mov instruction. They must be generated using nop(int/NopMarkerTypes) // the mov instruction. They must be generated using nop(int/NopMarkerTypes)
// or MarkCode(int/NopMarkerTypes) pseudo instructions. // or MarkCode(int/NopMarkerTypes) pseudo instructions.
DCHECK(!(src.is_reg() && src.rm().is(dst) && s == LeaveCC && cond == al)); DCHECK(!(src.IsRegister() && src.rm().is(dst) && s == LeaveCC && cond == al));
addrmod1(cond | MOV | s, r0, dst, src); AddrMode1(cond | MOV | s, dst, no_reg, src);
} }
void Assembler::mov(Register dst, Register src, SBit s, Condition cond) { void Assembler::mov(Register dst, Register src, SBit s, Condition cond) {
@ -1598,17 +1623,17 @@ void Assembler::movt(Register reg, uint32_t immediate, Condition cond) {
void Assembler::bic(Register dst, Register src1, const Operand& src2, void Assembler::bic(Register dst, Register src1, const Operand& src2,
SBit s, Condition cond) { SBit s, Condition cond) {
addrmod1(cond | BIC | s, src1, dst, src2); AddrMode1(cond | BIC | s, dst, src1, src2);
} }
void Assembler::mvn(Register dst, const Operand& src, SBit s, Condition cond) { void Assembler::mvn(Register dst, const Operand& src, SBit s, Condition cond) {
addrmod1(cond | MVN | s, r0, dst, src); AddrMode1(cond | MVN | s, dst, no_reg, src);
} }
void Assembler::asr(Register dst, Register src1, const Operand& src2, SBit s, void Assembler::asr(Register dst, Register src1, const Operand& src2, SBit s,
Condition cond) { Condition cond) {
if (src2.is_reg()) { if (src2.IsRegister()) {
mov(dst, Operand(src1, ASR, src2.rm()), s, cond); mov(dst, Operand(src1, ASR, src2.rm()), s, cond);
} else { } else {
mov(dst, Operand(src1, ASR, src2.immediate()), s, cond); mov(dst, Operand(src1, ASR, src2.immediate()), s, cond);
@ -1617,7 +1642,7 @@ void Assembler::asr(Register dst, Register src1, const Operand& src2, SBit s,
void Assembler::lsl(Register dst, Register src1, const Operand& src2, SBit s, void Assembler::lsl(Register dst, Register src1, const Operand& src2, SBit s,
Condition cond) { Condition cond) {
if (src2.is_reg()) { if (src2.IsRegister()) {
mov(dst, Operand(src1, LSL, src2.rm()), s, cond); mov(dst, Operand(src1, LSL, src2.rm()), s, cond);
} else { } else {
mov(dst, Operand(src1, LSL, src2.immediate()), s, cond); mov(dst, Operand(src1, LSL, src2.immediate()), s, cond);
@ -1626,7 +1651,7 @@ void Assembler::lsl(Register dst, Register src1, const Operand& src2, SBit s,
void Assembler::lsr(Register dst, Register src1, const Operand& src2, SBit s, void Assembler::lsr(Register dst, Register src1, const Operand& src2, SBit s,
Condition cond) { Condition cond) {
if (src2.is_reg()) { if (src2.IsRegister()) {
mov(dst, Operand(src1, LSR, src2.rm()), s, cond); mov(dst, Operand(src1, LSR, src2.rm()), s, cond);
} else { } else {
mov(dst, Operand(src1, LSR, src2.immediate()), s, cond); mov(dst, Operand(src1, LSR, src2.immediate()), s, cond);
@ -1762,8 +1787,8 @@ void Assembler::usat(Register dst,
Condition cond) { Condition cond) {
DCHECK(!dst.is(pc) && !src.rm_.is(pc)); DCHECK(!dst.is(pc) && !src.rm_.is(pc));
DCHECK((satpos >= 0) && (satpos <= 31)); DCHECK((satpos >= 0) && (satpos <= 31));
DCHECK(src.IsImmediateShiftedRegister());
DCHECK((src.shift_op_ == ASR) || (src.shift_op_ == LSL)); DCHECK((src.shift_op_ == ASR) || (src.shift_op_ == LSL));
DCHECK(src.rs_.is(no_reg));
int sh = 0; int sh = 0;
if (src.shift_op_ == ASR) { if (src.shift_op_ == ASR) {
@ -1856,9 +1881,8 @@ void Assembler::pkhbt(Register dst,
// Rd(15-12) | imm5(11-7) | 0(6) | 01(5-4) | Rm(3-0) // Rd(15-12) | imm5(11-7) | 0(6) | 01(5-4) | Rm(3-0)
DCHECK(!dst.is(pc)); DCHECK(!dst.is(pc));
DCHECK(!src1.is(pc)); DCHECK(!src1.is(pc));
DCHECK(src2.IsImmediateShiftedRegister());
DCHECK(!src2.rm().is(pc)); DCHECK(!src2.rm().is(pc));
DCHECK(!src2.rm().is(no_reg));
DCHECK(src2.rs().is(no_reg));
DCHECK((src2.shift_imm_ >= 0) && (src2.shift_imm_ <= 31)); DCHECK((src2.shift_imm_ >= 0) && (src2.shift_imm_ <= 31));
DCHECK(src2.shift_op() == LSL); DCHECK(src2.shift_op() == LSL);
emit(cond | 0x68*B20 | src1.code()*B16 | dst.code()*B12 | emit(cond | 0x68*B20 | src1.code()*B16 | dst.code()*B12 |
@ -1875,9 +1899,8 @@ void Assembler::pkhtb(Register dst,
// Rd(15-12) | imm5(11-7) | 1(6) | 01(5-4) | Rm(3-0) // Rd(15-12) | imm5(11-7) | 1(6) | 01(5-4) | Rm(3-0)
DCHECK(!dst.is(pc)); DCHECK(!dst.is(pc));
DCHECK(!src1.is(pc)); DCHECK(!src1.is(pc));
DCHECK(src2.IsImmediateShiftedRegister());
DCHECK(!src2.rm().is(pc)); DCHECK(!src2.rm().is(pc));
DCHECK(!src2.rm().is(no_reg));
DCHECK(src2.rs().is(no_reg));
DCHECK((src2.shift_imm_ >= 1) && (src2.shift_imm_ <= 32)); DCHECK((src2.shift_imm_ >= 1) && (src2.shift_imm_ <= 32));
DCHECK(src2.shift_op() == ASR); DCHECK(src2.shift_op() == ASR);
int asr = (src2.shift_imm_ == 32) ? 0 : src2.shift_imm_; int asr = (src2.shift_imm_ == 32) ? 0 : src2.shift_imm_;
@ -2024,20 +2047,20 @@ void Assembler::msr(SRegisterFieldMask fields, const Operand& src,
DCHECK((fields & 0x000f0000) != 0); // At least one field must be set. DCHECK((fields & 0x000f0000) != 0); // At least one field must be set.
DCHECK(((fields & 0xfff0ffff) == CPSR) || ((fields & 0xfff0ffff) == SPSR)); DCHECK(((fields & 0xfff0ffff) == CPSR) || ((fields & 0xfff0ffff) == SPSR));
Instr instr; Instr instr;
if (!src.rm_.is_valid()) { if (src.IsImmediate()) {
// Immediate. // Immediate.
uint32_t rotate_imm; uint32_t rotate_imm;
uint32_t immed_8; uint32_t immed_8;
if (src.must_output_reloc_info(this) || if (src.MustOutputRelocInfo(this) ||
!fits_shifter(src.immediate(), &rotate_imm, &immed_8, NULL)) { !FitsShifter(src.immediate(), &rotate_imm, &immed_8, NULL)) {
// Immediate operand cannot be encoded, load it first to register ip. // Immediate operand cannot be encoded, load it first to register ip.
move_32_bit_immediate(ip, src); Move32BitImmediate(ip, src);
msr(fields, Operand(ip), cond); msr(fields, Operand(ip), cond);
return; return;
} }
instr = I | rotate_imm*B8 | immed_8; instr = I | rotate_imm*B8 | immed_8;
} else { } else {
DCHECK(!src.rs_.is_valid() && src.shift_imm_ == 0); // only rm allowed DCHECK(src.IsRegister()); // Only rm is allowed.
instr = src.rm_.code(); instr = src.rm_.code();
} }
emit(cond | instr | B24 | B21 | fields | 15*B12); emit(cond | instr | B24 | B21 | fields | 15*B12);
@ -2046,42 +2069,42 @@ void Assembler::msr(SRegisterFieldMask fields, const Operand& src,
// Load/Store instructions. // Load/Store instructions.
void Assembler::ldr(Register dst, const MemOperand& src, Condition cond) { void Assembler::ldr(Register dst, const MemOperand& src, Condition cond) {
addrmod2(cond | B26 | L, dst, src); AddrMode2(cond | B26 | L, dst, src);
} }
void Assembler::str(Register src, const MemOperand& dst, Condition cond) { void Assembler::str(Register src, const MemOperand& dst, Condition cond) {
addrmod2(cond | B26, src, dst); AddrMode2(cond | B26, src, dst);
} }
void Assembler::ldrb(Register dst, const MemOperand& src, Condition cond) { void Assembler::ldrb(Register dst, const MemOperand& src, Condition cond) {
addrmod2(cond | B26 | B | L, dst, src); AddrMode2(cond | B26 | B | L, dst, src);
} }
void Assembler::strb(Register src, const MemOperand& dst, Condition cond) { void Assembler::strb(Register src, const MemOperand& dst, Condition cond) {
addrmod2(cond | B26 | B, src, dst); AddrMode2(cond | B26 | B, src, dst);
} }
void Assembler::ldrh(Register dst, const MemOperand& src, Condition cond) { void Assembler::ldrh(Register dst, const MemOperand& src, Condition cond) {
addrmod3(cond | L | B7 | H | B4, dst, src); AddrMode3(cond | L | B7 | H | B4, dst, src);
} }
void Assembler::strh(Register src, const MemOperand& dst, Condition cond) { void Assembler::strh(Register src, const MemOperand& dst, Condition cond) {
addrmod3(cond | B7 | H | B4, src, dst); AddrMode3(cond | B7 | H | B4, src, dst);
} }
void Assembler::ldrsb(Register dst, const MemOperand& src, Condition cond) { void Assembler::ldrsb(Register dst, const MemOperand& src, Condition cond) {
addrmod3(cond | L | B7 | S6 | B4, dst, src); AddrMode3(cond | L | B7 | S6 | B4, dst, src);
} }
void Assembler::ldrsh(Register dst, const MemOperand& src, Condition cond) { void Assembler::ldrsh(Register dst, const MemOperand& src, Condition cond) {
addrmod3(cond | L | B7 | S6 | H | B4, dst, src); AddrMode3(cond | L | B7 | S6 | H | B4, dst, src);
} }
@ -2091,7 +2114,7 @@ void Assembler::ldrd(Register dst1, Register dst2,
DCHECK(!dst1.is(lr)); // r14. DCHECK(!dst1.is(lr)); // r14.
DCHECK_EQ(0, dst1.code() % 2); DCHECK_EQ(0, dst1.code() % 2);
DCHECK_EQ(dst1.code() + 1, dst2.code()); DCHECK_EQ(dst1.code() + 1, dst2.code());
addrmod3(cond | B7 | B6 | B4, dst1, src); AddrMode3(cond | B7 | B6 | B4, dst1, src);
} }
@ -2101,7 +2124,7 @@ void Assembler::strd(Register src1, Register src2,
DCHECK(!src1.is(lr)); // r14. DCHECK(!src1.is(lr)); // r14.
DCHECK_EQ(0, src1.code() % 2); DCHECK_EQ(0, src1.code() % 2);
DCHECK_EQ(src1.code() + 1, src2.code()); DCHECK_EQ(src1.code() + 1, src2.code());
addrmod3(cond | B7 | B6 | B5 | B4, src1, dst); AddrMode3(cond | B7 | B6 | B5 | B4, src1, dst);
} }
// Load/Store exclusive instructions. // Load/Store exclusive instructions.
@ -2179,7 +2202,7 @@ void Assembler::ldm(BlockAddrMode am,
// ABI stack constraint: ldmxx base, {..sp..} base != sp is not restartable. // ABI stack constraint: ldmxx base, {..sp..} base != sp is not restartable.
DCHECK(base.is(sp) || (dst & sp.bit()) == 0); DCHECK(base.is(sp) || (dst & sp.bit()) == 0);
addrmod4(cond | B27 | am | L, base, dst); AddrMode4(cond | B27 | am | L, base, dst);
// Emit the constant pool after a function return implemented by ldm ..{..pc}. // Emit the constant pool after a function return implemented by ldm ..{..pc}.
if (cond == al && (dst & pc.bit()) != 0) { if (cond == al && (dst & pc.bit()) != 0) {
@ -2197,7 +2220,7 @@ void Assembler::stm(BlockAddrMode am,
Register base, Register base,
RegList src, RegList src,
Condition cond) { Condition cond) {
addrmod4(cond | B27 | am, base, src); AddrMode4(cond | B27 | am, base, src);
} }
@ -2335,7 +2358,7 @@ void Assembler::ldc(Coprocessor coproc,
const MemOperand& src, const MemOperand& src,
LFlag l, LFlag l,
Condition cond) { Condition cond) {
addrmod5(cond | B27 | B26 | l | L | coproc*B8, crd, src); AddrMode5(cond | B27 | B26 | l | L | coproc * B8, crd, src);
} }
@ -4899,7 +4922,7 @@ int Assembler::DecodeShiftImm(Instr instr) {
Instr Assembler::PatchShiftImm(Instr instr, int immed) { Instr Assembler::PatchShiftImm(Instr instr, int immed) {
uint32_t rotate_imm = 0; uint32_t rotate_imm = 0;
uint32_t immed_8 = 0; uint32_t immed_8 = 0;
bool immed_fits = fits_shifter(immed, &rotate_imm, &immed_8, NULL); bool immed_fits = FitsShifter(immed, &rotate_imm, &immed_8, NULL);
DCHECK(immed_fits); DCHECK(immed_fits);
USE(immed_fits); USE(immed_fits);
return (instr & ~kOff12Mask) | (rotate_imm << 8) | immed_8; return (instr & ~kOff12Mask) | (rotate_imm << 8) | immed_8;
@ -4927,7 +4950,7 @@ bool Assembler::IsOrrImmed(Instr instr) {
bool Assembler::ImmediateFitsAddrMode1Instruction(int32_t imm32) { bool Assembler::ImmediateFitsAddrMode1Instruction(int32_t imm32) {
uint32_t dummy1; uint32_t dummy1;
uint32_t dummy2; uint32_t dummy2;
return fits_shifter(imm32, &dummy1, &dummy2, NULL); return FitsShifter(imm32, &dummy1, &dummy2, NULL);
} }
@ -5098,7 +5121,7 @@ void Assembler::ConstantPoolAddEntry(int position, RelocInfo::Mode rmode,
BlockConstPoolFor(1); BlockConstPoolFor(1);
// Emit relocation info. // Emit relocation info.
if (must_output_reloc_info(rmode, this) && !shared) { if (MustOutputRelocInfo(rmode, this) && !shared) {
RecordRelocInfo(rmode); RecordRelocInfo(rmode);
} }
} }

View File

@ -527,12 +527,20 @@ class Operand BASE_EMBEDDED {
static Operand EmbeddedNumber(double value); // Smi or HeapNumber static Operand EmbeddedNumber(double value); // Smi or HeapNumber
// Return true if this is a register operand. // Return true if this is a register operand.
INLINE(bool is_reg() const) { bool IsRegister() const {
return rm_.is_valid() && return rm_.is_valid() &&
rs_.is(no_reg) && rs_.is(no_reg) &&
shift_op_ == LSL && shift_op_ == LSL &&
shift_imm_ == 0; shift_imm_ == 0;
} }
// Return true if this is a register operand shifted with an immediate.
bool IsImmediateShiftedRegister() const {
return rm_.is_valid() && !rs_.is_valid();
}
// Return true if this is a register operand shifted with a register.
bool IsRegisterShiftedRegister() const {
return rm_.is_valid() && rs_.is_valid();
}
// Return the number of actual instructions required to implement the given // Return the number of actual instructions required to implement the given
// instruction for this particular operand. This can be a single instruction, // instruction for this particular operand. This can be a single instruction,
@ -545,28 +553,31 @@ class Operand BASE_EMBEDDED {
// //
// The value returned is only valid as long as no entries are added to the // The value returned is only valid as long as no entries are added to the
// constant pool between this call and the actual instruction being emitted. // constant pool between this call and the actual instruction being emitted.
int instructions_required(const Assembler* assembler, Instr instr = 0) const; int InstructionsRequired(const Assembler* assembler, Instr instr = 0) const;
bool must_output_reloc_info(const Assembler* assembler) const; bool MustOutputRelocInfo(const Assembler* assembler) const;
inline int32_t immediate() const { inline int32_t immediate() const {
DCHECK(!rm_.is_valid()); DCHECK(IsImmediate());
DCHECK(!is_heap_number()); DCHECK(!IsHeapNumber());
return value_.immediate; return value_.immediate;
} }
bool IsImmediate() const {
return !rm_.is_valid();
}
double heap_number() const { double heap_number() const {
DCHECK(is_heap_number()); DCHECK(IsHeapNumber());
return value_.heap_number; return value_.heap_number;
} }
bool IsHeapNumber() const {
DCHECK_IMPLIES(is_heap_number_, IsImmediate());
DCHECK_IMPLIES(is_heap_number_, rmode_ == RelocInfo::EMBEDDED_OBJECT);
return is_heap_number_;
}
Register rm() const { return rm_; } Register rm() const { return rm_; }
Register rs() const { return rs_; } Register rs() const { return rs_; }
ShiftOp shift_op() const { return shift_op_; } ShiftOp shift_op() const { return shift_op_; }
bool is_heap_number() const {
DCHECK_IMPLIES(is_heap_number_, !rm_.is_valid());
DCHECK_IMPLIES(is_heap_number_, rmode_ == RelocInfo::EMBEDDED_OBJECT);
return is_heap_number_;
}
private: private:
@ -1844,16 +1855,21 @@ class Assembler : public AssemblerBase {
void GrowBuffer(); void GrowBuffer();
// 32-bit immediate values // 32-bit immediate values
void move_32_bit_immediate(Register rd, void Move32BitImmediate(Register rd, const Operand& x, Condition cond = al);
const Operand& x,
Condition cond = al);
// Instruction generation // Instruction generation
void addrmod1(Instr instr, Register rn, Register rd, const Operand& x); void AddrMode1(Instr instr, Register rd, Register rn, const Operand& x);
void addrmod2(Instr instr, Register rd, const MemOperand& x); // Attempt to encode operand |x| for instruction |instr| and return true on
void addrmod3(Instr instr, Register rd, const MemOperand& x); // success. The result will be encoded in |instr| directly. This method may
void addrmod4(Instr instr, Register rn, RegList rl); // change the opcode if deemed beneficial, for instance, MOV may be turned
void addrmod5(Instr instr, CRegister crd, const MemOperand& x); // into MVN, ADD into SUB, AND into BIC, ...etc. The only reason this method
// may fail is that the operand is an immediate that cannot be encoded.
bool AddrMode1TryEncodeOperand(Instr* instr, const Operand& x);
void AddrMode2(Instr instr, Register rd, const MemOperand& x);
void AddrMode3(Instr instr, Register rd, const MemOperand& x);
void AddrMode4(Instr instr, Register rn, RegList rl);
void AddrMode5(Instr instr, CRegister crd, const MemOperand& x);
// Labels // Labels
void print(Label* L); void print(Label* L);

View File

@ -88,7 +88,7 @@ int MacroAssembler::CallSize(
Instr mov_instr = cond | MOV | LeaveCC; Instr mov_instr = cond | MOV | LeaveCC;
Operand mov_operand = Operand(reinterpret_cast<intptr_t>(target), rmode); Operand mov_operand = Operand(reinterpret_cast<intptr_t>(target), rmode);
return kInstrSize + return kInstrSize +
mov_operand.instructions_required(this, mov_instr) * kInstrSize; mov_operand.InstructionsRequired(this, mov_instr) * kInstrSize;
} }
@ -318,12 +318,11 @@ void MacroAssembler::Mls(Register dst, Register src1, Register src2,
void MacroAssembler::And(Register dst, Register src1, const Operand& src2, void MacroAssembler::And(Register dst, Register src1, const Operand& src2,
Condition cond) { Condition cond) {
if (!src2.is_reg() && if (!src2.IsRegister() && !src2.MustOutputRelocInfo(this) &&
!src2.must_output_reloc_info(this) &&
src2.immediate() == 0) { src2.immediate() == 0) {
mov(dst, Operand::Zero(), LeaveCC, cond); mov(dst, Operand::Zero(), LeaveCC, cond);
} else if (!(src2.instructions_required(this) == 1) && } else if (!(src2.InstructionsRequired(this) == 1) &&
!src2.must_output_reloc_info(this) && !src2.MustOutputRelocInfo(this) &&
CpuFeatures::IsSupported(ARMv7) && CpuFeatures::IsSupported(ARMv7) &&
base::bits::IsPowerOfTwo32(src2.immediate() + 1)) { base::bits::IsPowerOfTwo32(src2.immediate() + 1)) {
CpuFeatureScope scope(this, ARMv7); CpuFeatureScope scope(this, ARMv7);
@ -1992,7 +1991,7 @@ void MacroAssembler::Allocate(int object_size,
object_size -= bits; object_size -= bits;
shift += 8; shift += 8;
Operand bits_operand(bits); Operand bits_operand(bits);
DCHECK(bits_operand.instructions_required(this) == 1); DCHECK(bits_operand.InstructionsRequired(this) == 1);
add(result_end, source, bits_operand); add(result_end, source, bits_operand);
source = result_end; source = result_end;
} }
@ -2202,7 +2201,7 @@ void MacroAssembler::FastAllocate(int object_size, Register result,
object_size -= bits; object_size -= bits;
shift += 8; shift += 8;
Operand bits_operand(bits); Operand bits_operand(bits);
DCHECK(bits_operand.instructions_required(this) == 1); DCHECK(bits_operand.InstructionsRequired(this) == 1);
add(result_end, source, bits_operand); add(result_end, source, bits_operand);
source = result_end; source = result_end;
} }

View File

@ -182,7 +182,7 @@ class MacroAssembler: public Assembler {
void Move(Register dst, Register src, Condition cond = al); void Move(Register dst, Register src, Condition cond = al);
void Move(Register dst, const Operand& src, SBit sbit = LeaveCC, void Move(Register dst, const Operand& src, SBit sbit = LeaveCC,
Condition cond = al) { Condition cond = al) {
if (!src.is_reg() || !src.rm().is(dst) || sbit != LeaveCC) { if (!src.IsRegister() || !src.rm().is(dst) || sbit != LeaveCC) {
mov(dst, src, sbit, cond); mov(dst, src, sbit, cond);
} }
} }