ARM: Be smarter about switching instructions when immediates don't fit in the instruction. Use ubfx and sbfx more.
Review URL: http://codereview.chromium.org/2826001

git-svn-id: http://v8.googlecode.com/svn/branches/bleeding_edge@4855 ce2b1a6d-e550-0410-aec6-3dcde31c8c00
erik.corry@gmail.com 2010-06-14 11:20:36 +00:00
parent 317dcabe84
commit 142de62819
10 changed files with 288 additions and 83 deletions

@@ -279,6 +279,20 @@ const Instr kBlxRegMask =
15 * B24 | 15 * B20 | 15 * B16 | 15 * B12 | 15 * B8 | 15 * B4;
const Instr kBlxRegPattern =
B24 | B21 | 15 * B16 | 15 * B12 | 15 * B8 | 3 * B4;
const Instr kMovMvnMask = 0x6d * B21 | 0xf * B16;
const Instr kMovMvnPattern = 0xd * B21;
const Instr kMovMvnFlip = B22;
const Instr kCmpCmnMask = 0xdd * B20 | 0xf * B12;
const Instr kCmpCmnPattern = 0x15 * B20;
const Instr kCmpCmnFlip = B21;
const Instr kALUMask = 0x6f * B21;
const Instr kAddPattern = 0x4 * B21;
const Instr kSubPattern = 0x2 * B21;
const Instr kBicPattern = 0xe * B21;
const Instr kAndPattern = 0x0 * B21;
const Instr kAddSubFlip = 0x6 * B21;
const Instr kAndBicFlip = 0xe * B21;
// A mask for the Rd register for push, pop, ldr, str instructions.
const Instr kRdMask = 0x0000f000;
static const int kRdShift = 12;
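
These mask/pattern/flip constants rely on the ARM data-processing opcode field (bits 24..21): and=0000, sub=0010, add=0100, cmp=1010, cmn=1011, mov=1101, bic=1110, mvn=1111. Each *Flip value is the XOR of a complementary pair, so "*instr ^= flip" swaps an instruction for its partner. A small standalone check of those XOR relations (editor's illustration, not part of the commit):

#include <cassert>

int main() {
  // 4-bit ARM data-processing opcodes (instruction bits 24..21).
  const int kAnd = 0x0, kSub = 0x2, kAdd = 0x4, kCmp = 0xa, kCmn = 0xb,
            kMov = 0xd, kBic = 0xe, kMvn = 0xf;
  assert((kMov ^ kMvn) == 0x2);  // kMovMvnFlip = B22 = 0x2 * B21
  assert((kCmp ^ kCmn) == 0x1);  // kCmpCmnFlip = B21
  assert((kAdd ^ kSub) == 0x6);  // kAddSubFlip = 0x6 * B21
  assert((kAnd ^ kBic) == 0xe);  // kAndBicFlip = 0xe * B21
  return 0;
}
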
@@ -627,6 +641,9 @@ void Assembler::next(Label* L) {
// Low-level code emission routines depending on the addressing mode.
// If this returns true then you have to use the rotate_imm and immed_8
// that it returns, because it may have already changed the instruction
// to match them!
static bool fits_shifter(uint32_t imm32,
uint32_t* rotate_imm,
uint32_t* immed_8,
@@ -640,11 +657,34 @@ static bool fits_shifter(uint32_t imm32,
return true;
}
}
// If the opcode is mov or mvn and if ~imm32 fits, change the opcode.
if (instr != NULL && (*instr & 0xd*B21) == 0xd*B21) {
if (fits_shifter(~imm32, rotate_imm, immed_8, NULL)) {
*instr ^= 0x2*B21;
return true;
// If the opcode is one with a complementary version and the complementary
// immediate fits, change the opcode.
if (instr != NULL) {
if ((*instr & kMovMvnMask) == kMovMvnPattern) {
if (fits_shifter(~imm32, rotate_imm, immed_8, NULL)) {
*instr ^= kMovMvnFlip;
return true;
}
} else if ((*instr & kCmpCmnMask) == kCmpCmnPattern) {
if (fits_shifter(-imm32, rotate_imm, immed_8, NULL)) {
*instr ^= kCmpCmnFlip;
return true;
}
} else {
Instr alu_insn = (*instr & kALUMask);
if (alu_insn == kAddPattern ||
alu_insn == kSubPattern) {
if (fits_shifter(-imm32, rotate_imm, immed_8, NULL)) {
*instr ^= kAddSubFlip;
return true;
}
} else if (alu_insn == kAndPattern ||
alu_insn == kBicPattern) {
if (fits_shifter(~imm32, rotate_imm, immed_8, NULL)) {
*instr ^= kAndBicFlip;
return true;
}
}
}
}
return false;
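
fits_shifter decides whether imm32 can be encoded as ARM's shifter operand, i.e. an 8-bit value rotated right by an even amount, and failing that whether the complemented (mov/mvn, and/bic) or negated (cmp/cmn, add/sub) immediate fits after flipping the opcode. A minimal standalone sketch of the encoding rule itself, using the and->bic constant from the disassembler test later in this commit (illustration only, not the V8 routine):

#include <cstdint>
#include <cstdio>

// Does imm32 fit ARM's immediate form: an 8-bit value rotated right by 2*rot?
static bool FitsArmImmediate(uint32_t imm32, uint32_t* rot, uint32_t* imm8) {
  for (uint32_t r = 0; r < 16; ++r) {
    // Undo a rotate-right by 2*r with a rotate-left by 2*r.
    uint32_t v = (r == 0) ? imm32
                          : (imm32 << (2 * r)) | (imm32 >> (32 - 2 * r));
    if (v <= 0xff) {
      *rot = r;
      *imm8 = v;
      return true;
    }
  }
  return false;
}

int main() {
  uint32_t rot, imm8;
  // 0xfc03ffff itself does not fit, but its complement 0x03fc0000 (66846720)
  // does, which is why and_(r3, r5, Operand(0xfc03ffff)) can become a bic.
  printf("%d\n", FitsArmImmediate(0xfc03ffffu, &rot, &imm8));   // 0
  printf("%d\n", FitsArmImmediate(~0xfc03ffffu, &rot, &imm8));  // 1
  return 0;
}
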
@@ -670,6 +710,14 @@ static bool MustUseIp(RelocInfo::Mode rmode) {
}
bool Operand::is_single_instruction() const {
if (rm_.is_valid()) return true;
if (MustUseIp(rmode_)) return false;
uint32_t dummy1, dummy2;
return fits_shifter(imm32_, &dummy1, &dummy2, NULL);
}
void Assembler::addrmod1(Instr instr,
Register rn,
Register rd,

@@ -418,6 +418,15 @@ class Operand BASE_EMBEDDED {
// Return true if this is a register operand.
INLINE(bool is_reg() const);
// Return true if this operand fits in one instruction so that no
// 2-instruction solution with a load into the ip register is necessary.
bool is_single_instruction() const;
inline int32_t immediate() const {
ASSERT(!rm_.is_valid());
return imm32_;
}
Register rm() const { return rm_; }
private:
@@ -532,6 +541,21 @@ extern const Instr kLdrPCPattern;
extern const Instr kBlxRegMask;
extern const Instr kBlxRegPattern;
extern const Instr kMovMvnMask;
extern const Instr kMovMvnPattern;
extern const Instr kMovMvnFlip;
extern const Instr kCmpCmnMask;
extern const Instr kCmpCmnPattern;
extern const Instr kCmpCmnFlip;
extern const Instr kALUMask;
extern const Instr kAddPattern;
extern const Instr kSubPattern;
extern const Instr kAndPattern;
extern const Instr kBicPattern;
extern const Instr kAddSubFlip;
extern const Instr kAndBicFlip;
class Assembler : public Malloced {
public:

@@ -136,7 +136,8 @@ static void AllocateEmptyJSArray(MacroAssembler* masm,
__ str(scratch1, FieldMemOperand(result, JSArray::kElementsOffset));
// Clear the heap tag on the elements array.
__ and_(scratch1, scratch1, Operand(~kHeapObjectTagMask));
ASSERT(kSmiTag == 0);
__ sub(scratch1, scratch1, Operand(kHeapObjectTag));
// Initialize the FixedArray and fill it with holes. FixedArray length is
// stored as a smi.
@@ -240,9 +241,10 @@ static void AllocateJSArray(MacroAssembler* masm,
FieldMemOperand(result, JSArray::kElementsOffset));
// Clear the heap tag on the elements array.
__ and_(elements_array_storage,
elements_array_storage,
Operand(~kHeapObjectTagMask));
ASSERT(kSmiTag == 0);
__ sub(elements_array_storage,
elements_array_storage,
Operand(kHeapObjectTag));
// Initialize the fixed array and fill it with holes. FixedArray length is
// stored as a smi.
// result: JSObject
@@ -617,12 +619,10 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm,
// The field instance sizes contains both pre-allocated property fields and
// in-object properties.
__ ldr(r0, FieldMemOperand(r2, Map::kInstanceSizesOffset));
__ and_(r6,
r0,
Operand(0x000000FF << Map::kPreAllocatedPropertyFieldsByte * 8));
__ add(r3, r3, Operand(r6, LSR, Map::kPreAllocatedPropertyFieldsByte * 8));
__ and_(r6, r0, Operand(0x000000FF << Map::kInObjectPropertiesByte * 8));
__ sub(r3, r3, Operand(r6, LSR, Map::kInObjectPropertiesByte * 8), SetCC);
__ Ubfx(r6, r0, Map::kPreAllocatedPropertyFieldsByte * 8, 8);
__ add(r3, r3, Operand(r6));
__ Ubfx(r6, r0, Map::kInObjectPropertiesByte * 8, 8);
__ sub(r3, r3, Operand(r6), SetCC);
// Done if no extra properties are to be allocated.
__ b(eq, &allocated);

@@ -1203,7 +1203,7 @@ void CodeGenerator::SmiOperation(Token::Value op,
switch (op) {
case Token::BIT_OR: __ orr(tos, tos, Operand(value)); break;
case Token::BIT_XOR: __ eor(tos, tos, Operand(value)); break;
case Token::BIT_AND: __ and_(tos, tos, Operand(value)); break;
case Token::BIT_AND: __ And(tos, tos, Operand(value)); break;
default: UNREACHABLE();
}
frame_->EmitPush(tos, TypeInfo::Smi());
@@ -1215,7 +1215,7 @@ void CodeGenerator::SmiOperation(Token::Value op,
switch (op) {
case Token::BIT_OR: __ orr(tos, tos, Operand(value)); break;
case Token::BIT_XOR: __ eor(tos, tos, Operand(value)); break;
case Token::BIT_AND: __ and_(tos, tos, Operand(value)); break;
case Token::BIT_AND: __ And(tos, tos, Operand(value)); break;
default: UNREACHABLE();
}
deferred->BindExit();
@@ -6628,8 +6628,12 @@ void ConvertToDoubleStub::Generate(MacroAssembler* masm) {
// Gets the wrong answer for 0, but we already checked for that case above.
__ CountLeadingZeros(source_, mantissa, zeros_);
// Compute exponent and or it into the exponent register.
// We use mantissa as a scratch register here.
__ rsb(mantissa, zeros_, Operand(31 + HeapNumber::kExponentBias));
// We use mantissa as a scratch register here. Use a fudge factor to
// divide the constant 31 + HeapNumber::kExponentBias, 0x41e, into two parts
// that fit in ARM's constant field.
int fudge = 0x400;
__ rsb(mantissa, zeros_, Operand(31 + HeapNumber::kExponentBias - fudge));
__ add(mantissa, mantissa, Operand(fudge));
__ orr(exponent,
exponent,
Operand(mantissa, LSL, HeapNumber::kExponentShift));
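
The fudge split works because each half fits ARM's rotated 8-bit immediate form while the rsb/add pair still computes the original value. A quick host-side check of the arithmetic (kExponentBias as defined in HeapNumber; editor's sketch, not part of the commit):

#include <cassert>

int main() {
  const int kExponentBias = 1023;       // HeapNumber::kExponentBias
  const int full = 31 + kExponentBias;  // 1054 (0x41e): too wide for one immediate
  const int fudge = 0x400;              // a single bit: always encodable
  const int rest = full - fudge;        // 30: fits directly in 8 bits
  assert(rest >= 0 && rest <= 0xff);
  // rsb mantissa, zeros, #rest followed by add mantissa, mantissa, #fudge
  // yields (rest - zeros) + fudge == full - zeros, the original expression.
  for (int zeros = 0; zeros <= 31; ++zeros) {
    assert((rest - zeros) + fudge == full - zeros);
  }
  return 0;
}
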
@@ -6702,15 +6706,12 @@ static void EmitIdenticalObjectComparison(MacroAssembler* masm,
bool never_nan_nan) {
Label not_identical;
Label heap_number, return_equal;
Register exp_mask_reg = r5;
__ cmp(r0, r1);
__ b(ne, &not_identical);
// The two objects are identical. If we know that one of them isn't NaN then
// we now know they test equal.
if (cc != eq || !never_nan_nan) {
__ mov(exp_mask_reg, Operand(HeapNumber::kExponentMask));
// Test for NaN. Sadly, we can't just compare to Factory::nan_value(),
// so we do the second best thing - test it ourselves.
// They are both equal and they are not both Smis so both of them are not
@@ -6771,8 +6772,9 @@ static void EmitIdenticalObjectComparison(MacroAssembler* masm,
// Read top bits of double representation (second word of value).
__ ldr(r2, FieldMemOperand(r0, HeapNumber::kExponentOffset));
// Test that exponent bits are all set.
__ and_(r3, r2, Operand(exp_mask_reg));
__ cmp(r3, Operand(exp_mask_reg));
__ Sbfx(r3, r2, HeapNumber::kExponentShift, HeapNumber::kExponentBits);
// NaNs have all-one exponents so they sign extend to -1.
__ cmp(r3, Operand(-1));
__ b(ne, &return_equal);
// Shift out flag and all exponent bits, retaining only mantissa.
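
The Sbfx test relies on the IEEE-754 layout behind the HeapNumber constants: NaNs (and infinities) carry an all-ones 11-bit exponent, so sign-extending that field gives exactly -1. A host-side model of the same check (illustration only; assumes 64-bit IEEE doubles):

#include <cmath>
#include <cstdint>
#include <cstring>
#include <cstdio>

// Extract the 11-bit exponent from a double's high word and sign extend it,
// mirroring what sbfx does on ARM.
static int32_t SignExtendedExponent(double d) {
  uint64_t bits;
  std::memcpy(&bits, &d, sizeof bits);
  uint32_t high = static_cast<uint32_t>(bits >> 32);
  const int kExponentShift = 20;  // position within the high word
  const int kExponentBits = 11;
  uint32_t field = (high >> kExponentShift) & ((1u << kExponentBits) - 1);
  if (field & (1u << (kExponentBits - 1))) {
    field |= ~((1u << kExponentBits) - 1);  // sign extend, as sbfx would
  }
  return static_cast<int32_t>(field);
}

int main() {
  printf("%d\n", SignExtendedExponent(std::nan("")));  // -1: all-ones exponent
  printf("%d\n", SignExtendedExponent(1.0));           // 1023: an ordinary number
  return 0;
}
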
@@ -6893,14 +6895,14 @@ void EmitNanCheck(MacroAssembler* masm, Label* lhs_not_nan, Condition cc) {
Register rhs_mantissa = exp_first ? r1 : r0;
Register lhs_mantissa = exp_first ? r3 : r2;
Label one_is_nan, neither_is_nan;
Label lhs_not_nan_exp_mask_is_loaded;
Register exp_mask_reg = r5;
__ mov(exp_mask_reg, Operand(HeapNumber::kExponentMask));
__ and_(r4, lhs_exponent, Operand(exp_mask_reg));
__ cmp(r4, Operand(exp_mask_reg));
__ b(ne, &lhs_not_nan_exp_mask_is_loaded);
__ Sbfx(r4,
lhs_exponent,
HeapNumber::kExponentShift,
HeapNumber::kExponentBits);
// NaNs have all-one exponents so they sign extend to -1.
__ cmp(r4, Operand(-1));
__ b(ne, lhs_not_nan);
__ mov(r4,
Operand(lhs_exponent, LSL, HeapNumber::kNonMantissaBitsInTopWord),
SetCC);
@@ -6909,10 +6911,12 @@ void EmitNanCheck(MacroAssembler* masm, Label* lhs_not_nan, Condition cc) {
__ b(ne, &one_is_nan);
__ bind(lhs_not_nan);
__ mov(exp_mask_reg, Operand(HeapNumber::kExponentMask));
__ bind(&lhs_not_nan_exp_mask_is_loaded);
__ and_(r4, rhs_exponent, Operand(exp_mask_reg));
__ cmp(r4, Operand(exp_mask_reg));
__ Sbfx(r4,
rhs_exponent,
HeapNumber::kExponentShift,
HeapNumber::kExponentBits);
// NaNs have all-one exponents so they sign extend to -1.
__ cmp(r4, Operand(-1));
__ b(ne, &neither_is_nan);
__ mov(r4,
Operand(rhs_exponent, LSL, HeapNumber::kNonMantissaBitsInTopWord),
@@ -7633,7 +7637,10 @@ static void GetInt32(MacroAssembler* masm,
// Get exponent word.
__ ldr(scratch, FieldMemOperand(source, HeapNumber::kExponentOffset));
// Get exponent alone in scratch2.
__ and_(scratch2, scratch, Operand(HeapNumber::kExponentMask));
__ Ubfx(scratch2,
scratch,
HeapNumber::kExponentShift,
HeapNumber::kExponentBits);
// Load dest with zero. We use this either for the final shift or
// for the answer.
__ mov(dest, Operand(0));
@@ -7641,9 +7648,14 @@ static void GetInt32(MacroAssembler* masm,
// A non-Smi integer is 1.xxx * 2^30 so the exponent is 30 (biased). This is
// the exponent that we are fastest at and also the highest exponent we can
// handle here.
const uint32_t non_smi_exponent =
(HeapNumber::kExponentBias + 30) << HeapNumber::kExponentShift;
__ cmp(scratch2, Operand(non_smi_exponent));
const uint32_t non_smi_exponent = HeapNumber::kExponentBias + 30;
// The non_smi_exponent, 0x41d, is too big for ARM's immediate field so we
// split it up to avoid a constant pool entry. You can't do that in general
// for cmp because of the overflow flag, but we know the exponent is in the
// range 0-2047 so there is no overflow.
int fudge_factor = 0x400;
__ sub(scratch2, scratch2, Operand(fudge_factor));
__ cmp(scratch2, Operand(non_smi_exponent - fudge_factor));
// If we have a match of the int32-but-not-Smi exponent then skip some logic.
__ b(eq, &right_exponent);
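
The no-overflow claim above can be checked exhaustively on the host over the 11-bit exponent range (constants mirror the code; editor's sketch, not part of the commit):

#include <cassert>

int main() {
  const int fudge = 0x400;
  const int non_smi_exponent = 1023 + 30;  // 0x41d
  // Subtracting the fudge from both sides never wraps for exponents in
  // [0, 2047], so the eq and lt outcomes match the unfudged comparison.
  for (int exp = 0; exp <= 2047; ++exp) {
    assert((exp == non_smi_exponent) ==
           ((exp - fudge) == (non_smi_exponent - fudge)));
    assert((exp < non_smi_exponent) ==
           ((exp - fudge) < (non_smi_exponent - fudge)));
  }
  return 0;
}
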
// If the exponent is higher than that then go to slow case. This catches
@@ -7653,17 +7665,14 @@ static void GetInt32(MacroAssembler* masm,
// We know the exponent is smaller than 30 (biased). If it is less than
// 0 (biased) then the number is smaller in magnitude than 1.0 * 2^0, ie
// it rounds to zero.
const uint32_t zero_exponent =
(HeapNumber::kExponentBias + 0) << HeapNumber::kExponentShift;
__ sub(scratch2, scratch2, Operand(zero_exponent), SetCC);
const uint32_t zero_exponent = HeapNumber::kExponentBias + 0;
__ sub(scratch2, scratch2, Operand(zero_exponent - fudge_factor), SetCC);
// Dest already has a Smi zero.
__ b(lt, &done);
if (!CpuFeatures::IsSupported(VFP3)) {
// We have a shifted exponent between 0 and 30 in scratch2.
__ mov(dest, Operand(scratch2, LSR, HeapNumber::kExponentShift));
// We now have the exponent in dest. Subtract from 30 to get
// how much to shift down.
__ rsb(dest, dest, Operand(30));
// We have an exponent between 0 and 30 in scratch2. Subtract from 30 to
// get how much to shift down.
__ rsb(dest, scratch2, Operand(30));
}
__ bind(&right_exponent);
if (CpuFeatures::IsSupported(VFP3)) {
@@ -8282,14 +8291,7 @@ void TranscendentalCacheStub::Generate(MacroAssembler* masm) {
__ eor(r1, r1, Operand(r1, LSR, 16));
__ eor(r1, r1, Operand(r1, LSR, 8));
ASSERT(IsPowerOf2(TranscendentalCache::kCacheSize));
if (CpuFeatures::IsSupported(ARMv7)) {
const int kTranscendentalCacheSizeBits = 9;
ASSERT_EQ(1 << kTranscendentalCacheSizeBits,
TranscendentalCache::kCacheSize);
__ ubfx(r1, r1, 0, kTranscendentalCacheSizeBits);
} else {
__ and_(r1, r1, Operand(TranscendentalCache::kCacheSize - 1));
}
__ And(r1, r1, Operand(TranscendentalCache::kCacheSize - 1));
// r2 = low 32 bits of double value.
// r3 = high 32 bits of double value.

@@ -873,7 +873,7 @@ void KeyedLoadIC::GenerateGeneric(MacroAssembler* masm) {
__ mov(r3, Operand(r2, ASR, KeyedLookupCache::kMapHashShift));
__ ldr(r4, FieldMemOperand(r0, String::kHashFieldOffset));
__ eor(r3, r3, Operand(r4, ASR, String::kHashShift));
__ and_(r3, r3, Operand(KeyedLookupCache::kCapacityMask));
__ And(r3, r3, Operand(KeyedLookupCache::kCapacityMask));
// Load the key (consisting of map and symbol) from the cache and
// check for match.
@@ -932,11 +932,11 @@ void KeyedLoadIC::GenerateGeneric(MacroAssembler* masm) {
// We want the smi-tagged index in r0. kArrayIndexValueMask has zeros in
// the low kHashShift bits.
ASSERT(String::kHashShift >= kSmiTagSize);
__ and_(r3, r3, Operand(String::kArrayIndexValueMask));
__ Ubfx(r3, r3, String::kHashShift, String::kArrayIndexValueBits);
// Here we actually clobber the key (r0) which will be used if calling into
// runtime later. However as the new key is the numeric value of a string key
// there is no difference in using either key.
__ mov(r0, Operand(r3, ASR, String::kHashShift - kSmiTagSize));
__ mov(r0, Operand(r3, LSL, kSmiTagSize));
// Now jump to the place where smi keys are handled.
__ jmp(&index_smi);
}
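
The replaced and/ASR pair and the new Ubfx/LSL pair produce the same smi-tagged index whenever kHashShift >= kSmiTagSize, which the ASSERT above guarantees. A small equivalence check; kHashShift = 2, kArrayIndexValueBits = 24 and kSmiTagSize = 1 below are illustrative stand-ins matching the layout the comment describes, not values quoted from the tree:

#include <cassert>
#include <cstdint>

int main() {
  // Assumed layout: low kHashShift bits are flags, the array index value sits
  // in the next kArrayIndexValueBits bits.
  const int kHashShift = 2;
  const int kArrayIndexValueBits = 24;
  const int kSmiTagSize = 1;
  const uint32_t kArrayIndexValueMask =
      ((1u << kArrayIndexValueBits) - 1) << kHashShift;

  for (uint32_t hash = 0; hash < (1u << 20); hash += 977) {
    // Old sequence: mask, then shift down leaving one smi tag bit.
    uint32_t old_way =
        (hash & kArrayIndexValueMask) >> (kHashShift - kSmiTagSize);
    // New sequence: ubfx the value field, then shift up by the smi tag size.
    uint32_t field = (hash >> kHashShift) & ((1u << kArrayIndexValueBits) - 1);
    uint32_t new_way = field << kSmiTagSize;
    assert(old_way == new_way);
  }
  return 0;
}
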
@@ -1665,32 +1665,29 @@ void KeyedStoreIC::GenerateExternalArray(MacroAssembler* masm,
if (CpuFeatures::IsSupported(VFP3)) {
CpuFeatures::Scope scope(VFP3);
// vldr requires offset to be a multiple of 4 so we can not
// include -kHeapObjectTag into it.
__ sub(r5, r0, Operand(kHeapObjectTag));
__ vldr(d0, r5, HeapNumber::kValueOffset);
if (array_type == kExternalFloatArray) {
// vldr requires offset to be a multiple of 4 so we can not
// include -kHeapObjectTag into it.
__ sub(r5, r0, Operand(kHeapObjectTag));
__ vldr(d0, r5, HeapNumber::kValueOffset);
__ vcvt_f32_f64(s0, d0);
__ vmov(r5, s0);
__ str(r5, MemOperand(r3, r4, LSL, 2));
} else {
Label done;
// Need to perform float-to-int conversion.
// Test for NaN.
__ vcmp(d0, d0);
// Move vector status bits to normal status bits.
__ vmrs(v8::internal::pc);
__ mov(r5, Operand(0), LeaveCC, vs); // NaN converts to 0.
__ b(vs, &done);
// Test for NaN or infinity (both give zero).
__ ldr(r6, FieldMemOperand(r5, HeapNumber::kExponentOffset));
// Test whether exponent equal to 0x7FF (infinity or NaN).
__ vmov(r6, r7, d0);
__ mov(r5, Operand(0x7FF00000));
__ and_(r6, r6, Operand(r5));
__ teq(r6, Operand(r5));
__ mov(r6, Operand(0), LeaveCC, eq);
// Hoisted load. vldr requires offset to be a multiple of 4 so we can not
// include -kHeapObjectTag into it.
__ sub(r5, r0, Operand(kHeapObjectTag));
__ vldr(d0, r5, HeapNumber::kValueOffset);
__ Sbfx(r6, r6, HeapNumber::kExponentShift, HeapNumber::kExponentBits);
// NaNs and Infinities have all-one exponents so they sign extend to -1.
__ cmp(r6, Operand(-1));
__ mov(r5, Operand(Smi::FromInt(0)), LeaveCC, eq);
// Not infinity or NaN simply convert to int.
if (IsElementTypeSigned(array_type)) {
@@ -1698,10 +1695,8 @@ void KeyedStoreIC::GenerateExternalArray(MacroAssembler* masm,
} else {
__ vcvt_u32_f64(s0, d0, ne);
}
__ vmov(r5, s0, ne);
__ bind(&done);
switch (array_type) {
case kExternalByteArray:
case kExternalUnsignedByteArray:

@@ -216,6 +216,60 @@ void MacroAssembler::Move(Register dst, Register src) {
}
void MacroAssembler::And(Register dst, Register src1, const Operand& src2,
Condition cond) {
if (!CpuFeatures::IsSupported(ARMv7) || src2.is_single_instruction()) {
and_(dst, src1, src2, LeaveCC, cond);
return;
}
int32_t immediate = src2.immediate();
if (immediate == 0) {
mov(dst, Operand(0), LeaveCC, cond);
return;
}
if (IsPowerOf2(immediate + 1) && ((immediate & 1) != 0)) {
ubfx(dst, src1, 0, WhichPowerOf2(immediate + 1), cond);
return;
}
and_(dst, src1, src2, LeaveCC, cond);
}
void MacroAssembler::Ubfx(Register dst, Register src1, int lsb, int width,
Condition cond) {
ASSERT(lsb < 32);
if (!CpuFeatures::IsSupported(ARMv7)) {
int mask = (1 << (width + lsb)) - 1 - ((1 << lsb) - 1);
and_(dst, src1, Operand(mask), LeaveCC, cond);
if (lsb != 0) {
mov(dst, Operand(dst, LSR, lsb), LeaveCC, cond);
}
} else {
ubfx(dst, src1, lsb, width, cond);
}
}
void MacroAssembler::Sbfx(Register dst, Register src1, int lsb, int width,
Condition cond) {
ASSERT(lsb < 32);
if (!CpuFeatures::IsSupported(ARMv7)) {
int mask = (1 << (width + lsb)) - 1 - ((1 << lsb) - 1);
and_(dst, src1, Operand(mask), LeaveCC, cond);
int shift_up = 32 - lsb - width;
int shift_down = lsb + shift_up;
if (shift_up != 0) {
mov(dst, Operand(dst, LSL, shift_up), LeaveCC, cond);
}
if (shift_down != 0) {
mov(dst, Operand(dst, ASR, shift_down), LeaveCC, cond);
}
} else {
sbfx(dst, src1, lsb, width, cond);
}
}
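
The pre-ARMv7 paths above emulate ubfx/sbfx with an and plus shifts; a host-side sketch checking that arithmetic against plain bitfield-extract semantics (illustration only, not V8 code):

#include <cassert>
#include <cstdint>

// Unsigned bitfield extract: the reference behaviour of ubfx.
static uint32_t UbfxRef(uint32_t v, int lsb, int width) {
  return (v >> lsb) & ((1u << width) - 1);
}

// The and/LSR fallback emitted when ARMv7 is unavailable.
static uint32_t UbfxFallback(uint32_t v, int lsb, int width) {
  uint32_t mask = ((1u << (width + lsb)) - 1) - ((1u << lsb) - 1);
  return (v & mask) >> lsb;
}

// The and/LSL/ASR fallback for sbfx: shift the field to the top, then
// arithmetic-shift it back down to sign extend. Relies on arithmetic right
// shift of negative values, as the utils.h comment later in this commit notes.
static int32_t SbfxFallback(uint32_t v, int lsb, int width) {
  uint32_t mask = ((1u << (width + lsb)) - 1) - ((1u << lsb) - 1);
  int shift_up = 32 - lsb - width;
  int shift_down = lsb + shift_up;
  int32_t r = static_cast<int32_t>((v & mask) << shift_up);
  return r >> shift_down;
}

int main() {
  const uint32_t v = 0xdeadbeef;
  assert(UbfxFallback(v, 4, 8) == UbfxRef(v, 4, 8));  // field is 0xee
  assert(SbfxFallback(v, 4, 8) == -18);               // 0xee sign extends to -18
  return 0;
}
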
void MacroAssembler::SmiJumpTable(Register index, Vector<Label*> targets) {
// Empty the const pool.
CheckConstPool(true, true);

@@ -93,6 +93,14 @@ class MacroAssembler: public Assembler {
Register scratch = no_reg,
Condition cond = al);
void And(Register dst, Register src1, const Operand& src2,
Condition cond = al);
void Ubfx(Register dst, Register src, int lsb, int width,
Condition cond = al);
void Sbfx(Register dst, Register src, int lsb, int width,
Condition cond = al);
void Call(Label* target);
void Move(Register dst, Handle<Object> value);
// May do nothing if the registers are identical.

@@ -1123,7 +1123,7 @@ class HeapNumber: public HeapObject {
static const uint32_t kExponentMask = 0x7ff00000u;
static const uint32_t kMantissaMask = 0xfffffu;
static const int kMantissaBits = 52;
static const int KExponentBits = 11;
static const int kExponentBits = 11;
static const int kExponentBias = 1023;
static const int kExponentShift = 20;
static const int kMantissaBitsInTopWord = 20;
@@ -2832,14 +2832,14 @@ class Code: public HeapObject {
// Flags layout.
static const int kFlagsICStateShift = 0;
static const int kFlagsICInLoopShift = 3;
static const int kFlagsKindShift = 4;
static const int kFlagsTypeShift = 8;
static const int kFlagsTypeShift = 4;
static const int kFlagsKindShift = 7;
static const int kFlagsArgumentsCountShift = 11;
static const int kFlagsICStateMask = 0x00000007; // 00000000111
static const int kFlagsICInLoopMask = 0x00000008; // 00000001000
static const int kFlagsKindMask = 0x000000F0; // 00011110000
static const int kFlagsTypeMask = 0x00000700; // 11100000000
static const int kFlagsTypeMask = 0x00000070; // 00001110000
static const int kFlagsKindMask = 0x00000780; // 11110000000
static const int kFlagsArgumentsCountMask = 0xFFFFF800;
static const int kFlagsNotUsedInLookup =

@@ -47,6 +47,41 @@ static inline bool IsPowerOf2(T x) {
}
// X must be a power of 2. Returns the number of trailing zeros.
template <typename T>
static inline int WhichPowerOf2(T x) {
ASSERT(IsPowerOf2(x));
ASSERT(x != 0);
if (x < 0) return 31;
int bits = 0;
#ifdef DEBUG
int original_x = x;
#endif
if (x >= 0x10000) {
bits += 16;
x >>= 16;
}
if (x >= 0x100) {
bits += 8;
x >>= 8;
}
if (x >= 0x10) {
bits += 4;
x >>= 4;
}
switch (x) {
default: UNREACHABLE();
case 8: bits++; // Fall through.
case 4: bits++; // Fall through.
case 2: bits++; // Fall through.
case 1: break;
}
ASSERT_EQ(1 << bits, original_x);
return bits;
return 0;
}
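
WhichPowerOf2 is what lets MacroAssembler::And (earlier in this commit) turn and rd, rn, #(2^n - 1) into a single ubfx on ARMv7. A quick usage check; the 0x1ff mask mirrors TranscendentalCache::kCacheSize - 1 used in the code stub change (editor's sketch, not part of the commit):

#include <cassert>

// Count trailing zeros of a power of two, the job WhichPowerOf2 does above.
static int TrailingZeros(unsigned x) {
  int bits = 0;
  while ((x & 1u) == 0) {
    x >>= 1;
    ++bits;
  }
  return bits;
}

int main() {
  const unsigned immediate = 0x1ff;            // kCacheSize - 1 = 511
  assert((immediate & 1u) != 0);               // low bit set
  assert(((immediate + 1) & immediate) == 0);  // immediate + 1 is a power of two
  assert(TrailingZeros(immediate + 1) == 9);   // so And() can emit ubfx dst, src, 0, 9
  return 0;
}
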
// The C++ standard leaves the semantics of '>>' undefined for
// negative signed operands. Most implementations do the right thing,
// though.

@@ -248,6 +248,45 @@ TEST(Type0) {
COMPARE(mvn(r5, Operand(r4), SetCC, cc),
"31f05004 mvnccs r5, r4");
// Instructions autotransformed by the assembler.
// mov -> mvn.
COMPARE(mov(r3, Operand(-1), LeaveCC, al),
"e3e03000 mvn r3, #0");
COMPARE(mov(r4, Operand(-2), SetCC, al),
"e3f04001 mvns r4, #1");
COMPARE(mov(r5, Operand(0x0ffffff0), SetCC, ne),
"13f052ff mvnnes r5, #-268435441");
COMPARE(mov(r6, Operand(-1), LeaveCC, ne),
"13e06000 mvnne r6, #0");
// mvn -> mov.
COMPARE(mvn(r3, Operand(-1), LeaveCC, al),
"e3a03000 mov r3, #0");
COMPARE(mvn(r4, Operand(-2), SetCC, al),
"e3b04001 movs r4, #1");
COMPARE(mvn(r5, Operand(0x0ffffff0), SetCC, ne),
"13b052ff movnes r5, #-268435441");
COMPARE(mvn(r6, Operand(-1), LeaveCC, ne),
"13a06000 movne r6, #0");
// and <-> bic.
COMPARE(and_(r3, r5, Operand(0xfc03ffff)),
"e3c537ff bic r3, r5, #66846720");
COMPARE(bic(r3, r5, Operand(0xfc03ffff)),
"e20537ff and r3, r5, #66846720");
// sub <-> add.
COMPARE(add(r3, r5, Operand(-1024)),
"e2453b01 sub r3, r5, #1024");
COMPARE(sub(r3, r5, Operand(-1024)),
"e2853b01 add r3, r5, #1024");
// cmp <-> cmn.
COMPARE(cmp(r3, Operand(-1024)),
"e3730b01 cmn r3, #1024");
COMPARE(cmn(r3, Operand(-1024)),
"e3530b01 cmp r3, #1024");
// Miscellaneous instructions encoded as type 0.
COMPARE(blx(ip),
"e12fff3c blx ip");