Port of optimized ICs for external and pixel arrays from ia32 to ARM.

Review URL: http://codereview.chromium.org/993002

git-svn-id: http://v8.googlecode.com/svn/branches/bleeding_edge@4228 ce2b1a6d-e550-0410-aec6-3dcde31c8c00
vegorov@chromium.org 2010-03-23 13:38:04 +00:00
parent 65115c2857
commit b7833cb5f1
13 changed files with 1498 additions and 332 deletions

View File

@ -1341,11 +1341,28 @@ void Assembler::vldr(const DwVfpRegister dst,
// Vdst(15-12) | 1011(11-8) | offset
ASSERT(CpuFeatures::IsEnabled(VFP3));
ASSERT(offset % 4 == 0);
ASSERT((offset / 4) < 256);
emit(cond | 0xD9*B20 | base.code()*B16 | dst.code()*B12 |
0xB*B8 | ((offset / 4) & 255));
}
void Assembler::vldr(const SwVfpRegister dst,
const Register base,
int offset,
const Condition cond) {
// Sdst = MEM(Rbase + offset).
// Instruction details available in ARM DDI 0406A, A8-628.
// cond(31-28) | 1101(27-24)| 1001(23-20) | Rbase(19-16) |
// Vdst(15-12) | 1010(11-8) | offset
ASSERT(CpuFeatures::IsEnabled(VFP3));
ASSERT(offset % 4 == 0);
ASSERT((offset / 4) < 256);
emit(cond | 0xD9*B20 | base.code()*B16 | dst.code()*B12 |
0xA*B8 | ((offset / 4) & 255));
}
void Assembler::vstr(const DwVfpRegister src,
const Register base,
int offset,
@ -1356,6 +1373,7 @@ void Assembler::vstr(const DwVfpRegister src,
// Vsrc(15-12) | 1011(11-8) | (offset/4)
ASSERT(CpuFeatures::IsEnabled(VFP3));
ASSERT(offset % 4 == 0);
ASSERT((offset / 4) < 256);
emit(cond | 0xD8*B20 | base.code()*B16 | src.code()*B12 |
0xB*B8 | ((offset / 4) & 255));
}
@ -1419,31 +1437,172 @@ void Assembler::vmov(const Register dst,
}
// Type of data to read from or write to VFP register.
// Used as specifier in generic vcvt instruction.
enum VFPType { S32, U32, F32, F64 };
static bool IsSignedVFPType(VFPType type) {
switch (type) {
case S32:
return true;
case U32:
return false;
default:
UNREACHABLE();
return false;
}
}
static bool IsIntegerVFPType(VFPType type) {
switch (type) {
case S32:
case U32:
return true;
case F32:
case F64:
return false;
default:
UNREACHABLE();
return false;
}
}
static bool IsDoubleVFPType(VFPType type) {
switch (type) {
case F32:
return false;
case F64:
return true;
default:
UNREACHABLE();
return false;
}
}
// Depending on split_last_bit, split the binary representation of reg_code
// into Vm:M or M:Vm form (where M is a single bit).
static void SplitRegCode(bool split_last_bit,
int reg_code,
int* vm,
int* m) {
if (split_last_bit) {
*m = reg_code & 0x1;
*vm = reg_code >> 1;
} else {
*m = (reg_code & 0x10) >> 4;
*vm = reg_code & 0x0F;
}
}
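To make the two split modes concrete, here is a minimal standalone sketch (not part of the patch) with register code 23 worked through both ways:
#include <cassert>
// The two split modes used below (standalone sketch, not V8 code).
static void Split(bool split_last_bit, int reg_code, int* vm, int* m) {
  if (split_last_bit) {
    *m = reg_code & 0x1;          // Vm:M - M is the lowest bit.
    *vm = reg_code >> 1;
  } else {
    *m = (reg_code & 0x10) >> 4;  // M:Vm - M is the fifth bit.
    *vm = reg_code & 0x0F;
  }
}
int main() {
  int vm, m;
  Split(true, 23, &vm, &m);   // s23 as Vm:M: Vm = 0b1011 (11), M = 1.
  assert(vm == 11 && m == 1);
  Split(false, 23, &vm, &m);  // Same code as M:Vm: M = 1, Vm = 0b0111 (7).
  assert(vm == 7 && m == 1);
  return 0;
}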
// Encode a vcvt.<dst_type>.<src_type> instruction.
static Instr EncodeVCVT(const VFPType dst_type,
const int dst_code,
const VFPType src_type,
const int src_code,
const Condition cond) {
if (IsIntegerVFPType(dst_type) || IsIntegerVFPType(src_type)) {
// Conversion between IEEE floating point and 32-bit integer.
// Instruction details available in ARM DDI 0406B, A8.6.295.
// cond(31-28) | 11101(27-23)| D(22) | 11(21-20) | 1(19) | opc2(18-16) |
// Vd(15-12) | 101(11-9) | sz(8) | op(7) | 1(6) | M(5) | 0(4) | Vm(3-0)
ASSERT(!IsIntegerVFPType(dst_type) || !IsIntegerVFPType(src_type));
int sz, opc2, D, Vd, M, Vm, op;
if (IsIntegerVFPType(dst_type)) {
opc2 = IsSignedVFPType(dst_type) ? 0x5 : 0x4;
sz = IsDoubleVFPType(src_type) ? 0x1 : 0x0;
op = 1; // round towards zero
SplitRegCode(!IsDoubleVFPType(src_type), src_code, &Vm, &M);
SplitRegCode(true, dst_code, &Vd, &D);
} else {
ASSERT(IsIntegerVFPType(src_type));
opc2 = 0x0;
sz = IsDoubleVFPType(dst_type) ? 0x1 : 0x0;
op = IsSignedVFPType(src_type) ? 0x1 : 0x0;
SplitRegCode(true, src_code, &Vm, &M);
SplitRegCode(!IsDoubleVFPType(dst_type), dst_code, &Vd, &D);
}
return (cond | 0xE*B24 | B23 | D*B22 | 0x3*B20 | B19 | opc2*B16 |
Vd*B12 | 0x5*B9 | sz*B8 | op*B7 | B6 | M*B5 | Vm);
} else {
// Conversion between IEEE double and single precision.
// Instruction details available in ARM DDI 0406B, A8.6.298.
// cond(31-28) | 11101(27-23)| D(22) | 11(21-20) | 0111(19-16) |
// Vd(15-12) | 101(11-9) | sz(8) | 1(7) | 1(6) | M(5) | 0(4) | Vm(3-0)
int sz, D, Vd, M, Vm;
ASSERT(IsDoubleVFPType(dst_type) != IsDoubleVFPType(src_type));
sz = IsDoubleVFPType(src_type) ? 0x1 : 0x0;
SplitRegCode(IsDoubleVFPType(src_type), dst_code, &Vd, &D);
SplitRegCode(!IsDoubleVFPType(src_type), src_code, &Vm, &M);
return (cond | 0xE*B24 | B23 | D*B22 | 0x3*B20 | 0x7*B16 |
Vd*B12 | 0x5*B9 | sz*B8 | B7 | B6 | M*B5 | Vm);
}
}
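As a worked check of the encoding (using the B* bit-position constants already defined for the assembler): vcvt_f64_s32(d7, s15) with the default al condition takes the integer-to-double branch, so opc2 = 0, sz = 1, op = 1; s15 splits as Vm = 7, M = 1 and d7 as Vd = 7, D = 0, which assembles to the instruction word 0xEEB87BE7.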
void Assembler::vcvt_f64_s32(const DwVfpRegister dst,
const SwVfpRegister src,
const Condition cond) {
ASSERT(CpuFeatures::IsEnabled(VFP3));
emit(EncodeVCVT(F64, dst.code(), S32, src.code(), cond));
}
void Assembler::vcvt_f32_s32(const SwVfpRegister dst,
const SwVfpRegister src,
const Condition cond) {
ASSERT(CpuFeatures::IsEnabled(VFP3));
emit(EncodeVCVT(F32, dst.code(), S32, src.code(), cond));
}
void Assembler::vcvt_f64_u32(const DwVfpRegister dst,
const SwVfpRegister src,
const Condition cond) {
ASSERT(CpuFeatures::IsEnabled(VFP3));
emit(EncodeVCVT(F64, dst.code(), U32, src.code(), cond));
}
void Assembler::vcvt_s32_f64(const SwVfpRegister dst,
const DwVfpRegister src,
const Condition cond) {
ASSERT(CpuFeatures::IsEnabled(VFP3));
emit(EncodeVCVT(S32, dst.code(), F64, src.code(), cond));
}
void Assembler::vcvt_u32_f64(const SwVfpRegister dst,
const DwVfpRegister src,
const Condition cond) {
ASSERT(CpuFeatures::IsEnabled(VFP3));
emit(EncodeVCVT(U32, dst.code(), F64, src.code(), cond));
}
void Assembler::vcvt_f64_f32(const DwVfpRegister dst,
const SwVfpRegister src,
const Condition cond) {
ASSERT(CpuFeatures::IsEnabled(VFP3));
emit(EncodeVCVT(F64, dst.code(), F32, src.code(), cond));
}
void Assembler::vcvt_f32_f64(const SwVfpRegister dst,
const DwVfpRegister src,
const Condition cond) {
ASSERT(CpuFeatures::IsEnabled(VFP3));
emit(EncodeVCVT(F32, dst.code(), F64, src.code(), cond));
}

View File

@ -826,6 +826,12 @@ class Assembler : public Malloced {
const Register base,
int offset, // Offset must be a multiple of 4.
const Condition cond = al);
void vldr(const SwVfpRegister dst,
const Register base,
int offset, // Offset must be a multiple of 4.
const Condition cond = al);
void vstr(const DwVfpRegister src,
const Register base,
int offset, // Offset must be a multiple of 4.
@ -844,10 +850,25 @@ class Assembler : public Malloced {
void vmov(const Register dst,
const SwVfpRegister src,
const Condition cond = al);
void vcvt_f64_s32(const DwVfpRegister dst,
const SwVfpRegister src,
const Condition cond = al);
void vcvt_f32_s32(const SwVfpRegister dst,
const SwVfpRegister src,
const Condition cond = al);
void vcvt_f64_u32(const DwVfpRegister dst,
const SwVfpRegister src,
const Condition cond = al);
void vcvt_s32_f64(const SwVfpRegister dst,
const DwVfpRegister src,
const Condition cond = al);
void vcvt_u32_f64(const SwVfpRegister dst,
const DwVfpRegister src,
const Condition cond = al);
void vcvt_f64_f32(const DwVfpRegister dst,
const SwVfpRegister src,
const Condition cond = al);
void vcvt_f32_f64(const SwVfpRegister dst,
const DwVfpRegister src,
const Condition cond = al);

View File

@ -4682,42 +4682,6 @@ void FastCloneShallowArrayStub::Generate(MacroAssembler* masm) {
}
// Count leading zeros in a 32 bit word. On ARM5 and later it uses the clz
// instruction. On pre-ARM5 hardware this routine gives the wrong answer for 0
// (31 instead of 32).
static void CountLeadingZeros(
MacroAssembler* masm,
Register source,
Register scratch,
Register zeros) {
#ifdef CAN_USE_ARMV5_INSTRUCTIONS
__ clz(zeros, source); // This instruction is only supported after ARM5.
#else
__ mov(zeros, Operand(0));
__ mov(scratch, source);
// Top 16.
__ tst(scratch, Operand(0xffff0000));
__ add(zeros, zeros, Operand(16), LeaveCC, eq);
__ mov(scratch, Operand(scratch, LSL, 16), LeaveCC, eq);
// Top 8.
__ tst(scratch, Operand(0xff000000));
__ add(zeros, zeros, Operand(8), LeaveCC, eq);
__ mov(scratch, Operand(scratch, LSL, 8), LeaveCC, eq);
// Top 4.
__ tst(scratch, Operand(0xf0000000));
__ add(zeros, zeros, Operand(4), LeaveCC, eq);
__ mov(scratch, Operand(scratch, LSL, 4), LeaveCC, eq);
// Top 2.
__ tst(scratch, Operand(0xc0000000));
__ add(zeros, zeros, Operand(2), LeaveCC, eq);
__ mov(scratch, Operand(scratch, LSL, 2), LeaveCC, eq);
// Top bit.
__ tst(scratch, Operand(0x80000000u));
__ add(zeros, zeros, Operand(1), LeaveCC, eq);
#endif
}
// Takes a Smi and converts to an IEEE 64 bit floating point value in two
// registers. The format is 1 sign bit, 11 exponent bits (biased 1023) and
// 52 fraction bits (20 in the first word, 32 in the second). Zeros is a
@ -4781,25 +4745,27 @@ void ConvertToDoubleStub::Generate(MacroAssembler* masm) {
__ and_(exponent, source_, Operand(HeapNumber::kSignMask), SetCC);
// Subtract from 0 if source was negative.
__ rsb(source_, source_, Operand(0), LeaveCC, ne);
// We have -1, 0 or 1, which we treat specially. Register source_ contains
// absolute value: it is either equal to 1 (special case of -1 and 1),
// greater than 1 (not a special case) or less than 1 (special case of 0).
__ cmp(source_, Operand(1));
__ b(gt, &not_special);
// For 1 or -1 we need to or in the 0 exponent (biased to 1023).
static const uint32_t exponent_word_for_1 =
HeapNumber::kExponentBias << HeapNumber::kExponentShift;
__ orr(exponent, exponent, Operand(exponent_word_for_1), LeaveCC, eq);
// 1, 0 and -1 all have 0 for the second word.
__ mov(mantissa, Operand(0));
__ Ret();
__ bind(&not_special);
// Count leading zeros. Uses mantissa for a scratch register on pre-ARMv5.
// Gets the wrong answer for 0, but we already checked for that case above.
__ CountLeadingZeros(source_, mantissa, zeros_);
// Compute exponent and or it into the exponent register.
// We use mantissa as a scratch register here.
__ rsb(mantissa, zeros_, Operand(31 + HeapNumber::kExponentBias));
__ orr(exponent,
exponent,
@ -4818,45 +4784,6 @@ void ConvertToDoubleStub::Generate(MacroAssembler* masm) {
}
// This stub can convert a signed int32 to a heap number (double). It does
// not work for int32s that are in Smi range! No GC occurs during this stub
// so you don't have to set up the frame.
class WriteInt32ToHeapNumberStub : public CodeStub {
public:
WriteInt32ToHeapNumberStub(Register the_int,
Register the_heap_number,
Register scratch)
: the_int_(the_int),
the_heap_number_(the_heap_number),
scratch_(scratch) { }
private:
Register the_int_;
Register the_heap_number_;
Register scratch_;
// Minor key encoding in 16 bits.
class ModeBits: public BitField<OverwriteMode, 0, 2> {};
class OpBits: public BitField<Token::Value, 2, 14> {};
Major MajorKey() { return WriteInt32ToHeapNumber; }
int MinorKey() {
// Encode the parameters in a unique 16 bit value.
return the_int_.code() +
(the_heap_number_.code() << 4) +
(scratch_.code() << 8);
}
void Generate(MacroAssembler* masm);
const char* GetName() { return "WriteInt32ToHeapNumberStub"; }
#ifdef DEBUG
void Print() { PrintF("WriteInt32ToHeapNumberStub\n"); }
#endif
};
// See comment for class.
void WriteInt32ToHeapNumberStub::Generate(MacroAssembler* masm) {
Label max_negative_int;
@ -5039,7 +4966,7 @@ static void EmitSmiNonsmiComparison(MacroAssembler* masm,
CpuFeatures::Scope scope(VFP3);
__ mov(r7, Operand(r1, ASR, kSmiTagSize));
__ vmov(s15, r7);
__ vcvt_f64_s32(d7, s15);
// Load the double from rhs, tagged HeapNumber r0, to d6.
__ sub(r7, r0, Operand(kHeapObjectTag));
__ vldr(d6, r7, HeapNumber::kValueOffset);
@ -5082,7 +5009,7 @@ static void EmitSmiNonsmiComparison(MacroAssembler* masm,
__ vldr(d7, r7, HeapNumber::kValueOffset);
__ mov(r7, Operand(r0, ASR, kSmiTagSize));
__ vmov(s13, r7);
__ vcvt_f64_s32(d6, s13);
} else {
__ push(lr);
// Load lhs to a double in r2, r3.
@ -5491,29 +5418,6 @@ void CompareStub::Generate(MacroAssembler* masm) {
}
// Allocates a heap number or jumps to the label if the young space is full and
// a scavenge is needed.
static void AllocateHeapNumber(
MacroAssembler* masm,
Label* need_gc, // Jump here if young space is full.
Register result, // The tagged address of the new heap number.
Register scratch1, // A scratch register.
Register scratch2) { // Another scratch register.
// Allocate an object in the heap for the heap number and tag it as a heap
// object.
__ AllocateInNewSpace(HeapNumber::kSize / kPointerSize,
result,
scratch1,
scratch2,
need_gc,
TAG_OBJECT);
// Get heap number map and store it in the allocated object.
__ LoadRoot(scratch1, Heap::kHeapNumberMapRootIndex);
__ str(scratch1, FieldMemOperand(result, HeapObject::kMapOffset));
}
// We fall into this code if the operands were Smis, but the result was
// not (e.g. overflow). We branch into this code (to the not_smi label) if
// the operands were not both Smi. The operands are in r0 and r1. In order
@ -5530,7 +5434,7 @@ static void HandleBinaryOpSlowCases(MacroAssembler* masm,
// Smi-smi case (overflow).
// Since both are Smis there is no heap number to overwrite, so allocate.
// The new heap number is in r5. r6 and r7 are scratch.
__ AllocateHeapNumber(r5, r6, r7, &slow);
// If we have floating point hardware, inline ADD, SUB, MUL, and DIV,
// using registers d7 and d6 for the double values.
@ -5540,10 +5444,10 @@ static void HandleBinaryOpSlowCases(MacroAssembler* masm,
CpuFeatures::Scope scope(VFP3);
__ mov(r7, Operand(r0, ASR, kSmiTagSize));
__ vmov(s15, r7);
__ vcvt_f64_s32(d7, s15);
__ mov(r7, Operand(r1, ASR, kSmiTagSize));
__ vmov(s13, r7);
__ vcvt_f64_s32(d6, s13);
} else {
// Write Smi from r0 to r3 and r2 in double format. r6 is scratch.
__ mov(r7, Operand(r0));
@ -5625,7 +5529,7 @@ static void HandleBinaryOpSlowCases(MacroAssembler* masm,
if (mode == NO_OVERWRITE) {
// In the case where there is no chance of an overwritable float we may as
// well do the allocation immediately while r0 and r1 are untouched.
__ AllocateHeapNumber(r5, r6, r7, &slow);
}
// Move r0 to a double in r2-r3.
@ -5650,7 +5554,7 @@ static void HandleBinaryOpSlowCases(MacroAssembler* masm,
__ bind(&r0_is_smi);
if (mode == OVERWRITE_RIGHT) {
// We can't overwrite a Smi so get address of new heap number into r5.
__ AllocateHeapNumber(r5, r6, r7, &slow);
}
if (use_fp_registers) {
@ -5658,7 +5562,7 @@ static void HandleBinaryOpSlowCases(MacroAssembler* masm,
// Convert smi in r0 to double in d7.
__ mov(r7, Operand(r0, ASR, kSmiTagSize));
__ vmov(s15, r7);
__ vcvt_f64_s32(d7, s15);
} else {
// Write Smi from r0 to r3 and r2 in double format.
__ mov(r7, Operand(r0));
@ -5692,7 +5596,7 @@ static void HandleBinaryOpSlowCases(MacroAssembler* masm,
__ bind(&r1_is_smi);
if (mode == OVERWRITE_LEFT) {
// We can't overwrite a Smi so get address of new heap number into r5.
__ AllocateHeapNumber(r5, r6, r7, &slow);
}
if (use_fp_registers) {
@ -5700,7 +5604,7 @@ static void HandleBinaryOpSlowCases(MacroAssembler* masm,
// Convert smi in r1 to double in d6.
__ mov(r7, Operand(r1, ASR, kSmiTagSize));
__ vmov(s13, r7);
__ vcvt_f64_s32(d6, s13);
} else {
// Write Smi from r1 to r1 and r0 in double format.
__ mov(r7, Operand(r1));
@ -5827,7 +5731,7 @@ static void GetInt32(MacroAssembler* masm,
// conversion using round to zero.
__ ldr(scratch2, FieldMemOperand(source, HeapNumber::kMantissaOffset));
__ vmov(d7, scratch2, scratch);
__ vcvt_s32_f64(s15, d7);
__ vmov(dest, s15);
} else {
// Get the top bits of the mantissa.
@ -5939,7 +5843,7 @@ void GenericBinaryOpStub::HandleNonSmiBitwiseOp(MacroAssembler* masm) {
}
case NO_OVERWRITE: {
// Get a new heap number in r5. r6 and r7 are scratch.
__ AllocateHeapNumber(r5, r6, r7, &slow);
}
default: break;
}
@ -5959,7 +5863,7 @@ void GenericBinaryOpStub::HandleNonSmiBitwiseOp(MacroAssembler* masm) {
if (mode_ != NO_OVERWRITE) {
__ bind(&have_to_allocate);
// Get a new heap number in r5. r6 and r7 are scratch.
__ AllocateHeapNumber(r5, r6, r7, &slow);
__ jmp(&got_a_heap_number);
}
@ -6377,7 +6281,7 @@ void GenericUnaryOpStub::Generate(MacroAssembler* masm) {
__ eor(r2, r2, Operand(HeapNumber::kSignMask)); // Flip sign.
__ str(r2, FieldMemOperand(r0, HeapNumber::kExponentOffset));
} else {
__ AllocateHeapNumber(r1, r2, r3, &slow);
__ ldr(r3, FieldMemOperand(r0, HeapNumber::kMantissaOffset));
__ ldr(r2, FieldMemOperand(r0, HeapNumber::kExponentOffset));
__ str(r3, FieldMemOperand(r1, HeapNumber::kMantissaOffset));
@ -6407,7 +6311,7 @@ void GenericUnaryOpStub::Generate(MacroAssembler* masm) {
// Allocate a fresh heap number, but don't overwrite r0 until
// we're sure we can do it without going through the slow case
// that needs the value in r0.
__ AllocateHeapNumber(r2, r3, r4, &slow);
__ mov(r0, Operand(r2));
}

View File

@ -660,6 +660,46 @@ class StringCompareStub: public CodeStub {
};
// This stub can convert a signed int32 to a heap number (double). It does
// not work for int32s that are in Smi range! No GC occurs during this stub
// so you don't have to set up the frame.
class WriteInt32ToHeapNumberStub : public CodeStub {
public:
WriteInt32ToHeapNumberStub(Register the_int,
Register the_heap_number,
Register scratch)
: the_int_(the_int),
the_heap_number_(the_heap_number),
scratch_(scratch) { }
private:
Register the_int_;
Register the_heap_number_;
Register scratch_;
// Minor key encoding in 16 bits.
class IntRegisterBits: public BitField<int, 0, 4> {};
class HeapNumberRegisterBits: public BitField<int, 4, 4> {};
class ScratchRegisterBits: public BitField<int, 8, 4> {};
Major MajorKey() { return WriteInt32ToHeapNumber; }
int MinorKey() {
// Encode the parameters in a unique 16 bit value.
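// For example, the (r1, r0, r3) instantiation used by the external array
// IC encodes as 1 | (0 << 4) | (3 << 8) == 0x301.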
return IntRegisterBits::encode(the_int_.code())
| HeapNumberRegisterBits::encode(the_heap_number_.code())
| ScratchRegisterBits::encode(scratch_.code());
}
void Generate(MacroAssembler* masm);
const char* GetName() { return "WriteInt32ToHeapNumberStub"; }
#ifdef DEBUG
void Print() { PrintF("WriteInt32ToHeapNumberStub\n"); }
#endif
};
class NumberToStringStub: public CodeStub {
public:
NumberToStringStub() { }

View File

@ -81,9 +81,27 @@ const char* VFPRegisters::names_[kNumVFPRegisters] = {
};
const char* VFPRegisters::Name(int reg, bool is_double) {
ASSERT((0 <= reg) && (reg < kNumVFPRegisters));
return names_[reg + (is_double ? kNumVFPSingleRegisters : 0)];
}
int VFPRegisters::Number(const char* name, bool* is_double) {
for (int i = 0; i < kNumVFPRegisters; i++) {
if (strcmp(names_[i], name) == 0) {
if (i < kNumVFPSingleRegisters) {
*is_double = false;
return i;
} else {
*is_double = true;
return i - kNumVFPSingleRegisters;
}
}
}
// No register with the requested name found.
return kNoRegister;
}
@ -104,7 +122,7 @@ int Registers::Number(const char* name) {
i++;
}
// No register with the requested name found.
return kNoRegister;
}

View File

@ -84,7 +84,10 @@ namespace arm {
static const int kNumRegisters = 16;
// VFP support.
static const int kNumVFPSingleRegisters = 32;
static const int kNumVFPDoubleRegisters = 16;
static const int kNumVFPRegisters =
kNumVFPSingleRegisters + kNumVFPDoubleRegisters;
// PC is register 15.
static const int kPCRegister = 15;
@ -254,6 +257,14 @@ class Instr {
inline int RtField() const { return Bits(15, 12); }
inline int PField() const { return Bit(24); }
inline int UField() const { return Bit(23); }
inline int Opc1Field() const { return (Bit(23) << 2) | Bits(21, 20); }
inline int Opc2Field() const { return Bits(19, 16); }
inline int Opc3Field() const { return Bits(7, 6); }
inline int SzField() const { return Bit(8); }
inline int VLField() const { return Bit(20); }
inline int VCField() const { return Bit(8); }
inline int VAField() const { return Bits(23, 21); }
inline int VBField() const { return Bits(6, 5); }
// Fields used in Data processing instructions
inline Opcode OpcodeField() const {
@ -344,7 +355,12 @@ class Registers {
class VFPRegisters {
public:
// Return the name of the register.
static const char* Name(int reg, bool is_double);
// Lookup the register number for the name provided.
// Sets the flag pointed to by is_double to true if the
// register is double-precision.
static int Number(const char* name, bool* is_double);
private:
static const char* names_[kNumVFPRegisters];

View File

@ -129,6 +129,10 @@ class Decoder {
void DecodeTypeVFP(Instr* instr);
void DecodeType6CoprocessorIns(Instr* instr);
void DecodeVMOVBetweenCoreAndSinglePrecisionRegisters(Instr* instr);
void DecodeVCMP(Instr* instr);
void DecodeVCVTBetweenDoubleAndSingle(Instr* instr);
void DecodeVCVTBetweenFloatingPointAndInteger(Instr* instr);
const disasm::NameConverter& converter_;
v8::internal::Vector<char> out_buffer_;
@ -181,12 +185,12 @@ void Decoder::PrintRegister(int reg) {
// Print the VFP S register name according to the active name converter.
void Decoder::PrintSRegister(int reg) {
Print(assembler::arm::VFPRegisters::Name(reg, false));
}
// Print the VFP D register name according to the active name converter.
void Decoder::PrintDRegister(int reg) {
Print(assembler::arm::VFPRegisters::Name(reg, true));
}
@ -930,43 +934,57 @@ void Decoder::DecodeUnconditional(Instr* instr) {
// VMRS
void Decoder::DecodeTypeVFP(Instr* instr) {
ASSERT((instr->TypeField() == 7) && (instr->Bit(24) == 0x0) );
ASSERT(instr->Bits(11, 9) == 0x5);
if (instr->Bit(4) == 0) {
if (instr->Opc1Field() == 0x7) {
// Other data processing instructions
if ((instr->Opc2Field() == 0x7) && (instr->Opc3Field() == 0x3)) {
DecodeVCVTBetweenDoubleAndSingle(instr);
} else if ((instr->Opc2Field() == 0x8) && (instr->Opc3Field() & 0x1)) {
DecodeVCVTBetweenFloatingPointAndInteger(instr);
} else if (((instr->Opc2Field() >> 1) == 0x6) &&
(instr->Opc3Field() & 0x1)) {
DecodeVCVTBetweenFloatingPointAndInteger(instr);
} else if (((instr->Opc2Field() == 0x4) || (instr->Opc2Field() == 0x5)) &&
(instr->Opc3Field() & 0x1)) {
DecodeVCMP(instr);
} else {
Unknown(instr); // Not used by V8.
}
} else if (instr->Opc1Field() == 0x3) {
if (instr->SzField() == 0x1) {
if (instr->Opc3Field() & 0x1) {
Format(instr, "vsub.f64'cond 'Dd, 'Dn, 'Dm");
} else {
Format(instr, "vadd.f64'cond 'Dd, 'Dn, 'Dm");
}
} else {
Unknown(instr); // Not used by V8.
}
} else if ((instr->Opc1Field() == 0x2) && !(instr->Opc3Field() & 0x1)) {
if (instr->SzField() == 0x1) {
Format(instr, "vmul.f64'cond 'Dd, 'Dn, 'Dm");
} else {
Unknown(instr); // Not used by V8.
}
} else if ((instr->Opc1Field() == 0x4) && !(instr->Opc3Field() & 0x1)) {
if (instr->SzField() == 0x1) {
Format(instr, "vdiv.f64'cond 'Dd, 'Dn, 'Dm");
} else {
Unknown(instr); // Not used by V8.
}
} else {
Unknown(instr); // Not used by V8.
}
} else {
if ((instr->VCField() == 0x0) &&
(instr->VAField() == 0x0)) {
DecodeVMOVBetweenCoreAndSinglePrecisionRegisters(instr);
} else if ((instr->VLField() == 0x1) &&
(instr->VCField() == 0x0) &&
(instr->VAField() == 0x7) &&
(instr->Bits(19, 16) == 0x1)) {
if (instr->Bits(15, 12) == 0xF)
Format(instr, "vmrs'cond APSR, FPSCR");
else
@ -974,43 +992,93 @@ void Decoder::DecodeTypeVFP(Instr* instr) {
} else {
Unknown(instr); // Not used by V8.
}
}
}
void Decoder::DecodeVMOVBetweenCoreAndSinglePrecisionRegisters(Instr* instr) {
ASSERT((instr->Bit(4) == 1) && (instr->VCField() == 0x0) &&
(instr->VAField() == 0x0));
bool to_arm_register = (instr->VLField() == 0x1);
if (to_arm_register) {
Format(instr, "vmov'cond 'rt, 'Sn");
} else {
Format(instr, "vmov'cond 'Sn, 'rt");
}
}
void Decoder::DecodeVCMP(Instr* instr) {
ASSERT((instr->Bit(4) == 0) && (instr->Opc1Field() == 0x7));
ASSERT(((instr->Opc2Field() == 0x4) || (instr->Opc2Field() == 0x5)) &&
(instr->Opc3Field() & 0x1));
// Comparison.
bool dp_operation = (instr->SzField() == 1);
bool raise_exception_for_qnan = (instr->Bit(7) == 0x1);
if (dp_operation && !raise_exception_for_qnan) {
Format(instr, "vcmp.f64'cond 'Dd, 'Dm");
} else {
Unknown(instr); // Not used by V8.
}
}
void Decoder::DecodeVCVTBetweenDoubleAndSingle(Instr* instr) {
ASSERT((instr->Bit(4) == 0) && (instr->Opc1Field() == 0x7));
ASSERT((instr->Opc2Field() == 0x7) && (instr->Opc3Field() == 0x3));
bool double_to_single = (instr->SzField() == 1);
if (double_to_single) {
Format(instr, "vcvt.f32.f64'cond 'Sd, 'Dm");
} else {
Format(instr, "vcvt.f64.f32'cond 'Dd, 'Sm");
}
}
void Decoder::DecodeVCVTBetweenFloatingPointAndInteger(Instr* instr) {
ASSERT((instr->Bit(4) == 0) && (instr->Opc1Field() == 0x7));
ASSERT(((instr->Opc2Field() == 0x8) && (instr->Opc3Field() & 0x1)) ||
(((instr->Opc2Field() >> 1) == 0x6) && (instr->Opc3Field() & 0x1)));
bool to_integer = (instr->Bit(18) == 1);
bool dp_operation = (instr->SzField() == 1);
if (to_integer) {
bool unsigned_integer = (instr->Bit(16) == 0);
if (dp_operation) {
if (unsigned_integer) {
Format(instr, "vcvt.u32.f64'cond 'Sd, 'Dm");
} else {
Format(instr, "vcvt.s32.f64'cond 'Sd, 'Dm");
}
} else {
if (unsigned_integer) {
Format(instr, "vcvt.u32.f32'cond 'Sd, 'Sm");
} else {
Format(instr, "vcvt.s32.f32'cond 'Sd, 'Sm");
}
}
} else {
bool unsigned_integer = (instr->Bit(7) == 0);
if (dp_operation) {
if (unsigned_integer) {
Format(instr, "vcvt.f64.u32'cond 'Dd, 'Sm");
} else {
Format(instr, "vcvt.f64.s32'cond 'Dd, 'Sm");
}
} else {
if (unsigned_integer) {
Format(instr, "vcvt.f32.u32'cond 'Sd, 'Sm");
} else {
Format(instr, "vcvt.f32.s32'cond 'Sd, 'Sm");
}
}
}
}
@ -1024,9 +1092,27 @@ void Decoder::DecodeTypeVFP(Instr* instr) {
void Decoder::DecodeType6CoprocessorIns(Instr* instr) {
ASSERT((instr->TypeField() == 6));
if (instr->CoprocessorField() == 0xA) {
switch (instr->OpcodeField()) {
case 0x8:
if (instr->HasL()) {
Format(instr, "vldr'cond 'Sd, ['rn - 4*'off8]");
} else {
Format(instr, "vstr'cond 'Sd, ['rn - 4*'off8]");
}
break;
case 0xC:
if (instr->HasL()) {
Format(instr, "vldr'cond 'Sd, ['rn + 4*'off8]");
} else {
Format(instr, "vstr'cond 'Sd, ['rn + 4*'off8]");
}
break;
default:
Unknown(instr); // Not used by V8.
break;
}
} else if (instr->CoprocessorField() == 0xB) {
switch (instr->OpcodeField()) {
case 0x2:
// Load and store double to two GP registers
@ -1056,6 +1142,8 @@ void Decoder::DecodeType6CoprocessorIns(Instr* instr) {
Unknown(instr); // Not used by V8.
break;
}
} else {
UNIMPLEMENTED(); // Not used by V8.
}
}

View File

@ -42,7 +42,6 @@ namespace internal {
#define __ ACCESS_MASM(masm)
// Helper function used from LoadIC/CallIC GenerateNormal.
static void GenerateDictionaryLoad(MacroAssembler* masm,
Label* miss,
@ -531,7 +530,7 @@ void KeyedLoadIC::GenerateGeneric(MacroAssembler* masm) {
// -- sp[0] : key
// -- sp[4] : receiver
// -----------------------------------
Label slow, fast;
Label slow, fast, check_pixel_array;
// Get the key and receiver object from the stack.
__ ldm(ia, sp, r0.bit() | r1.bit());
@ -569,6 +568,19 @@ void KeyedLoadIC::GenerateGeneric(MacroAssembler* masm) {
__ cmp(r0, Operand(r3));
__ b(lo, &fast);
// Check whether the elements is a pixel array.
__ bind(&check_pixel_array);
__ LoadRoot(ip, Heap::kPixelArrayMapRootIndex);
__ cmp(r3, ip);
__ b(ne, &slow);
__ ldr(ip, FieldMemOperand(r1, PixelArray::kLengthOffset));
__ cmp(r0, ip);
__ b(hs, &slow);
__ ldr(ip, FieldMemOperand(r1, PixelArray::kExternalPointerOffset));
__ ldrb(r0, MemOperand(ip, r0));
__ mov(r0, Operand(r0, LSL, kSmiTagSize)); // Tag result as smi.
__ Ret();
// Slow case: Push extra copies of the arguments (2).
__ bind(&slow);
__ IncrementCounter(&Counters::keyed_load_generic_slow, 1, r0, r1);
@ -599,10 +611,283 @@ void KeyedLoadIC::GenerateString(MacroAssembler* masm) {
}
// Convert an unsigned integer with a specified number of leading zeroes in
// its binary representation to an IEEE 754 double.
// The integer to convert is passed in register hiword.
// The resulting double is returned in registers hiword:loword.
// This function does not work correctly for 0.
static void GenerateUInt2Double(MacroAssembler* masm,
Register hiword,
Register loword,
Register scratch,
int leading_zeroes) {
const int meaningful_bits = kBitsPerInt - leading_zeroes - 1;
const int biased_exponent = HeapNumber::kExponentBias + meaningful_bits;
const int mantissa_shift_for_hi_word =
meaningful_bits - HeapNumber::kMantissaBitsInTopWord;
const int mantissa_shift_for_lo_word =
kBitsPerInt - mantissa_shift_for_hi_word;
__ mov(scratch, Operand(biased_exponent << HeapNumber::kExponentShift));
if (mantissa_shift_for_hi_word > 0) {
__ mov(loword, Operand(hiword, LSL, mantissa_shift_for_lo_word));
__ orr(hiword, scratch, Operand(hiword, LSR, mantissa_shift_for_hi_word));
} else {
__ mov(loword, Operand(0));
__ orr(hiword, scratch, Operand(hiword, LSL, mantissa_shift_for_hi_word));
}
// If the least significant bit of the biased exponent was not 1, it was
// corrupted by the most significant bit of the mantissa, so fix that here.
if (!(biased_exponent & 1)) {
__ bic(hiword, hiword, Operand(1 << HeapNumber::kExponentShift));
}
}
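For reference, a standalone C++ model of this bit manipulation (not part of the patch) that can be checked against the hardware conversion:
#include <cassert>
#include <cstdint>
#include <cstring>
// Standalone model of GenerateUInt2Double (not V8 code). 'value' must be
// nonzero and have exactly 'leading_zeroes' leading zero bits.
static void UInt2Double(uint32_t value, int leading_zeroes,
                        uint32_t* hiword, uint32_t* loword) {
  const int meaningful_bits = 32 - leading_zeroes - 1;
  const uint32_t biased_exponent = 1023u + (uint32_t)meaningful_bits;
  const int shift_for_hi = meaningful_bits - 20;  // bits beyond the hi word
  const uint32_t exponent_word = biased_exponent << 20;
  if (shift_for_hi > 0) {
    *loword = value << (32 - shift_for_hi);
    *hiword = exponent_word | (value >> shift_for_hi);
  } else {
    *loword = 0;
    *hiword = exponent_word | (value << -shift_for_hi);
  }
  // The leading 1 of 'value' lands on the exponent's least significant bit
  // (bit 20); clear that bit again if the exponent needs it to be 0.
  if (!(biased_exponent & 1u)) *hiword &= ~(1u << 20);
}
int main() {
  uint32_t hi, lo;
  uint64_t bits;
  double d;
  UInt2Double(5u, 29, &hi, &lo);  // 5 = 0b101: 29 leading zeros.
  bits = ((uint64_t)hi << 32) | lo;
  std::memcpy(&d, &bits, sizeof(d));
  assert(d == 5.0);
  UInt2Double(0x80000000u, 0, &hi, &lo);  // 2^31.
  bits = ((uint64_t)hi << 32) | lo;
  std::memcpy(&d, &bits, sizeof(d));
  assert(d == 2147483648.0);
  return 0;
}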
void KeyedLoadIC::GenerateExternalArray(MacroAssembler* masm,
ExternalArrayType array_type) {
// ---------- S t a t e --------------
// -- lr : return address
// -- sp[0] : key
// -- sp[4] : receiver
// -----------------------------------
Label slow, failed_allocation;
// Get the key and receiver object from the stack.
__ ldm(ia, sp, r0.bit() | r1.bit());
// r0: key
// r1: receiver object
// Check that the object isn't a smi
__ BranchOnSmi(r1, &slow);
// Check that the key is a smi.
__ BranchOnNotSmi(r0, &slow);
// Check that the object is a JS object. Load map into r2.
__ CompareObjectType(r1, r2, r3, FIRST_JS_OBJECT_TYPE);
__ b(lt, &slow);
// Check that the receiver does not require access checks. We need
// to check this explicitly since this generic stub does not perform
// map checks.
__ ldrb(r3, FieldMemOperand(r2, Map::kBitFieldOffset));
__ tst(r3, Operand(1 << Map::kIsAccessCheckNeeded));
__ b(ne, &slow);
// Check that the elements array is the appropriate type of
// ExternalArray.
// r0: index (as a smi)
// r1: JSObject
__ ldr(r1, FieldMemOperand(r1, JSObject::kElementsOffset));
__ ldr(r2, FieldMemOperand(r1, HeapObject::kMapOffset));
__ LoadRoot(ip, Heap::RootIndexForExternalArrayType(array_type));
__ cmp(r2, ip);
__ b(ne, &slow);
// Check that the index is in range.
__ ldr(ip, FieldMemOperand(r1, ExternalArray::kLengthOffset));
__ cmp(ip, Operand(r0, ASR, kSmiTagSize));
// Unsigned comparison catches both negative and too-large values.
__ b(ls, &slow);
// r0: index (smi)
// r1: elements array
__ ldr(r1, FieldMemOperand(r1, ExternalArray::kExternalPointerOffset));
// r1: base pointer of external storage
// We are not untagging the smi key; instead we work with it
// as if it were premultiplied by 2.
ASSERT((kSmiTag == 0) && (kSmiTagSize == 1));
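// The key is 2 * index: byte accesses shift it right by one (index bytes),
// 16-bit accesses use it unshifted (2 * index bytes) and 32-bit accesses
// shift it left by one (4 * index bytes).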
switch (array_type) {
case kExternalByteArray:
__ ldrsb(r0, MemOperand(r1, r0, LSR, 1));
break;
case kExternalUnsignedByteArray:
__ ldrb(r0, MemOperand(r1, r0, LSR, 1));
break;
case kExternalShortArray:
__ ldrsh(r0, MemOperand(r1, r0, LSL, 0));
break;
case kExternalUnsignedShortArray:
__ ldrh(r0, MemOperand(r1, r0, LSL, 0));
break;
case kExternalIntArray:
case kExternalUnsignedIntArray:
__ ldr(r0, MemOperand(r1, r0, LSL, 1));
break;
case kExternalFloatArray:
if (CpuFeatures::IsSupported(VFP3)) {
CpuFeatures::Scope scope(VFP3);
__ add(r0, r1, Operand(r0, LSL, 1));
__ vldr(s0, r0, 0);
} else {
__ ldr(r0, MemOperand(r1, r0, LSL, 1));
}
break;
default:
UNREACHABLE();
break;
}
// For integer array types:
// r0: value
// For floating-point array type
// s0: value (if VFP3 is supported)
// r0: value (if VFP3 is not supported)
if (array_type == kExternalIntArray) {
// For the Int and UnsignedInt array types, we need to see whether
// the value can be represented in a Smi. If not, we need to convert
// it to a HeapNumber.
Label box_int;
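// r0 - 0xC0000000 is negative (mi) exactly when bits 31 and 30 of r0
// differ, i.e. when the int32 does not fit in a 31-bit smi.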
__ cmp(r0, Operand(0xC0000000));
__ b(mi, &box_int);
__ mov(r0, Operand(r0, LSL, kSmiTagSize));
__ Ret();
__ bind(&box_int);
__ mov(r1, r0);
// Allocate a HeapNumber for the int and perform int-to-double
// conversion.
__ AllocateHeapNumber(r0, r3, r4, &slow);
if (CpuFeatures::IsSupported(VFP3)) {
CpuFeatures::Scope scope(VFP3);
__ vmov(s0, r1);
__ vcvt_f64_s32(d0, s0);
__ sub(r1, r0, Operand(kHeapObjectTag));
__ vstr(d0, r1, HeapNumber::kValueOffset);
__ Ret();
} else {
WriteInt32ToHeapNumberStub stub(r1, r0, r3);
__ TailCallStub(&stub);
}
} else if (array_type == kExternalUnsignedIntArray) {
// The test is different for unsigned int values. Since we need
// the value to be in the range of a positive smi, we can't
// handle either of the top two bits being set in the value.
if (CpuFeatures::IsSupported(VFP3)) {
CpuFeatures::Scope scope(VFP3);
Label box_int, done;
__ tst(r0, Operand(0xC0000000));
__ b(ne, &box_int);
__ mov(r0, Operand(r0, LSL, kSmiTagSize));
__ Ret();
__ bind(&box_int);
__ vmov(s0, r0);
__ AllocateHeapNumber(r0, r1, r2, &slow);
__ vcvt_f64_u32(d0, s0);
__ sub(r1, r0, Operand(kHeapObjectTag));
__ vstr(d0, r1, HeapNumber::kValueOffset);
__ Ret();
} else {
// Check whether unsigned integer fits into smi.
Label box_int_0, box_int_1, done;
__ tst(r0, Operand(0x80000000));
__ b(ne, &box_int_0);
__ tst(r0, Operand(0x40000000));
__ b(ne, &box_int_1);
// Tag integer as smi and return it.
__ mov(r0, Operand(r0, LSL, kSmiTagSize));
__ Ret();
__ bind(&box_int_0);
// Integer does not have leading zeros.
GenerateUInt2Double(masm, r0, r1, r2, 0);
__ b(&done);
__ bind(&box_int_1);
// Integer has one leading zero.
GenerateUInt2Double(masm, r0, r1, r2, 1);
__ bind(&done);
// Integer was converted to double in registers r0:r1.
// Wrap it into a HeapNumber.
__ AllocateHeapNumber(r2, r3, r5, &slow);
__ str(r0, FieldMemOperand(r2, HeapNumber::kExponentOffset));
__ str(r1, FieldMemOperand(r2, HeapNumber::kMantissaOffset));
__ mov(r0, r2);
__ Ret();
}
} else if (array_type == kExternalFloatArray) {
// For the floating-point array type, we need to always allocate a
// HeapNumber.
if (CpuFeatures::IsSupported(VFP3)) {
CpuFeatures::Scope scope(VFP3);
__ AllocateHeapNumber(r0, r1, r2, &slow);
__ vcvt_f64_f32(d0, s0);
__ sub(r1, r0, Operand(kHeapObjectTag));
__ vstr(d0, r1, HeapNumber::kValueOffset);
__ Ret();
} else {
__ AllocateHeapNumber(r3, r1, r2, &slow);
// VFP is not available, do manual single to double conversion.
// r0: floating point value (binary32)
// Extract mantissa to r1.
__ and_(r1, r0, Operand(kBinary32MantissaMask));
// Extract exponent to r2.
__ mov(r2, Operand(r0, LSR, kBinary32MantissaBits));
__ and_(r2, r2, Operand(kBinary32ExponentMask >> kBinary32MantissaBits));
Label exponent_rebiased;
__ teq(r2, Operand(0x00));
__ b(eq, &exponent_rebiased);
__ teq(r2, Operand(0xff));
__ mov(r2, Operand(0x7ff), LeaveCC, eq);
__ b(eq, &exponent_rebiased);
// Rebias exponent.
__ add(r2,
r2,
Operand(-kBinary32ExponentBias + HeapNumber::kExponentBias));
__ bind(&exponent_rebiased);
__ and_(r0, r0, Operand(kBinary32SignMask));
__ orr(r0, r0, Operand(r2, LSL, HeapNumber::kMantissaBitsInTopWord));
// Shift mantissa.
static const int kMantissaShiftForHiWord =
kBinary32MantissaBits - HeapNumber::kMantissaBitsInTopWord;
static const int kMantissaShiftForLoWord =
kBitsPerInt - kMantissaShiftForHiWord;
__ orr(r0, r0, Operand(r1, LSR, kMantissaShiftForHiWord));
__ mov(r1, Operand(r1, LSL, kMantissaShiftForLoWord));
__ str(r0, FieldMemOperand(r3, HeapNumber::kExponentOffset));
__ str(r1, FieldMemOperand(r3, HeapNumber::kMantissaOffset));
__ mov(r0, r3);
__ Ret();
}
} else {
__ mov(r0, Operand(r0, LSL, kSmiTagSize));
__ Ret();
}
// Slow case: Load name and receiver from stack and jump to runtime.
__ bind(&slow);
__ IncrementCounter(&Counters::keyed_load_external_array_slow, 1, r0, r1);
GenerateRuntimeGetProperty(masm);
}
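The manual widening above amounts to moving the sign, rebiasing the exponent from 127 to 1023 (with 0 and 0xFF special-cased) and left-aligning the 23 mantissa bits in the 52-bit field; a standalone C++ model (not part of the patch):
#include <cassert>
#include <cstdint>
#include <cstring>
static uint64_t Float32ToFloat64Bits(uint32_t f) {
  uint64_t sign = (uint64_t)(f & 0x80000000u) << 32;
  uint32_t exponent = (f >> 23) & 0xffu;
  uint64_t mantissa = (uint64_t)(f & 0x007fffffu) << 29;  // 52 - 23
  if (exponent == 0xffu) {
    return sign | (0x7ffull << 52) | mantissa;  // Infinity or NaN.
  }
  if (exponent == 0u) {
    // Exponent stays 0: exact for +/-0; like the stub, binary32
    // subnormals are not renormalized.
    return sign | mantissa;
  }
  return sign | ((uint64_t)(exponent - 127u + 1023u) << 52) | mantissa;
}
int main() {
  const float tests[] = {0.0f, 1.5f, -2.25f, 1e20f};
  for (unsigned i = 0; i < sizeof(tests) / sizeof(tests[0]); i++) {
    uint32_t in;
    std::memcpy(&in, &tests[i], sizeof(in));
    uint64_t out = Float32ToFloat64Bits(in);
    double d;
    std::memcpy(&d, &out, sizeof(d));
    assert(d == (double)tests[i]);
  }
  return 0;
}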
@ -683,7 +968,7 @@ void KeyedStoreIC::GenerateGeneric(MacroAssembler* masm) {
// -- sp[0] : key
// -- sp[1] : receiver
// -----------------------------------
Label slow, fast, array, extra, exit;
Label slow, fast, array, extra, exit, check_pixel_array;
// Get the key and the object from the stack.
__ ldm(ia, sp, r1.bit() | r3.bit()); // r1 = key, r3 = receiver
@ -716,7 +1001,7 @@ void KeyedStoreIC::GenerateGeneric(MacroAssembler* masm) {
__ ldr(r2, FieldMemOperand(r3, HeapObject::kMapOffset));
__ LoadRoot(ip, Heap::kFixedArrayMapRootIndex);
__ cmp(r2, ip);
__ b(ne, &slow);
__ b(ne, &check_pixel_array);
// Untag the key (for checking against untagged length in the fixed array).
__ mov(r1, Operand(r1, ASR, kSmiTagSize));
// Compute address to store into and check array bounds.
@ -731,6 +1016,37 @@ void KeyedStoreIC::GenerateGeneric(MacroAssembler* masm) {
__ bind(&slow);
GenerateRuntimeSetProperty(masm);
// Check whether the elements is a pixel array.
// r0: value
// r1: index (as a smi), zero-extended.
// r3: elements array
__ bind(&check_pixel_array);
__ LoadRoot(ip, Heap::kPixelArrayMapRootIndex);
__ cmp(r2, ip);
__ b(ne, &slow);
// Check that the value is a smi. If a conversion is needed call into the
// runtime to convert and clamp.
__ BranchOnNotSmi(r0, &slow);
__ mov(r1, Operand(r1, ASR, kSmiTagSize)); // Untag the key.
__ ldr(ip, FieldMemOperand(r3, PixelArray::kLengthOffset));
__ cmp(r1, Operand(ip));
__ b(hs, &slow);
__ mov(r4, r0); // Save the value.
__ mov(r0, Operand(r0, ASR, kSmiTagSize)); // Untag the value.
{ // Clamp the value to [0..255].
Label done;
__ tst(r0, Operand(0xFFFFFF00));
__ b(eq, &done);
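// When not in range, the sign of the AND result equals the sign of the
// value, so mi selects 0 for negatives and pl selects 255 for values > 255.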
__ mov(r0, Operand(0), LeaveCC, mi); // 0 if negative.
__ mov(r0, Operand(255), LeaveCC, pl); // 255 if positive.
__ bind(&done);
}
__ ldr(r2, FieldMemOperand(r3, PixelArray::kExternalPointerOffset));
__ strb(r0, MemOperand(r2, r1));
__ mov(r0, Operand(r4)); // Return the original value.
__ Ret();
// Extra capacity case: Check if there is extra capacity to
// perform the store and update the length. Used for adding one
// element to the array by writing to array[array.length].
@ -793,10 +1109,376 @@ void KeyedStoreIC::GenerateGeneric(MacroAssembler* masm) {
}
// Convert the int passed in register ival to an IEEE 754 single precision
// floating point value and store it into register fval.
// If VFP3 is available use it for conversion.
static void ConvertIntToFloat(MacroAssembler* masm,
Register ival,
Register fval,
Register scratch1,
Register scratch2) {
if (CpuFeatures::IsSupported(VFP3)) {
CpuFeatures::Scope scope(VFP3);
__ vmov(s0, ival);
__ vcvt_f32_s32(s0, s0);
__ vmov(fval, s0);
} else {
Label not_special, done;
// Move sign bit from source to destination. This works because the sign
// bit of a binary32 value has the same position and polarity as the 2's
// complement sign bit of an int.
ASSERT(kBinary32SignMask == 0x80000000u);
__ and_(fval, ival, Operand(kBinary32SignMask), SetCC);
// Negate value if it is negative.
__ rsb(ival, ival, Operand(0), LeaveCC, ne);
// We have -1, 0 or 1, which we treat specially. Register ival contains
// absolute value: it is either equal to 1 (special case of -1 and 1),
// greater than 1 (not a special case) or less than 1 (special case of 0).
__ cmp(ival, Operand(1));
__ b(gt, &not_special);
// For 1 or -1 we need to or in the 0 exponent (biased).
static const uint32_t exponent_word_for_1 =
kBinary32ExponentBias << kBinary32ExponentShift;
__ orr(fval, fval, Operand(exponent_word_for_1), LeaveCC, eq);
__ b(&done);
__ bind(&not_special);
// Count leading zeros.
// Gets the wrong answer for 0, but we already checked for that case above.
Register zeros = scratch2;
__ CountLeadingZeros(ival, scratch1, zeros);
// Compute exponent and or it into the exponent register.
__ rsb(scratch1,
zeros,
Operand((kBitsPerInt - 1) + kBinary32ExponentBias));
__ orr(fval,
fval,
Operand(scratch1, LSL, kBinary32ExponentShift));
// Shift up the source chopping the top bit off.
__ add(zeros, zeros, Operand(1));
// This wouldn't work for 1 and -1 as the shift would be 32 which means 0.
__ mov(ival, Operand(ival, LSL, zeros));
// Or in the top of the mantissa (top 23 bits).
__ orr(fval,
fval,
Operand(ival, LSR, kBitsPerInt - kBinary32MantissaBits));
__ bind(&done);
}
}
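The non-VFP path can be modeled in standalone C++ (not part of the patch; __builtin_clz is a GCC/Clang builtin standing in for CountLeadingZeros). Like the stub it truncates rather than rounds the mantissa, so the test values are chosen to be exactly representable:
#include <cassert>
#include <cstdint>
#include <cstring>
static uint32_t IntToFloatBits(int32_t ival) {
  uint32_t bits = (uint32_t)ival;
  uint32_t fval = bits & 0x80000000u;             // Sign bit.
  uint32_t v = fval ? (0u - bits) : bits;         // Absolute value.
  if (v <= 1u) {
    // 0 keeps a zero word; 1 and -1 just need the biased 0 exponent.
    return (v == 0u) ? fval : (fval | (127u << 23));
  }
  int zeros = __builtin_clz(v);
  uint32_t exponent = (uint32_t)(31 - zeros) + 127u;
  fval |= exponent << 23;
  v <<= zeros + 1;                                // Chop off the leading 1.
  fval |= v >> 9;                                 // Top 23 mantissa bits.
  return fval;
}
int main() {
  const int32_t tests[] = {0, 1, -1, 5, -42, 1 << 20};
  for (unsigned i = 0; i < sizeof(tests) / sizeof(tests[0]); i++) {
    uint32_t bits = IntToFloatBits(tests[i]);
    float f;
    std::memcpy(&f, &bits, sizeof(f));
    assert(f == (float)tests[i]);
  }
  return 0;
}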
static bool IsElementTypeSigned(ExternalArrayType array_type) {
switch (array_type) {
case kExternalByteArray:
case kExternalShortArray:
case kExternalIntArray:
return true;
case kExternalUnsignedByteArray:
case kExternalUnsignedShortArray:
case kExternalUnsignedIntArray:
return false;
default:
UNREACHABLE();
return false;
}
}
void KeyedStoreIC::GenerateExternalArray(MacroAssembler* masm,
ExternalArrayType array_type) {
// ---------- S t a t e --------------
// -- r0 : value
// -- lr : return address
// -- sp[0] : key
// -- sp[1] : receiver
// -----------------------------------
Label slow, check_heap_number;
// Get the key and the object from the stack.
__ ldm(ia, sp, r1.bit() | r2.bit()); // r1 = key, r2 = receiver
// Check that the object isn't a smi.
__ BranchOnSmi(r2, &slow);
// Check that the object is a JS object. Load map into r3
__ CompareObjectType(r2, r3, r4, FIRST_JS_OBJECT_TYPE);
__ b(le, &slow);
// Check that the receiver does not require access checks. We need
// to do this because this generic stub does not perform map checks.
__ ldrb(ip, FieldMemOperand(r3, Map::kBitFieldOffset));
__ tst(ip, Operand(1 << Map::kIsAccessCheckNeeded));
__ b(ne, &slow);
// Check that the key is a smi.
__ BranchOnNotSmi(r1, &slow);
// Check that the elements array is the appropriate type of
// ExternalArray.
// r0: value
// r1: index (smi)
// r2: object
__ ldr(r2, FieldMemOperand(r2, JSObject::kElementsOffset));
__ ldr(r3, FieldMemOperand(r2, HeapObject::kMapOffset));
__ LoadRoot(ip, Heap::RootIndexForExternalArrayType(array_type));
__ cmp(r3, ip);
__ b(ne, &slow);
// Check that the index is in range.
__ mov(r1, Operand(r1, ASR, kSmiTagSize)); // Untag the index.
__ ldr(ip, FieldMemOperand(r2, ExternalArray::kLengthOffset));
__ cmp(r1, ip);
// Unsigned comparison catches both negative and too-large values.
__ b(hs, &slow);
// Handle both smis and HeapNumbers in the fast path. Go to the
// runtime for all other kinds of values.
// r0: value
// r1: index (integer)
// r2: array
__ BranchOnNotSmi(r0, &check_heap_number);
__ mov(r3, Operand(r0, ASR, kSmiTagSize)); // Untag the value.
__ ldr(r2, FieldMemOperand(r2, ExternalArray::kExternalPointerOffset));
// r1: index (integer)
// r2: base pointer of external storage
// r3: value (integer)
switch (array_type) {
case kExternalByteArray:
case kExternalUnsignedByteArray:
__ strb(r3, MemOperand(r2, r1, LSL, 0));
break;
case kExternalShortArray:
case kExternalUnsignedShortArray:
__ strh(r3, MemOperand(r2, r1, LSL, 1));
break;
case kExternalIntArray:
case kExternalUnsignedIntArray:
__ str(r3, MemOperand(r2, r1, LSL, 2));
break;
case kExternalFloatArray:
// Need to perform int-to-float conversion.
ConvertIntToFloat(masm, r3, r4, r5, r6);
__ str(r4, MemOperand(r2, r1, LSL, 2));
break;
default:
UNREACHABLE();
break;
}
// r0: value
__ Ret();
// r0: value
// r1: index (integer)
// r2: external array object
__ bind(&check_heap_number);
__ CompareObjectType(r0, r3, r4, HEAP_NUMBER_TYPE);
__ b(ne, &slow);
__ ldr(r2, FieldMemOperand(r2, ExternalArray::kExternalPointerOffset));
// The WebGL specification leaves the behavior of storing NaN and
// +/-Infinity into integer arrays basically undefined. For more
// reproducible behavior, convert these to zero.
if (CpuFeatures::IsSupported(VFP3)) {
CpuFeatures::Scope scope(VFP3);
// vldr requires offset to be a multiple of 4 so we can not
// include -kHeapObjectTag into it.
__ sub(r3, r0, Operand(kHeapObjectTag));
__ vldr(d0, r3, HeapNumber::kValueOffset);
if (array_type == kExternalFloatArray) {
__ vcvt_f32_f64(s0, d0);
__ vmov(r3, s0);
__ str(r3, MemOperand(r2, r1, LSL, 2));
} else {
Label done;
// Need to perform float-to-int conversion.
// Test for NaN.
__ vcmp(d0, d0);
// Move vector status bits to normal status bits.
__ vmrs(v8::internal::pc);
__ mov(r3, Operand(0), LeaveCC, vs); // NaN converts to 0
__ b(vs, &done);
// Test whether exponent equal to 0x7FF (infinity or NaN)
__ vmov(r4, r3, d0);
__ mov(r5, Operand(0x7FF00000));
__ and_(r3, r3, Operand(r5));
__ teq(r3, Operand(r5));
__ mov(r3, Operand(0), LeaveCC, eq);
// Not infinity or NaN simply convert to int
if (IsElementTypeSigned(array_type)) {
__ vcvt_s32_f64(s0, d0, ne);
} else {
__ vcvt_u32_f64(s0, d0, ne);
}
__ vmov(r3, s0, ne);
__ bind(&done);
switch (array_type) {
case kExternalByteArray:
case kExternalUnsignedByteArray:
__ strb(r3, MemOperand(r2, r1, LSL, 0));
break;
case kExternalShortArray:
case kExternalUnsignedShortArray:
__ strh(r3, MemOperand(r2, r1, LSL, 1));
break;
case kExternalIntArray:
case kExternalUnsignedIntArray:
__ str(r3, MemOperand(r2, r1, LSL, 2));
break;
default:
UNREACHABLE();
break;
}
}
// r0: original value
__ Ret();
} else {
// VFP3 is not available, do manual conversions.
__ ldr(r3, FieldMemOperand(r0, HeapNumber::kExponentOffset));
__ ldr(r4, FieldMemOperand(r0, HeapNumber::kMantissaOffset));
if (array_type == kExternalFloatArray) {
Label done, nan_or_infinity_or_zero;
static const int kMantissaInHiWordShift =
kBinary32MantissaBits - HeapNumber::kMantissaBitsInTopWord;
static const int kMantissaInLoWordShift =
kBitsPerInt - kMantissaInHiWordShift;
// Test for all special exponent values: zeros, subnormal numbers, NaNs
// and infinities. All these should be converted to 0.
__ mov(r5, Operand(HeapNumber::kExponentMask));
__ and_(r6, r3, Operand(r5), SetCC);
__ b(eq, &nan_or_infinity_or_zero);
__ teq(r6, Operand(r5));
__ mov(r6, Operand(kBinary32ExponentMask), LeaveCC, eq);
__ b(eq, &nan_or_infinity_or_zero);
// Rebias exponent.
__ mov(r6, Operand(r6, LSR, HeapNumber::kExponentShift));
__ add(r6,
r6,
Operand(kBinary32ExponentBias - HeapNumber::kExponentBias));
__ cmp(r6, Operand(kBinary32MaxExponent));
__ and_(r3, r3, Operand(HeapNumber::kSignMask), LeaveCC, gt);
__ orr(r3, r3, Operand(kBinary32ExponentMask), LeaveCC, gt);
__ b(gt, &done);
__ cmp(r6, Operand(kBinary32MinExponent));
__ and_(r3, r3, Operand(HeapNumber::kSignMask), LeaveCC, lt);
__ b(lt, &done);
__ and_(r7, r3, Operand(HeapNumber::kSignMask));
__ and_(r3, r3, Operand(HeapNumber::kMantissaMask));
__ orr(r7, r7, Operand(r3, LSL, kMantissaInHiWordShift));
__ orr(r7, r7, Operand(r4, LSR, kMantissaInLoWordShift));
__ orr(r3, r7, Operand(r6, LSL, kBinary32ExponentShift));
__ bind(&done);
__ str(r3, MemOperand(r2, r1, LSL, 2));
__ Ret();
__ bind(&nan_or_infinity_or_zero);
__ and_(r7, r3, Operand(HeapNumber::kSignMask));
__ and_(r3, r3, Operand(HeapNumber::kMantissaMask));
__ orr(r6, r6, r7);
__ orr(r6, r6, Operand(r3, LSL, kMantissaInHiWordShift));
__ orr(r3, r6, Operand(r4, LSR, kMantissaInLoWordShift));
__ b(&done);
} else {
bool is_signed_type = IsElementTypeSigned(array_type);
int meaningful_bits = is_signed_type ? (kBitsPerInt - 1) : kBitsPerInt;
int32_t min_value = is_signed_type ? 0x80000000 : 0x00000000;
Label done, sign;
// Test for all special exponent values: zeros, subnormal numbers, NaNs
// and infinities. All these should be converted to 0.
__ mov(r5, Operand(HeapNumber::kExponentMask));
__ and_(r6, r3, Operand(r5), SetCC);
__ mov(r3, Operand(0), LeaveCC, eq);
__ b(eq, &done);
__ teq(r6, Operand(r5));
__ mov(r3, Operand(0), LeaveCC, eq);
__ b(eq, &done);
// Unbias exponent.
__ mov(r6, Operand(r6, LSR, HeapNumber::kExponentShift));
__ sub(r6, r6, Operand(HeapNumber::kExponentBias), SetCC);
// If the exponent is negative then the result is 0.
__ mov(r3, Operand(0), LeaveCC, mi);
__ b(mi, &done);
// If the exponent is too big then the result is the minimal value.
__ cmp(r6, Operand(meaningful_bits - 1));
__ mov(r3, Operand(min_value), LeaveCC, ge);
__ b(ge, &done);
__ and_(r5, r3, Operand(HeapNumber::kSignMask), SetCC);
__ and_(r3, r3, Operand(HeapNumber::kMantissaMask));
__ orr(r3, r3, Operand(1u << HeapNumber::kMantissaBitsInTopWord));
__ rsb(r6, r6, Operand(HeapNumber::kMantissaBitsInTopWord), SetCC);
__ mov(r3, Operand(r3, LSR, r6), LeaveCC, pl);
__ b(pl, &sign);
__ rsb(r6, r6, Operand(0));
__ mov(r3, Operand(r3, LSL, r6));
__ rsb(r6, r6, Operand(meaningful_bits));
__ orr(r3, r3, Operand(r4, LSR, r6));
__ bind(&sign);
__ teq(r5, Operand(0));
__ rsb(r3, r3, Operand(0), LeaveCC, ne);
__ bind(&done);
switch (array_type) {
case kExternalByteArray:
case kExternalUnsignedByteArray:
__ strb(r3, MemOperand(r2, r1, LSL, 0));
break;
case kExternalShortArray:
case kExternalUnsignedShortArray:
__ strh(r3, MemOperand(r2, r1, LSL, 1));
break;
case kExternalIntArray:
case kExternalUnsignedIntArray:
__ str(r3, MemOperand(r2, r1, LSL, 2));
break;
default:
UNREACHABLE();
break;
}
}
}
// Slow case: call runtime.
__ bind(&slow);
GenerateRuntimeSetProperty(masm);
}

View File

@ -1192,7 +1192,7 @@ void MacroAssembler::IntegerToDoubleConversionWithVFP3(Register inReg,
// ARMv7 VFP3 instructions to implement integer to double conversion.
mov(r7, Operand(inReg, ASR, kSmiTagSize));
vmov(s15, r7);
vcvt_f64_s32(d7, s15);
vmov(outLowReg, outHighReg, d7);
}
@ -1455,6 +1455,58 @@ void MacroAssembler::JumpIfNotBothSequentialAsciiStrings(Register first,
}
// Allocates a heap number or jumps to the gc_required label if the young
// space is full and a scavenge is needed.
void MacroAssembler::AllocateHeapNumber(Register result,
Register scratch1,
Register scratch2,
Label* gc_required) {
// Allocate an object in the heap for the heap number and tag it as a heap
// object.
AllocateInNewSpace(HeapNumber::kSize / kPointerSize,
result,
scratch1,
scratch2,
gc_required,
TAG_OBJECT);
// Get heap number map and store it in the allocated object.
LoadRoot(scratch1, Heap::kHeapNumberMapRootIndex);
str(scratch1, FieldMemOperand(result, HeapObject::kMapOffset));
}
void MacroAssembler::CountLeadingZeros(Register source,
Register scratch,
Register zeros) {
#ifdef CAN_USE_ARMV5_INSTRUCTIONS
clz(zeros, source); // This instruction is only supported on ARMv5 and later.
#else
mov(zeros, Operand(0));
mov(scratch, source);
// Top 16.
tst(scratch, Operand(0xffff0000));
add(zeros, zeros, Operand(16), LeaveCC, eq);
mov(scratch, Operand(scratch, LSL, 16), LeaveCC, eq);
// Top 8.
tst(scratch, Operand(0xff000000));
add(zeros, zeros, Operand(8), LeaveCC, eq);
mov(scratch, Operand(scratch, LSL, 8), LeaveCC, eq);
// Top 4.
tst(scratch, Operand(0xf0000000));
add(zeros, zeros, Operand(4), LeaveCC, eq);
mov(scratch, Operand(scratch, LSL, 4), LeaveCC, eq);
// Top 2.
tst(scratch, Operand(0xc0000000));
add(zeros, zeros, Operand(2), LeaveCC, eq);
mov(scratch, Operand(scratch, LSL, 2), LeaveCC, eq);
// Top bit.
tst(scratch, Operand(0x80000000u));
add(zeros, zeros, Operand(1), LeaveCC, eq);
#endif
}
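The fallback is a standard binary-search count of leading zeros; an equivalent standalone C++ model (not part of the patch), including the documented wrong answer for 0:
#include <cassert>
#include <cstdint>
static int Clz32(uint32_t x) {
  int zeros = 0;
  if (!(x & 0xffff0000u)) { zeros += 16; x <<= 16; }  // Top 16.
  if (!(x & 0xff000000u)) { zeros += 8;  x <<= 8;  }  // Top 8.
  if (!(x & 0xf0000000u)) { zeros += 4;  x <<= 4;  }  // Top 4.
  if (!(x & 0xc0000000u)) { zeros += 2;  x <<= 2;  }  // Top 2.
  if (!(x & 0x80000000u)) { zeros += 1; }             // Top bit.
  return zeros;
}
int main() {
  assert(Clz32(0x80000000u) == 0);
  assert(Clz32(0x00010000u) == 15);
  assert(Clz32(1u) == 31);
  assert(Clz32(0u) == 31);  // The wrong answer the comment warns about.
  return 0;
}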
void MacroAssembler::JumpIfBothInstanceTypesAreNotSequentialAscii(
Register first,
Register second,

View File

@ -239,6 +239,12 @@ class MacroAssembler: public Assembler {
Register scratch2,
Label* gc_required);
// Allocates a heap number or jumps to the gc_required label if the young
// space is full and a scavenge is needed.
void AllocateHeapNumber(Register result,
Register scratch1,
Register scratch2,
Label* gc_required);
// ---------------------------------------------------------------------------
// Support functions.
@ -319,6 +325,12 @@ class MacroAssembler: public Assembler {
Register outHighReg,
Register outLowReg);
// Count leading zeros in a 32 bit word. On ARMv5 and later it uses the clz
// instruction. On pre-ARMv5 hardware this routine gives the wrong answer
// for 0 (31 instead of 32).
void CountLeadingZeros(Register source,
Register scratch,
Register zeros);
// ---------------------------------------------------------------------------
// Runtime calls

View File

@ -72,6 +72,8 @@ class Debugger {
int32_t GetRegisterValue(int regnum);
bool GetValue(const char* desc, int32_t* value);
bool GetVFPSingleValue(const char* desc, float* value);
bool GetVFPDoubleValue(const char* desc, double* value);
// Set or delete a breakpoint. Returns true if successful.
bool SetBreakpoint(Instr* breakpc);
@ -154,6 +156,28 @@ bool Debugger::GetValue(const char* desc, int32_t* value) {
}
bool Debugger::GetVFPSingleValue(const char* desc, float* value) {
bool is_double;
int regnum = VFPRegisters::Number(desc, &is_double);
if (regnum != kNoRegister && !is_double) {
*value = sim_->get_float_from_s_register(regnum);
return true;
}
return false;
}
bool Debugger::GetVFPDoubleValue(const char* desc, double* value) {
bool is_double;
int regnum = VFPRegisters::Number(desc, &is_double);
if (regnum != kNoRegister && is_double) {
*value = sim_->get_double_from_d_register(regnum);
return true;
}
return false;
}
bool Debugger::SetBreakpoint(Instr* breakpc) {
// Check if a breakpoint can be set. If not return without any side-effects.
if (sim_->break_pc_ != NULL) {
@ -249,6 +273,8 @@ void Debugger::Debug() {
} else if ((strcmp(cmd, "p") == 0) || (strcmp(cmd, "print") == 0)) {
if (args == 2) {
int32_t value;
float svalue;
double dvalue;
if (strcmp(arg1, "all") == 0) {
for (int i = 0; i < kNumRegisters; i++) {
value = GetRegisterValue(i);
@ -257,6 +283,10 @@ void Debugger::Debug() {
} else {
if (GetValue(arg1, &value)) {
PrintF("%s: 0x%08x %d \n", arg1, value, value);
} else if (GetVFPSingleValue(arg1, &svalue)) {
PrintF("%s: %f \n", arg1, svalue);
} else if (GetVFPDoubleValue(arg1, &dvalue)) {
PrintF("%s: %lf \n", arg1, dvalue);
} else {
PrintF("%s unrecognized\n", arg1);
}
@ -1919,6 +1949,13 @@ void Simulator::DecodeUnconditional(Instr* instr) {
}
// Depending on the value of the last_bit flag, glue the register code together
// from the vm and m values, as either Vm:M or M:Vm (m is a single bit).
static int GlueRegCode(bool last_bit, int vm, int m) {
return last_bit ? ((vm << 1) | m) : ((m << 4) | vm);
}
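As a sanity check on the two layouts, the following self-contained sketch (a reimplementation for illustration, not code from this change) shows that gluing inverts the corresponding split for every 5-bit register code:

#include <cassert>

static int Glue(bool last_bit, int vm, int m) {
  return last_bit ? ((vm << 1) | m) : ((m << 4) | vm);
}

static void Split(bool last_bit, int reg_code, int* vm, int* m) {
  if (last_bit) {  // Vm:M form.
    *m = reg_code & 0x1;
    *vm = reg_code >> 1;
  } else {         // M:Vm form.
    *m = (reg_code & 0x10) >> 4;
    *vm = reg_code & 0x0F;
  }
}

int main() {
  for (int code = 0; code < 32; code++) {
    int vm, m;
    Split(true, code, &vm, &m);
    assert(Glue(true, vm, m) == code);
    Split(false, code, &vm, &m);
    assert(Glue(false, vm, m) == code);
  }
  return 0;
}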
// void Simulator::DecodeTypeVFP(Instr* instr)
// The following ARMv7 VFP instructions are currently supported.
// vmov :Sn = Rt
@ -1933,115 +1970,213 @@ void Simulator::DecodeUnconditional(Instr* instr) {
// VMRS
void Simulator::DecodeTypeVFP(Instr* instr) {
ASSERT((instr->TypeField() == 7) && (instr->Bit(24) == 0x0) );
ASSERT(instr->Bits(11, 9) == 0x5);
int vm = instr->VmField();
int vd = instr->VdField();
int vn = instr->VnField();
if (instr->Bit(4) == 0) {
if (instr->Opc1Field() == 0x7) {
// Other data processing instructions
if ((instr->Opc2Field() == 0x7) && (instr->Opc3Field() == 0x3)) {
DecodeVCVTBetweenDoubleAndSingle(instr);
} else if ((instr->Opc2Field() == 0x8) && (instr->Opc3Field() & 0x1)) {
DecodeVCVTBetweenFloatingPointAndInteger(instr);
} else if (((instr->Opc2Field() >> 1) == 0x6) &&
(instr->Opc3Field() & 0x1)) {
DecodeVCVTBetweenFloatingPointAndInteger(instr);
} else if (((instr->Opc2Field() == 0x4) || (instr->Opc2Field() == 0x5)) &&
(instr->Opc3Field() & 0x1)) {
DecodeVCMP(instr);
} else {
UNREACHABLE(); // Not used by V8.
}
} else if (instr->Opc1Field() == 0x3) {
if (instr->SzField() != 0x1) {
UNREACHABLE(); // Not used by V8.
}
if (instr->Opc3Field() & 0x1) {
// vsub
double dn_value = get_double_from_d_register(vn);
double dm_value = get_double_from_d_register(vm);
double dd_value = dn_value - dm_value;
set_d_register_from_double(vd, dd_value);
} else {
// vadd
double dn_value = get_double_from_d_register(vn);
double dm_value = get_double_from_d_register(vm);
double dd_value = dn_value + dm_value;
set_d_register_from_double(vd, dd_value);
}
} else if ((instr->Opc1Field() == 0x2) && !(instr->Opc3Field() & 0x1)) {
// vmul
if (instr->SzField() != 0x1) {
UNREACHABLE(); // Not used by V8.
}
double dn_value = get_double_from_d_register(vn);
double dm_value = get_double_from_d_register(vm);
double dd_value = dn_value * dm_value;
set_d_register_from_double(vd, dd_value);
} else if ((instr->Opc1Field() == 0x4) && !(instr->Opc3Field() & 0x1)) {
// vdiv
if (instr->SzField() != 0x1) {
UNREACHABLE(); // Not used by V8.
}
double dn_value = get_double_from_d_register(vn);
double dm_value = get_double_from_d_register(vm);
double dd_value = dn_value / dm_value;
set_d_register_from_double(vd, dd_value);
} else {
UNIMPLEMENTED(); // Not used by V8.
}
} else {
if ((instr->VCField() == 0x0) &&
(instr->VAField() == 0x0)) {
DecodeVMOVBetweenCoreAndSinglePrecisionRegisters(instr);
} else if ((instr->VLField() == 0x1) &&
(instr->VCField() == 0x0) &&
(instr->VAField() == 0x7) &&
(instr->Bits(19, 16) == 0x1)) {
// vmrs
if (instr->RtField() == 0xF)
Copy_FPSCR_to_APSR();
else
UNIMPLEMENTED(); // Not used by V8.
} else {
UNIMPLEMENTED(); // Not used by V8.
}
}
}
void Simulator::DecodeVMOVBetweenCoreAndSinglePrecisionRegisters(Instr* instr) {
ASSERT((instr->Bit(4) == 1) && (instr->VCField() == 0x0) &&
(instr->VAField() == 0x0));
int t = instr->RtField();
int n = GlueRegCode(true, instr->VnField(), instr->NField());
bool to_arm_register = (instr->VLField() == 0x1);
if (to_arm_register) {
int32_t int_value = get_sinteger_from_s_register(n);
set_register(t, int_value);
} else {
int32_t rs_val = get_register(t);
set_s_register_from_sinteger(n, rs_val);
}
}
void Simulator::DecodeVCMP(Instr* instr) {
ASSERT((instr->Bit(4) == 0) && (instr->Opc1Field() == 0x7));
ASSERT(((instr->Opc2Field() == 0x4) || (instr->Opc2Field() == 0x5)) &&
(instr->Opc3Field() & 0x1));
// Comparison.
bool dp_operation = (instr->SzField() == 1);
if (instr->Bit(7) != 0) {
// Raising exceptions for quiet NaNs is not supported.
UNIMPLEMENTED(); // Not used by V8.
}
int d = GlueRegCode(!dp_operation, instr->VdField(), instr->DField());
int m = GlueRegCode(!dp_operation, instr->VmField(), instr->MField());
if (dp_operation) {
double dd_value = get_double_from_d_register(d);
double dm_value = get_double_from_d_register(m);
Compute_FPSCR_Flags(dd_value, dm_value);
} else {
UNIMPLEMENTED(); // Not used by V8.
}
}
void Simulator::DecodeVCVTBetweenDoubleAndSingle(Instr* instr) {
ASSERT((instr->Bit(4) == 0) && (instr->Opc1Field() == 0x7));
ASSERT((instr->Opc2Field() == 0x7) && (instr->Opc3Field() == 0x3));
bool double_to_single = (instr->SzField() == 1);
int dst = GlueRegCode(double_to_single, instr->VdField(), instr->DField());
int src = GlueRegCode(!double_to_single, instr->VmField(), instr->MField());
if (double_to_single) {
double val = get_double_from_d_register(src);
set_s_register_from_float(dst, static_cast<float>(val));
} else {
float val = get_float_from_s_register(src);
set_d_register_from_double(dst, static_cast<double>(val));
}
}
void Simulator::DecodeVCVTBetweenFloatingPointAndInteger(Instr* instr) {
ASSERT((instr->Bit(4) == 0) && (instr->Opc1Field() == 0x7));
ASSERT(((instr->Opc2Field() == 0x8) && (instr->Opc3Field() & 0x1)) ||
(((instr->Opc2Field() >> 1) == 0x6) && (instr->Opc3Field() & 0x1)));
// Conversion between floating-point and integer.
int vd = instr->VdField();
int d = instr->DField();
int vm = instr->VmField();
int m = instr->MField();
bool to_integer = (instr->Bit(18) == 1);
bool dp_operation = (instr->SzField() == 1);
if (to_integer) {
bool unsigned_integer = (instr->Bit(16) == 0);
if (instr->Bit(7) != 1) {
// Only rounding towards zero supported.
UNIMPLEMENTED(); // Not used by V8.
}
int dst = GlueRegCode(true, vd, d);
int src = GlueRegCode(!dp_operation, vm, m);
if (dp_operation) {
double val = get_double_from_d_register(src);
int sint = unsigned_integer ? static_cast<uint32_t>(val) :
static_cast<int32_t>(val);
set_s_register_from_sinteger(dst, sint);
} else {
float val = get_float_from_s_register(src);
int sint = unsigned_integer ? static_cast<uint32_t>(val) :
static_cast<int32_t>(val);
set_s_register_from_sinteger(dst, sint);
}
} else {
bool unsigned_integer = (instr->Bit(7) == 0);
int dst = GlueRegCode(!dp_operation, vd, d);
int src = GlueRegCode(true, vm, m);
int val = get_sinteger_from_s_register(src);
if (dp_operation) {
if (unsigned_integer) {
set_d_register_from_double(dst,
static_cast<double>((uint32_t)val));
} else {
set_d_register_from_double(dst, static_cast<double>(val));
}
} else {
if (unsigned_integer) {
set_s_register_from_float(dst,
static_cast<float>((uint32_t)val));
} else {
set_s_register_from_float(dst, static_cast<float>(val));
}
}
}
}
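Because only round-towards-zero is supported, the conversion semantics above reduce to plain C++ casts. A hedged sketch of the to-integer direction (out-of-range inputs are undefined behavior in C++, so the simulator inherits whatever the host does):

#include <stdint.h>

int32_t TruncateToS32(double value) {
  return static_cast<int32_t>(value);   // e.g. -1.9 -> -1, 2.9 -> 2
}

uint32_t TruncateToU32(double value) {
  return static_cast<uint32_t>(value);  // e.g. 3.7 -> 3
}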
@ -2055,9 +2190,32 @@ void Simulator::DecodeTypeVFP(Instr* instr) {
void Simulator::DecodeType6CoprocessorIns(Instr* instr) {
ASSERT((instr->TypeField() == 6));
if (instr->CoprocessorField() == 0xA) {
switch (instr->OpcodeField()) {
case 0x8:
case 0xC: { // Load and store float to memory.
int rn = instr->RnField();
int vd = instr->VdField();
int offset = instr->Immed8Field();
if (!instr->HasU()) {
offset = -offset;
}
int32_t address = get_register(rn) + 4 * offset;
if (instr->HasL()) {
// Load single from memory: vldr.
set_s_register_from_sinteger(vd, ReadW(address, instr));
} else {
// Store single to memory: vstr.
WriteW(address, get_sinteger_from_s_register(vd), instr);
}
break;
}
default:
UNIMPLEMENTED(); // Not used by V8.
break;
}
} else if (instr->CoprocessorField() == 0xB) {
switch (instr->OpcodeField()) {
case 0x2:
// Load and store double to two GP registers
@ -2106,6 +2264,8 @@ void Simulator::DecodeType6CoprocessorIns(Instr* instr) {
UNIMPLEMENTED(); // Not used by V8.
break;
}
} else {
UNIMPLEMENTED(); // Not used by V8.
}
}
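The effective address in the 0xA path follows the usual VFP single load/store form: the base register plus or minus the 8-bit immediate scaled to words. A small standalone sketch (hypothetical helper mirroring the code above):

#include <stdint.h>

// base: value of Rn; imm8: unsigned 8-bit word offset from the instruction;
// add: the U bit (true = add the offset, false = subtract it).
static int32_t VfpLoadStoreAddress(int32_t base, int imm8, bool add) {
  int offset = add ? imm8 : -imm8;
  return base + 4 * offset;  // Offsets are in words (4 bytes each).
}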


@ -231,6 +231,11 @@ class Simulator {
void DecodeTypeVFP(Instr* instr);
void DecodeType6CoprocessorIns(Instr* instr);
void DecodeVMOVBetweenCoreAndSinglePrecisionRegisters(Instr* instr);
void DecodeVCMP(Instr* instr);
void DecodeVCVTBetweenDoubleAndSingle(Instr* instr);
void DecodeVCVTBetweenFloatingPointAndInteger(Instr* instr);
// Executes one instruction.
void InstructionDecode(Instr* instr);


@ -174,6 +174,15 @@ const int kBitsPerByteLog2 = 3;
const int kBitsPerPointer = kPointerSize * kBitsPerByte;
const int kBitsPerInt = kIntSize * kBitsPerByte;
// IEEE 754 single precision floating point number bit layout.
const uint32_t kBinary32SignMask = 0x80000000u;
const uint32_t kBinary32ExponentMask = 0x7f800000u;
const uint32_t kBinary32MantissaMask = 0x007fffffu;
const int kBinary32ExponentBias = 127;
const int kBinary32MaxExponent = 0xFE;
const int kBinary32MinExponent = 0x01;
const int kBinary32MantissaBits = 23;
const int kBinary32ExponentShift = 23;
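These constants carve a binary32 value into its IEEE 754 fields. A self-contained sketch of how such a decomposition works (illustrative only; the masks are inlined so it compiles on its own):

#include <stdint.h>

struct Binary32Parts {
  uint32_t sign;      // Non-zero if negative.
  int exponent;       // Unbiased exponent.
  uint32_t mantissa;  // 23 fraction bits; implicit leading 1 not included.
};

static Binary32Parts DecomposeBinary32(uint32_t bits) {
  Binary32Parts parts;
  parts.sign = bits & 0x80000000u;                         // kBinary32SignMask
  parts.exponent =
      static_cast<int>((bits & 0x7f800000u) >> 23) - 127;  // bias 127
  parts.mantissa = bits & 0x007fffffu;                     // kBinary32MantissaMask
  return parts;
}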
// Zap-value: The value used for zapping dead objects.
// Should be a recognizable hex value tagged as a heap object pointer.