ARM: Smi refactoring and improvements.
Refactoring:
* consistent use of SmiTag/Untag
* added a few Smi macros and helpers

Improvements:
* small optimisations (e.g. merging untag and cmp #0)
* added fixed-point to double conversion instructions for simpler conversions

More on the last point: a Smi can be seen as a fixed-point number with a one-bit fractional part, so a fixed-point to double instruction can convert a Smi to a double without untagging it first.

BUG=none
TEST=none

Review URL: https://chromiumcodereview.appspot.com/15085026

git-svn-id: http://v8.googlecode.com/svn/branches/bleeding_edge@14724 ce2b1a6d-e550-0410-aec6-3dcde31c8c00
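For illustration, a minimal standalone C++ sketch of that trick (not part of this patch; plain integer arithmetic standing in for the VFP instruction): with kSmiTag == 0 and kSmiTagSize == 1, a tagged Smi is the integer shifted left by one, and a fixed-point conversion with one fraction bit divides by two, which undoes the tag.

    #include <cassert>
    #include <cstdint>

    int main() {
      int32_t value = 42;
      int32_t smi = value << 1;  // SmiTag: the tagged word holds 2 * value.
      // vcvt.f64.s32 d0, d0, #1 treats its input as fixed point with one
      // fraction bit, i.e. it computes smi / 2.0, so the untagging (ASR #1)
      // and the int-to-double conversion happen in a single instruction.
      double d = static_cast<double>(smi) / 2.0;
      assert(d == 42.0);
      return 0;
    }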
@@ -2473,6 +2473,23 @@ void Assembler::vcvt_f32_f64(const SwVfpRegister dst,
 }


+void Assembler::vcvt_f64_s32(const DwVfpRegister dst,
+                             int fraction_bits,
+                             const Condition cond) {
+  // Instruction details available in ARM DDI 0406C.b, A8-874.
+  // cond(31-28) | 11101(27-23) | D(22) | 11(21-20) | 1010(19-16) | Vd(15-12) |
+  // 101(11-9) | sf=1(8) | sx=1(7) | 1(6) | i(5) | 0(4) | imm4(3-0)
+  ASSERT(fraction_bits > 0 && fraction_bits <= 32);
+  ASSERT(CpuFeatures::IsSupported(VFP3));
+  int vd, d;
+  dst.split_code(&vd, &d);
+  int i = ((32 - fraction_bits) >> 4) & 1;
+  int imm4 = (32 - fraction_bits) & 0xf;
+  emit(cond | 0xE*B24 | B23 | d*B22 | 0x3*B20 | B19 | 0x2*B16 |
+       vd*B12 | 0x5*B9 | B8 | B7 | B6 | i*B5 | imm4);
+}
+
+
 void Assembler::vneg(const DwVfpRegister dst,
                      const DwVfpRegister src,
                      const Condition cond) {

@@ -459,6 +459,17 @@ class Operand BASE_EMBEDDED {

   // rm <shift_op> shift_imm
   explicit Operand(Register rm, ShiftOp shift_op, int shift_imm);
+  INLINE(static Operand SmiUntag(Register rm)) {
+    return Operand(rm, ASR, kSmiTagSize);
+  }
+  INLINE(static Operand PointerOffsetFromSmiKey(Register key)) {
+    STATIC_ASSERT(kSmiTag == 0 && kSmiTagSize < kPointerSizeLog2);
+    return Operand(key, LSL, kPointerSizeLog2 - kSmiTagSize);
+  }
+  INLINE(static Operand DoubleOffsetFromSmiKey(Register key)) {
+    STATIC_ASSERT(kSmiTag == 0 && kSmiTagSize < kDoubleSizeLog2);
+    return Operand(key, LSL, kDoubleSizeLog2 - kSmiTagSize);
+  }

   // rm <shift_op> rs
   explicit Operand(Register rm, ShiftOp shift_op, Register rs);

@@ -515,6 +526,12 @@ class MemOperand BASE_EMBEDDED {
   // [rn], +/- rm <shift_op> shift_imm  PostIndex/NegPostIndex
   explicit MemOperand(Register rn, Register rm,
                       ShiftOp shift_op, int shift_imm, AddrMode am = Offset);
+  INLINE(static MemOperand PointerAddressFromSmiKey(Register array,
+                                                    Register key,
+                                                    AddrMode am = Offset)) {
+    STATIC_ASSERT(kSmiTag == 0 && kSmiTagSize < kPointerSizeLog2);
+    return MemOperand(array, key, LSL, kPointerSizeLog2 - kSmiTagSize, am);
+  }

   void set_offset(int32_t offset) {
     ASSERT(rm_.is(no_reg));

@@ -1032,6 +1049,9 @@ class Assembler : public AssemblerBase {
                     const DwVfpRegister src,
                     VFPConversionMode mode = kDefaultRoundToZero,
                     const Condition cond = al);
+  void vcvt_f64_s32(const DwVfpRegister dst,
+                    int fraction_bits,
+                    const Condition cond = al);

   void vneg(const DwVfpRegister dst,
             const DwVfpRegister src,

@@ -215,12 +215,9 @@ static void AllocateJSArray(MacroAssembler* masm,

   // Allocate the JSArray object together with space for a FixedArray with the
   // requested number of elements.
-  STATIC_ASSERT(kSmiTagSize == 1 && kSmiTag == 0);
   __ mov(elements_array_end,
          Operand((JSArray::kSize + FixedArray::kHeaderSize) / kPointerSize));
-  __ add(elements_array_end,
-         elements_array_end,
-         Operand(array_size, ASR, kSmiTagSize));
+  __ add(elements_array_end, elements_array_end, Operand::SmiUntag(array_size));
   __ Allocate(elements_array_end,
               result,
               scratch1,

@@ -249,7 +246,6 @@ static void AllocateJSArray(MacroAssembler* masm,
          FieldMemOperand(result, JSArray::kElementsOffset));

   // Clear the heap tag on the elements array.
-  STATIC_ASSERT(kSmiTag == 0);
   __ sub(elements_array_storage,
          elements_array_storage,
          Operand(kHeapObjectTag));

@@ -261,7 +257,6 @@ static void AllocateJSArray(MacroAssembler* masm,
   __ LoadRoot(scratch1, Heap::kFixedArrayMapRootIndex);
   ASSERT_EQ(0 * kPointerSize, FixedArray::kMapOffset);
   __ str(scratch1, MemOperand(elements_array_storage, kPointerSize, PostIndex));
-  STATIC_ASSERT(kSmiTag == 0);
   ASSERT_EQ(1 * kPointerSize, FixedArray::kLengthOffset);
   __ str(array_size,
          MemOperand(elements_array_storage, kPointerSize, PostIndex));

@@ -270,10 +265,9 @@ static void AllocateJSArray(MacroAssembler* masm,
   // result: JSObject
   // elements_array_storage: elements array element storage
   // array_size: smi-tagged size of elements array
-  STATIC_ASSERT(kSmiTag == 0 && kSmiTagSize < kPointerSizeLog2);
   __ add(elements_array_end,
          elements_array_storage,
-         Operand(array_size, LSL, kPointerSizeLog2 - kSmiTagSize));
+         Operand::PointerOffsetFromSmiKey(array_size));

   // Fill the allocated FixedArray with the hole value if requested.
   // result: JSObject

@@ -335,7 +329,6 @@ void ArrayNativeCode(MacroAssembler* masm, Label* call_generic_code) {
   __ bind(&argc_one_or_more);
   __ cmp(r0, Operand(1));
   __ b(ne, &argc_two_or_more);
-  STATIC_ASSERT(kSmiTag == 0);
   __ ldr(r2, MemOperand(sp));  // Get the argument from the stack.
   __ tst(r2, r2);
   __ b(ne, &not_empty_array);

@@ -344,6 +337,7 @@ void ArrayNativeCode(MacroAssembler* masm, Label* call_generic_code) {
   __ b(&empty_array);

   __ bind(&not_empty_array);
+  STATIC_ASSERT(kSmiTag == 0);
   __ and_(r3, r2, Operand(kIntptrSignBit | kSmiTagMask), SetCC);
   __ b(ne, call_generic_code);

@@ -375,7 +369,7 @@ void ArrayNativeCode(MacroAssembler* masm, Label* call_generic_code) {

   // Handle construction of an array from a list of arguments.
   __ bind(&argc_two_or_more);
-  __ mov(r2, Operand(r0, LSL, kSmiTagSize));  // Convet argc to a smi.
+  __ SmiTag(r2, r0);

   // r0: argc
   // r1: constructor

@@ -478,7 +472,7 @@ void Builtins::Generate_InternalArrayCode(MacroAssembler* masm) {
   if (FLAG_debug_code) {
     // Initial map for the builtin InternalArray functions should be maps.
     __ ldr(r2, FieldMemOperand(r1, JSFunction::kPrototypeOrInitialMapOffset));
-    __ tst(r2, Operand(kSmiTagMask));
+    __ SmiTst(r2);
     __ Assert(ne, "Unexpected initial map for InternalArray function");
     __ CompareObjectType(r2, r3, r4, MAP_TYPE);
     __ Assert(eq, "Unexpected initial map for InternalArray function");

@@ -512,7 +506,7 @@ void Builtins::Generate_ArrayCode(MacroAssembler* masm) {
   if (FLAG_debug_code) {
     // Initial map for the builtin Array functions should be maps.
     __ ldr(r2, FieldMemOperand(r1, JSFunction::kPrototypeOrInitialMapOffset));
-    __ tst(r2, Operand(kSmiTagMask));
+    __ SmiTst(r2);
     __ Assert(ne, "Unexpected initial map for Array function");
     __ CompareObjectType(r2, r3, r4, MAP_TYPE);
     __ Assert(eq, "Unexpected initial map for Array function");

@@ -545,7 +539,7 @@ void Builtins::Generate_CommonArrayConstructCode(MacroAssembler* masm) {
   // Array functions which always have a map.
   // Initial map for the builtin Array function should be a map.
   __ ldr(r3, FieldMemOperand(r1, JSFunction::kPrototypeOrInitialMapOffset));
-  __ tst(r3, Operand(kSmiTagMask));
+  __ SmiTst(r3);
   __ Assert(ne, "Unexpected initial map for Array function");
   __ CompareObjectType(r3, r3, r4, MAP_TYPE);
   __ Assert(eq, "Unexpected initial map for Array function");

@@ -778,7 +772,7 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm,
     FrameScope scope(masm, StackFrame::CONSTRUCT);

     // Preserve the two incoming parameters on the stack.
-    __ mov(r0, Operand(r0, LSL, kSmiTagSize));
+    __ SmiTag(r0);
     __ push(r0);  // Smi-tagged arguments count.
     __ push(r1);  // Constructor function.

@@ -931,7 +925,7 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm,
       ASSERT_EQ(0 * kPointerSize, JSObject::kMapOffset);
       __ str(r6, MemOperand(r2, kPointerSize, PostIndex));
       ASSERT_EQ(1 * kPointerSize, FixedArray::kLengthOffset);
-      __ mov(r0, Operand(r3, LSL, kSmiTagSize));
+      __ SmiTag(r0, r3);
       __ str(r0, MemOperand(r2, kPointerSize, PostIndex));

       // Initialize the fields to undefined.

@@ -1004,7 +998,7 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm,
     __ add(r2, fp, Operand(StandardFrameConstants::kCallerSPOffset));

     // Set up number of arguments for function call below
-    __ mov(r0, Operand(r3, LSR, kSmiTagSize));
+    __ SmiUntag(r0, r3);

     // Copy arguments and receiver to the expression stack.
     // r0: number of arguments

@@ -1459,7 +1453,7 @@ void Builtins::Generate_FunctionCall(MacroAssembler* masm) {
     {
       // Enter an internal frame in order to preserve argument count.
       FrameScope scope(masm, StackFrame::INTERNAL);
-      __ mov(r0, Operand(r0, LSL, kSmiTagSize));  // Smi-tagged.
+      __ SmiTag(r0);
       __ push(r0);

       __ push(r2);

@@ -1467,7 +1461,7 @@ void Builtins::Generate_FunctionCall(MacroAssembler* masm) {
       __ mov(r2, r0);

       __ pop(r0);
-      __ mov(r0, Operand(r0, ASR, kSmiTagSize));
+      __ SmiUntag(r0);

       // Exit the internal frame.
     }

@@ -1570,7 +1564,7 @@ void Builtins::Generate_FunctionCall(MacroAssembler* masm) {
   __ ldr(r3, FieldMemOperand(r1, JSFunction::kSharedFunctionInfoOffset));
   __ ldr(r2,
          FieldMemOperand(r3, SharedFunctionInfo::kFormalParameterCountOffset));
-  __ mov(r2, Operand(r2, ASR, kSmiTagSize));
+  __ SmiUntag(r2);
   __ ldr(r3, FieldMemOperand(r1, JSFunction::kCodeEntryOffset));
   __ SetCallKind(r5, CALL_AS_METHOD);
   __ cmp(r2, r0);  // Check formal and actual parameter counts.

@@ -1609,7 +1603,7 @@ void Builtins::Generate_FunctionApply(MacroAssembler* masm) {
     // here which will cause r2 to become negative.
     __ sub(r2, sp, r2);
     // Check if the arguments will overflow the stack.
-    __ cmp(r2, Operand(r0, LSL, kPointerSizeLog2 - kSmiTagSize));
+    __ cmp(r2, Operand::PointerOffsetFromSmiKey(r0));
     __ b(gt, &okay);  // Signed comparison.

     // Out of stack space.

@@ -1719,7 +1713,7 @@ void Builtins::Generate_FunctionApply(MacroAssembler* masm) {
     // Invoke the function.
     Label call_proxy;
     ParameterCount actual(r0);
-    __ mov(r0, Operand(r0, ASR, kSmiTagSize));
+    __ SmiUntag(r0);
     __ ldr(r1, MemOperand(fp, kFunctionOffset));
     __ CompareObjectType(r1, r2, r2, JS_FUNCTION_TYPE);
     __ b(ne, &call_proxy);

@@ -1748,7 +1742,7 @@ void Builtins::Generate_FunctionApply(MacroAssembler* masm) {


 static void EnterArgumentsAdaptorFrame(MacroAssembler* masm) {
-  __ mov(r0, Operand(r0, LSL, kSmiTagSize));
+  __ SmiTag(r0);
   __ mov(r4, Operand(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
   __ stm(db_w, sp, r0.bit() | r1.bit() | r4.bit() | fp.bit() | lr.bit());
   __ add(fp, sp, Operand(3 * kPointerSize));

@@ -1764,7 +1758,7 @@ static void LeaveArgumentsAdaptorFrame(MacroAssembler* masm) {
   __ ldr(r1, MemOperand(fp, -3 * kPointerSize));
   __ mov(sp, fp);
   __ ldm(ia_w, sp, fp.bit() | lr.bit());
-  __ add(sp, sp, Operand(r1, LSL, kPointerSizeLog2 - kSmiTagSize));
+  __ add(sp, sp, Operand::PointerOffsetFromSmiKey(r1));
   __ add(sp, sp, Operand(kPointerSize));  // adjust for receiver
 }

@@ -1795,7 +1789,7 @@ void Builtins::Generate_ArgumentsAdaptorTrampoline(MacroAssembler* masm) {
     // r1: function
     // r2: expected number of arguments
     // r3: code entry to call
-    __ add(r0, fp, Operand(r0, LSL, kPointerSizeLog2 - kSmiTagSize));
+    __ add(r0, fp, Operand::PointerOffsetFromSmiKey(r0));
     // adjust for return address and receiver
     __ add(r0, r0, Operand(2 * kPointerSize));
     __ sub(r2, r0, Operand(r2, LSL, kPointerSizeLog2));

@@ -1826,7 +1820,7 @@ void Builtins::Generate_ArgumentsAdaptorTrampoline(MacroAssembler* masm) {
     // r1: function
     // r2: expected number of arguments
     // r3: code entry to call
-    __ add(r0, fp, Operand(r0, LSL, kPointerSizeLog2 - kSmiTagSize));
+    __ add(r0, fp, Operand::PointerOffsetFromSmiKey(r0));

     // Copy the arguments (including the receiver) to the new stack frame.
     // r0: copy start address

@@ -321,13 +321,13 @@ void FastNewClosureStub::Generate(MacroAssembler* masm) {
   __ b(eq, &install_unoptimized);
   __ sub(r4, r4, Operand(Smi::FromInt(SharedFunctionInfo::kEntryLength)));
   __ add(r5, r1, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
-  __ add(r5, r5, Operand(r4, LSL, kPointerSizeLog2 - kSmiTagSize));
+  __ add(r5, r5, Operand::PointerOffsetFromSmiKey(r4));
   __ ldr(r5, MemOperand(r5));
   __ cmp(r2, r5);
   __ b(ne, &loop);
   // Hit: fetch the optimized code.
   __ add(r5, r1, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
-  __ add(r5, r5, Operand(r4, LSL, kPointerSizeLog2 - kSmiTagSize));
+  __ add(r5, r5, Operand::PointerOffsetFromSmiKey(r4));
   __ add(r5, r5, Operand(kPointerSize));
   __ ldr(r4, MemOperand(r5));

@@ -519,8 +519,7 @@ void ConvertToDoubleStub::Generate(MacroAssembler* masm) {
   Register mantissa = result2_;

   Label not_special;
-  // Convert from Smi to integer.
-  __ mov(source_, Operand(source_, ASR, kSmiTagSize));
+  __ SmiUntag(source_);
   // Move sign bit from source to destination.  This works because the sign bit
   // in the exponent word of the double has the same position and polarity as
   // the 2's complement sign bit in a Smi.

@@ -770,7 +769,7 @@ static void EmitSmiNonsmiComparison(MacroAssembler* masm,

   // Lhs is a smi, rhs is a number.
   // Convert lhs to a double in d7.
-  __ SmiToDoubleVFPRegister(lhs, d7, r7, s15);
+  __ SmiToDouble(d7, lhs);
   // Load the double from rhs, tagged HeapNumber r0, to d6.
   __ sub(r7, rhs, Operand(kHeapObjectTag));
   __ vldr(d6, r7, HeapNumber::kValueOffset);

@@ -801,7 +800,7 @@ static void EmitSmiNonsmiComparison(MacroAssembler* masm,
   __ sub(r7, lhs, Operand(kHeapObjectTag));
   __ vldr(d7, r7, HeapNumber::kValueOffset);
   // Convert rhs to a double in d6 .
-  __ SmiToDoubleVFPRegister(rhs, d6, r7, s13);
+  __ SmiToDouble(d6, rhs);
   // Fall through to both_loaded_as_doubles.
 }

@@ -1228,7 +1227,7 @@ void ToBooleanStub::Generate(MacroAssembler* masm) {

   if (types_.Contains(SMI)) {
     // Smis: 0 -> false, all other -> true
-    __ tst(tos_, Operand(kSmiTagMask));
+    __ SmiTst(tos_);
     // tos_ contains the correct return value already
     __ Ret(eq);
   } else if (types_.NeedsMap()) {

@@ -1533,7 +1532,7 @@ void UnaryOpStub::GenerateHeapNumberCodeBitNot(MacroAssembler* masm,
   __ b(mi, &try_float);

   // Tag the result as a smi and we're done.
-  __ mov(r0, Operand(r1, LSL, kSmiTagSize));
+  __ SmiTag(r0, r1);
   __ Ret();

   // Try to store the result in a heap number.

@@ -1880,9 +1879,7 @@ void BinaryOpStub_GenerateSmiSmiOperation(MacroAssembler* masm,
       __ GetLeastBitsFromSmi(scratch2, right, 5);
       __ mov(scratch1, Operand(scratch1, LSL, scratch2));
       // Check that the signed result fits in a Smi.
-      __ add(scratch2, scratch1, Operand(0x40000000), SetCC);
-      __ b(mi, &not_smi_result);
-      __ SmiTag(right, scratch1);
+      __ TrySmiTag(right, scratch1, &not_smi_result);
       __ Ret();
       break;
     default:

@@ -1944,12 +1941,8 @@ void BinaryOpStub_GenerateFPOperation(MacroAssembler* masm,

   // Load left and right operands into d0 and d1.
   if (smi_operands) {
-    __ SmiUntag(scratch1, right);
-    __ vmov(d1.high(), scratch1);
-    __ vcvt_f64_s32(d1, d1.high());
-    __ SmiUntag(scratch1, left);
-    __ vmov(d0.high(), scratch1);
-    __ vcvt_f64_s32(d0, d0.high());
+    __ SmiToDouble(d1, right);
+    __ SmiToDouble(d0, left);
   } else {
     // Load right operand into d1.
     if (right_type == BinaryOpIC::INT32) {

@@ -2060,9 +2053,7 @@ void BinaryOpStub_GenerateFPOperation(MacroAssembler* masm,
       }

       // Check that the *signed* result fits in a smi.
-      __ add(r3, r2, Operand(0x40000000), SetCC);
-      __ b(mi, &result_not_a_smi);
-      __ SmiTag(r0, r2);
+      __ TrySmiTag(r0, r2, &result_not_a_smi);
       __ Ret();

       // Allocate new heap number for result.

@@ -2122,7 +2113,6 @@ void BinaryOpStub_GenerateSmiCode(

   // Perform combined smi check on both operands.
   __ orr(scratch1, left, Operand(right));
-  STATIC_ASSERT(kSmiTag == 0);
   __ JumpIfNotSmi(scratch1, &not_smis);

   // If the smi-smi operation results in a smi return is generated.

@@ -2411,12 +2401,9 @@ void BinaryOpStub::GenerateInt32Stub(MacroAssembler* masm) {
           UNREACHABLE();
       }

-        // Check if the result fits in a smi.
-        __ add(scratch1, r2, Operand(0x40000000), SetCC);
-        // If not try to return a heap number. (We know the result is an int32.)
-        __ b(mi, &return_heap_number);
-        // Tag the result and return.
-        __ SmiTag(r0, r2);
+        // Check if the result fits in a smi. If not try to return a heap number.
+        // (We know the result is an int32).
+        __ TrySmiTag(r0, r2, &return_heap_number);
         __ Ret();

         __ bind(&return_heap_number);

@@ -2644,7 +2631,8 @@ void TranscendentalCacheStub::Generate(MacroAssembler* masm) {

     // Input is a smi. Convert to double and load the low and high words
     // of the double into r2, r3.
-    __ IntegerToDoubleConversionWithVFP3(r0, r3, r2);
+    __ SmiToDouble(d7, r0);
+    __ vmov(r2, r3, d7);
     __ b(&loaded);

     __ bind(&input_not_smi);

@@ -3842,7 +3830,7 @@ void ArgumentsAccessStub::GenerateReadElement(MacroAssembler* masm) {

   // Read the argument from the stack and return it.
   __ sub(r3, r0, r1);
-  __ add(r3, fp, Operand(r3, LSL, kPointerSizeLog2 - kSmiTagSize));
+  __ add(r3, fp, Operand::PointerOffsetFromSmiKey(r3));
   __ ldr(r0, MemOperand(r3, kDisplacement));
   __ Jump(lr);

@@ -3856,7 +3844,7 @@ void ArgumentsAccessStub::GenerateReadElement(MacroAssembler* masm) {

   // Read the argument from the adaptor frame and return it.
   __ sub(r3, r0, r1);
-  __ add(r3, r2, Operand(r3, LSL, kPointerSizeLog2 - kSmiTagSize));
+  __ add(r3, r2, Operand::PointerOffsetFromSmiKey(r3));
   __ ldr(r0, MemOperand(r3, kDisplacement));
   __ Jump(lr);

@@ -4109,7 +4097,7 @@ void ArgumentsAccessStub::GenerateNewStrict(MacroAssembler* masm) {
   __ bind(&adaptor_frame);
   __ ldr(r1, MemOperand(r2, ArgumentsAdaptorFrameConstants::kLengthOffset));
   __ str(r1, MemOperand(sp, 0));
-  __ add(r3, r2, Operand(r1, LSL, kPointerSizeLog2 - kSmiTagSize));
+  __ add(r3, r2, Operand::PointerOffsetFromSmiKey(r1));
   __ add(r3, r3, Operand(StandardFrameConstants::kCallerSPOffset));
   __ str(r3, MemOperand(sp, 1 * kPointerSize));

@@ -4117,9 +4105,8 @@ void ArgumentsAccessStub::GenerateNewStrict(MacroAssembler* masm) {
   // of the arguments object and the elements array in words.
   Label add_arguments_object;
   __ bind(&try_allocate);
-  __ cmp(r1, Operand::Zero());
+  __ SmiUntag(r1, SetCC);
   __ b(eq, &add_arguments_object);
-  __ mov(r1, Operand(r1, LSR, kSmiTagSize));
   __ add(r1, r1, Operand(FixedArray::kHeaderSize / kPointerSize));
   __ bind(&add_arguments_object);
   __ add(r1, r1, Operand(Heap::kArgumentsObjectSizeStrict / kPointerSize));

@@ -4158,8 +4145,7 @@ void ArgumentsAccessStub::GenerateNewStrict(MacroAssembler* masm) {
   __ LoadRoot(r3, Heap::kFixedArrayMapRootIndex);
   __ str(r3, FieldMemOperand(r4, FixedArray::kMapOffset));
   __ str(r1, FieldMemOperand(r4, FixedArray::kLengthOffset));
-  // Untag the length for the loop.
-  __ mov(r1, Operand(r1, LSR, kSmiTagSize));
+  __ SmiUntag(r1);

   // Copy the fixed array slots.
   Label loop;

@@ -4228,7 +4214,6 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {

   // Check that the first argument is a JSRegExp object.
   __ ldr(r0, MemOperand(sp, kJSRegExpOffset));
-  STATIC_ASSERT(kSmiTag == 0);
   __ JumpIfSmi(r0, &runtime);
   __ CompareObjectType(r0, r1, r1, JS_REGEXP_TYPE);
   __ b(ne, &runtime);

@@ -4236,7 +4221,7 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
   // Check that the RegExp has been compiled (data contains a fixed array).
   __ ldr(regexp_data, FieldMemOperand(r0, JSRegExp::kDataOffset));
   if (FLAG_debug_code) {
-    __ tst(regexp_data, Operand(kSmiTagMask));
+    __ SmiTst(regexp_data);
     __ Check(ne, "Unexpected type for RegExp data, FixedArray expected");
     __ CompareObjectType(regexp_data, r0, r0, FIXED_ARRAY_TYPE);
     __ Check(eq, "Unexpected type for RegExp data, FixedArray expected");

@@ -4341,7 +4326,7 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
   __ ldr(r3, FieldMemOperand(r3, String::kLengthOffset));
   __ cmp(r3, Operand(r1));
   __ b(ls, &runtime);
-  __ mov(r1, Operand(r1, ASR, kSmiTagSize));
+  __ SmiUntag(r1);

   STATIC_ASSERT(4 == kOneByteStringTag);
   STATIC_ASSERT(kTwoByteStringTag == 0);

@@ -4416,7 +4401,7 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
   __ add(r2, r9, Operand(r1, LSL, r3));

   __ ldr(r8, FieldMemOperand(subject, String::kLengthOffset));
-  __ mov(r8, Operand(r8, ASR, kSmiTagSize));
+  __ SmiUntag(r8);
   __ add(r3, r9, Operand(r8, LSL, r3));

   // Argument 2 (r1): Previous index.

@@ -4503,13 +4488,13 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
   __ ldr(r0,
          FieldMemOperand(last_match_info_elements, FixedArray::kLengthOffset));
   __ add(r2, r1, Operand(RegExpImpl::kLastMatchOverhead));
-  __ cmp(r2, Operand(r0, ASR, kSmiTagSize));
+  __ cmp(r2, Operand::SmiUntag(r0));
   __ b(gt, &runtime);

   // r1: number of capture registers
   // r4: subject string
   // Store the capture count.
-  __ mov(r2, Operand(r1, LSL, kSmiTagSize + kSmiShiftSize));  // To smi.
+  __ SmiTag(r2, r1);
   __ str(r2, FieldMemOperand(last_match_info_elements,
                              RegExpImpl::kLastCaptureCountOffset));
   // Store last subject and last input.

@@ -4553,7 +4538,7 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
   // Read the value from the static offsets vector buffer.
   __ ldr(r3, MemOperand(r2, kPointerSize, PostIndex));
   // Store the smi value in the last match info.
-  __ mov(r3, Operand(r3, LSL, kSmiTagSize));
+  __ SmiTag(r3);
   __ str(r3, MemOperand(r0, kPointerSize, PostIndex));
   __ jmp(&next_capture);
   __ bind(&done);

@@ -4601,7 +4586,7 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
   // (9) Sliced string.  Replace subject with parent.  Go to (4).
   // Load offset into r9 and replace subject string with parent.
   __ ldr(r9, FieldMemOperand(subject, SlicedString::kOffsetOffset));
-  __ mov(r9, Operand(r9, ASR, kSmiTagSize));
+  __ SmiUntag(r9);
   __ ldr(subject, FieldMemOperand(subject, SlicedString::kParentOffset));
   __ jmp(&check_underlying);  // Go to (4).
 #endif  // V8_INTERPRETED_REGEXP

@@ -4628,7 +4613,7 @@ void RegExpConstructResultStub::Generate(MacroAssembler* masm) {
   // FixedArray.
   int objects_size =
       (JSRegExpResult::kSize + FixedArray::kHeaderSize) / kPointerSize;
-  __ mov(r5, Operand(r1, LSR, kSmiTagSize + kSmiShiftSize));
+  __ SmiUntag(r5, r1);
   __ add(r2, r5, Operand(objects_size));
   __ Allocate(
       r2,  // In: Size, in words.

@@ -4671,7 +4656,7 @@ void RegExpConstructResultStub::Generate(MacroAssembler* masm) {
   __ mov(r2, Operand(factory->fixed_array_map()));
   __ str(r2, FieldMemOperand(r3, HeapObject::kMapOffset));
   // Set FixedArray length.
-  __ mov(r6, Operand(r5, LSL, kSmiTagSize));
+  __ SmiTag(r6, r5);
   __ str(r6, FieldMemOperand(r3, FixedArray::kLengthOffset));
   // Fill contents of fixed-array with undefined.
   __ LoadRoot(r2, Heap::kUndefinedValueRootIndex);

@@ -4988,7 +4973,7 @@ void StringCharCodeAtGenerator::GenerateFast(MacroAssembler* masm) {
   __ cmp(ip, Operand(index_));
   __ b(ls, index_out_of_range_);

-  __ mov(index_, Operand(index_, ASR, kSmiTagSize));
+  __ SmiUntag(index_);

   StringCharLoadGenerator::Generate(masm,
                                     object_,

@@ -4996,7 +4981,7 @@ void StringCharCodeAtGenerator::GenerateFast(MacroAssembler* masm) {
                                     result_,
                                     &call_runtime_);

-  __ mov(result_, Operand(result_, LSL, kSmiTagSize));
+  __ SmiTag(result_);
   __ bind(&exit_);
 }

@@ -5042,7 +5027,7 @@ void StringCharCodeAtGenerator::GenerateSlow(
   // is too complex (e.g., when the string needs to be flattened).
   __ bind(&call_runtime_);
   call_helper.BeforeCall(masm);
-  __ mov(index_, Operand(index_, LSL, kSmiTagSize));
+  __ SmiTag(index_);
   __ Push(object_, index_);
   __ CallRuntime(Runtime::kStringCharCodeAt, 2);
   __ Move(result_, r0);

@@ -5068,8 +5053,7 @@ void StringCharFromCodeGenerator::GenerateFast(MacroAssembler* masm) {

   __ LoadRoot(result_, Heap::kSingleCharacterStringCacheRootIndex);
   // At this point code register contains smi tagged ASCII char code.
-  STATIC_ASSERT(kSmiTag == 0);
-  __ add(result_, result_, Operand(code_, LSL, kPointerSizeLog2 - kSmiTagSize));
+  __ add(result_, result_, Operand::PointerOffsetFromSmiKey(code_));
   __ ldr(result_, FieldMemOperand(result_, FixedArray::kHeaderSize));
   __ CompareRoot(result_, Heap::kUndefinedValueRootIndex);
   __ b(eq, &slow_case_);

@@ -5494,9 +5478,8 @@ void SubStringStub::Generate(MacroAssembler* masm) {

   // Make sure first argument is a string.
   __ ldr(r0, MemOperand(sp, kStringOffset));
-  STATIC_ASSERT(kSmiTag == 0);
   // Do a JumpIfSmi, but fold its jump into the subsequent string test.
-  __ tst(r0, Operand(kSmiTagMask));
+  __ SmiTst(r0);
   Condition is_string = masm->IsObjectStringType(r0, r1, ne);
   ASSERT(is_string == eq);
   __ b(NegateCondition(is_string), &runtime);

@@ -5893,8 +5876,8 @@ void StringAddStub::Generate(MacroAssembler* masm) {
     __ bind(&strings_not_empty);
   }

-  __ mov(r2, Operand(r2, ASR, kSmiTagSize));
-  __ mov(r3, Operand(r3, ASR, kSmiTagSize));
+  __ SmiUntag(r2);
+  __ SmiUntag(r3);
   // Both strings are non-empty.
   // r0: first string
   // r1: second string

@@ -6236,7 +6219,7 @@ void ICCompareStub::GenerateSmis(MacroAssembler* masm) {
   } else {
     // Untag before subtracting to avoid handling overflow.
     __ SmiUntag(r1);
-    __ sub(r0, r1, SmiUntagOperand(r0));
+    __ sub(r0, r1, Operand::SmiUntag(r0));
   }
   __ Ret();

@@ -6270,10 +6253,7 @@ void ICCompareStub::GenerateNumbers(MacroAssembler* masm) {
   __ vldr(d1, r2, HeapNumber::kValueOffset);
   __ b(&left);
   __ bind(&right_smi);
-  __ SmiUntag(r2, r0);  // Can't clobber r0 yet.
-  SwVfpRegister single_scratch = d2.low();
-  __ vmov(single_scratch, r2);
-  __ vcvt_f64_s32(d1, single_scratch);
+  __ SmiToDouble(d1, r0);

   __ bind(&left);
   __ JumpIfSmi(r1, &left_smi);

@@ -6283,10 +6263,7 @@ void ICCompareStub::GenerateNumbers(MacroAssembler* masm) {
   __ vldr(d0, r2, HeapNumber::kValueOffset);
   __ b(&done);
   __ bind(&left_smi);
-  __ SmiUntag(r2, r1);  // Can't clobber r1 yet.
-  single_scratch = d3.low();
-  __ vmov(single_scratch, r2);
-  __ vcvt_f64_s32(d0, single_scratch);
+  __ SmiToDouble(d0, r1);

   __ bind(&done);
   // Compare operands.

@@ -6697,7 +6674,7 @@ void NameDictionaryLookupStub::GeneratePositiveLookup(MacroAssembler* masm,

   // Compute the capacity mask.
   __ ldr(scratch1, FieldMemOperand(elements, kCapacityOffset));
-  __ mov(scratch1, Operand(scratch1, ASR, kSmiTagSize));  // convert smi to int
+  __ SmiUntag(scratch1);
   __ sub(scratch1, scratch1, Operand(1));

   // Generate an unrolled loop that performs a few probes before

@@ -6778,7 +6755,7 @@ void NameDictionaryLookupStub::Generate(MacroAssembler* masm) {
   Label in_dictionary, maybe_in_dictionary, not_in_dictionary;

   __ ldr(mask, FieldMemOperand(dictionary, kCapacityOffset));
-  __ mov(mask, Operand(mask, ASR, kSmiTagSize));
+  __ SmiUntag(mask);
   __ sub(mask, mask, Operand(1));

   __ ldr(hash, FieldMemOperand(key, Name::kHashFieldOffset));

@@ -7176,7 +7153,7 @@ void StoreArrayLiteralElementStub::Generate(MacroAssembler* masm) {
   // Array literal has ElementsKind of FAST_*_ELEMENTS and value is an object.
   __ bind(&fast_elements);
   __ ldr(r5, FieldMemOperand(r1, JSObject::kElementsOffset));
-  __ add(r6, r5, Operand(r3, LSL, kPointerSizeLog2 - kSmiTagSize));
+  __ add(r6, r5, Operand::PointerOffsetFromSmiKey(r3));
   __ add(r6, r6, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
   __ str(r0, MemOperand(r6, 0));
   // Update the write barrier for the array store.

@@ -7188,7 +7165,7 @@ void StoreArrayLiteralElementStub::Generate(MacroAssembler* masm) {
   // and value is Smi.
   __ bind(&smi_element);
   __ ldr(r5, FieldMemOperand(r1, JSObject::kElementsOffset));
-  __ add(r6, r5, Operand(r3, LSL, kPointerSizeLog2 - kSmiTagSize));
+  __ add(r6, r5, Operand::PointerOffsetFromSmiKey(r3));
   __ str(r0, FieldMemOperand(r6, FixedArray::kHeaderSize));
   __ Ret();

@@ -440,7 +440,7 @@ void StringCharLoadGenerator::Generate(MacroAssembler* masm,
     Label indirect_string_loaded;
     __ ldr(result, FieldMemOperand(string, SlicedString::kOffsetOffset));
     __ ldr(string, FieldMemOperand(string, SlicedString::kParentOffset));
-    __ add(index, index, Operand(result, ASR, kSmiTagSize));
+    __ add(index, index, Operand::SmiUntag(result));
     __ jmp(&indirect_string_loaded);

     // Handle cons strings.

@@ -510,9 +510,9 @@ void SeqStringSetCharGenerator::Generate(MacroAssembler* masm,
                                          Register index,
                                          Register value) {
   if (FLAG_debug_code) {
-    __ tst(index, Operand(kSmiTagMask));
+    __ SmiTst(index);
     __ Check(eq, "Non-smi index");
-    __ tst(value, Operand(kSmiTagMask));
+    __ SmiTst(value);
     __ Check(eq, "Non-smi value");

     __ ldr(ip, FieldMemOperand(string, String::kLengthOffset));

@@ -540,10 +540,10 @@ void SeqStringSetCharGenerator::Generate(MacroAssembler* masm,
   STATIC_ASSERT(kSmiTagSize == 1 && kSmiTag == 0);
   if (encoding == String::ONE_BYTE_ENCODING) {
-    // Smis are tagged by left shift by 1, thus LSR by 1 to smi-untag inline.
-    __ strb(value, MemOperand(ip, index, LSR, 1));
+    __ strb(value, MemOperand(ip, index, LSR, kSmiTagSize));
   } else {
     // No need to untag a smi for two-byte addressing.
-    __ strh(value, MemOperand(ip, index));
+    __ strh(value, MemOperand(ip, index));  // LSL(1 - kSmiTagSize).
   }
 }

@@ -132,7 +132,7 @@ static void Generate_DebugBreakCallHelper(MacroAssembler* masm,
         __ tst(reg, Operand(0xc0000000));
         __ Assert(eq, "Unable to encode value as smi");
       }
-      __ mov(reg, Operand(reg, LSL, kSmiTagSize));
+      __ SmiTag(reg);
     }
   }
   __ stm(db_w, sp, object_regs | non_object_regs);

@@ -154,7 +154,7 @@ static void Generate_DebugBreakCallHelper(MacroAssembler* masm,
     int r = JSCallerSavedCode(i);
     Register reg = { r };
     if ((non_object_regs & (1 << r)) != 0) {
-      __ mov(reg, Operand(reg, LSR, kSmiTagSize));
+      __ SmiUntag(reg);
     }
     if (FLAG_debug_code &&
         (((object_regs |non_object_regs) & (1 << r)) == 0)) {

@@ -1102,6 +1102,7 @@ int Decoder::DecodeType7(Instruction* instr) {
 // vmov: Rt = Sn
 // vcvt: Dd = Sm
 // vcvt: Sd = Dm
+// vcvt.f64.s32 Dd, Dd, #<fbits>
 // Dd = vabs(Dm)
 // Dd = vneg(Dm)
 // Dd = vadd(Dn, Dm)

@@ -1138,6 +1139,13 @@ void Decoder::DecodeTypeVFP(Instruction* instr) {
       DecodeVCVTBetweenDoubleAndSingle(instr);
     } else if ((instr->Opc2Value() == 0x8) && (instr->Opc3Value() & 0x1)) {
       DecodeVCVTBetweenFloatingPointAndInteger(instr);
+    } else if ((instr->Opc2Value() == 0xA) && (instr->Opc3Value() == 0x3) &&
+               (instr->Bit(8) == 1)) {
+      // vcvt.f64.s32 Dd, Dd, #<fbits>
+      int fraction_bits = 32 - ((instr->Bit(5) << 4) | instr->Bits(3, 0));
+      Format(instr, "vcvt'cond.f64.s32 'Dd, 'Dd");
+      out_buffer_pos_ += OS::SNPrintF(out_buffer_ + out_buffer_pos_,
+                                      ", #%d", fraction_bits);
     } else if (((instr->Opc2Value() >> 1) == 0x6) &&
                (instr->Opc3Value() & 0x1)) {
       DecodeVCVTBetweenFloatingPointAndInteger(instr);

@@ -1198,7 +1198,7 @@ void FullCodeGenerator::VisitForInStatement(ForInStatement* stmt) {
   // Get the current entry of the array into register r3.
   __ ldr(r2, MemOperand(sp, 2 * kPointerSize));
   __ add(r2, r2, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
-  __ ldr(r3, MemOperand(r2, r0, LSL, kPointerSizeLog2 - kSmiTagSize));
+  __ ldr(r3, MemOperand::PointerAddressFromSmiKey(r2, r0));

   // Get the expected map from the stack or a smi in the
   // permanent slow case into register r2.

@@ -2263,23 +2263,18 @@ void FullCodeGenerator::EmitInlineSmiBinaryOp(BinaryOperation* expr,
   // BinaryOpStub::GenerateSmiSmiOperation for comments.
   switch (op) {
     case Token::SAR:
-      __ b(&stub_call);
       __ GetLeastBitsFromSmi(scratch1, right, 5);
       __ mov(right, Operand(left, ASR, scratch1));
       __ bic(right, right, Operand(kSmiTagMask));
       break;
     case Token::SHL: {
-      __ b(&stub_call);
       __ SmiUntag(scratch1, left);
       __ GetLeastBitsFromSmi(scratch2, right, 5);
       __ mov(scratch1, Operand(scratch1, LSL, scratch2));
-      __ add(scratch2, scratch1, Operand(0x40000000), SetCC);
-      __ b(mi, &stub_call);
-      __ SmiTag(right, scratch1);
+      __ TrySmiTag(right, scratch1, &stub_call);
       break;
     }
     case Token::SHR: {
-      __ b(&stub_call);
       __ SmiUntag(scratch1, left);
       __ GetLeastBitsFromSmi(scratch2, right, 5);
       __ mov(scratch1, Operand(scratch1, LSR, scratch2));

@@ -2858,7 +2853,7 @@ void FullCodeGenerator::EmitIsSmi(CallRuntime* expr) {
                          &if_true, &if_false, &fall_through);

   PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
-  __ tst(r0, Operand(kSmiTagMask));
+  __ SmiTst(r0);
   Split(eq, if_true, if_false, fall_through);

   context()->Plug(if_true, if_false);

@@ -2879,7 +2874,7 @@ void FullCodeGenerator::EmitIsNonNegativeSmi(CallRuntime* expr) {
                          &if_true, &if_false, &fall_through);

   PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
-  __ tst(r0, Operand(kSmiTagMask | 0x80000000));
+  __ NonNegativeSmiTst(r0);
   Split(eq, if_true, if_false, fall_through);

   context()->Plug(if_true, if_false);

@@ -3006,16 +3001,13 @@ void FullCodeGenerator::EmitIsStringWrapperSafeForDefaultValueOf(
   __ LoadInstanceDescriptors(r1, r4);
   // r4: descriptor array.
   // r3: valid entries in the descriptor array.
-  STATIC_ASSERT(kSmiTag == 0);
-  STATIC_ASSERT(kSmiTagSize == 1);
-  STATIC_ASSERT(kPointerSize == 4);
   __ mov(ip, Operand(DescriptorArray::kDescriptorSize));
   __ mul(r3, r3, ip);
   // Calculate location of the first key name.
   __ add(r4, r4, Operand(DescriptorArray::kFirstOffset - kHeapObjectTag));
   // Calculate the end of the descriptor array.
   __ mov(r2, r4);
-  __ add(r2, r2, Operand(r3, LSL, kPointerSizeLog2 - kSmiTagSize));
+  __ add(r2, r2, Operand::PointerOffsetFromSmiKey(r3));

   // Loop through all the keys in the descriptor array. If one of these is the
   // string "valueOf" the result is false.

@@ -3783,12 +3775,11 @@ void FullCodeGenerator::EmitGetFromCache(CallRuntime* expr) {

   Label done, not_found;
-  // tmp now holds finger offset as a smi.
-  STATIC_ASSERT(kSmiTag == 0 && kSmiTagSize == 1);
   __ ldr(r2, FieldMemOperand(cache, JSFunctionResultCache::kFingerOffset));
+  // r2 now holds finger offset as a smi.
   __ add(r3, cache, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
   // r3 now points to the start of fixed array elements.
-  __ ldr(r2, MemOperand(r3, r2, LSL, kPointerSizeLog2 - kSmiTagSize, PreIndex));
+  __ ldr(r2, MemOperand::PointerAddressFromSmiKey(r3, r2, PreIndex));
   // Note side effect of PreIndex: r3 now points to the key of the pair.
   __ cmp(key, r2);
   __ b(ne, &not_found);

@@ -4751,9 +4742,7 @@ void FullCodeGenerator::EnterFinallyBlock() {
   __ push(result_register());
   // Cook return address in link register to stack (smi encoded Code* delta)
   __ sub(r1, lr, Operand(masm_->CodeObject()));
-  ASSERT_EQ(1, kSmiTagSize + kSmiShiftSize);
-  STATIC_ASSERT(kSmiTag == 0);
-  __ add(r1, r1, Operand(r1));  // Convert to smi.
+  __ SmiTag(r1);

   // Store result register while executing finally block.
   __ push(r1);

@@ -4807,8 +4796,7 @@ void FullCodeGenerator::ExitFinallyBlock() {

   // Uncook return address and return.
   __ pop(result_register());
-  ASSERT_EQ(1, kSmiTagSize + kSmiShiftSize);
-  __ mov(r1, Operand(r1, ASR, 1));  // Un-smi-tag value.
+  __ SmiUntag(r1);
   __ add(pc, r1, Operand(masm_->CodeObject()));
 }

@@ -290,10 +290,7 @@ static void GenerateFastArrayLoad(MacroAssembler* masm,
   __ b(hs, out_of_range);
   // Fast case: Do the load.
   __ add(scratch1, elements, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
-  // The key is a smi.
-  STATIC_ASSERT(kSmiTag == 0 && kSmiTagSize < kPointerSizeLog2);
-  __ ldr(scratch2,
-         MemOperand(scratch1, key, LSL, kPointerSizeLog2 - kSmiTagSize));
+  __ ldr(scratch2, MemOperand::PointerAddressFromSmiKey(scratch1, key));
   __ LoadRoot(ip, Heap::kTheHoleValueRootIndex);
   __ cmp(scratch2, ip);
   // In case the loaded value is the_hole we have to consult GetProperty

@@ -567,7 +564,7 @@ void KeyedCallIC::GenerateMegamorphic(MacroAssembler* masm, int argc) {
   __ LoadRoot(ip, Heap::kHashTableMapRootIndex);
   __ cmp(r3, ip);
   __ b(ne, &slow_load);
-  __ mov(r0, Operand(r2, ASR, kSmiTagSize));
+  __ SmiUntag(r0, r2);
   // r0: untagged index
   __ LoadFromNumberDictionary(&slow_load, r4, r2, r1, r0, r3, r5);
   __ IncrementCounter(counters->keyed_call_generic_smi_dict(), 1, r0, r3);

@@ -960,7 +957,7 @@ void KeyedLoadIC::GenerateGeneric(MacroAssembler* masm) {
   __ LoadRoot(ip, Heap::kHashTableMapRootIndex);
   __ cmp(r3, ip);
   __ b(ne, &slow);
-  __ mov(r2, Operand(r0, ASR, kSmiTagSize));
+  __ SmiUntag(r2, r0);
   __ LoadFromNumberDictionary(&slow, r4, r0, r0, r2, r3, r5);
   __ Ret();

@@ -1133,7 +1130,7 @@ void KeyedLoadIC::GenerateIndexedInterceptor(MacroAssembler* masm) {
   __ JumpIfSmi(r1, &slow);

   // Check that the key is an array index, that is Uint32.
-  __ tst(r0, Operand(kSmiTagMask | kSmiSignMask));
+  __ NonNegativeSmiTst(r0);
   __ b(ne, &slow);

   // Get the map of the receiver.

@@ -1321,8 +1318,7 @@ static void KeyedStoreGenerateGenericHelper(
   }
   // It's irrelevant whether array is smi-only or not when writing a smi.
   __ add(address, elements, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
-  __ add(address, address, Operand(key, LSL, kPointerSizeLog2 - kSmiTagSize));
-  __ str(value, MemOperand(address));
+  __ str(value, MemOperand::PointerAddressFromSmiKey(address, key));
   __ Ret();

   __ bind(&non_smi_value);

@@ -1338,7 +1334,7 @@ static void KeyedStoreGenerateGenericHelper(
     __ str(scratch_value, FieldMemOperand(receiver, JSArray::kLengthOffset));
   }
   __ add(address, elements, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
-  __ add(address, address, Operand(key, LSL, kPointerSizeLog2 - kSmiTagSize));
+  __ add(address, address, Operand::PointerOffsetFromSmiKey(key));
   __ str(value, MemOperand(address));
   // Update write barrier for the elements array address.
   __ mov(scratch_value, value);  // Preserve the value which is returned.

@@ -1448,7 +1448,6 @@ void LCodeGen::DoDivI(LDivI* instr) {

   const Register left = ToRegister(instr->left());
   const Register right = ToRegister(instr->right());
-  const Register scratch = scratch0();
   const Register result = ToRegister(instr->result());

   // Check for x / 0.

@@ -1497,8 +1496,8 @@ void LCodeGen::DoDivI(LDivI* instr) {
   // to be tagged to Smis. If that is not possible, deoptimize.
   DeferredDivI* deferred = new(zone()) DeferredDivI(this, instr);

-  __ TrySmiTag(left, &deoptimize, scratch);
-  __ TrySmiTag(right, &deoptimize, scratch);
+  __ TrySmiTag(left, &deoptimize);
+  __ TrySmiTag(right, &deoptimize);

   __ b(al, deferred->entry());
   __ bind(deferred->exit());

@@ -1950,7 +1949,7 @@ void LCodeGen::DoValueOf(LValueOf* instr) {
   Label done;

   // If the object is a smi return the object.
-  __ tst(input, Operand(kSmiTagMask));
+  __ SmiTst(input);
   __ Move(result, input, eq);
   __ b(eq, &done);

@@ -1975,7 +1974,7 @@ void LCodeGen::DoDateField(LDateField* instr) {
   ASSERT(!scratch.is(scratch0()));
   ASSERT(!scratch.is(object));

-  __ tst(object, Operand(kSmiTagMask));
+  __ SmiTst(object);
   DeoptimizeIf(eq, instr->environment());
   __ CompareObjectType(object, scratch, scratch, JS_DATE_TYPE);
   DeoptimizeIf(ne, instr->environment());

@@ -2261,7 +2260,7 @@ void LCodeGen::DoBranch(LBranch* instr) {
       __ JumpIfSmi(reg, true_label);
     } else if (expected.NeedsMap()) {
       // If we need a map later and have a Smi -> deopt.
-      __ tst(reg, Operand(kSmiTagMask));
+      __ SmiTst(reg);
       DeoptimizeIf(eq, instr->environment());
     }

@@ -2497,7 +2496,7 @@ void LCodeGen::DoIsSmiAndBranch(LIsSmiAndBranch* instr) {
   int false_block = chunk_->LookupDestination(instr->false_block_id());

   Register input_reg = EmitLoadRegister(instr->value(), ip);
-  __ tst(input_reg, Operand(kSmiTagMask));
+  __ SmiTst(input_reg);
   EmitBranch(true_block, false_block, eq);
 }

@@ -3368,8 +3367,7 @@ void LCodeGen::DoLoadKeyedFixedArray(LLoadKeyed* instr) {
     // during bound check elimination with the index argument to the bounds
     // check, which can be tagged, so that case must be handled here, too.
     if (instr->hydrogen()->key()->representation().IsTagged()) {
-      __ add(scratch, elements,
-             Operand(key, LSL, kPointerSizeLog2 - kSmiTagSize));
+      __ add(scratch, elements, Operand::PointerOffsetFromSmiKey(key));
     } else {
       __ add(scratch, elements, Operand(key, LSL, kPointerSizeLog2));
     }

@@ -3380,7 +3378,7 @@ void LCodeGen::DoLoadKeyedFixedArray(LLoadKeyed* instr) {
   // Check for the hole value.
   if (instr->hydrogen()->RequiresHoleCheck()) {
     if (IsFastSmiElementsKind(instr->hydrogen()->elements_kind())) {
-      __ tst(result, Operand(kSmiTagMask));
+      __ SmiTst(result);
       DeoptimizeIf(ne, instr->environment());
     } else {
       __ LoadRoot(scratch, Heap::kTheHoleValueRootIndex);

@@ -3523,7 +3521,7 @@ void LCodeGen::DoWrapReceiver(LWrapReceiver* instr) {
   __ b(eq, &global_object);

   // Deoptimize if the receiver is not a JS object.
-  __ tst(receiver, Operand(kSmiTagMask));
+  __ SmiTst(receiver);
   DeoptimizeIf(eq, instr->environment());
   __ CompareObjectType(receiver, scratch, scratch, FIRST_SPEC_OBJECT_TYPE);
   DeoptimizeIf(lt, instr->environment());

@@ -4221,7 +4219,7 @@ void LCodeGen::DoStoreNamedField(LStoreNamedField* instr) {
   } else if (FLAG_track_heap_object_fields && representation.IsHeapObject()) {
     Register value = ToRegister(instr->value());
     if (!instr->hydrogen()->value()->type().IsHeapObject()) {
-      __ tst(value, Operand(kSmiTagMask));
+      __ SmiTst(value);
       DeoptimizeIf(eq, instr->environment());
     }
   } else if (FLAG_track_double_fields && representation.IsDouble()) {

@@ -4458,8 +4456,7 @@ void LCodeGen::DoStoreKeyedFixedArray(LStoreKeyed* instr) {
     // during bound check elimination with the index argument to the bounds
     // check, which can be tagged, so that case must be handled here, too.
     if (instr->hydrogen()->key()->representation().IsTagged()) {
-      __ add(scratch, elements,
-             Operand(key, LSL, kPointerSizeLog2 - kSmiTagSize));
+      __ add(scratch, elements, Operand::PointerOffsetFromSmiKey(key));
     } else {
       __ add(scratch, elements, Operand(key, LSL, kPointerSizeLog2));
     }

@@ -5144,14 +5141,14 @@ void LCodeGen::DoDoubleToI(LDoubleToI* instr) {

 void LCodeGen::DoCheckSmi(LCheckSmi* instr) {
   LOperand* input = instr->value();
-  __ tst(ToRegister(input), Operand(kSmiTagMask));
+  __ SmiTst(ToRegister(input));
   DeoptimizeIf(ne, instr->environment());
 }


 void LCodeGen::DoCheckNonSmi(LCheckNonSmi* instr) {
   LOperand* input = instr->value();
-  __ tst(ToRegister(input), Operand(kSmiTagMask));
+  __ SmiTst(ToRegister(input));
   DeoptimizeIf(eq, instr->environment());
 }

@@ -5830,7 +5827,7 @@ void LCodeGen::DoForInPrepareMap(LForInPrepareMap* instr) {
   __ cmp(r0, null_value);
   DeoptimizeIf(eq, instr->environment());

-  __ tst(r0, Operand(kSmiTagMask));
+  __ SmiTst(r0);
   DeoptimizeIf(eq, instr->environment());

   STATIC_ASSERT(FIRST_JS_PROXY_TYPE == FIRST_SPEC_OBJECT_TYPE);

@@ -5898,8 +5895,7 @@ void LCodeGen::DoLoadFieldByIndex(LLoadFieldByIndex* instr) {
   __ cmp(index, Operand::Zero());
   __ b(lt, &out_of_object);

-  STATIC_ASSERT(kPointerSizeLog2 > kSmiTagSize);
-  __ add(scratch, object, Operand(index, LSL, kPointerSizeLog2 - kSmiTagSize));
+  __ add(scratch, object, Operand::PointerOffsetFromSmiKey(index));
   __ ldr(result, FieldMemOperand(scratch, JSObject::kHeaderSize));

   __ b(&done);

@@ -5907,7 +5903,8 @@ void LCodeGen::DoLoadFieldByIndex(LLoadFieldByIndex* instr) {
   __ bind(&out_of_object);
   __ ldr(result, FieldMemOperand(object, JSObject::kPropertiesOffset));
   // Index is equal to negated out of object property index plus 1.
-  __ sub(scratch, result, Operand(index, LSL, kPointerSizeLog2 - kSmiTagSize));
+  STATIC_ASSERT(kSmiTag == 0 && kSmiTagSize < kPointerSizeLog2);
+  __ sub(scratch, result, Operand::PointerOffsetFromSmiKey(index));
   __ ldr(result, FieldMemOperand(scratch,
                                  FixedArray::kHeaderSize - kPointerSize));
   __ bind(&done);

@@ -495,9 +495,7 @@ void MacroAssembler::RecordWrite(Register object,
   Label done;

   if (smi_check == INLINE_SMI_CHECK) {
-    ASSERT_EQ(0, kSmiTag);
-    tst(value, Operand(kSmiTagMask));
-    b(eq, &done);
+    JumpIfSmi(value, &done);
   }

   CheckPageFlag(value,

@@ -978,7 +976,7 @@ void MacroAssembler::InitializeNewString(Register string,
                                          Heap::RootListIndex map_index,
                                          Register scratch1,
                                          Register scratch2) {
-  mov(scratch1, Operand(length, LSL, kSmiTagSize));
+  SmiTag(scratch1, length);
   LoadRoot(scratch2, map_index);
   str(scratch1, FieldMemOperand(string, String::kLengthOffset));
   mov(scratch1, Operand(String::kEmptyHashField));

@@ -1221,7 +1219,7 @@ void MacroAssembler::InvokeFunction(Register fun,
   ldr(expected_reg,
       FieldMemOperand(code_reg,
                       SharedFunctionInfo::kFormalParameterCountOffset));
-  mov(expected_reg, Operand(expected_reg, ASR, kSmiTagSize));
+  SmiUntag(expected_reg);
   ldr(code_reg,
       FieldMemOperand(r1, JSFunction::kCodeEntryOffset));

@@ -1359,7 +1357,7 @@ void MacroAssembler::JumpToHandlerEntry() {
   mov(r2, Operand(r2, LSR, StackHandler::kKindWidth));  // Handler index.
   ldr(r2, MemOperand(r3, r2, LSL, kPointerSizeLog2));  // Smi-tagged offset.
   add(r1, r1, Operand(Code::kHeaderSize - kHeapObjectTag));  // Code start.
-  add(pc, r1, Operand(r2, ASR, kSmiTagSize));  // Jump.
+  add(pc, r1, Operand::SmiUntag(r2));  // Jump
 }


@@ -1575,7 +1573,7 @@ void MacroAssembler::LoadFromNumberDictionary(Label* miss,

   // Compute the capacity mask.
   ldr(t1, FieldMemOperand(elements, SeededNumberDictionary::kCapacityOffset));
-  mov(t1, Operand(t1, ASR, kSmiTagSize));  // convert smi to int
+  SmiUntag(t1);
   sub(t1, t1, Operand(1));

   // Generate an unrolled loop that performs a few probes before giving up.

@@ -2095,14 +2093,10 @@ void MacroAssembler::StoreNumberToDoubleElements(Register value_reg,
   b(&store);

   bind(&smi_value);
-  Register untagged_value = scratch1;
-  SmiUntag(untagged_value, value_reg);
-  vmov(s2, untagged_value);
-  vcvt_f64_s32(d0, s2);
+  SmiToDouble(d0, value_reg);

   bind(&store);
-  add(scratch1, elements_reg,
-      Operand(key_reg, LSL, kDoubleSizeLog2 - kSmiTagSize));
+  add(scratch1, elements_reg, Operand::DoubleOffsetFromSmiKey(key_reg));
   vstr(d0, FieldMemOperand(scratch1,
                            FixedDoubleArray::kHeaderSize - elements_offset));
 }

@@ -2390,70 +2384,21 @@ void MacroAssembler::IndexFromHash(Register hash, Register index) {
              (1 << String::kArrayIndexValueBits));
   // We want the smi-tagged index in key.  kArrayIndexValueMask has zeros in
   // the low kHashShift bits.
   STATIC_ASSERT(kSmiTag == 0);
   Ubfx(hash, hash, String::kHashShift, String::kArrayIndexValueBits);
-  mov(index, Operand(hash, LSL, kSmiTagSize));
+  SmiTag(index, hash);
 }


-void MacroAssembler::IntegerToDoubleConversionWithVFP3(Register inReg,
-                                                       Register outHighReg,
-                                                       Register outLowReg) {
-  // ARMv7 VFP3 instructions to implement integer to double conversion.
-  mov(r7, Operand(inReg, ASR, kSmiTagSize));
-  vmov(s15, r7);
-  vcvt_f64_s32(d7, s15);
-  vmov(outLowReg, outHighReg, d7);
-}
-
-
-void MacroAssembler::ObjectToDoubleVFPRegister(Register object,
-                                               DwVfpRegister result,
-                                               Register scratch1,
-                                               Register scratch2,
-                                               Register heap_number_map,
-                                               SwVfpRegister scratch3,
-                                               Label* not_number,
-                                               ObjectToDoubleFlags flags) {
-  Label done;
-  if ((flags & OBJECT_NOT_SMI) == 0) {
-    Label not_smi;
-    JumpIfNotSmi(object, &not_smi);
-    // Remove smi tag and convert to double.
-    mov(scratch1, Operand(object, ASR, kSmiTagSize));
-    vmov(scratch3, scratch1);
-    vcvt_f64_s32(result, scratch3);
-    b(&done);
-    bind(&not_smi);
+void MacroAssembler::SmiToDouble(DwVfpRegister value, Register smi) {
+  ASSERT(value.code() < 16);
+  if (CpuFeatures::IsSupported(VFP3)) {
+    vmov(value.low(), smi);
+    vcvt_f64_s32(value, 1);
+  } else {
+    SmiUntag(ip, smi);
+    vmov(value.low(), ip);
+    vcvt_f64_s32(value, value.low());
   }
-  // Check for heap number and load double value from it.
-  ldr(scratch1, FieldMemOperand(object, HeapObject::kMapOffset));
-  sub(scratch2, object, Operand(kHeapObjectTag));
-  cmp(scratch1, heap_number_map);
-  b(ne, not_number);
-  if ((flags & AVOID_NANS_AND_INFINITIES) != 0) {
-    // If exponent is all ones the number is either a NaN or +/-Infinity.
-    ldr(scratch1, FieldMemOperand(object, HeapNumber::kExponentOffset));
-    Sbfx(scratch1,
-         scratch1,
-         HeapNumber::kExponentShift,
-         HeapNumber::kExponentBits);
-    // All-one value sign extend to -1.
-    cmp(scratch1, Operand(-1));
-    b(eq, not_number);
-  }
-  vldr(result, scratch2, HeapNumber::kValueOffset);
-  bind(&done);
 }


-void MacroAssembler::SmiToDoubleVFPRegister(Register smi,
-                                            DwVfpRegister value,
-                                            Register scratch1,
-                                            SwVfpRegister scratch2) {
-  mov(scratch1, Operand(smi, ASR, kSmiTagSize));
-  vmov(scratch2, scratch1);
-  vcvt_f64_s32(value, scratch2);
-}
-
-
@@ -2610,7 +2555,7 @@ void MacroAssembler::GetLeastBitsFromSmi(Register dst,
   if (CpuFeatures::IsSupported(ARMv7) && !predictable_code_size()) {
     ubfx(dst, src, kSmiTagSize, num_least_bits);
   } else {
-    mov(dst, Operand(src, ASR, kSmiTagSize));
+    SmiUntag(dst, src);
     and_(dst, dst, Operand((1 << num_least_bits) - 1));
   }
 }

@@ -3005,7 +2950,7 @@ void MacroAssembler::JumpIfNotBothSmi(Register reg1,
 void MacroAssembler::UntagAndJumpIfSmi(
     Register dst, Register src, Label* smi_case) {
   STATIC_ASSERT(kSmiTag == 0);
-  mov(dst, Operand(src, ASR, kSmiTagSize), SetCC);
+  SmiUntag(dst, src, SetCC);
   b(cc, smi_case);  // Shifter carry is not set for a smi.
 }

@@ -3013,7 +2958,7 @@ void MacroAssembler::JumpIfNotBothSmi(Register reg1,
 void MacroAssembler::UntagAndJumpIfNotSmi(
     Register dst, Register src, Label* non_smi_case) {
   STATIC_ASSERT(kSmiTag == 0);
-  mov(dst, Operand(src, ASR, kSmiTagSize), SetCC);
+  SmiUntag(dst, src, SetCC);
   b(cs, non_smi_case);  // Shifter carry is set for a non-smi.
 }

@@ -3120,7 +3065,6 @@ void MacroAssembler::JumpIfNotBothSequentialAsciiStrings(Register first,
                                                          Register scratch2,
                                                          Label* failure) {
   // Check that neither is a smi.
-  STATIC_ASSERT(kSmiTag == 0);
   and_(scratch1, first, Operand(second));
   JumpIfSmi(scratch1, failure);
   JumpIfNonSmisNotBothSequentialAsciiStrings(first,

@ -44,12 +44,6 @@ inline MemOperand FieldMemOperand(Register object, int offset) {
|
||||
}
|
||||
|
||||
|
||||
inline Operand SmiUntagOperand(Register object) {
|
||||
return Operand(object, ASR, kSmiTagSize);
|
||||
}
|
||||
|
||||
|
||||
|
||||
// Give alias names to registers
|
||||
const Register cp = { 8 }; // JavaScript context pointer
|
||||
const Register kRootRegister = { 10 }; // Roots array pointer.
|
||||
@@ -62,16 +56,6 @@ enum TaggingMode {
  DONT_TAG_RESULT
};

// Flags used for the ObjectToDoubleVFPRegister function.
enum ObjectToDoubleFlags {
  // No special flags.
  NO_OBJECT_TO_DOUBLE_FLAGS = 0,
  // Object is known to be a non-smi.
  OBJECT_NOT_SMI = 1 << 0,
  // Don't load NaNs or infinities; branch to the non-number case instead.
  AVOID_NANS_AND_INFINITIES = 1 << 1
};


enum RememberedSetAction { EMIT_REMEMBERED_SET, OMIT_REMEMBERED_SET };
enum SmiCheck { INLINE_SMI_CHECK, OMIT_SMI_CHECK };
@@ -974,31 +958,9 @@ class MacroAssembler: public Assembler {
  void GetLeastBitsFromSmi(Register dst, Register src, int num_least_bits);
  void GetLeastBitsFromInt32(Register dst, Register src, int num_least_bits);

  // Uses VFP instructions to convert a Smi to a double.
  void IntegerToDoubleConversionWithVFP3(Register inReg,
                                         Register outHighReg,
                                         Register outLowReg);

  // Load the value of a number object into a VFP double register. If the
  // object is not a number, a jump to the label not_number is performed and
  // the VFP double register is unchanged.
  void ObjectToDoubleVFPRegister(
      Register object,
      DwVfpRegister value,
      Register scratch1,
      Register scratch2,
      Register heap_number_map,
      SwVfpRegister scratch3,
      Label* not_number,
      ObjectToDoubleFlags flags = NO_OBJECT_TO_DOUBLE_FLAGS);

  // Load the value of a smi object into a VFP double register. The register
  // scratch1 can be the same register as smi, in which case smi will hold the
  // untagged value afterwards.
  void SmiToDoubleVFPRegister(Register smi,
                              DwVfpRegister value,
                              Register scratch1,
                              SwVfpRegister scratch2);
  // Load the value of a smi object into a double register.
  // The register value must be between d0 and d15.
  void SmiToDouble(DwVfpRegister value, Register smi);

  // Check if a double can be exactly represented as a signed 32-bit integer.
  // Z flag set to one if true.
@@ -1228,18 +1190,21 @@ class MacroAssembler: public Assembler {
  // Try to convert int32 to smi. If the value is too large, preserve
  // the original value and jump to not_a_smi. Destroys scratch and
  // sets flags.
  void TrySmiTag(Register reg, Label* not_a_smi, Register scratch) {
    mov(scratch, reg);
    SmiTag(scratch, SetCC);
  void TrySmiTag(Register reg, Label* not_a_smi) {
    TrySmiTag(reg, reg, not_a_smi);
  }
  void TrySmiTag(Register reg, Register src, Label* not_a_smi) {
    SmiTag(ip, src, SetCC);
    b(vs, not_a_smi);
    mov(reg, scratch);
    mov(reg, ip);
  }


  void SmiUntag(Register reg, SBit s = LeaveCC) {
    mov(reg, Operand(reg, ASR, kSmiTagSize), s);
    mov(reg, Operand::SmiUntag(reg), s);
  }
  void SmiUntag(Register dst, Register src, SBit s = LeaveCC) {
    mov(dst, Operand(src, ASR, kSmiTagSize), s);
    mov(dst, Operand::SmiUntag(src), s);
  }

  // Untag the source value into destination and jump if source is a smi.
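
TrySmiTag branches on the overflow flag, which works because, to my understanding, V8's ARM SmiTag is implemented as an add of the register to itself (reg + reg == reg << 1), and ADD with SetCC raises V exactly when the doubled value no longer fits in 32 signed bits. A hedged sketch of the resulting range check:

// Sketch: an int32 fits in a smi iff doubling it does not overflow int32.
#include <cassert>
#include <cstdint>

static bool FitsInSmi(int32_t value) {
  // Models "adds ip, src, src; bvs not_a_smi": signed overflow of
  // value + value is exactly the smi range check |value| < 2^30.
  int64_t doubled = static_cast<int64_t>(value) * 2;
  return doubled >= INT32_MIN && doubled <= INT32_MAX;
}

int main() {
  assert(FitsInSmi((1 << 30) - 1));
  assert(!FitsInSmi(1 << 30));
  assert(FitsInSmi(-(1 << 30)));
  assert(!FitsInSmi(-(1 << 30) - 1));
  return 0;
}
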
@@ -1250,6 +1215,13 @@ class MacroAssembler: public Assembler {
  // Source and destination can be the same register.
  void UntagAndJumpIfNotSmi(Register dst, Register src, Label* non_smi_case);

  // Test if the register contains a smi (the masked bits are 0, i.e. eq, if true).
  inline void SmiTst(Register value) {
    tst(value, Operand(kSmiTagMask));
  }
  inline void NonNegativeSmiTst(Register value) {
    tst(value, Operand(kSmiTagMask | kSmiSignMask));
  }
  // Jump if the register contains a smi.
  inline void JumpIfSmi(Register value, Label* smi_label) {
    tst(value, Operand(kSmiTagMask));
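
For reference, kSmiTagMask is 1 and kSmiSignMask is the top bit, so NonNegativeSmiTst passes (eq) only when both the tag bit and the sign bit are clear. A small model, with constants assumed to mirror the V8 ones:

// Sketch: eq corresponds to the masked bits being zero after tst.
#include <cassert>
#include <cstdint>

const uint32_t kTagMask = 1;            // assumed kSmiTagMask
const uint32_t kSignMask = 0x80000000;  // assumed kSmiSignMask

static bool IsSmi(uint32_t v) { return (v & kTagMask) == 0; }
static bool IsNonNegativeSmi(uint32_t v) {
  return (v & (kTagMask | kSignMask)) == 0;
}

int main() {
  uint32_t positive_smi = 7u << 1;
  uint32_t negative_smi = static_cast<uint32_t>(-7) << 1;
  assert(IsSmi(positive_smi) && IsSmi(negative_smi));
  assert(IsNonNegativeSmi(positive_smi) && !IsNonNegativeSmi(negative_smi));
  return 0;
}
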
@@ -2698,6 +2698,7 @@ void Simulator::DecodeType7(Instruction* instr) {
// vmov: Rt = Sn
// vcvt: Dd = Sm
// vcvt: Sd = Dm
// vcvt.f64.s32 Dd, Dd, #<fbits>
// Dd = vabs(Dm)
// Dd = vneg(Dm)
// Dd = vadd(Dn, Dm)
@@ -2746,6 +2747,13 @@ void Simulator::DecodeTypeVFP(Instruction* instr) {
    DecodeVCVTBetweenDoubleAndSingle(instr);
  } else if ((instr->Opc2Value() == 0x8) && (instr->Opc3Value() & 0x1)) {
    DecodeVCVTBetweenFloatingPointAndInteger(instr);
  } else if ((instr->Opc2Value() == 0xA) && (instr->Opc3Value() == 0x3) &&
             (instr->Bit(8) == 1)) {
    // vcvt.f64.s32 Dd, Dd, #<fbits>
    int fraction_bits = 32 - ((instr->Bit(5) << 4) | instr->Bits(3, 0));
    int fixed_value = get_sinteger_from_s_register(vd * 2);
    double divide = 1 << fraction_bits;
    set_d_register_from_double(vd, fixed_value / divide);
  } else if (((instr->Opc2Value() >> 1) == 0x6) &&
             (instr->Opc3Value() & 0x1)) {
    DecodeVCVTBetweenFloatingPointAndInteger(instr);
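
The simulator inverts the encoding used on the assembler side, where the i bit and the imm4 field together hold 32 - fraction_bits. A quick roundtrip sketch of that arithmetic:

// Sketch: encode/decode roundtrip for the fbits field of vcvt.f64.s32.
#include <cassert>

int main() {
  for (int fbits = 1; fbits <= 32; ++fbits) {
    int i = ((32 - fbits) >> 4) & 1;       // assembler side
    int imm4 = (32 - fbits) & 0xf;
    int decoded = 32 - ((i << 4) | imm4);  // simulator side
    assert(decoded == fbits);
  }
  return 0;
}
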
@@ -1680,8 +1680,6 @@ Handle<Code> CallStubCompiler::CompileArrayPushCall(

      // Get the array's length into r0 and calculate new length.
      __ ldr(r0, FieldMemOperand(receiver, JSArray::kLengthOffset));
      STATIC_ASSERT(kSmiTagSize == 1);
      STATIC_ASSERT(kSmiTag == 0);
      __ add(r0, r0, Operand(Smi::FromInt(argc)));

      // Get the elements' length.
@@ -1701,8 +1699,7 @@ Handle<Code> CallStubCompiler::CompileArrayPushCall(
      // Store the value.
      // We may need a register containing the address end_elements below,
      // so write back the value in end_elements.
      __ add(end_elements, elements,
             Operand(r0, LSL, kPointerSizeLog2 - kSmiTagSize));
      __ add(end_elements, elements, Operand::PointerOffsetFromSmiKey(r0));
      const int kEndElementsOffset =
          FixedArray::kHeaderSize - kHeapObjectTag - argc * kPointerSize;
      __ str(r4, MemOperand(end_elements, kEndElementsOffset, PreIndex));
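
PointerOffsetFromSmiKey folds the untag and the pointer scaling into one shift: with a one-bit tag and four-byte pointers, LSL #1 applied to the tagged key equals (key >> 1) * 4. A sketch of the identity, with the constants assumed to match the 32-bit ARM port:

// Sketch: untag-then-scale equals a single left shift on the tagged key.
#include <cassert>
#include <cstdint>

int main() {
  const int kSmiTagSize = 1;       // one-bit smi tag (assumed)
  const int kPointerSizeLog2 = 2;  // 4-byte pointers on 32-bit ARM (assumed)
  for (int32_t index = 0; index < 1000; ++index) {
    int32_t tagged_key = index << kSmiTagSize;
    int32_t scaled = tagged_key << (kPointerSizeLog2 - kSmiTagSize);
    assert(scaled == index * 4);
  }
  return 0;
}
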
@@ -1722,8 +1719,6 @@ Handle<Code> CallStubCompiler::CompileArrayPushCall(

    // Get the array's length into r0 and calculate new length.
    __ ldr(r0, FieldMemOperand(receiver, JSArray::kLengthOffset));
    STATIC_ASSERT(kSmiTagSize == 1);
    STATIC_ASSERT(kSmiTag == 0);
    __ add(r0, r0, Operand(Smi::FromInt(argc)));

    // Get the elements' length.
@@ -1797,8 +1792,7 @@ Handle<Code> CallStubCompiler::CompileArrayPushCall(
    // Store the value.
    // We may need a register containing the address end_elements below,
    // so write back the value in end_elements.
    __ add(end_elements, elements,
           Operand(r0, LSL, kPointerSizeLog2 - kSmiTagSize));
    __ add(end_elements, elements, Operand::PointerOffsetFromSmiKey(r0));
    __ str(r4, MemOperand(end_elements, kEndElementsOffset, PreIndex));

    __ RecordWrite(elements,
@@ -1835,8 +1829,7 @@ Handle<Code> CallStubCompiler::CompileArrayPushCall(

    const int kAllocationDelta = 4;
    // Load top and check if it is the end of elements.
    __ add(end_elements, elements,
           Operand(r0, LSL, kPointerSizeLog2 - kSmiTagSize));
    __ add(end_elements, elements, Operand::PointerOffsetFromSmiKey(r0));
    __ add(end_elements, end_elements, Operand(kEndElementsOffset));
    __ mov(r7, Operand(new_space_allocation_top));
    __ ldr(r3, MemOperand(r7));
@@ -1932,11 +1925,9 @@ Handle<Code> CallStubCompiler::CompileArrayPopCall(

  // Get the last element.
  __ LoadRoot(r6, Heap::kTheHoleValueRootIndex);
  STATIC_ASSERT(kSmiTagSize == 1);
  STATIC_ASSERT(kSmiTag == 0);
  // We can't address the last element in one operation. Compute the more
  // expensive shift first, and use an offset later on.
  __ add(elements, elements, Operand(r4, LSL, kPointerSizeLog2 - kSmiTagSize));
  __ add(elements, elements, Operand::PointerOffsetFromSmiKey(r4));
  __ ldr(r0, FieldMemOperand(elements, FixedArray::kHeaderSize));
  __ cmp(r0, r6);
  __ b(eq, &call_builtin);
@@ -2158,7 +2149,6 @@ Handle<Code> CallStubCompiler::CompileStringFromCharCodeCall(
  if (cell.is_null()) {
    __ ldr(r1, MemOperand(sp, 1 * kPointerSize));

    STATIC_ASSERT(kSmiTag == 0);
    __ JumpIfSmi(r1, &miss);

    CheckPrototypes(Handle<JSObject>::cast(object), r1, holder, r0, r3, r4,
@@ -2176,7 +2166,6 @@ Handle<Code> CallStubCompiler::CompileStringFromCharCodeCall(

  // Check the code is a smi.
  Label slow;
  STATIC_ASSERT(kSmiTag == 0);
  __ JumpIfNotSmi(code, &slow);

  // Convert the smi code to uint16.
@@ -2230,7 +2219,6 @@ Handle<Code> CallStubCompiler::CompileMathFloorCall(

  if (cell.is_null()) {
    __ ldr(r1, MemOperand(sp, 1 * kPointerSize));
    STATIC_ASSERT(kSmiTag == 0);
    __ JumpIfSmi(r1, &miss);
    CheckPrototypes(Handle<JSObject>::cast(object), r1, holder, r0, r3, r4,
                    name, &miss);
@@ -2245,8 +2233,7 @@ Handle<Code> CallStubCompiler::CompileMathFloorCall(
  __ ldr(r0, MemOperand(sp, 0 * kPointerSize));

  // If the argument is a smi, just return.
  STATIC_ASSERT(kSmiTag == 0);
  __ tst(r0, Operand(kSmiTagMask));
  __ SmiTst(r0);
  __ Drop(argc + 1, eq);
  __ Ret(eq);
@@ -2292,11 +2279,9 @@ Handle<Code> CallStubCompiler::CompileMathFloorCall(
  __ bind(&smi_check);
  // Check if the result can fit into a smi. If we had an overflow,
  // the result is either 0x80000000 or 0x7FFFFFFF and won't fit into a smi.
  __ add(r1, r0, Operand(0x40000000), SetCC);
  // If the result doesn't fit into a smi, branch to slow.
  __ b(&slow, mi);
  // Tag the result.
  __ mov(r0, Operand(r0, LSL, kSmiTagSize));
  __ SmiTag(r0, SetCC);
  __ b(vs, &slow);

  __ bind(&just_return);
  __ Drop(argc + 1);
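
The removed and the added overflow checks here should be equivalent: a value tags into a smi iff it lies in [-2^30, 2^30), adding 0x40000000 makes exactly those values non-negative (N clear), and SmiTag with SetCC reports the same condition through the V flag. A comparative sketch of both tests:

// Sketch: two equivalent "fits in a smi" tests used around this change.
#include <cassert>
#include <cstdint>

static bool FitsViaAddTrick(int32_t v) {
  uint32_t sum = static_cast<uint32_t>(v) + 0x40000000u;  // wraps like ARM add
  return static_cast<int32_t>(sum) >= 0;                  // N flag clear (pl)
}

static bool FitsViaSmiTag(int32_t v) {
  // Models "SmiTag r0, SetCC; bvs slow": V set iff v + v overflows int32.
  int64_t doubled = static_cast<int64_t>(v) * 2;
  return doubled == static_cast<int32_t>(doubled);
}

int main() {
  const int32_t probes[] = { 0, 1, -1, (1 << 30) - 1, 1 << 30,
                             -(1 << 30), -(1 << 30) - 1,
                             INT32_MAX, INT32_MIN };
  for (int32_t v : probes) assert(FitsViaAddTrick(v) == FitsViaSmiTag(v));
  return 0;
}
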
@@ -2341,7 +2326,6 @@ Handle<Code> CallStubCompiler::CompileMathAbsCall(
  GenerateNameCheck(name, &miss);
  if (cell.is_null()) {
    __ ldr(r1, MemOperand(sp, 1 * kPointerSize));
    STATIC_ASSERT(kSmiTag == 0);
    __ JumpIfSmi(r1, &miss);
    CheckPrototypes(Handle<JSObject>::cast(object), r1, holder, r0, r3, r4,
                    name, &miss);
@@ -2357,7 +2341,6 @@ Handle<Code> CallStubCompiler::CompileMathAbsCall(

  // Check if the argument is a smi.
  Label not_smi;
  STATIC_ASSERT(kSmiTag == 0);
  __ JumpIfNotSmi(r0, &not_smi);

  // Do bitwise not or do nothing depending on the sign of the
@@ -3237,8 +3220,7 @@ void KeyedLoadStubCompiler::GenerateLoadDictionaryElement(
  Register key = r0;
  Register receiver = r1;

  __ JumpIfNotSmi(key, &miss_force_generic);
  __ mov(r2, Operand(key, ASR, kSmiTagSize));
  __ UntagAndJumpIfNotSmi(r2, key, &miss_force_generic);
  __ ldr(r4, FieldMemOperand(receiver, JSObject::kElementsOffset));
  __ LoadFromNumberDictionary(&slow, r4, key, r0, r2, r3, r5);
  __ Ret();
@@ -3270,7 +3252,6 @@ void KeyedLoadStubCompiler::GenerateLoadDictionaryElement(
static void GenerateSmiKeyCheck(MacroAssembler* masm,
                                Register key,
                                Register scratch0,
                                Register scratch1,
                                DwVfpRegister double_scratch0,
                                DwVfpRegister double_scratch1,
                                Label* fail) {
@@ -3288,8 +3269,7 @@ static void GenerateSmiKeyCheck(MacroAssembler* masm,
    __ vldr(double_scratch0, ip, HeapNumber::kValueOffset);
    __ TryDoubleToInt32Exact(scratch0, double_scratch0, double_scratch1);
    __ b(ne, fail);
    __ TrySmiTag(scratch0, fail, scratch1);
    __ mov(key, scratch0);
    __ TrySmiTag(key, scratch0, fail);
    __ bind(&key_ok);
  }
@@ -3315,7 +3295,7 @@ void KeyedStoreStubCompiler::GenerateStoreExternalArray(
  // have been verified by the caller to not be a smi.

  // Check that the key is a smi or a heap number convertible to a smi.
  GenerateSmiKeyCheck(masm, key, r4, r5, d1, d2, &miss_force_generic);
  GenerateSmiKeyCheck(masm, key, r4, d1, d2, &miss_force_generic);

  __ ldr(r3, FieldMemOperand(receiver, JSObject::kElementsOffset));
@@ -3330,11 +3310,10 @@ void KeyedStoreStubCompiler::GenerateStoreExternalArray(
  // r3: external array.
  if (elements_kind == EXTERNAL_PIXEL_ELEMENTS) {
    // Double to pixel conversion is only implemented in the runtime for now.
    __ JumpIfNotSmi(value, &slow);
    __ UntagAndJumpIfNotSmi(r5, value, &slow);
  } else {
    __ JumpIfNotSmi(value, &check_heap_number);
    __ UntagAndJumpIfNotSmi(r5, value, &check_heap_number);
  }
  __ SmiUntag(r5, value);
  __ ldr(r3, FieldMemOperand(r3, ExternalArray::kExternalPointerOffset));

  // r3: base pointer of external storage.
@@ -3505,7 +3484,7 @@ void KeyedStoreStubCompiler::GenerateStoreFastElement(
  // have been verified by the caller to not be a smi.

  // Check that the key is a smi or a heap number convertible to a smi.
  GenerateSmiKeyCheck(masm, key_reg, r4, r5, d1, d2, &miss_force_generic);
  GenerateSmiKeyCheck(masm, key_reg, r4, d1, d2, &miss_force_generic);

  if (IsFastSmiElementsKind(elements_kind)) {
    __ JumpIfNotSmi(value_reg, &transition_elements_kind);
@@ -3539,20 +3518,14 @@ void KeyedStoreStubCompiler::GenerateStoreFastElement(
    __ add(scratch,
           elements_reg,
           Operand(FixedArray::kHeaderSize - kHeapObjectTag));
    STATIC_ASSERT(kSmiTag == 0 && kSmiTagSize < kPointerSizeLog2);
    __ add(scratch,
           scratch,
           Operand(key_reg, LSL, kPointerSizeLog2 - kSmiTagSize));
    __ add(scratch, scratch, Operand::PointerOffsetFromSmiKey(key_reg));
    __ str(value_reg, MemOperand(scratch));
  } else {
    ASSERT(IsFastObjectElementsKind(elements_kind));
    __ add(scratch,
           elements_reg,
           Operand(FixedArray::kHeaderSize - kHeapObjectTag));
    STATIC_ASSERT(kSmiTag == 0 && kSmiTagSize < kPointerSizeLog2);
    __ add(scratch,
           scratch,
           Operand(key_reg, LSL, kPointerSizeLog2 - kSmiTagSize));
    __ add(scratch, scratch, Operand::PointerOffsetFromSmiKey(key_reg));
    __ str(value_reg, MemOperand(scratch));
    __ mov(receiver_reg, value_reg);
    __ RecordWrite(elements_reg,  // Object.
@@ -3666,7 +3639,7 @@ void KeyedStoreStubCompiler::GenerateStoreFastDoubleElement(
  // have been verified by the caller to not be a smi.

  // Check that the key is a smi or a heap number convertible to a smi.
  GenerateSmiKeyCheck(masm, key_reg, r4, r5, d1, d2, &miss_force_generic);
  GenerateSmiKeyCheck(masm, key_reg, r4, d1, d2, &miss_force_generic);

  __ ldr(elements_reg,
         FieldMemOperand(receiver_reg, JSObject::kElementsOffset));
@@ -232,6 +232,7 @@ TEST(4) {
    double g;
    double h;
    int i;
    double j;
    double m;
    double n;
    float x;
@@ -294,6 +295,12 @@ TEST(4) {
  __ vcvt_f64_s32(d4, s31);
  __ vstr(d4, r4, OFFSET_OF(T, f));

  // Convert from fixed point to floating point.
  __ mov(lr, Operand(1234));
  __ vmov(s8, lr);
  __ vcvt_f64_s32(d4, 1);
  __ vstr(d4, r4, OFFSET_OF(T, j));

  // Test vabs.
  __ vldr(d1, r4, OFFSET_OF(T, g));
  __ vabs(d0, d1);
@@ -332,6 +339,7 @@ TEST(4) {
  t.g = -2718.2818;
  t.h = 31415926.5;
  t.i = 0;
  t.j = 0;
  t.m = -2718.2818;
  t.n = 123.456;
  t.x = 4.5;
@@ -345,6 +353,7 @@ TEST(4) {
  CHECK_EQ(2, t.i);
  CHECK_EQ(2718.2818, t.g);
  CHECK_EQ(31415926.5, t.h);
  CHECK_EQ(617.0, t.j);
  CHECK_EQ(42.0, t.f);
  CHECK_EQ(1.0, t.e);
  CHECK_EQ(1.000000059604644775390625, t.d);
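
The expected value of t.j follows from the fixed-point semantics the test exercises: the raw source value is 1234 and the conversion uses one fraction bit, so the result should be 1234 / 2^1 = 617.0. A trivial sanity check of that arithmetic:

// Sketch: expected result of vcvt.f64.s32 with #1 on the raw value 1234.
#include <cassert>

int main() {
  assert(1234 / 2.0 == 617.0);
  return 0;
}
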
@@ -578,6 +578,8 @@ TEST(Vfp) {
            "eeb80be0 vcvt.f64.s32 d0, s1");
    COMPARE(vcvt_f32_s32(s0, s2),
            "eeb80ac1 vcvt.f32.s32 s0, s2");
    COMPARE(vcvt_f64_s32(d0, 1),
            "eeba0bef vcvt.f64.s32 d0, d0, #1");

    if (CpuFeatures::IsSupported(VFP32DREGS)) {
      COMPARE(vmov(d3, d27),
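
The expected machine word can be cross-checked against the bitfields the assembler emits for this instruction: for fbits = 1, 32 - fbits = 31, so i = 1 and imm4 = 0xf, which land in bit 5 and bits 3..0 of 0xeeba0bef. A sketch reproducing the full word (condition al, destination d0):

// Sketch: recomputes the expected encoding of vcvt.f64.s32 d0, d0, #1
// from the bitfields described in the assembler change (cond = al, vd = 0).
#include <cassert>
#include <cstdint>

int main() {
  const int fraction_bits = 1;
  const uint32_t cond_al = 0xEu << 28;
  uint32_t i = ((32 - fraction_bits) >> 4) & 1;
  uint32_t imm4 = (32 - fraction_bits) & 0xf;
  uint32_t instr = cond_al | (0xEu << 24) | (1u << 23) | (0x3u << 20) |
                   (1u << 19) | (0x2u << 16) | (0x5u << 9) | (1u << 8) |
                   (1u << 7) | (1u << 6) | (i << 5) | imm4;
  assert(instr == 0xeeba0bef);
  return 0;
}
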