ARM: Handle bitwise operations with literal Smi for 32-bit integers without calling the GenericBinaryOpStub. Refactored and updated the routine to convert a signed int to a double. This is a commit of http://codereview.chromium.org/3247008 for Rodolph Perfetta.

git-svn-id: http://v8.googlecode.com/svn/branches/bleeding_edge@5401 ce2b1a6d-e550-0410-aec6-3dcde31c8c00
erik.corry@gmail.com 2010-09-02 08:30:52 +00:00
parent 5628d3c482
commit 76e3e2afbb
4 changed files with 358 additions and 143 deletions


@@ -1463,95 +1463,6 @@ void GenericBinaryOpStub::HandleBinaryOpSlowCases(
}
// Tries to get a signed int32 out of a double precision floating point heap
// number. Rounds towards 0. Fastest for doubles that are in the ranges
// -0x7fffffff to -0x40000000 or 0x40000000 to 0x7fffffff. This corresponds
// almost to the range of signed int32 values that are not Smis. Jumps to the
// label 'slow' if the double isn't in the range -0x80000000.0 to 0x80000000.0
// (excluding the endpoints).
static void GetInt32(MacroAssembler* masm,
Register source,
Register dest,
Register scratch,
Register scratch2,
Label* slow) {
Label right_exponent, done;
// Get exponent word.
__ ldr(scratch, FieldMemOperand(source, HeapNumber::kExponentOffset));
// Get exponent alone in scratch2.
__ Ubfx(scratch2,
scratch,
HeapNumber::kExponentShift,
HeapNumber::kExponentBits);
// Load dest with zero. We use this either for the final shift or
// for the answer.
__ mov(dest, Operand(0));
// Check whether the exponent matches a 32 bit signed int that is not a Smi.
// A non-Smi integer is 1.xxx * 2^30 so the exponent is 30 (biased). This is
// the exponent that we are fastest at and also the highest exponent we can
// handle here.
const uint32_t non_smi_exponent = HeapNumber::kExponentBias + 30;
// The non_smi_exponent, 0x41d, is too big for ARM's immediate field so we
// split it up to avoid a constant pool entry. You can't do that in general
// for cmp because of the overflow flag, but we know the exponent is in the
// range 0-2047 so there is no overflow.
int fudge_factor = 0x400;
__ sub(scratch2, scratch2, Operand(fudge_factor));
__ cmp(scratch2, Operand(non_smi_exponent - fudge_factor));
// If we have a match of the int32-but-not-Smi exponent then skip some logic.
__ b(eq, &right_exponent);
// If the exponent is higher than that then go to slow case. This catches
// numbers that don't fit in a signed int32, infinities and NaNs.
__ b(gt, slow);
// We know the exponent is smaller than 30 (biased). If it is less than
// 0 (biased) then the number is smaller in magnitude than 1.0 * 2^0, ie
// it rounds to zero.
const uint32_t zero_exponent = HeapNumber::kExponentBias + 0;
__ sub(scratch2, scratch2, Operand(zero_exponent - fudge_factor), SetCC);
// Dest already has a Smi zero.
__ b(lt, &done);
if (!CpuFeatures::IsSupported(VFP3)) {
// We have an exponent between 0 and 30 in scratch2. Subtract from 30 to
// get how much to shift down.
__ rsb(dest, scratch2, Operand(30));
}
__ bind(&right_exponent);
if (CpuFeatures::IsSupported(VFP3)) {
CpuFeatures::Scope scope(VFP3);
// ARMv7 VFP3 instructions implementing double precision to integer
// conversion using round to zero.
__ ldr(scratch2, FieldMemOperand(source, HeapNumber::kMantissaOffset));
__ vmov(d7, scratch2, scratch);
__ vcvt_s32_f64(s15, d7);
__ vmov(dest, s15);
} else {
// Get the top bits of the mantissa.
__ and_(scratch2, scratch, Operand(HeapNumber::kMantissaMask));
// Put back the implicit 1.
__ orr(scratch2, scratch2, Operand(1 << HeapNumber::kExponentShift));
// Shift up the mantissa bits to take up the space the exponent used to
// take. We just orred in the implicit bit so that took care of one and
// we want to leave the sign bit 0 so we subtract 2 bits from the shift
// distance.
const int shift_distance = HeapNumber::kNonMantissaBitsInTopWord - 2;
__ mov(scratch2, Operand(scratch2, LSL, shift_distance));
// Put sign in zero flag.
__ tst(scratch, Operand(HeapNumber::kSignMask));
// Get the second half of the double. For some exponents we don't
// actually need this because the bits get shifted out again, but
// it's probably slower to test than just to do it.
__ ldr(scratch, FieldMemOperand(source, HeapNumber::kMantissaOffset));
// Shift down 22 bits to get the last 10 bits.
__ orr(scratch, scratch2, Operand(scratch, LSR, 32 - shift_distance));
// Move down according to the exponent.
__ mov(dest, Operand(scratch, LSR, dest));
// Fix sign if sign bit was set.
__ rsb(dest, dest, Operand(0), LeaveCC, ne);
}
__ bind(&done);
}
// For bitwise ops where the inputs are not both Smis we here try to determine
// whether both inputs are either Smis or at least heap numbers that can be
// represented by a 32 bit signed value. We truncate towards zero as required
@@ -1574,7 +1485,7 @@ void GenericBinaryOpStub::HandleNonSmiBitwiseOp(MacroAssembler* masm,
__ ldr(r4, FieldMemOperand(lhs, HeapNumber::kMapOffset));
__ cmp(r4, heap_number_map);
__ b(ne, &slow);
GetInt32(masm, lhs, r3, r5, r4, &slow);
__ ConvertToInt32(lhs, r3, r5, r4, &slow);
__ jmp(&done_checking_lhs);
__ bind(&lhs_is_smi);
__ mov(r3, Operand(lhs, ASR, 1));
@@ -1585,7 +1496,7 @@ void GenericBinaryOpStub::HandleNonSmiBitwiseOp(MacroAssembler* masm,
__ ldr(r4, FieldMemOperand(rhs, HeapNumber::kMapOffset));
__ cmp(r4, heap_number_map);
__ b(ne, &slow);
GetInt32(masm, rhs, r2, r5, r4, &slow);
__ ConvertToInt32(rhs, r2, r5, r4, &slow);
__ jmp(&done_checking_rhs);
__ bind(&rhs_is_smi);
__ mov(r2, Operand(rhs, ASR, 1));
@@ -2440,7 +2351,7 @@ void GenericUnaryOpStub::Generate(MacroAssembler* masm) {
__ b(ne, &slow);
// Convert the heap number in r0 to an untagged integer in r1.
GetInt32(masm, r0, r1, r2, r3, &slow);
__ ConvertToInt32(r0, r1, r2, r3, &slow);
// Do the bitwise operation (move negated) and check if the result
// fits in a smi.


@@ -917,16 +917,55 @@ class DeferredInlineSmiOperation: public DeferredCode {
}
virtual void Generate();
// This stub makes explicit calls to SaveRegisters(), RestoreRegisters() and
// Exit(). Currently on ARM SaveRegisters() and RestoreRegisters() are empty
// methods, so it is the responsibility of the deferred code to save and
// restore registers.
virtual bool AutoSaveAndRestore() { return false; }
void JumpToNonSmiInput(Condition cond);
void JumpToAnswerOutOfRange(Condition cond);
private:
void GenerateNonSmiInput();
void GenerateAnswerOutOfRange();
void WriteNonSmiAnswer(Register answer,
Register heap_number,
Register scratch);
Token::Value op_;
int value_;
bool reversed_;
OverwriteMode overwrite_mode_;
Register tos_register_;
Label non_smi_input_;
Label answer_out_of_range_;
};
// For bit operations we try harder and handle the case where the input is not
// a Smi but a 32-bit integer without calling the generic stub.
void DeferredInlineSmiOperation::JumpToNonSmiInput(Condition cond) {
ASSERT(Token::IsBitOp(op_));
__ b(cond, &non_smi_input_);
}
// For bit operations the result is always 32 bits, so we handle the case where
// the result does not fit in a Smi without calling the generic stub.
void DeferredInlineSmiOperation::JumpToAnswerOutOfRange(Condition cond) {
ASSERT(Token::IsBitOp(op_));
if ((op_ == Token::SHR) && !CpuFeatures::IsSupported(VFP3)) {
// >>> requires an unsigned-to-double conversion and the non-VFP code
// does not support this conversion.
__ b(cond, entry_label());
} else {
__ b(cond, &answer_out_of_range_);
}
}
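
As a reminder of the Smi representation these jump targets depend on, here is a
minimal stand-alone C++ sketch (illustrative only, not part of this commit): a
Smi carries a 31-bit signed value shifted left by kSmiTagSize with a zero tag
bit, so only values in [-2^30, 2^30 - 1] can be tagged at all; everything else
has to take the non-Smi-input or answer-out-of-range path.

#include <cstdint>
#include <cstdio>

const int kSmiTagSize = 1;  // same name as in the V8 sources above

// Tag a 32-bit value as a Smi; only valid for values in [-2^30, 2^30 - 1].
int32_t SmiTag(int32_t value) { return value * 2; }  // equivalent to << kSmiTagSize

// Untag a Smi; the arithmetic right shift restores the sign.
int32_t SmiUntag(int32_t smi) { return smi >> kSmiTagSize; }

int main() {
  int32_t smi = SmiTag(-12345);
  printf("tagged: %d, untagged: %d\n", smi, SmiUntag(smi));  // tagged: -24690, untagged: -12345
  return 0;
}
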
// On entry the non-constant side of the binary operation is in tos_register_
// and the constant smi side is nowhere. The tos_register_ is not used by the
@@ -1005,6 +1044,172 @@ void DeferredInlineSmiOperation::Generate() {
// came into this function with, so we can merge back to that frame
// without trashing it.
copied_frame.MergeTo(frame_state()->frame());
Exit();
if (non_smi_input_.is_linked()) {
GenerateNonSmiInput();
}
if (answer_out_of_range_.is_linked()) {
GenerateAnswerOutOfRange();
}
}
// Convert and write the integer answer into heap_number.
void DeferredInlineSmiOperation::WriteNonSmiAnswer(Register answer,
Register heap_number,
Register scratch) {
if (CpuFeatures::IsSupported(VFP3)) {
CpuFeatures::Scope scope(VFP3);
__ vmov(s0, answer);
if (op_ == Token::SHR) {
__ vcvt_f64_u32(d0, s0);
} else {
__ vcvt_f64_s32(d0, s0);
}
__ sub(scratch, heap_number, Operand(kHeapObjectTag));
__ vstr(d0, scratch, HeapNumber::kValueOffset);
} else {
WriteInt32ToHeapNumberStub stub(answer, heap_number, scratch);
__ CallStub(&stub);
}
}
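
WriteNonSmiAnswer picks an unsigned conversion (vcvt_f64_u32) for >>> because
the shift result must be read as an unsigned 32-bit number; reading the same
bits as signed would yield the wrong double, which is also why the non-VFP path
(WriteInt32ToHeapNumberStub handles signed values only) has to fall back to the
generic stub for SHR. A small illustrative C++ sketch, not the emitted code:

#include <cstdint>
#include <cstdio>

int main() {
  uint32_t bits = 0xFFFFFFFDu;  // the 32-bit result of the JS expression -3 >>> 0
  double as_unsigned = static_cast<double>(bits);                      // 4294967293.0, correct
  double as_signed = static_cast<double>(static_cast<int32_t>(bits));  // -3.0, wrong for >>>
  printf("unsigned: %.1f  signed: %.1f\n", as_unsigned, as_signed);
  return 0;
}
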
void DeferredInlineSmiOperation::GenerateNonSmiInput() {
// We know the left hand side is not a Smi and the right hand side is an
// immediate value (value_) which can be represented as a Smi. We only
// handle bit operations.
ASSERT(Token::IsBitOp(op_));
if (FLAG_debug_code) {
__ Abort("Should not fall through!");
}
__ bind(&non_smi_input_);
if (FLAG_debug_code) {
__ AbortIfSmi(tos_register_);
}
// This routine uses the registers from r2 to r6. At the moment they are
// not used by the register allocator, but when they are it should use
// SpillAll and MergeTo like DeferredInlineSmiOperation::Generate() above.
Register heap_number_map = r7;
__ LoadRoot(heap_number_map, Heap::kHeapNumberMapRootIndex);
__ ldr(r3, FieldMemOperand(tos_register_, HeapNumber::kMapOffset));
__ cmp(r3, heap_number_map);
// Not a number, fall back to the GenericBinaryOpStub.
__ b(ne, entry_label());
Register int32 = r2;
// Not a 32-bit signed int, fall back to the GenericBinaryOpStub.
__ ConvertToInt32(tos_register_, int32, r4, r5, entry_label());
// tos_register_ (r0 or r1): Original heap number.
// int32: signed 32-bit int.
Label result_not_a_smi;
int shift_value = value_ & 0x1f;
switch (op_) {
case Token::BIT_OR: __ orr(int32, int32, Operand(value_)); break;
case Token::BIT_XOR: __ eor(int32, int32, Operand(value_)); break;
case Token::BIT_AND: __ and_(int32, int32, Operand(value_)); break;
case Token::SAR:
ASSERT(!reversed_);
if (shift_value != 0) {
__ mov(int32, Operand(int32, ASR, shift_value));
}
break;
case Token::SHR:
ASSERT(!reversed_);
if (shift_value != 0) {
__ mov(int32, Operand(int32, LSR, shift_value), SetCC);
} else {
// SHR is special because it is required to produce a positive answer.
__ cmp(int32, Operand(0));
}
if (CpuFeatures::IsSupported(VFP3)) {
__ b(mi, &result_not_a_smi);
} else {
// Non-VFP code cannot convert from unsigned to double, so fall back
// to GenericBinaryOpStub.
__ b(mi, entry_label());
}
break;
case Token::SHL:
ASSERT(!reversed_);
if (shift_value != 0) {
__ mov(int32, Operand(int32, LSL, shift_value));
}
break;
default: UNREACHABLE();
}
// Check that the *signed* result fits in a smi. Not necessary for AND, SAR
// if the shift is more than 0 or SHR if the shift is more than 1.
if (!( (op_ == Token::AND) ||
((op_ == Token::SAR) && (shift_value > 0)) ||
((op_ == Token::SHR) && (shift_value > 1)))) {
__ add(r3, int32, Operand(0x40000000), SetCC);
__ b(mi, &result_not_a_smi);
}
__ mov(tos_register_, Operand(int32, LSL, kSmiTagSize));
Exit();
if (result_not_a_smi.is_linked()) {
__ bind(&result_not_a_smi);
if (overwrite_mode_ != OVERWRITE_LEFT) {
ASSERT((overwrite_mode_ == NO_OVERWRITE) ||
(overwrite_mode_ == OVERWRITE_RIGHT));
// If the allocation fails, fall back to the GenericBinaryOpStub.
__ AllocateHeapNumber(r4, r5, r6, heap_number_map, entry_label());
// Nothing can go wrong now, so overwrite tos.
__ mov(tos_register_, Operand(r4));
}
// int32: answer as a signed 32-bit integer.
// tos_register_: Heap number to write the answer into.
WriteNonSmiAnswer(int32, tos_register_, r3);
Exit();
}
}
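
The "add r3, int32, #0x40000000" followed by a branch on minus is a compact
range check: the sign bit of the sum is set exactly when the value lies outside
the Smi range. A stand-alone sketch of the same test (assuming 32-bit two's
complement and the Smi range [-2^30, 2^30 - 1]; not V8 code):

#include <cstdint>
#include <cstdio>

bool FitsInSmi(int32_t value) {
  // Unsigned add avoids signed-overflow issues; the top bit of the sum mirrors
  // the 'mi' condition tested after "add r3, int32, #0x40000000, SetCC".
  uint32_t sum = static_cast<uint32_t>(value) + 0x40000000u;
  return (sum & 0x80000000u) == 0;
}

int main() {
  printf("%d\n", FitsInSmi(0x3FFFFFFF));   // 1: largest Smi
  printf("%d\n", FitsInSmi(0x40000000));   // 0: one too big
  printf("%d\n", FitsInSmi(-0x40000000));  // 1: smallest Smi
  printf("%d\n", FitsInSmi(-0x40000001));  // 0: one too small
  return 0;
}
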
void DeferredInlineSmiOperation::GenerateAnswerOutOfRange() {
// The inputs to the bitwise operation were Smis but the result cannot fit
// into a Smi, so we store it in a heap number. tos_register_ holds the
// result to be converted.
ASSERT(Token::IsBitOp(op_));
ASSERT(!reversed_);
if (FLAG_debug_code) {
__ Abort("Should not fall through!");
}
__ bind(&answer_out_of_range_);
if (((value_ & 0x1f) == 0) && (op_ == Token::SHR)) {
// >>> 0 is a special case where the result is already tagged but wrong
// because the Smi is negative. We untag it.
__ mov(tos_register_, Operand(tos_register_, ASR, kSmiTagSize));
}
// This routine uses the registers from r2 to r6. At the moment they are
// not used by the register allocator, but when they are it should use
// SpillAll and MergeTo like DeferredInlineSmiOperation::Generate() above.
// Allocate the result heap number.
Register heap_number_map = r7;
Register heap_number = r4;
__ LoadRoot(heap_number_map, Heap::kHeapNumberMapRootIndex);
// If the allocation fails, fall back to the GenericBinaryOpStub.
__ AllocateHeapNumber(heap_number, r5, r6, heap_number_map, entry_label());
WriteNonSmiAnswer(tos_register_, heap_number, r3);
__ mov(tos_register_, Operand(heap_number));
Exit();
}
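
For ">>> 0" the value reaching this point is still the tagged, negative Smi, so
it is untagged first and then written out through the unsigned conversion in
WriteNonSmiAnswer. A worked example of that corner case (illustrative C++, not
the generated code):

#include <cstdint>
#include <cstdio>

int main() {
  int32_t tagged = -14;                                   // the Smi encoding of -7 (value shifted left by one)
  int32_t untagged = tagged >> 1;                         // the ASR #kSmiTagSize emitted above: -7
  uint32_t js_result = static_cast<uint32_t>(untagged);   // JS: (-7 >>> 0) === 4294967289
  printf("%u\n", js_result);
  return 0;
}
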
@@ -1191,10 +1396,10 @@ void CodeGenerator::SmiOperation(Token::Value op,
}
frame_->EmitPush(tos, TypeInfo::Smi());
} else {
DeferredCode* deferred =
DeferredInlineSmiOperation* deferred =
new DeferredInlineSmiOperation(op, int_value, reversed, mode, tos);
__ tst(tos, Operand(kSmiTagMask));
deferred->Branch(ne);
deferred->JumpToNonSmiInput(ne);
switch (op) {
case Token::BIT_OR: __ orr(tos, tos, Operand(value)); break;
case Token::BIT_XOR: __ eor(tos, tos, Operand(value)); break;
@@ -1240,17 +1445,17 @@ void CodeGenerator::SmiOperation(Token::Value op,
case Token::SHR:
case Token::SAR: {
ASSERT(!reversed);
int shift_amount = int_value & 0x1f;
int shift_value = int_value & 0x1f;
TypeInfo result = TypeInfo::Number();
if (op == Token::SHR) {
if (shift_amount > 1) {
if (shift_value > 1) {
result = TypeInfo::Smi();
} else if (shift_amount > 0) {
} else if (shift_value > 0) {
result = TypeInfo::Integer32();
}
} else if (op == Token::SAR) {
if (shift_amount > 0) {
if (shift_value > 0) {
result = TypeInfo::Smi();
} else {
result = TypeInfo::Integer32();
@@ -1260,77 +1465,67 @@ void CodeGenerator::SmiOperation(Token::Value op,
result = TypeInfo::Integer32();
}
Register scratch = VirtualFrame::scratch0();
Register scratch2 = VirtualFrame::scratch1();
int shift_value = int_value & 0x1f; // least significant 5 bits
DeferredCode* deferred =
DeferredInlineSmiOperation* deferred =
new DeferredInlineSmiOperation(op, shift_value, false, mode, tos);
uint32_t problematic_mask = kSmiTagMask;
// For unsigned shift by zero all negative smis are problematic.
bool skip_smi_test = both_sides_are_smi;
if (shift_value == 0 && op == Token::SHR) {
problematic_mask |= 0x80000000;
skip_smi_test = false;
}
if (!skip_smi_test) {
__ tst(tos, Operand(problematic_mask));
deferred->Branch(ne); // Go slow for problematic input.
if (!both_sides_are_smi) {
__ tst(tos, Operand(kSmiTagMask));
deferred->JumpToNonSmiInput(ne);
}
switch (op) {
case Token::SHL: {
if (shift_value != 0) {
Register scratch = VirtualFrame::scratch0();
int adjusted_shift = shift_value - kSmiTagSize;
ASSERT(adjusted_shift >= 0);
if (adjusted_shift != 0) {
__ mov(scratch, Operand(tos, LSL, adjusted_shift));
// Check that the *signed* result fits in a smi.
__ add(scratch2, scratch, Operand(0x40000000), SetCC);
deferred->Branch(mi);
__ mov(tos, Operand(scratch, LSL, kSmiTagSize));
} else {
// Check that the *signed* result fits in a smi.
__ add(scratch2, tos, Operand(0x40000000), SetCC);
deferred->Branch(mi);
__ mov(tos, Operand(tos, LSL, kSmiTagSize));
__ mov(tos, Operand(tos, LSL, adjusted_shift));
}
// Check that the *signed* result fits in a smi.
__ add(scratch, tos, Operand(0x40000000), SetCC);
deferred->JumpToAnswerOutOfRange(mi);
__ mov(tos, Operand(tos, LSL, kSmiTagSize));
}
break;
}
case Token::SHR: {
if (shift_value != 0) {
Register scratch = VirtualFrame::scratch0();
__ mov(scratch, Operand(tos, ASR, kSmiTagSize)); // Remove tag.
// LSR by immediate 0 means shifting 32 bits.
__ mov(scratch, Operand(scratch, LSR, shift_value));
__ mov(tos, Operand(scratch, LSR, shift_value));
if (shift_value == 1) {
// check that the *unsigned* result fits in a smi
// neither of the two high-order bits can be set:
// Check that the *unsigned* result fits in a smi.
// Neither of the two high-order bits can be set:
// - 0x80000000: high bit would be lost when smi tagging
// - 0x40000000: this number would convert to negative when
// smi tagging these two cases can only happen with shifts
// by 0 or 1 when handed a valid smi
__ tst(scratch, Operand(0xc0000000));
deferred->Branch(ne);
} else {
ASSERT(shift_value >= 2);
result = TypeInfo::Smi(); // SHR by at least 2 gives a Smi.
// - 0x40000000: this number would convert to negative when Smi
// tagging.
// These two cases can only happen with shifts by 0 or 1 when
// handed a valid smi.
__ tst(tos, Operand(0xc0000000));
if (!CpuFeatures::IsSupported(VFP3)) {
// If the unsigned result does not fit in a Smi, we require an
// unsigned-to-double conversion. Without VFP, V8 has to fall
// back to the runtime. The deferred code will expect tos
// to hold the original Smi to be shifted.
__ mov(tos, Operand(scratch, LSL, kSmiTagSize), LeaveCC, ne);
}
__ mov(tos, Operand(scratch, LSL, kSmiTagSize));
deferred->JumpToAnswerOutOfRange(ne);
}
__ mov(tos, Operand(tos, LSL, kSmiTagSize));
} else {
__ cmp(tos, Operand(0));
deferred->JumpToAnswerOutOfRange(mi);
}
break;
}
case Token::SAR: {
// In the ARM instruction set, ASR by immediate 0 means shifting 32
// bits.
if (shift_value != 0) {
// Do the shift and the tag removal in one operation. If the shift
// is 31 bits (the highest possible value) then we emit the
// instruction as a shift by 0 which means shift arithmetically by
// 32.
// instruction as a shift by 0 which in the ARM ISA means shift
// arithmetically by 32.
__ mov(tos, Operand(tos, ASR, (kSmiTagSize + shift_value) & 0x1f));
// Put tag back.
__ mov(tos, Operand(tos, LSL, kSmiTagSize));
// SAR by at least 1 gives a Smi.
result = TypeInfo::Smi();
}
break;
}
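
The "tst tos, #0xc0000000" in the SHR-by-one path encodes the rule spelled out
in the comment above: an unsigned result can only be Smi-tagged when both
high-order bits are clear, i.e. when it is below 2^30. A minimal sketch of that
predicate (hypothetical helper name, not V8 code):

#include <cstdint>
#include <cstdio>

// True when an unsigned 32-bit shift result can still be tagged as a Smi.
bool UnsignedFitsInSmi(uint32_t value) {
  return (value & 0xC0000000u) == 0;  // mirrors "tst tos, #0xc0000000"
}

int main() {
  uint32_t from_negative_smi = 0xFFFFFFFFu >> 1;  // -1 >>> 1 == 0x7FFFFFFF: too big
  uint32_t small = 0x3FFFFFFFu;                   // largest value that still tags
  printf("%d %d\n", UnsignedFitsInSmi(from_negative_smi), UnsignedFitsInSmi(small));  // 0 1
  return 0;
}
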


@@ -25,6 +25,8 @@
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#include <limits.h> // For LONG_MIN, LONG_MAX.
#include "v8.h"
#if defined(V8_TARGET_ARCH_ARM)
@@ -1333,6 +1335,104 @@ void MacroAssembler::SmiToDoubleVFPRegister(Register smi,
}
// Tries to get a signed int32 out of a double precision floating point heap
// number. Rounds towards 0. Branches to 'not_int32' if the double is out of
// the 32-bit signed integer range.
void MacroAssembler::ConvertToInt32(Register source,
Register dest,
Register scratch,
Register scratch2,
Label *not_int32) {
if (CpuFeatures::IsSupported(VFP3)) {
CpuFeatures::Scope scope(VFP3);
sub(scratch, source, Operand(kHeapObjectTag));
vldr(d0, scratch, HeapNumber::kValueOffset);
vcvt_s32_f64(s0, d0);
vmov(dest, s0);
// The signed vcvt instruction will saturate to the minimum (0x80000000) or
// maximum (0x7fffffff) signed 32-bit integer when the double is out of
// range. When subtracting one, the minimum signed integer becomes the
// maximum signed integer.
sub(scratch, dest, Operand(1));
cmp(scratch, Operand(LONG_MAX - 1));
// If equal then dest was LONG_MAX, if greater dest was LONG_MIN.
b(ge, not_int32);
} else {
// This code is faster for doubles that are in the ranges -0x7fffffff to
// -0x40000000 or 0x40000000 to 0x7fffffff. This corresponds almost to
// the range of signed int32 values that are not Smis. Jumps to the label
// 'not_int32' if the double isn't in the range -0x80000000.0 to
// 0x80000000.0 (excluding the endpoints).
Label right_exponent, done;
// Get exponent word.
ldr(scratch, FieldMemOperand(source, HeapNumber::kExponentOffset));
// Get exponent alone in scratch2.
Ubfx(scratch2,
scratch,
HeapNumber::kExponentShift,
HeapNumber::kExponentBits);
// Load dest with zero. We use this either for the final shift or
// for the answer.
mov(dest, Operand(0));
// Check whether the exponent matches a 32 bit signed int that is not a Smi.
// A non-Smi integer is 1.xxx * 2^30 so the exponent is 30 (biased). This is
// the exponent that we are fastest at and also the highest exponent we can
// handle here.
const uint32_t non_smi_exponent = HeapNumber::kExponentBias + 30;
// The non_smi_exponent, 0x41d, is too big for ARM's immediate field so we
// split it up to avoid a constant pool entry. You can't do that in general
// for cmp because of the overflow flag, but we know the exponent is in the
// range 0-2047 so there is no overflow.
int fudge_factor = 0x400;
sub(scratch2, scratch2, Operand(fudge_factor));
cmp(scratch2, Operand(non_smi_exponent - fudge_factor));
// If we have a match of the int32-but-not-Smi exponent then skip some
// logic.
b(eq, &right_exponent);
// If the exponent is higher than that then go to slow case. This catches
// numbers that don't fit in a signed int32, infinities and NaNs.
b(gt, not_int32);
// We know the exponent is smaller than 30 (biased). If it is less than
// 0 (biased) then the number is smaller in magnitude than 1.0 * 2^0, ie
// it rounds to zero.
const uint32_t zero_exponent = HeapNumber::kExponentBias + 0;
sub(scratch2, scratch2, Operand(zero_exponent - fudge_factor), SetCC);
// Dest already has a Smi zero.
b(lt, &done);
// We have an exponent between 0 and 30 in scratch2. Subtract from 30 to
// get how much to shift down.
rsb(dest, scratch2, Operand(30));
bind(&right_exponent);
// Get the top bits of the mantissa.
and_(scratch2, scratch, Operand(HeapNumber::kMantissaMask));
// Put back the implicit 1.
orr(scratch2, scratch2, Operand(1 << HeapNumber::kExponentShift));
// Shift up the mantissa bits to take up the space the exponent used to
// take. We just orred in the implicit bit so that took care of one and
// we want to leave the sign bit 0 so we subtract 2 bits from the shift
// distance.
const int shift_distance = HeapNumber::kNonMantissaBitsInTopWord - 2;
mov(scratch2, Operand(scratch2, LSL, shift_distance));
// Put sign in zero flag.
tst(scratch, Operand(HeapNumber::kSignMask));
// Get the second half of the double. For some exponents we don't
// actually need this because the bits get shifted out again, but
// it's probably slower to test than just to do it.
ldr(scratch, FieldMemOperand(source, HeapNumber::kMantissaOffset));
// Shift down 22 bits to get the last 10 bits.
orr(scratch, scratch2, Operand(scratch, LSR, 32 - shift_distance));
// Move down according to the exponent.
mov(dest, Operand(scratch, LSR, dest));
// Fix sign if sign bit was set.
rsb(dest, dest, Operand(0), LeaveCC, ne);
bind(&done);
}
}
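
For readers less familiar with the non-VFP branch, the following stand-alone
C++ sketch performs the same truncate-toward-zero conversion with plain integer
operations on the IEEE-754 bits: unbias the exponent, reject anything that
cannot fit in a signed 32-bit integer, re-insert the implicit mantissa bit,
shift, and apply the sign. It illustrates the idea only and is not the emitted
ARM sequence.

#include <cstdint>
#include <cstdio>
#include <cstring>

// Truncate an IEEE-754 double toward zero using only integer bit operations.
// Returns false (the "not_int32" case) when the value is out of int32 range.
bool DoubleToInt32(double d, int32_t* out) {
  uint64_t bits;
  std::memcpy(&bits, &d, sizeof(bits));  // the exponent/mantissa words of the heap number
  int exponent = static_cast<int>((bits >> 52) & 0x7FF) - 1023;  // remove the bias
  if (exponent < 0) {                    // |d| < 1.0, including +/-0: truncates to 0
    *out = 0;
    return true;
  }
  if (exponent > 30) return false;       // too large, infinity or NaN
  uint64_t mantissa = (bits & ((uint64_t{1} << 52) - 1)) | (uint64_t{1} << 52);  // implicit 1
  int32_t magnitude = static_cast<int32_t>(mantissa >> (52 - exponent));
  *out = (bits >> 63) ? -magnitude : magnitude;  // fix the sign, like the final rsb above
  return true;
}

int main() {
  int32_t v = 0;
  if (DoubleToInt32(-1234.9, &v)) printf("%d\n", v);  // -1234: rounds toward zero
  printf("fits: %d\n", DoubleToInt32(3.0e9, &v));     // fits: 0, out of int32 range
  return 0;
}
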
void MacroAssembler::GetLeastBitsFromSmi(Register dst,
Register src,
int num_least_bits) {


@@ -504,6 +504,15 @@ class MacroAssembler: public Assembler {
Register scratch1,
SwVfpRegister scratch2);
// Convert the HeapNumber pointed to by source to a 32-bit signed integer in
// dest. If the HeapNumber does not fit into a 32-bit signed integer, branch
// to the not_int32 label.
void ConvertToInt32(Register source,
Register dest,
Register scratch,
Register scratch2,
Label *not_int32);
// Count leading zeros in a 32 bit word. On ARM5 and later it uses the clz
// instruction. On pre-ARM5 hardware this routine gives the wrong answer
// for 0 (31 instead of 32). Source and scratch can be the same in which case