ARM: Load the heap number map into a register and keep it there throughout the binary op stub (used for checking and creating heap numbers).
Review URL: http://codereview.chromium.org/2843010

git-svn-id: http://v8.googlecode.com/svn/branches/bleeding_edge@4897 ce2b1a6d-e550-0410-aec6-3dcde31c8c00
erik.corry@gmail.com 2010-06-17 21:51:51 +00:00
parent 078d285353
commit 694669926c
6 changed files with 83 additions and 35 deletions
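The pattern running through the hunks below: the stub loads the heap number map root once into r6, reuses that register for every heap-number type check, and passes it to AllocateHeapNumber so the allocation path no longer reloads the root itself. A condensed before/after sketch of that shape (not compilable on its own — labels and the enclosing stub function are omitted, and the register choices simply mirror the diff):

// Before: each check/allocation fetched the heap number map on its own.
__ CompareObjectType(r0, r4, r4, HEAP_NUMBER_TYPE);  // loads the map internally
__ b(ne, &slow);
__ AllocateHeapNumber(r5, r6, r7, &slow);            // old four-register form

// After: load the map root once and keep it live in r6 ...
Register heap_number_map = r6;
__ LoadRoot(heap_number_map, Heap::kHeapNumberMapRootIndex);

// ... reuse it for each heap-number type check ...
__ ldr(r4, FieldMemOperand(r0, HeapObject::kMapOffset));
__ AssertRegisterIsRoot(heap_number_map, Heap::kHeapNumberMapRootIndex);
__ cmp(r4, heap_number_map);
__ b(ne, &slow);

// ... and hand it to the new five-argument AllocateHeapNumber, which stores it
// as the map of the freshly allocated object instead of reloading the root.
__ AllocateHeapNumber(r5, r3, r7, heap_number_map, &slow);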

View File

@@ -4706,7 +4706,8 @@ void CodeGenerator::GenerateRandomHeapNumber(
Label slow_allocate_heapnumber;
Label heapnumber_allocated;
__ AllocateHeapNumber(r4, r1, r2, &slow_allocate_heapnumber);
__ LoadRoot(r6, Heap::kHeapNumberMapRootIndex);
__ AllocateHeapNumber(r4, r1, r2, r6, &slow_allocate_heapnumber);
__ jmp(&heapnumber_allocated);
__ bind(&slow_allocate_heapnumber);
@@ -7363,12 +7364,16 @@ void GenericBinaryOpStub::HandleBinaryOpSlowCases(
bool use_fp_registers = CpuFeatures::IsSupported(VFP3) && Token::MOD != op_;
ASSERT((lhs.is(r0) && rhs.is(r1)) || (lhs.is(r1) && rhs.is(r0)));
Register heap_number_map = r6;
if (ShouldGenerateSmiCode()) {
__ LoadRoot(heap_number_map, Heap::kHeapNumberMapRootIndex);
// Smi-smi case (overflow).
// Since both are Smis there is no heap number to overwrite, so allocate.
// The new heap number is in r5. r6 and r7 are scratch.
__ AllocateHeapNumber(r5, r6, r7, lhs.is(r0) ? &slow_reverse : &slow);
// The new heap number is in r5. r3 and r7 are scratch.
__ AllocateHeapNumber(
r5, r3, r7, heap_number_map, lhs.is(r0) ? &slow_reverse : &slow);
// If we have floating point hardware, inline ADD, SUB, MUL, and DIV,
// using registers d7 and d6 for the double values.
@@ -7381,14 +7386,14 @@ void GenericBinaryOpStub::HandleBinaryOpSlowCases(
__ vmov(s13, r7);
__ vcvt_f64_s32(d6, s13);
} else {
// Write Smi from rhs to r3 and r2 in double format. r6 is scratch.
// Write Smi from rhs to r3 and r2 in double format. r9 is scratch.
__ mov(r7, Operand(rhs));
ConvertToDoubleStub stub1(r3, r2, r7, r6);
ConvertToDoubleStub stub1(r3, r2, r7, r9);
__ push(lr);
__ Call(stub1.GetCode(), RelocInfo::CODE_TARGET);
// Write Smi from lhs to r1 and r0 in double format. r6 is scratch.
// Write Smi from lhs to r1 and r0 in double format. r9 is scratch.
__ mov(r7, Operand(lhs));
ConvertToDoubleStub stub2(r1, r0, r7, r6);
ConvertToDoubleStub stub2(r1, r0, r7, r9);
__ Call(stub2.GetCode(), RelocInfo::CODE_TARGET);
__ pop(lr);
}
@@ -7397,6 +7402,7 @@ void GenericBinaryOpStub::HandleBinaryOpSlowCases(
// We branch here if at least one of r0 and r1 is not a Smi.
__ bind(not_smi);
__ LoadRoot(heap_number_map, Heap::kHeapNumberMapRootIndex);
// After this point we have the left hand side in r1 and the right hand side
// in r0.
@@ -7419,18 +7425,22 @@ void GenericBinaryOpStub::HandleBinaryOpSlowCases(
default:
break;
}
// Restore heap number map register.
__ LoadRoot(heap_number_map, Heap::kHeapNumberMapRootIndex);
}
if (mode_ == NO_OVERWRITE) {
// In the case where there is no chance of an overwritable float we may as
// well do the allocation immediately while r0 and r1 are untouched.
__ AllocateHeapNumber(r5, r6, r7, &slow);
__ AllocateHeapNumber(r5, r3, r7, heap_number_map, &slow);
}
// Move r0 to a double in r2-r3.
__ tst(r0, Operand(kSmiTagMask));
__ b(eq, &r0_is_smi); // It's a Smi so don't check it's a heap number.
__ CompareObjectType(r0, r4, r4, HEAP_NUMBER_TYPE);
__ ldr(r4, FieldMemOperand(r0, HeapObject::kMapOffset));
__ AssertRegisterIsRoot(heap_number_map, Heap::kHeapNumberMapRootIndex);
__ cmp(r4, heap_number_map);
__ b(ne, &slow);
if (mode_ == OVERWRITE_RIGHT) {
__ mov(r5, Operand(r0)); // Overwrite this heap number.
@@ -7448,7 +7458,7 @@ void GenericBinaryOpStub::HandleBinaryOpSlowCases(
__ bind(&r0_is_smi);
if (mode_ == OVERWRITE_RIGHT) {
// We can't overwrite a Smi so get address of new heap number into r5.
__ AllocateHeapNumber(r5, r6, r7, &slow);
__ AllocateHeapNumber(r5, r4, r7, heap_number_map, &slow);
}
if (use_fp_registers) {
@@ -7460,7 +7470,7 @@ void GenericBinaryOpStub::HandleBinaryOpSlowCases(
} else {
// Write Smi from r0 to r3 and r2 in double format.
__ mov(r7, Operand(r0));
ConvertToDoubleStub stub3(r3, r2, r7, r6);
ConvertToDoubleStub stub3(r3, r2, r7, r4);
__ push(lr);
__ Call(stub3.GetCode(), RelocInfo::CODE_TARGET);
__ pop(lr);
@@ -7473,6 +7483,8 @@ void GenericBinaryOpStub::HandleBinaryOpSlowCases(
__ tst(r1, Operand(kSmiTagMask));
__ b(ne, &r1_is_not_smi);
GenerateTypeTransition(masm);
// Restore heap number map register.
__ LoadRoot(heap_number_map, Heap::kHeapNumberMapRootIndex);
__ jmp(&r1_is_smi);
}
@@ -7482,7 +7494,9 @@ void GenericBinaryOpStub::HandleBinaryOpSlowCases(
__ tst(r1, Operand(kSmiTagMask));
__ b(eq, &r1_is_smi); // It's a Smi so don't check it's a heap number.
__ bind(&r1_is_not_smi);
__ CompareObjectType(r1, r4, r4, HEAP_NUMBER_TYPE);
__ ldr(r4, FieldMemOperand(r1, HeapNumber::kMapOffset));
__ AssertRegisterIsRoot(heap_number_map, Heap::kHeapNumberMapRootIndex);
__ cmp(r4, heap_number_map);
__ b(ne, &slow);
if (mode_ == OVERWRITE_LEFT) {
__ mov(r5, Operand(r1)); // Overwrite this heap number.
@@ -7500,7 +7514,7 @@ void GenericBinaryOpStub::HandleBinaryOpSlowCases(
__ bind(&r1_is_smi);
if (mode_ == OVERWRITE_LEFT) {
// We can't overwrite a Smi so get address of new heap number into r5.
__ AllocateHeapNumber(r5, r6, r7, &slow);
__ AllocateHeapNumber(r5, r4, r7, heap_number_map, &slow);
}
if (use_fp_registers) {
@@ -7512,7 +7526,7 @@ void GenericBinaryOpStub::HandleBinaryOpSlowCases(
} else {
// Write Smi from r1 to r1 and r0 in double format.
__ mov(r7, Operand(r1));
ConvertToDoubleStub stub4(r1, r0, r7, r6);
ConvertToDoubleStub stub4(r1, r0, r7, r9);
__ push(lr);
__ Call(stub4.GetCode(), RelocInfo::CODE_TARGET);
__ pop(lr);
@@ -7573,13 +7587,14 @@ void GenericBinaryOpStub::HandleBinaryOpSlowCases(
}
}
if (lhs.is(r0)) {
__ b(&slow);
__ bind(&slow_reverse);
__ Swap(r0, r1, ip);
}
heap_number_map = no_reg; // Don't use this any more from here on.
// We jump to here if something goes wrong (one param is not a number of any
// sort or new-space allocation fails).
__ bind(&slow);
@@ -7745,9 +7760,13 @@ void GenericBinaryOpStub::HandleNonSmiBitwiseOp(MacroAssembler* masm,
Label rhs_is_smi, lhs_is_smi;
Label done_checking_rhs, done_checking_lhs;
Register heap_number_map = r6;
__ LoadRoot(heap_number_map, Heap::kHeapNumberMapRootIndex);
__ tst(lhs, Operand(kSmiTagMask));
__ b(eq, &lhs_is_smi); // It's a Smi so don't check it's a heap number.
__ CompareObjectType(lhs, r4, r4, HEAP_NUMBER_TYPE);
__ ldr(r4, FieldMemOperand(lhs, HeapNumber::kMapOffset));
__ cmp(r4, heap_number_map);
__ b(ne, &slow);
GetInt32(masm, lhs, r3, r5, r4, &slow);
__ jmp(&done_checking_lhs);
@@ -7757,7 +7776,8 @@ void GenericBinaryOpStub::HandleNonSmiBitwiseOp(MacroAssembler* masm,
__ tst(rhs, Operand(kSmiTagMask));
__ b(eq, &rhs_is_smi); // It's a Smi so don't check it's a heap number.
__ CompareObjectType(rhs, r4, r4, HEAP_NUMBER_TYPE);
__ ldr(r4, FieldMemOperand(rhs, HeapNumber::kMapOffset));
__ cmp(r4, heap_number_map);
__ b(ne, &slow);
GetInt32(masm, rhs, r2, r5, r4, &slow);
__ jmp(&done_checking_rhs);
@@ -7817,8 +7837,8 @@ void GenericBinaryOpStub::HandleNonSmiBitwiseOp(MacroAssembler* masm,
break;
}
case NO_OVERWRITE: {
// Get a new heap number in r5. r6 and r7 are scratch.
__ AllocateHeapNumber(r5, r6, r7, &slow);
// Get a new heap number in r5. r4 and r7 are scratch.
__ AllocateHeapNumber(r5, r4, r7, heap_number_map, &slow);
}
default: break;
}
@@ -7837,8 +7857,8 @@ void GenericBinaryOpStub::HandleNonSmiBitwiseOp(MacroAssembler* masm,
if (mode_ != NO_OVERWRITE) {
__ bind(&have_to_allocate);
// Get a new heap number in r5. r6 and r7 are scratch.
__ AllocateHeapNumber(r5, r6, r7, &slow);
// Get a new heap number in r5. r4 and r7 are scratch.
__ AllocateHeapNumber(r5, r4, r7, heap_number_map, &slow);
__ jmp(&got_a_heap_number);
}
@@ -8409,6 +8429,9 @@ void StackCheckStub::Generate(MacroAssembler* masm) {
void GenericUnaryOpStub::Generate(MacroAssembler* masm) {
Label slow, done;
Register heap_number_map = r6;
__ LoadRoot(heap_number_map, Heap::kHeapNumberMapRootIndex);
if (op_ == Token::SUB) {
// Check whether the value is a smi.
Label try_float;
@@ -8429,7 +8452,9 @@ void GenericUnaryOpStub::Generate(MacroAssembler* masm) {
__ b(&done);
__ bind(&try_float);
__ CompareObjectType(r0, r1, r1, HEAP_NUMBER_TYPE);
__ ldr(r1, FieldMemOperand(r0, HeapObject::kMapOffset));
__ AssertRegisterIsRoot(heap_number_map, Heap::kHeapNumberMapRootIndex);
__ cmp(r1, heap_number_map);
__ b(ne, &slow);
// r0 is a heap number. Get a new heap number in r1.
if (overwrite_) {
@@ -8437,7 +8462,7 @@ void GenericUnaryOpStub::Generate(MacroAssembler* masm) {
__ eor(r2, r2, Operand(HeapNumber::kSignMask)); // Flip sign.
__ str(r2, FieldMemOperand(r0, HeapNumber::kExponentOffset));
} else {
__ AllocateHeapNumber(r1, r2, r3, &slow);
__ AllocateHeapNumber(r1, r2, r3, r6, &slow);
__ ldr(r3, FieldMemOperand(r0, HeapNumber::kMantissaOffset));
__ ldr(r2, FieldMemOperand(r0, HeapNumber::kExponentOffset));
__ str(r3, FieldMemOperand(r1, HeapNumber::kMantissaOffset));
@@ -8447,7 +8472,9 @@ void GenericUnaryOpStub::Generate(MacroAssembler* masm) {
}
} else if (op_ == Token::BIT_NOT) {
// Check if the operand is a heap number.
__ CompareObjectType(r0, r1, r1, HEAP_NUMBER_TYPE);
__ ldr(r1, FieldMemOperand(r0, HeapObject::kMapOffset));
__ AssertRegisterIsRoot(heap_number_map, Heap::kHeapNumberMapRootIndex);
__ cmp(r1, heap_number_map);
__ b(ne, &slow);
// Convert the heap number in r0 to an untagged integer in r1.
@@ -8467,7 +8494,7 @@ void GenericUnaryOpStub::Generate(MacroAssembler* masm) {
// Allocate a fresh heap number, but don't overwrite r0 until
// we're sure we can do it without going through the slow case
// that needs the value in r0.
__ AllocateHeapNumber(r2, r3, r4, &slow);
__ AllocateHeapNumber(r2, r3, r4, r6, &slow);
__ mov(r0, Operand(r2));
}

View File

@@ -669,7 +669,9 @@ class GenericBinaryOpStub : public CodeStub {
}
void Generate(MacroAssembler* masm);
void HandleNonSmiBitwiseOp(MacroAssembler* masm, Register lhs, Register rhs);
void HandleNonSmiBitwiseOp(MacroAssembler* masm,
Register lhs,
Register rhs);
void HandleBinaryOpSlowCases(MacroAssembler* masm,
Label* not_smi,
Register lhs,

View File

@@ -2156,7 +2156,8 @@ void FullCodeGenerator::EmitRandomHeapNumber(ZoneList<Expression*>* args) {
Label slow_allocate_heapnumber;
Label heapnumber_allocated;
__ AllocateHeapNumber(r4, r1, r2, &slow_allocate_heapnumber);
__ LoadRoot(r6, Heap::kHeapNumberMapRootIndex);
__ AllocateHeapNumber(r4, r1, r2, r6, &slow_allocate_heapnumber);
__ jmp(&heapnumber_allocated);
__ bind(&slow_allocate_heapnumber);

View File

@@ -1339,7 +1339,8 @@ void KeyedLoadIC::GenerateExternalArray(MacroAssembler* masm,
__ bind(&box_int);
// Allocate a HeapNumber for the result and perform int-to-double
// conversion. Use r0 for result as key is not needed any more.
__ AllocateHeapNumber(r0, r3, r4, &slow);
__ LoadRoot(r6, Heap::kHeapNumberMapRootIndex);
__ AllocateHeapNumber(r0, r3, r4, r6, &slow);
if (CpuFeatures::IsSupported(VFP3)) {
CpuFeatures::Scope scope(VFP3);
@@ -1370,7 +1371,8 @@ void KeyedLoadIC::GenerateExternalArray(MacroAssembler* masm,
// Allocate a HeapNumber for the result and perform int-to-double
// conversion. Don't use r0 and r1 as AllocateHeapNumber clobbers all
// registers - also when jumping due to exhausted young space.
__ AllocateHeapNumber(r2, r3, r4, &slow);
__ LoadRoot(r6, Heap::kHeapNumberMapRootIndex);
__ AllocateHeapNumber(r2, r3, r4, r6, &slow);
__ vcvt_f64_u32(d0, s0);
__ sub(r1, r2, Operand(kHeapObjectTag));
@@ -1407,7 +1409,8 @@ void KeyedLoadIC::GenerateExternalArray(MacroAssembler* masm,
// Wrap it into a HeapNumber. Don't use r0 and r1 as AllocateHeapNumber
// clobbers all registers - also when jumping due to exhausted young
// space.
__ AllocateHeapNumber(r4, r5, r6, &slow);
__ LoadRoot(r6, Heap::kHeapNumberMapRootIndex);
__ AllocateHeapNumber(r4, r5, r7, r6, &slow);
__ str(hiword, FieldMemOperand(r4, HeapNumber::kExponentOffset));
__ str(loword, FieldMemOperand(r4, HeapNumber::kMantissaOffset));
@@ -1423,7 +1426,8 @@ void KeyedLoadIC::GenerateExternalArray(MacroAssembler* masm,
// Allocate a HeapNumber for the result. Don't use r0 and r1 as
// AllocateHeapNumber clobbers all registers - also when jumping due to
// exhausted young space.
__ AllocateHeapNumber(r2, r3, r4, &slow);
__ LoadRoot(r6, Heap::kHeapNumberMapRootIndex);
__ AllocateHeapNumber(r2, r3, r4, r6, &slow);
__ vcvt_f64_f32(d0, s0);
__ sub(r1, r2, Operand(kHeapObjectTag));
__ vstr(d0, r1, HeapNumber::kValueOffset);
@@ -1434,7 +1438,8 @@ void KeyedLoadIC::GenerateExternalArray(MacroAssembler* masm,
// Allocate a HeapNumber for the result. Don't use r0 and r1 as
// AllocateHeapNumber clobbers all registers - also when jumping due to
// exhausted young space.
__ AllocateHeapNumber(r3, r4, r5, &slow);
__ LoadRoot(r6, Heap::kHeapNumberMapRootIndex);
__ AllocateHeapNumber(r3, r4, r5, r6, &slow);
// VFP is not available, do manual single to double conversion.
// r2: floating point value (binary32)

View File

@@ -1528,6 +1528,16 @@ void MacroAssembler::Assert(Condition cc, const char* msg) {
}
void MacroAssembler::AssertRegisterIsRoot(Register reg,
Heap::RootListIndex index) {
if (FLAG_debug_code) {
LoadRoot(ip, index);
cmp(reg, ip);
Check(eq, "Register did not match expected root");
}
}
void MacroAssembler::Check(Condition cc, const char* msg) {
Label L;
b(cc, &L);
@@ -1646,6 +1656,7 @@ void MacroAssembler::JumpIfNotBothSequentialAsciiStrings(Register first,
void MacroAssembler::AllocateHeapNumber(Register result,
Register scratch1,
Register scratch2,
Register heap_number_map,
Label* gc_required) {
// Allocate an object in the heap for the heap number and tag it as a heap
// object.
@@ -1656,9 +1667,9 @@ void MacroAssembler::AllocateHeapNumber(Register result,
gc_required,
TAG_OBJECT);
// Get heap number map and store it in the allocated object.
LoadRoot(scratch1, Heap::kHeapNumberMapRootIndex);
str(scratch1, FieldMemOperand(result, HeapObject::kMapOffset));
// Store heap number map in the allocated object.
AssertRegisterIsRoot(heap_number_map, Heap::kHeapNumberMapRootIndex);
str(heap_number_map, FieldMemOperand(result, HeapObject::kMapOffset));
}

View File

@@ -379,6 +379,7 @@ class MacroAssembler: public Assembler {
void AllocateHeapNumber(Register result,
Register scratch1,
Register scratch2,
Register heap_number_map,
Label* gc_required);
// ---------------------------------------------------------------------------
@@ -558,6 +559,7 @@ class MacroAssembler: public Assembler {
// Calls Abort(msg) if the condition cc is not satisfied.
// Use --debug_code to enable.
void Assert(Condition cc, const char* msg);
void AssertRegisterIsRoot(Register reg, Heap::RootListIndex index);
// Like Assert(), but always enabled.
void Check(Condition cc, const char* msg);
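The new AllocateHeapNumber signature shifts one responsibility to the caller: heap_number_map must already hold the heap number map root when the macro is invoked, and AssertRegisterIsRoot verifies that only when --debug_code is enabled (the FLAG_debug_code guard above emits no code otherwise). A minimal call-site sketch of the updated convention, modelled on the KeyedLoadIC hunks earlier in this commit:

// Caller establishes the map register first (any free register; the hunks
// above use r6), then passes it through to the allocation macro, which
// stores it as the map of the new object.
__ LoadRoot(r6, Heap::kHeapNumberMapRootIndex);
__ AllocateHeapNumber(r0, r3, r4, r6, &slow);  // r3 and r4 are scratch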