X64: Use allocation with no scratch registers to avoid push/pop.

Minor prettifications.

Review URL: http://codereview.chromium.org/1862001

git-svn-id: http://v8.googlecode.com/svn/branches/bleeding_edge@4562 ce2b1a6d-e550-0410-aec6-3dcde31c8c00
lrn@chromium.org 2010-05-03 09:54:48 +00:00
parent 3906e2b109
commit 289b245d2b
2 changed files with 20 additions and 25 deletions
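
In DeferredInlineBinaryOperation::Generate(), the change boils down to the pattern below (excerpted from the hunks that follow). Previously AllocateHeapNumber was given left_ as its scratch register, so left_ had to be saved and restored around the call, and a separate after_alloc_failure label was needed to restore it on the failure path. Passing no_reg instead makes the allocation helpers fall back to kScratchRegister (or rax), so left_ is never clobbered and an allocation failure can jump straight to call_runtime:

    // Before: left_ doubles as the allocation scratch register.
    __ push(left_);
    __ AllocateHeapNumber(dst_, left_, &after_alloc_failure);
    __ pop(left_);
    ...
    __ bind(&after_alloc_failure);
    __ pop(left_);
    __ bind(&call_runtime);

    // After: no scratch register is passed, so nothing needs preserving.
    __ AllocateHeapNumber(dst_, no_reg, &call_runtime);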


@@ -5969,7 +5969,7 @@ void DeferredInlineBinaryOperation::Generate() {
       || (op_ == Token::SUB)
       || (op_ == Token::MUL)
       || (op_ == Token::DIV)) {
-    Label call_runtime, after_alloc_failure;
+    Label call_runtime;
     Label left_smi, right_smi, load_right, do_op;
     __ JumpIfSmi(left_, &left_smi);
     __ CompareRoot(FieldOperand(left_, HeapObject::kMapOffset),
@@ -5987,9 +5987,7 @@ void DeferredInlineBinaryOperation::Generate() {
     __ Integer32ToSmi(left_, left_);
     if (mode_ == OVERWRITE_LEFT) {
       Label alloc_failure;
-      __ push(left_);
-      __ AllocateHeapNumber(dst_, left_, &after_alloc_failure);
-      __ pop(left_);
+      __ AllocateHeapNumber(dst_, no_reg, &call_runtime);
     }
     __ bind(&load_right);
@@ -6002,9 +6000,7 @@ void DeferredInlineBinaryOperation::Generate() {
       __ movq(dst_, right_);
     } else if (mode_ == NO_OVERWRITE) {
       Label alloc_failure;
-      __ push(left_);
-      __ AllocateHeapNumber(dst_, left_, &after_alloc_failure);
-      __ pop(left_);
+      __ AllocateHeapNumber(dst_, no_reg, &call_runtime);
     }
     __ jmp(&do_op);
@@ -6014,9 +6010,7 @@ void DeferredInlineBinaryOperation::Generate() {
     __ Integer32ToSmi(right_, right_);
     if (mode_ == OVERWRITE_RIGHT || mode_ == NO_OVERWRITE) {
       Label alloc_failure;
-      __ push(left_);
-      __ AllocateHeapNumber(dst_, left_, &after_alloc_failure);
-      __ pop(left_);
+      __ AllocateHeapNumber(dst_, no_reg, &call_runtime);
     }
     __ bind(&do_op);
@@ -6030,8 +6024,6 @@ void DeferredInlineBinaryOperation::Generate() {
     __ movsd(FieldOperand(dst_, HeapNumber::kValueOffset), xmm0);
     __ jmp(&done);
-    __ bind(&after_alloc_failure);
-    __ pop(left_);
     __ bind(&call_runtime);
   }
   GenericBinaryOpStub stub(op_, mode_, NO_SMI_CODE_IN_STUB);


@@ -2356,18 +2356,17 @@ void MacroAssembler::LoadAllocationTopHelper(Register result,
     return;
   }
-  // Move address of new object to result. Use scratch register if available.
-  if (!scratch.is_valid()) {
-    if (result.is(rax)) {
-      load_rax(new_space_allocation_top);
-    } else {
-      movq(kScratchRegister, new_space_allocation_top);
-      movq(result, Operand(kScratchRegister, 0));
-    }
-  } else {
+  // Move address of new object to result. Use scratch register if available,
+  // and keep address in scratch until call to UpdateAllocationTopHelper.
+  if (scratch.is_valid()) {
     ASSERT(!scratch.is(result_end));
     movq(scratch, new_space_allocation_top);
     movq(result, Operand(scratch, 0));
+  } else if (result.is(rax)) {
+    load_rax(new_space_allocation_top);
+  } else {
+    movq(kScratchRegister, new_space_allocation_top);
+    movq(result, Operand(kScratchRegister, 0));
   }
 }
@@ -2388,11 +2387,11 @@ void MacroAssembler::UpdateAllocationTopHelper(Register result_end,
     store_rax(new_space_allocation_top);
   } else {
     // Register required - use scratch provided if available.
-    if (!scratch.is_valid()) {
+    if (scratch.is_valid()) {
+      movq(Operand(scratch, 0), result_end);
+    } else {
       movq(kScratchRegister, new_space_allocation_top);
       movq(Operand(kScratchRegister, 0), result_end);
-    } else {
-      movq(Operand(scratch, 0), result_end);
     }
   }
 }
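
Taken together, the two helpers above now cooperate when a valid scratch register is supplied: LoadAllocationTopHelper keeps the allocation-top address in scratch, and UpdateAllocationTopHelper stores the new top through it instead of reloading the address via kScratchRegister. A minimal sketch of the resulting sequence, using the same MacroAssembler calls as the hunks above:

    movq(scratch, new_space_allocation_top);   // load address of the top pointer once
    movq(result, Operand(scratch, 0));         // result = current allocation top
    // ... compute result_end = result + object size ...
    movq(Operand(scratch, 0), result_end);     // write back the new top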
@@ -2415,7 +2414,11 @@ void MacroAssembler::AllocateInNewSpace(int object_size,
   Register top_reg = result_end.is_valid() ? result_end : result;
-  lea(top_reg, Operand(result, object_size));
+  if (top_reg.is(result)) {
+    addq(top_reg, Immediate(object_size));
+  } else {
+    lea(top_reg, Operand(result, object_size));
+  }
   movq(kScratchRegister, new_space_allocation_limit);
   cmpq(top_reg, Operand(kScratchRegister, 0));
   j(above, gc_required);
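
The AllocateInNewSpace hunk is presumably one of the "minor prettifications": when no result_end register is supplied, top_reg aliases result, and the in-place bump is now written as addq rather than as an lea with the same register as both base and destination. The flags addq sets are irrelevant here, since the following cmpq overwrites them. Illustrative MacroAssembler calls for the two cases:

    addq(result, Immediate(object_size));           // top_reg.is(result): bump result in place
    lea(result_end, Operand(result, object_size));  // separate result_end register available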