IA32: Avoid going into stubs or runtime code for bitops even if the
inputs are heap numbers or the result is a heap number (only with
SSE2).  Make it possible for a deferred code object to work without
spilling all registers.
Review URL: http://codereview.chromium.org/3054047

git-svn-id: http://v8.googlecode.com/svn/branches/bleeding_edge@5215 ce2b1a6d-e550-0410-aec6-3dcde31c8c00
erik.corry@gmail.com 2010-08-09 13:12:02 +00:00
parent df8b3eb742
commit 751ec5d099
8 changed files with 451 additions and 91 deletions


@@ -77,14 +77,23 @@ void CodeGenerator::ProcessDeferred() {
// Generate the code.
Comment cmnt(masm_, code->comment());
masm_->bind(code->entry_label());
code->SaveRegisters();
if (code->AutoSaveAndRestore()) {
code->SaveRegisters();
}
code->Generate();
code->RestoreRegisters();
masm_->jmp(code->exit_label());
if (code->AutoSaveAndRestore()) {
code->RestoreRegisters();
code->Exit();
}
}
}
void DeferredCode::Exit() {
masm_->jmp(exit_label());
}
void CodeGenerator::SetFrame(VirtualFrame* new_frame,
RegisterFile* non_frame_registers) {
RegisterFile saved_counts;


@@ -319,6 +319,15 @@ class DeferredCode: public ZoneObject {
void SaveRegisters();
void RestoreRegisters();
void Exit();
// If this returns true then all registers will be saved for the duration
// of the Generate() call. Otherwise the registers are not saved and the
// Generate() call must bracket any runtime calls with calls to
// SaveRegisters() and RestoreRegisters(). In this case the Generate
// method must also call Exit() in order to return to the non-deferred
// code.
virtual bool AutoSaveAndRestore() { return true; }
protected:
MacroAssembler* masm_;
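
A minimal sketch of the manual protocol this flag enables. The subclass below is hypothetical (not part of this change) and uses only the DeferredCode interface shown above: when AutoSaveAndRestore() returns false, ProcessDeferred() emits neither the register saves nor the jump back, so Generate() must do both itself.

class DeferredExample: public DeferredCode {
 public:
  // Opt out of the automatic save/restore done by ProcessDeferred().
  virtual bool AutoSaveAndRestore() { return false; }
  virtual void Generate() {
    // Fast path: uses only the registers it was given, so nothing is saved.
    // ...
    // Slow path: bracket the runtime call explicitly, then jump back.
    SaveRegisters();
    // ... call a stub or the runtime system ...
    RestoreRegisters();
    Exit();  // Jumps to exit_label(), back to the non-deferred code.
  }
};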


@@ -1142,6 +1142,21 @@ void Assembler::rcl(Register dst, uint8_t imm8) {
}
void Assembler::rcr(Register dst, uint8_t imm8) {
EnsureSpace ensure_space(this);
last_pc_ = pc_;
ASSERT(is_uint5(imm8)); // illegal shift count
if (imm8 == 1) {
EMIT(0xD1);
EMIT(0xD8 | dst.code());
} else {
EMIT(0xC1);
EMIT(0xD8 | dst.code());
EMIT(imm8);
}
}
void Assembler::sar(Register dst, uint8_t imm8) {
EnsureSpace ensure_space(this);
last_pc_ = pc_;
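
For reference, the bytes the new rcr() emits, per the Intel encodings RCR r/m32,1 (D1 /3) and RCR r/m32,imm8 (C1 /3 ib); the 0xD8 | dst.code() above is the ModRM byte for mod=11 with opcode extension /3:

__ rcr(eax, 1);  // emits: D1 D8
__ rcr(ecx, 3);  // emits: C1 D9 03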


@@ -625,6 +625,7 @@ class Assembler : public Malloced {
void or_(const Operand& dst, const Immediate& x);
void rcl(Register dst, uint8_t imm8);
void rcr(Register dst, uint8_t imm8);
void sar(Register dst, uint8_t imm8);
void sar_cl(Register dst);


@@ -1038,7 +1038,11 @@ const char* GenericBinaryOpStub::GetName() {
}
// Call the specialized stub for a binary operation.
// Perform or call the specialized stub for a binary operation. Requires the
// three registers left, right and dst to be distinct and spilled. This
// deferred operation has up to three entry points: The main one calls the
// runtime system. The second is for when the result is a non-Smi. The
// third is for when at least one of the inputs is non-Smi and we have SSE2.
class DeferredInlineBinaryOperation: public DeferredCode {
public:
DeferredInlineBinaryOperation(Token::Value op,
@@ -1055,7 +1059,18 @@ class DeferredInlineBinaryOperation: public DeferredCode {
virtual void Generate();
// This stub makes explicit calls to SaveRegisters(), RestoreRegisters() and
// Exit().
virtual bool AutoSaveAndRestore() { return false; }
void JumpToAnswerOutOfRange(Condition cond);
void JumpToConstantRhs(Condition cond, Smi* smi_value);
Label* NonSmiInputLabel();
private:
void GenerateAnswerOutOfRange();
void GenerateNonSmiInput();
Token::Value op_;
Register dst_;
Register left_;
@@ -1063,15 +1078,42 @@ class DeferredInlineBinaryOperation: public DeferredCode {
TypeInfo left_info_;
TypeInfo right_info_;
OverwriteMode mode_;
Label answer_out_of_range_;
Label non_smi_input_;
Label constant_rhs_;
Smi* smi_value_;
};
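
Taken together, a call site can now reach this deferred object through any of the entry points declared above; a sketch (the surrounding codegen is elided):

deferred->Branch(cc);                        // main entry: stub/runtime call
__ j(cc, deferred->NonSmiInputLabel());      // heap-number input (falls back to
                                             // the main entry without SSE2)
deferred->JumpToAnswerOutOfRange(cc);        // int32 answer does not fit in a smi
deferred->JumpToConstantRhs(cc, smi_value);  // constant-rhs bit op, non-smi lhs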
Label* DeferredInlineBinaryOperation::NonSmiInputLabel() {
if (Token::IsBitOp(op_) && CpuFeatures::IsSupported(SSE2)) {
return &non_smi_input_;
} else {
return entry_label();
}
}
void DeferredInlineBinaryOperation::JumpToAnswerOutOfRange(Condition cond) {
__ j(cond, &answer_out_of_range_);
}
void DeferredInlineBinaryOperation::JumpToConstantRhs(Condition cond,
Smi* smi_value) {
smi_value_ = smi_value;
__ j(cond, &constant_rhs_);
}
void DeferredInlineBinaryOperation::Generate() {
Label done;
if (CpuFeatures::IsSupported(SSE2) && ((op_ == Token::ADD) ||
(op_ ==Token::SUB) ||
(op_ == Token::MUL) ||
(op_ == Token::DIV))) {
// Registers are not saved implicitly for this stub, so we should not
// tread on the registers that were not passed to us.
if (CpuFeatures::IsSupported(SSE2) &&
((op_ == Token::ADD) ||
(op_ == Token::SUB) ||
(op_ == Token::MUL) ||
(op_ == Token::DIV))) {
CpuFeatures::Scope use_sse2(SSE2);
Label call_runtime, after_alloc_failure;
Label left_smi, right_smi, load_right, do_op;
@@ -1131,7 +1173,6 @@ void DeferredInlineBinaryOperation::Generate() {
__ cvtsi2sd(xmm1, Operand(right_));
__ SmiTag(right_);
if (mode_ == OVERWRITE_RIGHT || mode_ == NO_OVERWRITE) {
Label alloc_failure;
__ push(left_);
__ AllocateHeapNumber(dst_, left_, no_reg, &after_alloc_failure);
__ pop(left_);
@@ -1146,19 +1187,200 @@
default: UNREACHABLE();
}
__ movdbl(FieldOperand(dst_, HeapNumber::kValueOffset), xmm0);
__ jmp(&done);
Exit();
__ bind(&after_alloc_failure);
__ pop(left_);
__ bind(&call_runtime);
}
// Register spilling is not done implicitly for this stub.
// We can't postpone it any longer now.
SaveRegisters();
GenericBinaryOpStub stub(op_,
mode_,
NO_SMI_CODE_IN_STUB,
TypeInfo::Combine(left_info_, right_info_));
stub.GenerateCall(masm_, left_, right_);
if (!dst_.is(eax)) __ mov(dst_, eax);
__ bind(&done);
RestoreRegisters();
Exit();
if (non_smi_input_.is_linked() || constant_rhs_.is_linked()) {
GenerateNonSmiInput();
}
if (answer_out_of_range_.is_linked()) {
GenerateAnswerOutOfRange();
}
}
void DeferredInlineBinaryOperation::GenerateNonSmiInput() {
// We know at least one of the inputs was not a Smi.
// This is a third entry point into the deferred code.
// We may not overwrite left_ because we want to be able to call the
// handling code for the non-smi answer, which might want to overwrite
// the heap number in left_.
ASSERT(!right_.is(dst_));
ASSERT(!left_.is(dst_));
ASSERT(!left_.is(right_));
// This entry point is used for bit ops where the right hand side
// is a constant Smi and the left hand side is a heap object. It
// is also used for bit ops where both sides are unknown, but where
// at least one of them is a heap object.
bool rhs_is_constant = constant_rhs_.is_linked();
// We can't generate code for both cases.
ASSERT(!non_smi_input_.is_linked() || !constant_rhs_.is_linked());
if (FLAG_debug_code) {
__ int3(); // We don't fall through into this code.
}
__ bind(&non_smi_input_);
if (rhs_is_constant) {
__ bind(&constant_rhs_);
// In this case the input is a heap object and it is in the dst_ register.
// The left_ and right_ registers have not been initialized yet.
__ mov(right_, Immediate(smi_value_));
__ mov(left_, Operand(dst_));
if (!CpuFeatures::IsSupported(SSE2)) {
__ jmp(entry_label());
return;
} else {
CpuFeatures::Scope use_sse2(SSE2);
__ JumpIfNotNumber(dst_, left_info_, entry_label());
__ ConvertToInt32(dst_, left_, dst_, left_info_, entry_label());
__ SmiUntag(right_);
}
} else {
// We know we have SSE2 here because otherwise the label is not linked (see
// NonSmiInputLabel).
CpuFeatures::Scope use_sse2(SSE2);
// Handle the non-constant right hand side situation:
if (left_info_.IsSmi()) {
// Right is a heap object.
__ JumpIfNotNumber(right_, right_info_, entry_label());
__ ConvertToInt32(right_, right_, dst_, left_info_, entry_label());
__ mov(dst_, Operand(left_));
__ SmiUntag(dst_);
} else if (right_info_.IsSmi()) {
// Left is a heap object.
__ JumpIfNotNumber(left_, left_info_, entry_label());
__ ConvertToInt32(dst_, left_, dst_, left_info_, entry_label());
__ SmiUntag(right_);
} else {
// Here we don't know if it's one or both that is a heap object.
Label only_right_is_heap_object, got_both;
__ mov(dst_, Operand(left_));
__ SmiUntag(dst_, &only_right_is_heap_object);
// Left was a heap object.
__ JumpIfNotNumber(left_, left_info_, entry_label());
__ ConvertToInt32(dst_, left_, dst_, left_info_, entry_label());
__ SmiUntag(right_, &got_both);
// Both were heap objects.
__ rcl(right_, 1); // Put tag back.
__ JumpIfNotNumber(right_, right_info_, entry_label());
__ ConvertToInt32(right_, right_, no_reg, left_info_, entry_label());
__ jmp(&got_both);
__ bind(&only_right_is_heap_object);
__ JumpIfNotNumber(right_, right_info_, entry_label());
__ ConvertToInt32(right_, right_, no_reg, left_info_, entry_label());
__ bind(&got_both);
}
}
ASSERT(op_ == Token::BIT_AND ||
op_ == Token::BIT_OR ||
op_ == Token::BIT_XOR ||
right_.is(ecx));
switch (op_) {
case Token::BIT_AND: __ and_(dst_, Operand(right_)); break;
case Token::BIT_OR: __ or_(dst_, Operand(right_)); break;
case Token::BIT_XOR: __ xor_(dst_, Operand(right_)); break;
case Token::SHR: __ shr_cl(dst_); break;
case Token::SAR: __ sar_cl(dst_); break;
case Token::SHL: __ shl_cl(dst_); break;
default: UNREACHABLE();
}
if (op_ == Token::SHR) {
// Check that the *unsigned* result fits in a smi. Neither of
// the two high-order bits can be set:
// * 0x80000000: high bit would be lost when smi tagging.
// * 0x40000000: this number would convert to negative when smi
// tagging.
__ test(dst_, Immediate(0xc0000000));
__ j(not_zero, &answer_out_of_range_);
} else {
// Check that the *signed* result fits in a smi.
__ cmp(dst_, 0xc0000000);
__ j(negative, &answer_out_of_range_);
}
__ SmiTag(dst_);
Exit();
}
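
A worked example of the two range checks above, as a standalone sketch (assumes ia32's one-bit smi tag, i.e. a 31-bit payload). A signed result fits in a smi iff it lies in [-2^30, 2^30); the single cmp/j(negative) catches both overflow directions because x - 0xc0000000 (mod 2^32) is negative exactly when x is outside that interval.

#include <cassert>
#include <cstdint>

// Mirrors: cmp(dst_, 0xc0000000); j(negative, &answer_out_of_range_);
static bool FitsSignedSmi(int32_t x) {
  return static_cast<int32_t>(static_cast<uint32_t>(x) - 0xc0000000u) >= 0;
}

// Mirrors: test(dst_, Immediate(0xc0000000)); j(not_zero, &answer_out_of_range_);
static bool FitsUnsignedSmi(uint32_t x) {
  return (x & 0xc0000000u) == 0;
}

int main() {
  assert(FitsSignedSmi(0x3fffffff) && !FitsSignedSmi(0x40000000));
  assert(FitsSignedSmi(-0x40000000) && !FitsSignedSmi(-0x40000001));
  assert(FitsUnsignedSmi(0x3fffffffu) && !FitsUnsignedSmi(0x40000000u));
  return 0;
}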
void DeferredInlineBinaryOperation::GenerateAnswerOutOfRange() {
Label after_alloc_failure2;
Label allocation_ok;
__ bind(&after_alloc_failure2);
// We have to allocate a number, possibly causing a GC, while keeping hold of
// the answer in dst_. The answer is not a Smi. We can't just call the
// runtime shift function here because we already threw away the inputs.
__ xor_(left_, Operand(left_));
__ shl(dst_, 1); // Put top bit in carry flag and Smi tag the low bits.
__ rcr(left_, 1); // Rotate with carry.
__ push(dst_); // Smi tagged low 31 bits.
__ push(left_); // 0 or 0x80000000, which is Smi tagged in both cases.
__ CallRuntime(Runtime::kNumberAlloc, 0);
if (!left_.is(eax)) {
__ mov(left_, eax);
}
__ pop(right_); // High bit.
__ pop(dst_); // Low 31 bits.
__ shr(dst_, 1); // Put 0 in top bit.
__ or_(dst_, Operand(right_));
__ jmp(&allocation_ok);
// This is the second entry point to the deferred code. It is used only by
// the bit operations.
// The dst_ register has the answer. It is not Smi tagged. If mode_ is
// OVERWRITE_LEFT then left_ must contain either an overwritable heap number
// or a Smi.
// Put a heap number pointer in left_.
__ bind(&answer_out_of_range_);
SaveRegisters();
if (mode_ == OVERWRITE_LEFT) {
__ test(left_, Immediate(kSmiTagMask));
__ j(not_zero, &allocation_ok);
}
// This trashes right_.
__ AllocateHeapNumber(left_, right_, no_reg, &after_alloc_failure2);
__ bind(&allocation_ok);
if (CpuFeatures::IsSupported(SSE2) && op_ != Token::SHR) {
CpuFeatures::Scope use_sse2(SSE2);
ASSERT(Token::IsBitOp(op_));
// Signed conversion.
__ cvtsi2sd(xmm0, Operand(dst_));
__ movdbl(FieldOperand(left_, HeapNumber::kValueOffset), xmm0);
} else {
if (op_ == Token::SHR) {
__ push(dst_);
__ push(Immediate(0)); // High word of unsigned value.
__ fild_d(Operand(esp, 0));
__ Drop(2);
} else {
ASSERT(Token::IsBitOp(op_));
__ push(dst_);
__ fild_s(Operand(esp, 0)); // Signed conversion.
__ pop(dst_);
}
__ fstp_d(FieldOperand(left_, HeapNumber::kValueOffset));
}
__ mov(dst_, left_);
RestoreRegisters();
Exit();
}
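
The allocation path above may trigger a GC while the raw, untagged int32 answer is still live, so it is first split into two values that are both valid smis (tag bit zero) and therefore ignored by the GC. A standalone sketch of the split and reassembly:

#include <cassert>
#include <cstdint>

int main() {
  uint32_t answer = 0xdeadbeefu;         // raw untagged result held in dst_
  uint32_t low = answer << 1;            // shl: smi-tags the low 31 bits
  uint32_t high = answer & 0x80000000u;  // xor + rcr: 0 or 0x80000000, both smis
  // ... Runtime::kNumberAlloc runs here; a GC scanning the stack sees two
  // smis and leaves them untouched ...
  uint32_t rebuilt = (low >> 1) | high;  // shr puts 0 in the top bit, then or_
  assert(rebuilt == answer);
  return 0;
}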
@@ -1499,10 +1721,25 @@ void CodeGenerator::JumpIfNotBothSmiUsingTypeInfo(Register left,
TypeInfo left_info,
TypeInfo right_info,
DeferredCode* deferred) {
JumpIfNotBothSmiUsingTypeInfo(left,
right,
scratch,
left_info,
right_info,
deferred->entry_label());
}
void CodeGenerator::JumpIfNotBothSmiUsingTypeInfo(Register left,
Register right,
Register scratch,
TypeInfo left_info,
TypeInfo right_info,
Label* on_not_smi) {
if (left.is(right)) {
if (!left_info.IsSmi()) {
__ test(left, Immediate(kSmiTagMask));
deferred->Branch(not_zero);
__ j(not_zero, on_not_smi);
} else {
if (FLAG_debug_code) __ AbortIfNotSmi(left);
}
@@ -1511,17 +1748,17 @@ void CodeGenerator::JumpIfNotBothSmiUsingTypeInfo(Register left,
__ mov(scratch, left);
__ or_(scratch, Operand(right));
__ test(scratch, Immediate(kSmiTagMask));
deferred->Branch(not_zero);
__ j(not_zero, on_not_smi);
} else {
__ test(left, Immediate(kSmiTagMask));
deferred->Branch(not_zero);
__ j(not_zero, on_not_smi);
if (FLAG_debug_code) __ AbortIfNotSmi(right);
}
} else {
if (FLAG_debug_code) __ AbortIfNotSmi(left);
if (!right_info.IsSmi()) {
__ test(right, Immediate(kSmiTagMask));
deferred->Branch(not_zero);
__ j(not_zero, on_not_smi);
} else {
if (FLAG_debug_code) __ AbortIfNotSmi(right);
}
@@ -1606,6 +1843,10 @@ Result CodeGenerator::LikelySmiBinaryOperation(BinaryOperation* expr,
right->ToRegister();
frame_->Spill(eax);
frame_->Spill(edx);
// DeferredInlineBinaryOperation requires all the registers that it is
// told about to be spilled.
frame_->Spill(left->reg());
frame_->Spill(right->reg());
// Check that left and right are smi tagged.
DeferredInlineBinaryOperation* deferred =
@@ -1695,15 +1936,22 @@ Result CodeGenerator::LikelySmiBinaryOperation(BinaryOperation* expr,
left->ToRegister();
ASSERT(left->is_register() && !left->reg().is(ecx));
ASSERT(right->is_register() && right->reg().is(ecx));
if (left_type_info.IsSmi()) {
if (FLAG_debug_code) __ AbortIfNotSmi(left->reg());
}
if (right_type_info.IsSmi()) {
if (FLAG_debug_code) __ AbortIfNotSmi(right->reg());
}
// We will modify right, it must be spilled.
frame_->Spill(ecx);
// DeferredInlineBinaryOperation requires all the registers that it is told
// about to be spilled.
frame_->Spill(left->reg());
// Use a fresh answer register to avoid spilling the left operand.
answer = allocator_->Allocate();
ASSERT(answer.is_valid());
// Check that both operands are smis using the answer register as a
// temporary.
DeferredInlineBinaryOperation* deferred =
new DeferredInlineBinaryOperation(op,
answer.reg(),
@@ -1712,55 +1960,28 @@ Result CodeGenerator::LikelySmiBinaryOperation(BinaryOperation* expr,
left_type_info,
right_type_info,
overwrite_mode);
JumpIfNotBothSmiUsingTypeInfo(left->reg(), right->reg(), answer.reg(),
left_type_info, right_type_info,
deferred->NonSmiInputLabel());
Label do_op, left_nonsmi;
// If right is a smi we make a fast case if left is either a smi
// or a heapnumber.
if (CpuFeatures::IsSupported(SSE2) && right_type_info.IsSmi()) {
CpuFeatures::Scope use_sse2(SSE2);
__ mov(answer.reg(), left->reg());
// Fast case - both are actually smis.
if (!left_type_info.IsSmi()) {
__ test(answer.reg(), Immediate(kSmiTagMask));
__ j(not_zero, &left_nonsmi);
} else {
if (FLAG_debug_code) __ AbortIfNotSmi(left->reg());
}
if (FLAG_debug_code) __ AbortIfNotSmi(right->reg());
__ SmiUntag(answer.reg());
__ jmp(&do_op);
// Untag both operands.
__ mov(answer.reg(), left->reg());
__ SmiUntag(answer.reg());
__ SmiUntag(right->reg()); // Right is ecx.
__ bind(&left_nonsmi);
// Branch if not a heapnumber.
__ cmp(FieldOperand(answer.reg(), HeapObject::kMapOffset),
Factory::heap_number_map());
deferred->Branch(not_equal);
// Load integer value into answer register using truncation.
__ cvttsd2si(answer.reg(),
FieldOperand(answer.reg(), HeapNumber::kValueOffset));
// Branch if we do not fit in a smi.
__ cmp(answer.reg(), 0xc0000000);
deferred->Branch(negative);
} else {
JumpIfNotBothSmiUsingTypeInfo(left->reg(), right->reg(), answer.reg(),
left_type_info, right_type_info, deferred);
// Untag both operands.
__ mov(answer.reg(), left->reg());
__ SmiUntag(answer.reg());
}
__ bind(&do_op);
__ SmiUntag(ecx);
// Perform the operation.
ASSERT(right->reg().is(ecx));
switch (op) {
case Token::SAR:
case Token::SAR: {
__ sar_cl(answer.reg());
// No checks of result necessary
if (!left_type_info.IsSmi()) {
// Check that the *signed* result fits in a smi.
__ cmp(answer.reg(), 0xc0000000);
deferred->JumpToAnswerOutOfRange(negative);
}
break;
}
case Token::SHR: {
Label result_ok;
__ shr_cl(answer.reg());
// Check that the *unsigned* result fits in a smi. Neither of
// the two high-order bits can be set:
@@ -1773,21 +1994,14 @@ Result CodeGenerator::LikelySmiBinaryOperation(BinaryOperation* expr,
// case. The low bit of the left argument may be lost, but only
// in a case where it is dropped anyway.
__ test(answer.reg(), Immediate(0xc0000000));
__ j(zero, &result_ok);
__ SmiTag(ecx);
deferred->Jump();
__ bind(&result_ok);
deferred->JumpToAnswerOutOfRange(not_zero);
break;
}
case Token::SHL: {
Label result_ok;
__ shl_cl(answer.reg());
// Check that the *signed* result fits in a smi.
__ cmp(answer.reg(), 0xc0000000);
__ j(positive, &result_ok);
__ SmiTag(ecx);
deferred->Jump();
__ bind(&result_ok);
deferred->JumpToAnswerOutOfRange(negative);
break;
}
default:
@@ -1805,6 +2019,10 @@ Result CodeGenerator::LikelySmiBinaryOperation(BinaryOperation* expr,
// Handle the other binary operations.
left->ToRegister();
right->ToRegister();
// DeferredInlineBinaryOperation requires all the registers that it is told
// about to be spilled.
frame_->Spill(left->reg());
frame_->Spill(right->reg());
// A newly allocated register answer is used to hold the answer. The
// registers containing left and right are not modified so they don't
// need to be spilled in the fast case.
@@ -1820,8 +2038,12 @@ Result CodeGenerator::LikelySmiBinaryOperation(BinaryOperation* expr,
left_type_info,
right_type_info,
overwrite_mode);
JumpIfNotBothSmiUsingTypeInfo(left->reg(), right->reg(), answer.reg(),
left_type_info, right_type_info, deferred);
Label non_smi_bit_op;
if (op != Token::BIT_OR) {
JumpIfNotBothSmiUsingTypeInfo(left->reg(), right->reg(), answer.reg(),
left_type_info, right_type_info,
deferred->NonSmiInputLabel());
}
__ mov(answer.reg(), left->reg());
switch (op) {
@@ -1864,6 +2086,8 @@ Result CodeGenerator::LikelySmiBinaryOperation(BinaryOperation* expr,
case Token::BIT_OR:
__ or_(answer.reg(), Operand(right->reg()));
__ test(answer.reg(), Immediate(kSmiTagMask));
__ j(not_zero, deferred->NonSmiInputLabel());
break;
case Token::BIT_AND:
@@ -1878,6 +2102,7 @@ Result CodeGenerator::LikelySmiBinaryOperation(BinaryOperation* expr,
UNREACHABLE();
break;
}
deferred->BindExit();
left->Unuse();
right->Unuse();
@@ -2363,27 +2588,25 @@ Result CodeGenerator::ConstantSmiBinaryOperation(BinaryOperation* expr,
case Token::BIT_XOR:
case Token::BIT_AND: {
operand->ToRegister();
// DeferredInlineBinaryOperation requires all the registers that it is
// told about to be spilled.
frame_->Spill(operand->reg());
DeferredCode* deferred = NULL;
if (reversed) {
deferred =
new DeferredInlineSmiOperationReversed(op,
operand->reg(),
smi_value,
operand->reg(),
operand->type_info(),
overwrite_mode);
} else {
deferred = new DeferredInlineSmiOperation(op,
operand->reg(),
operand->reg(),
operand->type_info(),
smi_value,
overwrite_mode);
}
DeferredInlineBinaryOperation* deferred = NULL;
if (!operand->type_info().IsSmi()) {
Result left = allocator()->Allocate();
ASSERT(left.is_valid());
Result right = allocator()->Allocate();
ASSERT(right.is_valid());
deferred = new DeferredInlineBinaryOperation(
op,
operand->reg(),
left.reg(),
right.reg(),
operand->type_info(),
TypeInfo::Smi(),
overwrite_mode == NO_OVERWRITE ? NO_OVERWRITE : OVERWRITE_LEFT);
__ test(operand->reg(), Immediate(kSmiTagMask));
deferred->Branch(not_zero);
deferred->JumpToConstantRhs(not_zero, smi_value);
} else if (FLAG_debug_code) {
__ AbortIfNotSmi(operand->reg());
}
@@ -2399,7 +2622,7 @@ Result CodeGenerator::ConstantSmiBinaryOperation(BinaryOperation* expr,
__ or_(Operand(operand->reg()), Immediate(value));
}
}
deferred->BindExit();
if (deferred != NULL) deferred->BindExit();
answer = *operand;
break;
}


@@ -530,7 +530,7 @@ class CodeGenerator: public AstVisitor {
// Emits code sequence that jumps to deferred code if the inputs
// are not both smis. Cannot be in MacroAssembler because it takes
// advantage of TypeInfo to skip unneeded checks.
// a deferred code object.
void JumpIfNotBothSmiUsingTypeInfo(Register left,
Register right,
Register scratch,
@@ -538,6 +538,15 @@ class CodeGenerator: public AstVisitor {
TypeInfo right_info,
DeferredCode* deferred);
// Emits code sequence that jumps to the label if the inputs
// are not both smis.
void JumpIfNotBothSmiUsingTypeInfo(Register left,
Register right,
Register scratch,
TypeInfo left_info,
TypeInfo right_info,
Label* on_non_smi);
// If possible, combine two constant smi values using op to produce
// a smi result, and push it on the virtual frame, all at compile time.
// Returns true if it succeeds. Otherwise it has no effect.


@@ -377,6 +377,12 @@ void MacroAssembler::AbortIfNotSmi(Register object) {
}
void MacroAssembler::AbortIfSmi(Register object) {
test(object, Immediate(kSmiTagMask));
Assert(not_equal, "Operand a smi");
}
void MacroAssembler::EnterFrame(StackFrame::Type type) {
push(ebp);
mov(ebp, Operand(esp));
@@ -1510,6 +1516,61 @@ void MacroAssembler::Abort(const char* msg) {
}
void MacroAssembler::JumpIfNotNumber(Register reg,
TypeInfo info,
Label* on_not_number) {
if (FLAG_debug_code) AbortIfSmi(reg);
if (!info.IsNumber()) {
cmp(FieldOperand(reg, HeapObject::kMapOffset),
Factory::heap_number_map());
j(not_equal, on_not_number);
}
}
void MacroAssembler::ConvertToInt32(Register dst,
Register source,
Register scratch,
TypeInfo info,
Label* on_not_int32) {
if (FLAG_debug_code) {
AbortIfSmi(source);
AbortIfNotNumber(source);
}
if (info.IsInteger32()) {
cvttsd2si(dst, FieldOperand(source, HeapNumber::kValueOffset));
} else {
Label done;
bool push_pop = (scratch.is(no_reg) && dst.is(source));
ASSERT(!scratch.is(source));
if (push_pop) {
push(dst);
scratch = dst;
}
if (scratch.is(no_reg)) scratch = dst;
cvttsd2si(scratch, FieldOperand(source, HeapNumber::kValueOffset));
cmp(scratch, 0x80000000u);
if (push_pop || dst.is(source)) {
j(not_equal, &done);
if (push_pop) {
pop(dst);
jmp(on_not_int32);
}
} else {
j(equal, on_not_int32);
}
bind(&done);
if (push_pop) {
add(Operand(esp), Immediate(kPointerSize)); // Pop.
}
if (!scratch.is(dst)) {
mov(dst, scratch);
}
}
}
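
The 0x80000000 comparison above works because cvttsd2si produces the "integer indefinite" value 0x80000000 whenever the double is out of int32 range (or NaN); that same bit pattern is the genuine minimum int32, which is why the declaration's comment says it also jumps on that value. A small sketch, assuming a GCC/Clang toolchain with SSE2:

#include <cstdint>
#include <cstdio>

int main() {
  double too_big = 4e9;  // does not fit in an int32
  int32_t result;
  __asm__("cvttsd2si %1, %0" : "=r"(result) : "x"(too_big));
  std::printf("0x%08x\n", static_cast<uint32_t>(result));  // prints 0x80000000
  return 0;
}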
void MacroAssembler::JumpIfInstanceTypeIsNotSequentialAscii(
Register instance_type,
Register scratch,


@@ -29,6 +29,7 @@
#define V8_IA32_MACRO_ASSEMBLER_IA32_H_
#include "assembler.h"
#include "type-info.h"
namespace v8 {
namespace internal {
@@ -225,12 +226,44 @@ class MacroAssembler: public Assembler {
sar(reg, kSmiTagSize);
}
// Modifies the register even if it does not contain a Smi!
void SmiUntag(Register reg, TypeInfo info, Label* non_smi) {
ASSERT(kSmiTagSize == 1);
sar(reg, kSmiTagSize);
if (!info.IsSmi()) {
ASSERT(kSmiTag == 0);
j(carry, non_smi);
}
}
// Modifies the register even if it does not contain a Smi!
void SmiUntag(Register reg, Label* is_smi) {
ASSERT(kSmiTagSize == 1);
sar(reg, kSmiTagSize);
ASSERT(kSmiTag == 0);
j(not_carry, is_smi);
}
// Assumes input is a heap object.
void JumpIfNotNumber(Register reg, TypeInfo info, Label* on_not_number);
// Assumes input is a heap number. Jumps on things out of range. Also jumps
// on the min negative int32. Ignores fractional parts.
void ConvertToInt32(Register dst,
Register src, // Can be the same as dst.
Register scratch, // Can be no_reg or dst, but not src.
TypeInfo info,
Label* on_not_int32);
// Abort execution if argument is not a number. Used in debug code.
void AbortIfNotNumber(Register object);
// Abort execution if argument is not a smi. Used in debug code.
void AbortIfNotSmi(Register object);
// Abort execution if argument is a smi. Used in debug code.
void AbortIfSmi(Register object);
// ---------------------------------------------------------------------------
// Exception handling
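
The SmiUntag overloads above fold the tag check into the untagging itself: with kSmiTagSize == 1 and kSmiTag == 0, sar shifts the tag bit into the carry flag, so carry set means the word was a tagged heap-object pointer and carry clear means it was a smi. The portable equivalent is just a test of the low bit, as in this standalone sketch:

#include <cassert>
#include <cstdint>

int main() {
  uint32_t smi = 7u << 1;               // smi-tagged 7: tag (low) bit is 0
  uint32_t heap_object = 0x12340u | 1;  // tagged pointer: tag bit is 1
  assert((smi & 1) == 0);               // sar clears carry: j(not_carry, is_smi)
  assert((heap_object & 1) == 1);       // sar sets carry:   j(carry, non_smi)
  assert(static_cast<int32_t>(smi) >> 1 == 7);  // the untagged value
  return 0;
}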