Fixed a number of issues in the x64 crankshaft port:

- Don't use SmiSub when overflow can occur. It asserts that overflow
  does not happen.

- Actually use CompareICs and signal to crankshaft whether or not smi
  code was inlined.

- Fix bug in CmpI where 64 bits were compared instead of 32 bits.

- Implement Throw, DeferredStackCheck, and StoreKeyedFastElement in
  the lithium backend.

BUG=
TEST=

Review URL: http://codereview.chromium.org/6312193

git-svn-id: http://v8.googlecode.com/svn/branches/bleeding_edge@6669 ce2b1a6d-e550-0410-aec6-3dcde31c8c00
Author: ager@chromium.org
Date:   2011-02-08 07:49:59 +00:00
Parent: 2926151fdc
Commit: 52cfd6ab16

9 changed files with 185 additions and 43 deletions

src/ia32/full-codegen-ia32.cc

@@ -3949,8 +3949,7 @@ void FullCodeGenerator::VisitCountOperation(CountOperation* expr) {
   // Call stub for +1/-1.
   __ mov(edx, eax);
   __ mov(eax, Immediate(Smi::FromInt(1)));
-  TypeRecordingBinaryOpStub stub(expr->binary_op(),
-                                 NO_OVERWRITE);
+  TypeRecordingBinaryOpStub stub(expr->binary_op(), NO_OVERWRITE);
   EmitCallIC(stub.GetCode(), &patch_site);
   __ bind(&done);

src/ia32/lithium-codegen-ia32.cc

@@ -2035,7 +2035,10 @@ void LCodeGen::DoLoadKeyedFastElement(LLoadKeyedFastElement* instr) {
   ASSERT(result.is(elements));

   // Load the result.
-  __ mov(result, FieldOperand(elements, key, times_4, FixedArray::kHeaderSize));
+  __ mov(result, FieldOperand(elements,
+                              key,
+                              times_pointer_size,
+                              FixedArray::kHeaderSize));

   // Check for the hole value.
   __ cmp(result, Factory::the_hole_value());
@@ -2661,13 +2664,20 @@ void LCodeGen::DoStoreKeyedFastElement(LStoreKeyedFastElement* instr) {
         ToInteger32(const_operand) * kPointerSize + FixedArray::kHeaderSize;
     __ mov(FieldOperand(elements, offset), value);
   } else {
-    __ mov(FieldOperand(elements, key, times_4, FixedArray::kHeaderSize),
+    __ mov(FieldOperand(elements,
+                        key,
+                        times_pointer_size,
+                        FixedArray::kHeaderSize),
            value);
   }

   if (instr->hydrogen()->NeedsWriteBarrier()) {
     // Compute address of modified element and store it into key register.
-    __ lea(key, FieldOperand(elements, key, times_4, FixedArray::kHeaderSize));
+    __ lea(key,
+           FieldOperand(elements,
+                        key,
+                        times_pointer_size,
+                        FixedArray::kHeaderSize));
     __ RecordWrite(elements, key, value);
   }
 }
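
On ia32 this is behavior-preserving — times_pointer_size equals times_4 when kPointerSize is 4 — but it states the intent of the scaled addressing and keeps the ia32 and x64 code parallel. A sketch of the address arithmetic that FieldOperand(elements, key, times_pointer_size, FixedArray::kHeaderSize) encodes, assuming the usual V8 ia32 layout of the time (1-byte heap-object tag, two-word FixedArray header):

#include <cstdint>

// Illustrative only; these constants are assumptions, not V8 definitions.
static const int kPointerSize = 4;                          // ia32
static const int kHeapObjectTag = 1;                        // tagged pointers
static const int kFixedArrayHeaderSize = 2 * kPointerSize;  // map + length

// FieldOperand subtracts the tag; the scaled index selects the element.
uintptr_t ElementAddress(uintptr_t tagged_elements, uintptr_t key) {
  return tagged_elements - kHeapObjectTag + kFixedArrayHeaderSize +
         key * kPointerSize;
}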

src/messages.js

@@ -316,6 +316,7 @@ Script.prototype.lineFromPosition = function(position) {
       return i;
     }
   }
+
   return -1;
 }

src/x64/assembler-x64.h

@@ -565,6 +565,8 @@ class Assembler : public Malloced {
   // One byte opcode for test eax,0xXXXXXXXX.
   static const byte kTestEaxByte = 0xA9;
+  // One byte opcode for test al, 0xXX.
+  static const byte kTestAlByte = 0xA8;

   // ---------------------------------------------------------------------------
   // Code generation
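
The new constant mirrors the Intel encodings: 0xA9 is the one-byte opcode for test eax, imm32 and 0xA8 the one-byte opcode for test al, imm8. That lets the IC machinery classify the instruction following a call site by reading a single byte. A sketch (our helper, not V8 code):

enum TestKind { kTestAl, kTestEax, kOther };

// Classify the instruction after a call site from its first opcode byte.
TestKind ClassifyTest(const unsigned char* instr) {
  if (*instr == 0xA8) return kTestAl;   // test al, imm8: patch info present
  if (*instr == 0xA9) return kTestEax;  // test eax, imm32
  return kOther;                        // e.g. 0x90 nop: nothing inlined
}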

src/x64/code-stubs-x64.cc

@@ -4627,10 +4627,10 @@ void ICCompareStub::GenerateSmis(MacroAssembler* masm) {
   if (GetCondition() == equal) {
     // For equality we do not care about the sign of the result.
-    __ SmiSub(rax, rax, rdx);
+    __ subq(rax, rdx);
   } else {
     NearLabel done;
-    __ SmiSub(rdx, rdx, rax);
+    __ subq(rdx, rax);
     __ j(no_overflow, &done);
     // Correct sign of result in case of overflow.
     __ SmiNot(rdx, rdx);
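
This is the first bullet of the commit message: SmiSub asserts that no overflow occurs, so the stub uses a raw subq and handles overflow itself. For a comparison only the sign of the difference matters, and on overflow the sign is exactly wrong, so SmiNot (a bitwise NOT) restores a correctly-signed, non-zero value. A self-contained illustration, assuming the x64 smi layout of the era (32-bit payload shifted into the upper word):

#include <cassert>
#include <cstdint>

int main() {
  int64_t max_smi = static_cast<int64_t>(INT32_MAX) << 32;
  int64_t min_smi = static_cast<int64_t>(INT32_MIN) << 32;
  // min_smi - max_smi overflows 64 bits: the true result is negative, but
  // the wrapped value subq leaves in the register is positive.
  uint64_t wrapped =
      static_cast<uint64_t>(min_smi) - static_cast<uint64_t>(max_smi);
  int64_t in_register = static_cast<int64_t>(wrapped);
  assert(in_register > 0);   // wrong sign after overflow...
  assert(~in_register < 0);  // ...flipped back by the SmiNot fix-up.
  return 0;
}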

src/x64/full-codegen-x64.cc

@@ -43,6 +43,58 @@ namespace internal {

 #define __ ACCESS_MASM(masm_)

+
+class JumpPatchSite BASE_EMBEDDED {
+ public:
+  explicit JumpPatchSite(MacroAssembler* masm)
+      : masm_(masm) {
+#ifdef DEBUG
+    info_emitted_ = false;
+#endif
+  }
+
+  ~JumpPatchSite() {
+    ASSERT(patch_site_.is_bound() == info_emitted_);
+  }
+
+  void EmitJumpIfNotSmi(Register reg, NearLabel* target) {
+    __ testb(reg, Immediate(kSmiTagMask));
+    EmitJump(not_carry, target);  // Always taken before patched.
+  }
+
+  void EmitJumpIfSmi(Register reg, NearLabel* target) {
+    __ testb(reg, Immediate(kSmiTagMask));
+    EmitJump(carry, target);  // Never taken before patched.
+  }
+
+  void EmitPatchInfo() {
+    int delta_to_patch_site = masm_->SizeOfCodeGeneratedSince(&patch_site_);
+    ASSERT(is_int8(delta_to_patch_site));
+    __ testl(rax, Immediate(delta_to_patch_site));
+#ifdef DEBUG
+    info_emitted_ = true;
+#endif
+  }
+
+  bool is_bound() const { return patch_site_.is_bound(); }
+
+ private:
+  // jc will be patched with jz, jnc will become jnz.
+  void EmitJump(Condition cc, NearLabel* target) {
+    ASSERT(!patch_site_.is_bound() && !info_emitted_);
+    ASSERT(cc == carry || cc == not_carry);
+    __ bind(&patch_site_);
+    __ j(cc, target);
+  }
+
+  MacroAssembler* masm_;
+  Label patch_site_;
+#ifdef DEBUG
+  bool info_emitted_;
+#endif
+};
+
+
 // Generate code for a JS function. On entry to the function the receiver
 // and arguments have been pushed on the stack left to right, with the
 // return address on top of them. The actual argument count matches the
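
The trick in JumpPatchSite: test always clears the carry flag, so the jc from EmitJumpIfSmi is never taken and the jnc from EmitJumpIfNotSmi always is — the inlined smi path stays dead until the compare IC enables it. Patching rewrites only the condition byte (jc becomes jz, jnc becomes jnz), at which point the same testb(reg, kSmiTagMask) becomes a live smi check: the smi tag is 0, so ZF is set exactly for smis. A sketch of that patch step; the real patcher lives in the IC code, not in this diff:

// Short Jcc opcodes: 0x72 jc, 0x73 jnc, 0x74 jz, 0x75 jnz.
void PatchInlinedSmiCheck(unsigned char* jcc_opcode) {
  if (*jcc_opcode == 0x72) {         // jc  -> jz  (EmitJumpIfSmi site)
    *jcc_opcode = 0x74;
  } else if (*jcc_opcode == 0x73) {  // jnc -> jnz (EmitJumpIfNotSmi site)
    *jcc_opcode = 0x75;
  }
}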
@@ -728,21 +780,25 @@ void FullCodeGenerator::VisitSwitchStatement(SwitchStatement* stmt) {
     // Perform the comparison as if via '==='.
     __ movq(rdx, Operand(rsp, 0));  // Switch value.
     bool inline_smi_code = ShouldInlineSmiCase(Token::EQ_STRICT);
+    JumpPatchSite patch_site(masm_);
     if (inline_smi_code) {
-      Label slow_case;
-      __ JumpIfNotBothSmi(rdx, rax, &slow_case);
-      __ SmiCompare(rdx, rax);
+      NearLabel slow_case;
+      __ movq(rcx, rdx);
+      __ or_(rcx, rax);
+      patch_site.EmitJumpIfNotSmi(rcx, &slow_case);
+      __ cmpq(rdx, rax);
       __ j(not_equal, &next_test);
       __ Drop(1);  // Switch value is no longer needed.
       __ jmp(clause->body_target()->entry_label());
       __ bind(&slow_case);
     }

-    CompareFlags flags = inline_smi_code
-        ? NO_SMI_COMPARE_IN_STUB
-        : NO_COMPARE_FLAGS;
-    CompareStub stub(equal, true, flags);
-    __ CallStub(&stub);
+    // Record position before stub call for type feedback.
+    SetSourcePosition(clause->position());
+    Handle<Code> ic = CompareIC::GetUninitialized(Token::EQ_STRICT);
+    EmitCallIC(ic, &patch_site);
     __ testq(rax, rax);
     __ j(not_equal, &next_test);
     __ Drop(1);  // Switch value is no longer needed.
@@ -1522,16 +1578,17 @@ void FullCodeGenerator::EmitInlineSmiBinaryOp(Expression* expr,
   // Do combined smi check of the operands. Left operand is on the
   // stack (popped into rdx). Right operand is in rax but moved into
   // rcx to make the shifts easier.
-  Label done, stub_call, smi_case;
+  NearLabel done, stub_call, smi_case;
   __ pop(rdx);
   __ movq(rcx, rax);
-  Condition smi = masm()->CheckBothSmi(rdx, rax);
-  __ j(smi, &smi_case);
+  __ or_(rax, rdx);
+  JumpPatchSite patch_site(masm_);
+  patch_site.EmitJumpIfSmi(rax, &smi_case);

   __ bind(&stub_call);
-  TypeRecordingBinaryOpStub stub(op, mode);
   __ movq(rax, rcx);
-  __ CallStub(&stub);
+  TypeRecordingBinaryOpStub stub(op, mode);
+  EmitCallIC(stub.GetCode(), &patch_site);
   __ jmp(&done);

   __ bind(&smi_case);
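
The movq/or_ pair replaces CheckBothSmi with a single patchable test: with a smi tag of 0 in the low bit, the OR of two words has a clear tag bit exactly when both operands are smis. (rax is clobbered by the or_, which is why the right operand is saved in rcx first and restored before the stub call.) The same check in plain C++:

#include <cstdint>

const intptr_t kSmiTagMask = 1;  // low bit: 0 = smi, 1 = heap object

// One test instead of two: (a | b) has a clear tag bit iff both do.
bool BothSmi(intptr_t a, intptr_t b) {
  return ((a | b) & kSmiTagMask) == 0;
}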
@@ -3197,7 +3254,9 @@ void FullCodeGenerator::VisitCountOperation(CountOperation* expr) {
   }

   // Inline smi case if we are in a loop.
-  Label stub_call, done;
+  NearLabel stub_call, done;
+  JumpPatchSite patch_site(masm_);
   if (ShouldInlineSmiCase(expr->op())) {
     if (expr->op() == Token::INC) {
       __ SmiAddConstant(rax, rax, Smi::FromInt(1));
@@ -3207,8 +3266,7 @@ void FullCodeGenerator::VisitCountOperation(CountOperation* expr) {
     __ j(overflow, &stub_call);
     // We could eliminate this smi check if we split the code at
     // the first smi check before calling ToNumber.
-    is_smi = masm_->CheckSmi(rax);
-    __ j(is_smi, &done);
+    patch_site.EmitJumpIfSmi(rax, &done);

     __ bind(&stub_call);
     // Call stub. Undo operation first.
@@ -3230,9 +3288,9 @@ void FullCodeGenerator::VisitCountOperation(CountOperation* expr) {
     __ movq(rdx, rax);
     __ Move(rax, Smi::FromInt(1));
   }
-  __ CallStub(&stub);
+  EmitCallIC(stub.GetCode(), &patch_site);
   __ bind(&done);

   // Store the value returned in rax.
   switch (assign_type) {
     case VARIABLE:
@@ -3500,19 +3558,21 @@ void FullCodeGenerator::VisitCompareOperation(CompareOperation* expr) {
       }

       bool inline_smi_code = ShouldInlineSmiCase(op);
+      JumpPatchSite patch_site(masm_);
       if (inline_smi_code) {
-        Label slow_case;
-        __ JumpIfNotBothSmi(rax, rdx, &slow_case);
-        __ SmiCompare(rdx, rax);
+        NearLabel slow_case;
+        __ movq(rcx, rdx);
+        __ or_(rcx, rax);
+        patch_site.EmitJumpIfNotSmi(rcx, &slow_case);
+        __ cmpq(rdx, rax);
         Split(cc, if_true, if_false, NULL);
         __ bind(&slow_case);
       }

-      CompareFlags flags = inline_smi_code
-          ? NO_SMI_COMPARE_IN_STUB
-          : NO_COMPARE_FLAGS;
-      CompareStub stub(cc, strict, flags);
-      __ CallStub(&stub);
+      // Record position and call the compare IC.
+      SetSourcePosition(expr->position());
+      Handle<Code> ic = CompareIC::GetUninitialized(op);
+      EmitCallIC(ic, &patch_site);

       PrepareForBailoutBeforeSplit(TOS_REG, true, if_true, if_false);
       __ testq(rax, rax);
@@ -3617,6 +3677,16 @@ void FullCodeGenerator::EmitCallIC(Handle<Code> ic, RelocInfo::Mode mode) {
 }


+void FullCodeGenerator::EmitCallIC(Handle<Code> ic, JumpPatchSite* patch_site) {
+  __ call(ic, RelocInfo::CODE_TARGET);
+  if (patch_site != NULL && patch_site->is_bound()) {
+    patch_site->EmitPatchInfo();
+  } else {
+    __ nop();  // Signals no inlined code.
+  }
+}
+
+
 void FullCodeGenerator::StoreToFrameField(int frame_offset, Register value) {
   ASSERT(IsAligned(frame_offset, kPointerSize));
   __ movq(Operand(rbp, frame_offset), value);
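
Together with JumpPatchSite::EmitPatchInfo, this overload defines the signal mentioned in the commit message: directly after the IC call sits either a one-byte nop (no inlined smi code) or a test whose 8-bit immediate is the distance back to the patchable jump. EmitPatchInfo's testl(rax, Immediate(int8)) is encodable in the short test al, imm8 form — which is what the kTestAlByte check added above looks for — and since test only sets flags that nothing reads, the patch info is harmless to execute. A hedged sketch of the consuming side (names are ours; the real lookup lives in the IC machinery):

// Byte layout after the IC call returns:
//   a8 NN   test al, NN   ; NN = bytes back to the patchable jc/jnc
//   90      nop           ; or: no smi code was inlined
unsigned char* FindInlinedSmiCheck(unsigned char* after_call) {
  if (after_call[0] != 0xA8) return 0;   // nop: nothing to patch
  unsigned char delta = after_call[1];   // imm8 operand of the test
  return after_call - delta;             // start of the jump to rewrite
}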

src/x64/ic-x64.cc

@@ -1673,11 +1673,23 @@ Condition CompareIC::ComputeCondition(Token::Value op) {
 }


+static bool HasInlinedSmiCode(Address address) {
+  // The address of the instruction following the call.
+  Address test_instruction_address =
+      address + Assembler::kCallTargetAddressOffset;
+
+  // If the instruction following the call is not a test al, nothing
+  // was inlined.
+  return *test_instruction_address == Assembler::kTestAlByte;
+}
+
+
 void CompareIC::UpdateCaches(Handle<Object> x, Handle<Object> y) {
   HandleScope scope;
   Handle<Code> rewritten;
   State previous_state = GetState();
-  State state = TargetState(previous_state, false, x, y);
+  State state = TargetState(previous_state, HasInlinedSmiCode(address()), x, y);
   if (state == GENERIC) {
     CompareStub stub(GetCondition(), strict(), NO_COMPARE_FLAGS);
     rewritten = stub.GetCode();
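
HasInlinedSmiCode closes the loop on the second commit-message bullet: the compare IC only specializes to the smi state when the call site actually contains an inlined, patchable smi check. A deliberately simplified sketch of the decision this enables — the real transition table is TargetState in ic.cc and has more states:

enum State { UNINITIALIZED, SMIS, HEAP_NUMBERS, GENERIC };

// Illustrative only: never pick SMIS without inlined smi code to patch.
State NextState(State state, bool has_inlined_smi_code,
                bool both_smi, bool both_heap_number) {
  if (state == UNINITIALIZED && both_smi) {
    return has_inlined_smi_code ? SMIS : GENERIC;
  }
  if (state == UNINITIALIZED && both_heap_number) return HEAP_NUMBERS;
  return GENERIC;
}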

src/x64/lithium-codegen-x64.cc

@@ -810,7 +810,13 @@ void LCodeGen::DoBitNotI(LBitNotI* instr) {

 void LCodeGen::DoThrow(LThrow* instr) {
-  Abort("Unimplemented: %s", "DoThrow");
+  __ push(ToRegister(instr->InputAt(0)));
+  CallRuntime(Runtime::kThrow, 1, instr);
+
+  if (FLAG_debug_code) {
+    Comment("Unreachable code.");
+    __ int3();
+  }
 }
@@ -963,7 +969,11 @@ void LCodeGen::EmitGoto(int block, LDeferredCode* deferred_stack_check) {

 void LCodeGen::DoDeferredStackCheck(LGoto* instr) {
-  Abort("Unimplemented: %s", "DoDeferredStackCheck");
+  __ Pushad();
+  __ CallRuntimeSaveDoubles(Runtime::kStackGuard);
+  RecordSafepointWithRegisters(
+      instr->pointer_map(), 0, Safepoint::kNoDeoptimizationIndex);
+  __ Popad();
 }
@@ -1022,9 +1032,9 @@ void LCodeGen::EmitCmpI(LOperand* left, LOperand* right) {
       __ cmpl(ToOperand(left), Immediate(value));
     }
   } else if (right->IsRegister()) {
-    __ cmpq(ToRegister(left), ToRegister(right));
+    __ cmpl(ToRegister(left), ToRegister(right));
   } else {
-    __ cmpq(ToRegister(left), ToOperand(right));
+    __ cmpl(ToRegister(left), ToOperand(right));
   }
 }
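
This is the CmpI fix from the commit message: Crankshaft int32 values only guarantee their low 32 bits, so a 64-bit compare can see stale upper halves and disagree with the 32-bit result. A minimal demonstration of two registers that hold the same int32 yet differ under a 64-bit view:

#include <cassert>
#include <cstdint>

int main() {
  // Both "registers" hold int32 -1, reached by different paths:
  uint64_t a = 0xFFFFFFFFFFFFFFFFull;  // e.g. after a sign-extending load
  uint64_t b = 0x00000000FFFFFFFFull;  // e.g. after a 32-bit move
  assert(static_cast<int32_t>(a) == static_cast<int32_t>(b));  // cmpl: equal
  assert(a != b);                                // cmpq: spuriously not equal
  return 0;
}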
@@ -1869,7 +1879,33 @@ void LCodeGen::DoBoundsCheck(LBoundsCheck* instr) {

 void LCodeGen::DoStoreKeyedFastElement(LStoreKeyedFastElement* instr) {
-  Abort("Unimplemented: %s", "DoStoreKeyedFastElement");
+  Register value = ToRegister(instr->value());
+  Register elements = ToRegister(instr->object());
+  Register key = instr->key()->IsRegister() ? ToRegister(instr->key()) : no_reg;
+
+  // Do the store.
+  if (instr->key()->IsConstantOperand()) {
+    ASSERT(!instr->hydrogen()->NeedsWriteBarrier());
+    LConstantOperand* const_operand = LConstantOperand::cast(instr->key());
+    int offset =
+        ToInteger32(const_operand) * kPointerSize + FixedArray::kHeaderSize;
+    __ movq(FieldOperand(elements, offset), value);
+  } else {
+    __ movq(FieldOperand(elements,
+                         key,
+                         times_pointer_size,
+                         FixedArray::kHeaderSize),
+            value);
+  }
+
+  if (instr->hydrogen()->NeedsWriteBarrier()) {
+    // Compute address of modified element and store it into key register.
+    __ lea(key, FieldOperand(elements,
+                             key,
+                             times_pointer_size,
+                             FixedArray::kHeaderSize));
+    __ RecordWrite(elements, key, value);
+  }
 }
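
The lea/RecordWrite pair is the standard generational write-barrier shape: the store may create an old-to-new pointer, so the slot address — computed into key, which is why key must be clobberable — is recorded for the next scavenge. A toy model of the invariant, with our own names (not V8's heap API):

#include <unordered_set>

// Toy generational heap: remember old->new slots for the scavenger.
struct MiniHeap {
  const char* new_space_start;
  const char* new_space_end;
  std::unordered_set<void**> remembered_slots;

  bool InNewSpace(const void* p) const {
    const char* c = static_cast<const char*>(p);
    return c >= new_space_start && c < new_space_end;
  }
};

void StoreWithWriteBarrier(MiniHeap* heap, void** slot, void* value) {
  *slot = value;  // the movq above
  // Only old->new pointers need remembering; that is RecordWrite's job.
  if (!heap->InNewSpace(slot) && heap->InNewSpace(value)) {
    heap->remembered_slots.insert(slot);
  }
}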

src/x64/lithium-x64.cc

@@ -1439,8 +1439,8 @@ LInstruction* LChunkBuilder::DoAbnormalExit(HAbnormalExit* instr) {

 LInstruction* LChunkBuilder::DoThrow(HThrow* instr) {
-  Abort("Unimplemented: %s", "DoThrow");
-  return NULL;
+  LOperand* value = UseFixed(instr->value(), rax);
+  return MarkAsCall(new LThrow(value), instr);
 }
@@ -1640,8 +1640,20 @@ LInstruction* LChunkBuilder::DoLoadKeyedGeneric(HLoadKeyedGeneric* instr) {

 LInstruction* LChunkBuilder::DoStoreKeyedFastElement(
     HStoreKeyedFastElement* instr) {
-  Abort("Unimplemented: %s", "DoStoreKeyedFastElement");
-  return NULL;
+  bool needs_write_barrier = instr->NeedsWriteBarrier();
+  ASSERT(instr->value()->representation().IsTagged());
+  ASSERT(instr->object()->representation().IsTagged());
+  ASSERT(instr->key()->representation().IsInteger32());
+
+  LOperand* obj = UseTempRegister(instr->object());
+  LOperand* val = needs_write_barrier
+      ? UseTempRegister(instr->value())
+      : UseRegisterAtStart(instr->value());
+  LOperand* key = needs_write_barrier
+      ? UseTempRegister(instr->key())
+      : UseRegisterOrConstantAtStart(instr->key());
+  return AssignEnvironment(new LStoreKeyedFastElement(obj, key, val));
 }
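
The operand constraints mirror what the code generator does with each value: with a write barrier, key is overwritten by the lea and RecordWrite clobbers its inputs, so every operand needs a private register; without one, cheaper constraints suffice and a constant key folds straight into the addressing mode. Paraphrased with our own enum (not the Lithium API):

enum Constraint {
  kTempRegister,              // private, clobberable register
  kRegisterAtStart,           // may alias others; only read at the start
  kRegisterOrConstantAtStart  // as above, or folded as a constant
};

struct StoreKeyedConstraints { Constraint object, value, key; };

StoreKeyedConstraints ChooseConstraints(bool needs_write_barrier) {
  StoreKeyedConstraints c;
  c.object = kTempRegister;  // base must survive until the barrier is done
  c.value = needs_write_barrier ? kTempRegister : kRegisterAtStart;
  c.key = needs_write_barrier ? kTempRegister : kRegisterOrConstantAtStart;
  return c;
}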