X64: Add an SHL optimization, fix a floating-point bug, fix xchg rax,r8 and printing of test ?ax, imm in disassembler.

Review URL: http://codereview.chromium.org/164399

git-svn-id: http://v8.googlecode.com/svn/branches/bleeding_edge@2673 ce2b1a6d-e550-0410-aec6-3dcde31c8c00
whesse@chromium.org 2009-08-13 08:00:04 +00:00
parent d35c815e94
commit d7474a61a8
2 changed files with 58 additions and 5 deletions

src/x64/codegen-x64.cc

@@ -5258,6 +5258,58 @@ void CodeGenerator::ConstantSmiBinaryOperation(Token::Value op,
       }
       break;
+    case Token::SHL:
+      if (reversed) {
+        Result constant_operand(value);
+        LikelySmiBinaryOperation(op, &constant_operand, operand,
+                                 overwrite_mode);
+      } else {
+        // Only the least significant 5 bits of the shift value are used.
+        // In the slow case, this masking is done inside the runtime call.
+        int shift_value = int_value & 0x1f;
+        operand->ToRegister();
+        if (shift_value == 0) {
+          // Spill operand so it can be overwritten in the slow case.
+          frame_->Spill(operand->reg());
+          DeferredInlineSmiOperation* deferred =
+              new DeferredInlineSmiOperation(op,
+                                             operand->reg(),
+                                             operand->reg(),
+                                             smi_value,
+                                             overwrite_mode);
+          __ testl(operand->reg(), Immediate(kSmiTagMask));
+          deferred->Branch(not_zero);
+          deferred->BindExit();
+          frame_->Push(operand);
+        } else {
+          // Use a fresh temporary for nonzero shift values.
+          Result answer = allocator()->Allocate();
+          ASSERT(answer.is_valid());
+          DeferredInlineSmiOperation* deferred =
+              new DeferredInlineSmiOperation(op,
+                                             answer.reg(),
+                                             operand->reg(),
+                                             smi_value,
+                                             overwrite_mode);
+          __ testl(operand->reg(), Immediate(kSmiTagMask));
+          deferred->Branch(not_zero);
+          __ movl(answer.reg(), operand->reg());
+          ASSERT(kSmiTag == 0);  // adjust code if not the case
+          // We do no shifts, only the Smi conversion, if shift_value is 1.
+          if (shift_value > 1) {
+            __ shll(answer.reg(), Immediate(shift_value - 1));
+          }
+          // Convert int result to Smi, checking that it is in int range.
+          ASSERT(kSmiTagSize == 1);  // adjust code if not the case
+          __ addl(answer.reg(), answer.reg());
+          deferred->Branch(overflow);
+          deferred->BindExit();
+          operand->Unuse();
+          frame_->Push(&answer);
+        }
+      }
+      break;
     case Token::BIT_OR:
     case Token::BIT_XOR:
     case Token::BIT_AND: {
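
The fast path above leans on the 32-bit smi encoding used here: a smi stores value << 1 with tag bit 0 equal to 0. Shifting the tagged word left by shift_value - 1 therefore yields the untagged 32-bit result, and the final addl doubles it back into a tagged smi while setting the overflow flag for results outside the smi range. A minimal C++ sketch of that arithmetic (SmiShiftLeft is a hypothetical name, not V8 code):

#include <cstdint>

// Returns true and stores the tagged result on success; false means the
// result no longer fits in a smi and the slow case would be taken.
bool SmiShiftLeft(int32_t tagged_smi, int shift_value, int32_t* result) {
  shift_value &= 0x1f;  // only the least significant 5 bits are used
  if (shift_value == 0) {
    *result = tagged_smi;  // zero-shift branch: push the operand unchanged
    return true;
  }
  uint32_t bits = static_cast<uint32_t>(tagged_smi);  // value << 1, tag 0
  if (shift_value > 1) {
    // Shifting the tagged word by (shift_value - 1) produces the untagged
    // 32-bit integer result, truncated exactly as shll would truncate it.
    bits <<= (shift_value - 1);
  }
  // 'addl reg, reg' equivalent: doubling re-tags the value, and signed
  // overflow here means the result is outside the smi range.
  int32_t answer = static_cast<int32_t>(bits);
  return !__builtin_add_overflow(answer, answer, result);
}

For example, with value 3 the tagged input is 6; a shift by 2 gives bits = 12 (the untagged 3 << 2), and the add produces 24, the tagged form of 12.
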
@@ -6013,6 +6065,8 @@ void Reference::SetValue(InitState init_state) {
         __ testl(key.reg(),
                  Immediate(static_cast<uint32_t>(kSmiTagMask | 0x80000000U)));
         deferred->Branch(not_zero);
+        // Ensure that the smi is zero-extended.  This is not guaranteed.
+        __ movl(key.reg(), key.reg());
         // Check that the receiver is not a smi.
         __ testl(receiver.reg(), Immediate(kSmiTagMask));
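
The two added lines work because on x86-64 every 32-bit operation zero-extends its destination register, so a 32-bit move of a register onto itself is a one-instruction way to clear bits 63:32 before the key is used in 64-bit address arithmetic. A C++ sketch of the effect (illustrative, not V8 code):

#include <cstdint>
#include <cstdio>

int main() {
  // A 32-bit smi key sitting in a 64-bit register whose upper half is
  // stale: nothing above guarantees those bits are zero.
  uint64_t key = 0xdeadbeef00000004;
  // Equivalent of '__ movl(key.reg(), key.reg())': a 32-bit write
  // zero-extends, clearing bits 63:32.
  key = static_cast<uint32_t>(key);
  std::printf("0x%016llx\n", (unsigned long long)key);  // 0x0000000000000004
  return 0;
}
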
@@ -7172,14 +7226,14 @@ void FloatingPointHelper::LoadFloatOperands(MacroAssembler* masm) {
   __ jmp(&done);
   __ bind(&load_smi_1);
-  __ sar(kScratchRegister, Immediate(kSmiTagSize));
+  __ sarl(kScratchRegister, Immediate(kSmiTagSize));
   __ push(kScratchRegister);
   __ fild_s(Operand(rsp, 0));
   __ pop(kScratchRegister);
   __ jmp(&done_load_1);
   __ bind(&load_smi_2);
-  __ sar(kScratchRegister, Immediate(kSmiTagSize));
+  __ sarl(kScratchRegister, Immediate(kSmiTagSize));
   __ push(kScratchRegister);
   __ fild_s(Operand(rsp, 0));
   __ pop(kScratchRegister);
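
This hunk is the floating-point bug from the commit title. The smi occupies only the low 32 bits of kScratchRegister and bits 63:32 may hold garbage; a 64-bit sar drags garbage bit 32 down into bit 31, which fild_s then reads as the sign bit of the 32-bit integer on the stack. The 32-bit sarl shifts within the low word only. A sketch of the difference (illustrative values, not V8 code):

#include <cstdint>
#include <cstdio>

int main() {
  // Tagged smi 3 (3 << 1 = 6) in the low word, garbage in the high word.
  uint64_t reg = 0xffffffff00000006;
  // 64-bit 'sar reg, 1': garbage bit 32 becomes bit 31 of the low word.
  int32_t with_sar = static_cast<int32_t>(static_cast<int64_t>(reg) >> 1);
  // 32-bit 'sarl reg, 1': the shift stays inside the low 32 bits.
  int32_t with_sarl = static_cast<int32_t>(reg) >> 1;
  std::printf("sar: %d  sarl: %d\n", with_sar, with_sarl);
  // Prints "sar: -2147483645  sarl: 3"; fild_s would load the left value
  // as a large negative number instead of 3.
  return 0;
}
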
@@ -7534,7 +7588,7 @@ void GenericBinaryOpStub::Generate(MacroAssembler* masm) {
     __ j(negative, &non_smi_result);
   }
   // Tag smi result and return.
-  ASSERT(kSmiTagSize == times_2);  // adjust code if not the case
+  ASSERT(kSmiTagSize == 1);  // adjust code if not the case
   __ lea(rax, Operand(rax, rax, times_1, kSmiTag));
   __ ret(2 * kPointerSize);
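
The old assertion happened to pass only by coincidence: in the usual x64 ScaleFactor encoding the enum stores the log2 of the scale, so times_2 has the value 1, the same as kSmiTagSize. The rewritten assert states the actual requirement, that tagging is a left shift by one, which the flag-preserving lea implements as rax + rax*1 + kSmiTag. A sketch of the coincidence (enum values assumed from the standard SIB-byte encoding, not copied from V8):

// Scale factors are log2-encoded to match the SIB byte of the instruction.
enum ScaleFactor { times_1 = 0, times_2 = 1, times_4 = 2, times_8 = 3 };
const int kSmiTag = 0;
const int kSmiTagSize = 1;

static_assert(kSmiTagSize == times_2, "the old assert: true by coincidence");
static_assert(kSmiTagSize == 1, "the new assert: the real requirement");

// What the lea actually computes, with no flags clobbered:
//   lea rax, [rax + rax*1 + kSmiTag]   =>   rax = 2 * rax + 0
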

src/x64/disassembler-x64.cc

@@ -105,7 +105,6 @@ static ByteMnemonic two_operands_instr[] = {
 static ByteMnemonic zero_operands_instr[] = {
   { 0xC3, UNSET_OP_ORDER, "ret" },
   { 0xC9, UNSET_OP_ORDER, "leave" },
-  { 0x90, UNSET_OP_ORDER, "nop" },
   { 0xF4, UNSET_OP_ORDER, "hlt" },
   { 0xCC, UNSET_OP_ORDER, "int3" },
   { 0x60, UNSET_OP_ORDER, "pushad" },
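
Removing the 0x90 entry is the xchg rax,r8 fix from the commit title: 0x90 by itself is xchg eax,eax, the canonical one-byte nop, but behind a REX.B prefix the implicit second operand becomes r8, so a table that unconditionally prints "nop" swallows the exchange. A sketch of the two decodings (byte patterns from the x86-64 manual; the helper is illustrative, not V8's decoder):

#include <cstdint>

// Decode a possibly REX-prefixed 0x90 the way the fixed disassembler must.
const char* Decode90(const uint8_t* code) {
  bool has_rex = (code[0] & 0xf0) == 0x40;        // 0x40..0x4f is a REX prefix
  bool rex_b = has_rex && (code[0] & 0x01) != 0;  // REX.B extends the register
  uint8_t opcode = has_rex ? code[1] : code[0];
  if (opcode != 0x90) return "?";
  // Without REX.B the operand is (e)ax itself: xchg eax,eax, i.e. nop.
  // With REX.B the operand is r8: for the bytes 0x49 0x90 this is
  // xchg rax,r8, which must not be printed as a nop.
  return rex_b ? "xchg rax,r8" : "nop";
}
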
@@ -1425,7 +1424,7 @@ int DisassemblerX64::InstructionDecode(v8::internal::Vector<char> out_buffer,
     default:
       UNREACHABLE();
   }
-  AppendToBuffer("test%c rax,0x%"V8_PTR_PREFIX"ux",
+  AppendToBuffer("test%c rax,0x%"V8_PTR_PREFIX"x",
                  operand_size_code(),
                  value);
   break;
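
The dropped 'u' matters because V8_PTR_PREFIX expands to a printf length modifier ("l" on LP64 targets is assumed for this sketch). "%lux" parses as %lu followed by a literal 'x', so the test immediate was printed in decimal with a stray trailing 'x'; "%lx" prints it in hex as intended:

#include <cstdio>
#define V8_PTR_PREFIX "l"  // assumption: the LP64 definition

int main() {
  unsigned long value = 0xff;
  // Old format string: %lu consumes value, then a literal 'x' is printed.
  std::printf("test%c rax,0x%" V8_PTR_PREFIX "ux\n", 'q', value);  // testq rax,0x255x
  // Fixed format string: %lx prints the immediate in hex.
  std::printf("test%c rax,0x%" V8_PTR_PREFIX "x\n", 'q', value);   // testq rax,0xff
  return 0;
}
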