ARM: Add support for bitwise AND, OR and XOR to the type recording binary op stub.

Review URL: http://codereview.chromium.org/6250126

git-svn-id: http://v8.googlecode.com/svn/branches/bleeding_edge@6631 ce2b1a6d-e550-0410-aec6-3dcde31c8c00
sgjesse@chromium.org 2011-02-04 10:52:19 +00:00
parent b254d727a6
commit 8d4e0bb39c
6 changed files with 308 additions and 102 deletions

View File

@ -396,6 +396,19 @@ class FloatingPointHelper : public AllStatic {
Register scratch1,
Register scratch2,
Label* not_number);
// Loads the number from object into dst as a 32-bit integer if possible. If
// the object is not a 32-bit integer, control continues at the label
// not_int32. If VFP is supported, double_scratch is used but not scratch2.
static void LoadNumberAsInteger(MacroAssembler* masm,
Register object,
Register dst,
Register heap_number_map,
Register scratch1,
Register scratch2,
DwVfpRegister double_scratch,
Label* not_int32);
private:
static void LoadNumber(MacroAssembler* masm,
FloatingPointHelper::Destination destination,
@ -461,15 +474,21 @@ void FloatingPointHelper::LoadOperands(
void FloatingPointHelper::LoadNumber(MacroAssembler* masm,
Destination destination,
Register object,
DwVfpRegister dst,
Register dst1,
Register dst2,
Register heap_number_map,
Register scratch1,
Register scratch2,
Label* not_number) {
if (FLAG_debug_code) {
__ AbortIfNotRootValue(heap_number_map,
Heap::kHeapNumberMapRootIndex,
"HeapNumberMap register clobbered.");
}
Label is_smi, done;
__ JumpIfSmi(object, &is_smi);
@ -514,6 +533,34 @@ void FloatingPointHelper::LoadNumber(MacroAssembler* masm,
}
void FloatingPointHelper::LoadNumberAsInteger(MacroAssembler* masm,
Register object,
Register dst,
Register heap_number_map,
Register scratch1,
Register scratch2,
DwVfpRegister double_scratch,
Label* not_int32) {
if (FLAG_debug_code) {
__ AbortIfNotRootValue(heap_number_map,
Heap::kHeapNumberMapRootIndex,
"HeapNumberMap register clobbered.");
}
Label is_smi, done;
__ JumpIfSmi(object, &is_smi);
__ ldr(scratch1, FieldMemOperand(object, HeapNumber::kMapOffset));
__ cmp(scratch1, heap_number_map);
__ b(ne, not_int32);
__ ConvertToInt32(
object, dst, scratch1, scratch2, double_scratch, not_int32);
__ jmp(&done);
__ bind(&is_smi);
__ SmiUntag(dst, object);
__ bind(&done);
}
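The helper above boils down to: untag a smi directly; otherwise require a heap number and convert its double payload, branching to not_int32 when the value falls outside int32 range. A hedged C++ sketch of those semantics — TaggedValue and all names below are illustrative stand-ins, not V8 API, and the range handling only approximates what ConvertToInt32 enforces:

#include <cstdint>
#include <optional>

// Illustrative model of a tagged ARM value: smis carry a 0 low tag bit.
struct TaggedValue {
  bool is_smi;
  int32_t smi_value;   // untagged payload when is_smi
  bool is_heap_number;
  double heap_number;  // payload when is_heap_number
};

// Returns nullopt where the generated code branches to not_int32.
std::optional<int32_t> LoadNumberAsIntegerSketch(const TaggedValue& v) {
  if (v.is_smi) return v.smi_value;            // SmiUntag fast path
  if (!v.is_heap_number) return std::nullopt;  // map check fails
  double d = v.heap_number;
  if (!(d > -2147483649.0 && d < 2147483648.0)) {
    return std::nullopt;                       // out of int32 range (or NaN)
  }
  return static_cast<int32_t>(d);              // truncates toward zero
}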
// See comment for class.
void WriteInt32ToHeapNumberStub::Generate(MacroAssembler* masm) {
Label max_negative_int;
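WriteInt32ToHeapNumberStub, reused later for the non-VFP fallback, has to assemble the IEEE-754 bit pattern of an int32-valued double using integer instructions only. A hedged sketch of that conversion — my own formulation of the math, not the stub's actual register sequence:

#include <cstdint>

// Builds the IEEE-754 double bits for an int32 with integer ops only.
uint64_t Int32ToDoubleBits(int32_t v) {
  if (v == 0) return 0;  // +0.0; zero has no leading 1 bit to normalize
  uint64_t sign = v < 0 ? (1ULL << 63) : 0;
  uint32_t mag = v < 0 ? 0u - static_cast<uint32_t>(v)
                       : static_cast<uint32_t>(v);
  int top = 31;
  while (!(mag & (1u << top))) top--;  // position of the leading 1 bit
  // Shift the magnitude so the leading 1 lands at bit 52, then mask it
  // off: it is implicit in the IEEE-754 encoding.
  uint64_t mantissa =
      (static_cast<uint64_t>(mag) << (52 - top)) & ((1ULL << 52) - 1);
  uint64_t exponent = static_cast<uint64_t>(1023 + top) << 52;
  return sign | exponent | mantissa;
}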
@ -1676,7 +1723,7 @@ void GenericBinaryOpStub::HandleNonSmiBitwiseOp(MacroAssembler* masm,
__ ldr(r4, FieldMemOperand(lhs, HeapNumber::kMapOffset));
__ cmp(r4, heap_number_map);
__ b(ne, &slow);
__ ConvertToInt32(lhs, r3, r5, r4, d0, &slow);
__ jmp(&done_checking_lhs);
__ bind(&lhs_is_smi);
__ mov(r3, Operand(lhs, ASR, 1));
@ -1687,7 +1734,7 @@ void GenericBinaryOpStub::HandleNonSmiBitwiseOp(MacroAssembler* masm,
__ ldr(r4, FieldMemOperand(rhs, HeapNumber::kMapOffset));
__ cmp(r4, heap_number_map);
__ b(ne, &slow);
__ ConvertToInt32(rhs, r2, r5, r4, d0, &slow);
__ jmp(&done_checking_rhs);
__ bind(&rhs_is_smi);
__ mov(r2, Operand(rhs, ASR, 1));
@ -2529,6 +2576,18 @@ void TypeRecordingBinaryOpStub::GenerateSmiSmiOperation(
__ and_(right, left, Operand(scratch1));
__ Ret();
break;
case Token::BIT_OR:
__ orr(right, left, Operand(right));
__ Ret();
break;
case Token::BIT_AND:
__ and_(right, left, Operand(right));
__ Ret();
break;
case Token::BIT_XOR:
__ eor(right, left, Operand(right));
__ Ret();
break;
default:
UNREACHABLE();
}
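The three new smi-smi cases need no untagging: with V8's ARM smi encoding (value shifted left by one, tag bit 0) both operands have a zero tag bit, so orr, and and eor produce a correctly tagged smi directly. A small self-contained check of that invariant, with SmiTag/SmiUntag as simplified stand-ins for the real macros:

#include <cassert>
#include <cstdint>

int32_t SmiTag(int32_t v) { return v * 2; }    // value << 1, tag bit 0
int32_t SmiUntag(int32_t s) { return s / 2; }  // exact: smis are even

int main() {
  int32_t a = SmiTag(5), b = SmiTag(3);
  assert(SmiUntag(a | b) == (5 | 3));  // orr(right, left, Operand(right))
  assert(SmiUntag(a & b) == (5 & 3));  // and_(right, left, Operand(right))
  assert(SmiUntag(a ^ b) == (5 ^ 3));  // eor(right, left, Operand(right))
  return 0;
}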
@ -2545,90 +2604,179 @@ void TypeRecordingBinaryOpStub::GenerateFPOperation(MacroAssembler* masm,
Register scratch1 = r7;
Register scratch2 = r9;
ASSERT(smi_operands || (not_numbers != NULL));
if (smi_operands && FLAG_debug_code) {
__ AbortIfNotSmi(left);
__ AbortIfNotSmi(right);
}
Register heap_number_map = r6;
__ LoadRoot(heap_number_map, Heap::kHeapNumberMapRootIndex);
switch (op_) {
case Token::ADD:
case Token::SUB:
case Token::MUL:
case Token::DIV:
case Token::MOD: {
// Load left and right operands into d6 and d7 or r0/r1 and r2/r3
// depending on whether VFP3 is available or not.
FloatingPointHelper::Destination destination =
CpuFeatures::IsSupported(VFP3) && op_ != Token::MOD ?
FloatingPointHelper::kVFPRegisters :
FloatingPointHelper::kCoreRegisters;
// Allocate new heap number for result.
Register result = r5;
__ AllocateHeapNumber(
result, scratch1, scratch2, heap_number_map, gc_required);
// Load the operands.
if (smi_operands) {
FloatingPointHelper::LoadSmis(masm, destination, scratch1, scratch2);
} else {
FloatingPointHelper::LoadOperands(masm,
destination,
heap_number_map,
scratch1,
scratch2,
not_numbers);
}
// Calculate the result.
if (destination == FloatingPointHelper::kVFPRegisters) {
// Using VFP registers:
// d6: Left value
// d7: Right value
CpuFeatures::Scope scope(VFP3);
switch (op_) {
case Token::ADD:
__ vadd(d5, d6, d7);
break;
case Token::SUB:
__ vsub(d5, d6, d7);
break;
case Token::MUL:
__ vmul(d5, d6, d7);
break;
case Token::DIV:
__ vdiv(d5, d6, d7);
break;
default:
UNREACHABLE();
}
__ sub(r0, result, Operand(kHeapObjectTag));
__ vstr(d5, r0, HeapNumber::kValueOffset);
__ add(r0, r0, Operand(kHeapObjectTag));
__ Ret();
} else {
// Using core registers:
// r0: Left value (least significant part of mantissa).
// r1: Left value (sign, exponent, top of mantissa).
// r2: Right value (least significant part of mantissa).
// r3: Right value (sign, exponent, top of mantissa).
// Push the current return address before the C call. Return will be
// through pop(pc) below.
__ push(lr);
__ PrepareCallCFunction(4, scratch1); // Two doubles are 4 arguments.
// Call C routine that may not cause GC or other trouble. r5 is
// callee-saved.
__ CallCFunction(ExternalReference::double_fp_operation(op_), 4);
// Store answer in the overwritable heap number.
#if !defined(USE_ARM_EABI)
// Double returned in fp coprocessor registers 0 and 1, encoded as
// register cr8. Offsets must be divisible by 4 for the coprocessor so we
// need to subtract the tag from r5.
__ sub(scratch1, result, Operand(kHeapObjectTag));
__ stc(p1, cr8, MemOperand(scratch1, HeapNumber::kValueOffset));
#else
// Double returned in registers 0 and 1.
__ Strd(r0, r1, FieldMemOperand(result, HeapNumber::kValueOffset));
#endif
// Place result in r0 and return to the pushed return address.
__ mov(r0, Operand(result));
__ pop(pc);
}
break;
}
case Token::BIT_OR:
case Token::BIT_XOR:
case Token::BIT_AND: {
if (smi_operands) {
__ SmiUntag(r3, left);
__ SmiUntag(r2, right);
} else {
// Convert operands to 32-bit integers. Right in r2 and left in r3.
FloatingPointHelper::LoadNumberAsInteger(masm,
left,
r3,
heap_number_map,
scratch1,
scratch2,
d0,
not_numbers);
FloatingPointHelper::LoadNumberAsInteger(masm,
right,
r2,
heap_number_map,
scratch1,
scratch2,
d0,
not_numbers);
}
switch (op_) {
case Token::BIT_OR:
__ orr(r2, r3, Operand(r2));
break;
case Token::BIT_XOR:
__ eor(r2, r3, Operand(r2));
break;
case Token::BIT_AND:
__ and_(r2, r3, Operand(r2));
break;
default:
UNREACHABLE();
}
Label result_not_a_smi;
// Check that the *signed* result fits in a smi.
__ add(r3, r2, Operand(0x40000000), SetCC);
__ b(mi, &result_not_a_smi);
__ SmiTag(r0, r2);
__ Ret();
// Allocate new heap number for result.
__ bind(&result_not_a_smi);
__ AllocateHeapNumber(
r5, scratch1, scratch2, heap_number_map, gc_required);
// r2: Answer as signed int32.
// r5: Heap number to write answer into.
// Nothing can go wrong now, so move the heap number to r0, which is the
// result.
__ mov(r0, Operand(r5));
if (CpuFeatures::IsSupported(VFP3)) {
// Convert the int32 in r2 to the heap number in r0. r3 is corrupted.
CpuFeatures::Scope scope(VFP3);
__ vmov(s0, r2);
__ vcvt_f64_s32(d0, s0);
__ sub(r3, r0, Operand(kHeapObjectTag));
__ vstr(d0, r3, HeapNumber::kValueOffset);
__ Ret();
} else {
// Tail call that writes the int32 in r2 to the heap number in r0, using
// r3 as scratch. r0 is preserved and returned.
WriteInt32ToHeapNumberStub stub(r2, r0, r3);
__ TailCallStub(&stub);
}
break;
}
default:
UNREACHABLE();
}
}
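Two details above are worth unpacking. First, "Two doubles are 4 arguments": with the soft-float calling convention assumed here, each double operand travels in a pair of core registers — least significant mantissa word in r0/r2, sign-exponent-high-mantissa word in r1/r3 — matching the register comments in the core-register path. A hedged sketch of that split (little-endian word order assumed):

#include <cstdint>
#include <cstring>

// Splits a double into the two 32-bit words passed in core registers.
void SplitDouble(double d, uint32_t* lo, uint32_t* hi) {
  uint64_t bits;
  std::memcpy(&bits, &d, sizeof(bits));     // avoids strict-aliasing issues
  *lo = static_cast<uint32_t>(bits);        // least significant mantissa part
  *hi = static_cast<uint32_t>(bits >> 32);  // sign, exponent, top of mantissa
}

Second, the fits-in-a-smi test: adding 0x40000000 drives the result negative exactly when the signed value lies outside [-2^30, 2^30), i.e. when tagging it (value << 1) would overflow 32 bits. A worked check of the trick:

#include <cassert>
#include <cstdint>

// Mirrors: add r3, r2, #0x40000000, SetCC; b mi, &result_not_a_smi.
bool FitsInSmi(int32_t v) {
  return static_cast<int32_t>(static_cast<uint32_t>(v) + 0x40000000u) >= 0;
}

int main() {
  assert(FitsInSmi(0));
  assert(FitsInSmi((1 << 30) - 1));  // largest smi value
  assert(FitsInSmi(-(1 << 30)));     // smallest smi value
  assert(!FitsInSmi(1 << 30));       // needs a heap number
  assert(!FitsInSmi(INT32_MIN));     // needs a heap number
  return 0;
}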
@ -2646,7 +2794,10 @@ void TypeRecordingBinaryOpStub::GenerateSmiCode(MacroAssembler* masm,
op_ == Token::SUB ||
op_ == Token::MUL ||
op_ == Token::DIV ||
op_ == Token::MOD ||
op_ == Token::BIT_OR ||
op_ == Token::BIT_AND ||
op_ == Token::BIT_XOR);
Register left = r1;
Register right = r0;
@ -2678,7 +2829,10 @@ void TypeRecordingBinaryOpStub::GenerateSmiStub(MacroAssembler* masm) {
op_ == Token::SUB ||
op_ == Token::MUL ||
op_ == Token::DIV ||
op_ == Token::MOD ||
op_ == Token::BIT_OR ||
op_ == Token::BIT_AND ||
op_ == Token::BIT_XOR);
if (result_type_ == TRBinaryOpIC::UNINITIALIZED ||
result_type_ == TRBinaryOpIC::SMI) {
@ -2714,7 +2868,10 @@ void TypeRecordingBinaryOpStub::GenerateInt32Stub(MacroAssembler* masm) {
op_ == Token::SUB ||
op_ == Token::MUL ||
op_ == Token::DIV ||
op_ == Token::MOD ||
op_ == Token::BIT_OR ||
op_ == Token::BIT_AND ||
op_ == Token::BIT_XOR);
ASSERT(operands_type_ == TRBinaryOpIC::INT32);
@ -2727,7 +2884,10 @@ void TypeRecordingBinaryOpStub::GenerateHeapNumberStub(MacroAssembler* masm) {
op_ == Token::SUB ||
op_ == Token::MUL ||
op_ == Token::DIV ||
op_ == Token::MOD ||
op_ == Token::BIT_OR ||
op_ == Token::BIT_AND ||
op_ == Token::BIT_XOR);
Label not_numbers, call_runtime;
ASSERT(operands_type_ == TRBinaryOpIC::HEAP_NUMBER);
@ -2747,7 +2907,10 @@ void TypeRecordingBinaryOpStub::GenerateGeneric(MacroAssembler* masm) {
op_ == Token::SUB ||
op_ == Token::MUL ||
op_ == Token::DIV ||
op_ == Token::MOD ||
op_ == Token::BIT_OR ||
op_ == Token::BIT_AND ||
op_ == Token::BIT_XOR);
Label call_runtime;
@ -2812,6 +2975,15 @@ void TypeRecordingBinaryOpStub::GenerateCallRuntime(MacroAssembler* masm) {
case Token::MOD:
__ InvokeBuiltin(Builtins::MOD, JUMP_JS);
break;
case Token::BIT_OR:
__ InvokeBuiltin(Builtins::BIT_OR, JUMP_JS);
break;
case Token::BIT_AND:
__ InvokeBuiltin(Builtins::BIT_AND, JUMP_JS);
break;
case Token::BIT_XOR:
__ InvokeBuiltin(Builtins::BIT_XOR, JUMP_JS);
break;
default:
UNREACHABLE();
}
@ -3037,7 +3209,7 @@ void GenericUnaryOpStub::Generate(MacroAssembler* masm) {
__ b(ne, &slow);
// Convert the heap number in r0 to an untagged integer in r1.
__ ConvertToInt32(r0, r1, r2, r3, d0, &slow);
// Do the bitwise operation (move negated) and check if the result
// fits in a smi.

View File

@ -1110,7 +1110,7 @@ void DeferredInlineSmiOperation::GenerateNonSmiInput() {
Register int32 = r2;
// Not a 32-bit signed int, fall back to the GenericBinaryOpStub.
__ ConvertToInt32(tos_register_, int32, r4, r5, d0, entry_label());
// tos_register_ (r0 or r1): Original heap number.
// int32: signed 32-bit int.

View File

@ -1554,7 +1554,10 @@ void FullCodeGenerator::EmitBinaryOp(Token::Value op,
op == Token::SUB ||
op == Token::MUL ||
op == Token::DIV ||
op == Token::MOD ||
op == Token::BIT_OR ||
op == Token::BIT_AND ||
op == Token::BIT_XOR) {
TypeRecordingBinaryOpStub stub(op, mode);
__ CallStub(&stub);
} else {

View File

@ -158,6 +158,9 @@ const char* LArithmeticT::Mnemonic() const {
case Token::MUL: return "mul-t";
case Token::MOD: return "mod-t";
case Token::DIV: return "div-t";
case Token::BIT_AND: return "bit-and-t";
case Token::BIT_OR: return "bit-or-t";
case Token::BIT_XOR: return "bit-xor-t";
default:
UNREACHABLE();
return NULL;
@ -750,13 +753,23 @@ LInstruction* LChunkBuilder::DoDeoptimize(HDeoptimize* instr) {
LInstruction* LChunkBuilder::DoBit(Token::Value op,
HBitwiseBinaryOperation* instr) {
if (instr->representation().IsInteger32()) {
ASSERT(instr->left()->representation().IsInteger32());
ASSERT(instr->right()->representation().IsInteger32());
LOperand* left = UseRegisterAtStart(instr->LeastConstantOperand());
LOperand* right = UseOrConstantAtStart(instr->MostConstantOperand());
return DefineSameAsFirst(new LBitI(op, left, right));
} else {
ASSERT(instr->representation().IsTagged());
ASSERT(instr->left()->representation().IsTagged());
ASSERT(instr->right()->representation().IsTagged());
LOperand* left = UseFixed(instr->left(), r1);
LOperand* right = UseFixed(instr->right(), r0);
LArithmeticT* result = new LArithmeticT(op, left, right);
return MarkAsCall(DefineFixed(result, r0), instr);
}
}

View File

@ -1577,13 +1577,14 @@ void MacroAssembler::ConvertToInt32(Register source,
Register dest,
Register scratch,
Register scratch2,
DwVfpRegister double_scratch,
Label *not_int32) {
if (CpuFeatures::IsSupported(VFP3)) {
CpuFeatures::Scope scope(VFP3);
sub(scratch, source, Operand(kHeapObjectTag));
vldr(double_scratch, scratch, HeapNumber::kValueOffset);
vcvt_s32_f64(double_scratch.low(), double_scratch);
vmov(dest, double_scratch.low());
// Signed vcvt instruction will saturate to the minimum (0x80000000) or
// maximum (0x7fffffff) signed 32-bit integer when the double is out of
// range. When subtracting one, the minimum signed integer becomes the
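The comment above is why the VFP path needs a post-conversion check: vcvt_s32_f64 truncates, and silently saturates out-of-range doubles to 0x80000000 or 0x7fffffff, so either extreme must be treated as possibly-out-of-range. A hedged sketch of the subtract-one trick that tests both extremes with a single compare (my reading of the comment, not the literal instruction sequence):

#include <cstdint>

bool ConversionInRange(int32_t converted) {
  // INT32_MIN wraps to INT32_MAX and INT32_MAX becomes 0x7ffffffe, so
  // both saturated extremes now satisfy "signed value >= 0x7ffffffe".
  uint32_t minus_one = static_cast<uint32_t>(converted) - 1u;
  return static_cast<int32_t>(minus_one) < 0x7ffffffe;
}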
@ -1999,6 +2000,16 @@ void MacroAssembler::AbortIfNotSmi(Register object) {
}
void MacroAssembler::AbortIfNotRootValue(Register src,
Heap::RootListIndex root_value_index,
const char* message) {
ASSERT(!src.is(ip));
LoadRoot(ip, root_value_index);
cmp(src, ip);
Assert(eq, message);
}
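AbortIfNotRootValue gives debug builds a cheap guard that a register still holds an expected root value — here, the heap number map. Its contract, as a hedged C-style sketch:

#include <cstdio>
#include <cstdlib>

// Compare the register's value against the root; abort with the message
// on mismatch (the cmp + Assert(eq, message) pair above).
void AbortIfNotRootValueSketch(long src, long root_value,
                               const char* message) {
  if (src != root_value) {
    std::fprintf(stderr, "%s\n", message);
    std::abort();
  }
}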
void MacroAssembler::JumpIfNotHeapNumber(Register object,
Register heap_number_map,
Register scratch,

View File

@ -589,11 +589,13 @@ class MacroAssembler: public Assembler {
// Convert the HeapNumber pointed to by source to a 32-bit signed integer
// dest. If the HeapNumber does not fit into a 32-bit signed integer, branch
// to the not_int32 label. If VFP3 is available, double_scratch is used but
// not scratch2.
void ConvertToInt32(Register source,
Register dest,
Register scratch,
Register scratch2,
DwVfpRegister double_scratch,
Label *not_int32);
// Count leading zeros in a 32-bit word. On ARM5 and later it uses the clz
@ -763,6 +765,11 @@ class MacroAssembler: public Assembler {
void AbortIfSmi(Register object);
void AbortIfNotSmi(Register object);
// Abort execution if argument is not the root value with the given index.
void AbortIfNotRootValue(Register src,
Heap::RootListIndex root_value_index,
const char* message);
// ---------------------------------------------------------------------------
// HeapNumber utilities