Increase sanity of integer division handling on ARM

- In the INT32 BinaryOpStub, fix type feedback collection for DIV,
  bringing it in line with other platforms.
- In Lithium codegen, emit properly inlined code instead of calling the
  stub.
- Drive-by fix: assert that SDIV is emitted only within an appropriate
  CpuFeatureScope.

R=ulan@chromium.org

Review URL: https://codereview.chromium.org/16082008

git-svn-id: http://v8.googlecode.com/svn/branches/bleeding_edge@15057 ce2b1a6d-e550-0410-aec6-3dcde31c8c00
Author: jkummerow@chromium.org
Date:   2013-06-11 10:47:44 +0000
Parent: 9278a4b7b1
Commit: 57632e208a

8 changed files with 56 additions and 126 deletions
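The heart of the change is the inlined, deopt-checked division: when SUDIV
is available, Lithium now emits sdiv followed by mls and deoptimizes on a
non-zero remainder rather than tagging the operands and calling the stub.
The sketch below (standalone C++ with illustrative names, not V8 code)
mirrors the semantics of that inlined path; each nullopt corresponds to a
deoptimization:

    #include <cstdint>
    #include <optional>

    std::optional<int32_t> StrictInt32Div(int32_t left, int32_t right) {
      if (right == 0) return std::nullopt;           // division by zero
      if (left == INT32_MIN && right == -1) return std::nullopt;  // overflow
      int32_t quotient = left / right;               // sdiv result, left, right
      // mls computes left - quotient * right in a single instruction.
      int32_t remainder = left - quotient * right;   // mls remainder, quotient, right, left
      if (remainder != 0) return std::nullopt;       // inexact quotient
      return quotient;
    }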

src/arm/assembler-arm.cc

@@ -1368,6 +1368,7 @@ void Assembler::mls(Register dst, Register src1, Register src2, Register srcA,
 void Assembler::sdiv(Register dst, Register src1, Register src2,
                      Condition cond) {
   ASSERT(!dst.is(pc) && !src1.is(pc) && !src2.is(pc));
+  ASSERT(IsEnabled(SUDIV));
   emit(cond | B26 | B25| B24 | B20 | dst.code()*B16 | 0xf * B12 |
        src2.code()*B8 | B4 | src1.code());
 }
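The emit() call above packs the ARMv7 encoding of SDIV (cond 0111 0001 Rd
1111 Rm 0001 Rn, with dst = Rd, src1 = Rn, src2 = Rm). A quick standalone
check of that bit arithmetic, assuming the B-constants are the usual
single-bit masks (Bn == 1 << n):

    #include <cstdint>
    #include <cstdio>

    int main() {
      const uint32_t kCondAl = 0xEu << 28;  // "always" condition
      const uint32_t B26 = 1u << 26, B25 = 1u << 25, B24 = 1u << 24,
                     B20 = 1u << 20, B16 = 1u << 16, B12 = 1u << 12,
                     B8 = 1u << 8, B4 = 1u << 4;
      const uint32_t rd = 0, rn = 1, rm = 2;  // sdiv r0, r1, r2
      const uint32_t insn = kCondAl | B26 | B25 | B24 | B20 | rd * B16 |
                            0xFu * B12 | rm * B8 | B4 | rn;
      std::printf("sdiv r0, r1, r2 -> 0x%08X\n", insn);  // 0xE710F211
      return 0;
    }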

src/arm/code-stubs-arm.cc

@@ -1707,6 +1707,7 @@ void BinaryOpStub_GenerateSmiSmiOperation(MacroAssembler* masm,
   __ Ret();
 
   if (CpuFeatures::IsSupported(SUDIV)) {
+    CpuFeatureScope scope(masm, SUDIV);
     Label result_not_zero;
 
     __ bind(&div_with_sdiv);
@@ -1763,6 +1764,7 @@ void BinaryOpStub_GenerateSmiSmiOperation(MacroAssembler* masm,
   __ Ret();
 
   if (CpuFeatures::IsSupported(SUDIV)) {
+    CpuFeatureScope scope(masm, SUDIV);
     __ bind(&modulo_with_sdiv);
     __ mov(scratch2, right);
     // Perform modulus with sdiv and mls.
@@ -2208,42 +2210,25 @@ void BinaryOpStub::GenerateInt32Stub(MacroAssembler* masm) {
           UNREACHABLE();
       }
-      if (op_ != Token::DIV) {
-        // These operations produce an integer result.
-        // Try to return a smi if we can.
-        // Otherwise return a heap number if allowed, or jump to type
-        // transition.
-        if (result_type_ <= BinaryOpIC::INT32) {
-          __ TryDoubleToInt32Exact(scratch1, d5, d8);
-          // If the ne condition is set, result does
-          // not fit in a 32-bit integer.
-          __ b(ne, &transition);
-        } else {
-          __ vcvt_s32_f64(s8, d5);
-          __ vmov(scratch1, s8);
-        }
-        // Check if the result fits in a smi.
-        __ add(scratch2, scratch1, Operand(0x40000000), SetCC);
-        // If not try to return a heap number.
-        __ b(mi, &return_heap_number);
-        // Check for minus zero. Return heap number for minus zero if
-        // double results are allowed; otherwise transition.
+      if (result_type_ <= BinaryOpIC::INT32) {
+        __ TryDoubleToInt32Exact(scratch1, d5, d8);
+        // If the ne condition is set, result does
+        // not fit in a 32-bit integer.
+        __ b(ne, &transition);
+        // Try to tag the result as a Smi, return heap number on overflow.
+        __ SmiTag(scratch1, SetCC);
+        __ b(vs, &return_heap_number);
+        // Check for minus zero, transition in that case (because we need
+        // to return a heap number).
         Label not_zero;
         __ cmp(scratch1, Operand::Zero());
         ASSERT(kSmiTag == 0);
         __ b(ne, &not_zero);
         __ vmov(scratch2, d5.high());
         __ tst(scratch2, Operand(HeapNumber::kSignMask));
-        __ b(ne, result_type_ <= BinaryOpIC::INT32 ? &transition
-                                                   : &return_heap_number);
+        __ b(ne, &transition);
         __ bind(&not_zero);
-        // Tag the result and return.
-        __ SmiTag(r0, scratch1);
+        __ mov(r0, scratch1);
         __ Ret();
-      } else {
-        // DIV just falls through to allocating a heap number.
       }
       __ bind(&return_heap_number);
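Note the new range check: SmiTag on 32-bit ARM is a left shift by one (the
smi tag is a clear low bit), so SmiTag(scratch1, SetCC) sets the overflow
flag exactly when the value falls outside the smi range [-2^30, 2^30 - 1],
replacing the old add-0x40000000 trick. A portable sketch of the same test
(illustrative, using the GCC/Clang overflow builtin):

    #include <cstdint>
    #include <optional>

    std::optional<int32_t> TrySmiTag(int32_t value) {
      int32_t tagged;
      // Doubling is the shift-left-by-one tagging; overflow means the
      // value needs a heap number instead ("b(vs, &return_heap_number)").
      if (__builtin_mul_overflow(value, 2, &tagged)) return std::nullopt;
      return tagged;  // low bit 0 is the smi tag
    }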

src/arm/lithium-arm.cc

@@ -1345,18 +1345,14 @@ LInstruction* LChunkBuilder::DoDiv(HDiv* instr) {
       ASSERT(!instr->CheckFlag(HValue::kCanBeDivByZero));
       LOperand* value = UseRegisterAtStart(instr->left());
       LDivI* div =
-          new(zone()) LDivI(value, UseOrConstant(instr->right()));
+          new(zone()) LDivI(value, UseOrConstant(instr->right()), NULL);
       return AssignEnvironment(DefineSameAsFirst(div));
     }
-    // TODO(1042) The fixed register allocation
-    // is needed because we call TypeRecordingBinaryOpStub from
-    // the generated code, which requires registers r0
-    // and r1 to be used. We should remove that
-    // when we provide a native implementation.
-    LOperand* dividend = UseFixed(instr->left(), r0);
-    LOperand* divisor = UseFixed(instr->right(), r1);
-    return AssignEnvironment(AssignPointerMap(
-        DefineFixed(new(zone()) LDivI(dividend, divisor), r0)));
+    LOperand* dividend = UseRegister(instr->left());
+    LOperand* divisor = UseRegister(instr->right());
+    LOperand* temp = CpuFeatures::IsSupported(SUDIV) ? NULL : FixedTemp(d4);
+    LDivI* div = new(zone()) LDivI(dividend, divisor, temp);
+    return AssignEnvironment(DefineAsRegister(div));
   } else {
     return DoArithmeticT(Token::DIV, instr);
   }

src/arm/lithium-arm.h

@@ -597,15 +597,17 @@ class LModI: public LTemplateInstruction<1, 2, 2> {
 };
 
-class LDivI: public LTemplateInstruction<1, 2, 0> {
+class LDivI: public LTemplateInstruction<1, 2, 1> {
  public:
-  LDivI(LOperand* left, LOperand* right) {
+  LDivI(LOperand* left, LOperand* right, LOperand* temp) {
     inputs_[0] = left;
     inputs_[1] = right;
+    temps_[0] = temp;
   }
 
   LOperand* left() { return inputs_[0]; }
   LOperand* right() { return inputs_[1]; }
+  LOperand* temp() { return temps_[0]; }
 
   DECLARE_CONCRETE_INSTRUCTION(DivI, "div-i")
   DECLARE_HYDROGEN_ACCESSOR(Div)

src/arm/lithium-codegen-arm.cc

@@ -1417,21 +1417,6 @@ void LCodeGen::EmitSignedIntegerDivisionByConstant(
 
 void LCodeGen::DoDivI(LDivI* instr) {
-  class DeferredDivI: public LDeferredCode {
-   public:
-    DeferredDivI(LCodeGen* codegen, LDivI* instr)
-        : LDeferredCode(codegen), instr_(instr) { }
-    virtual void Generate() {
-      codegen()->DoDeferredBinaryOpStub(instr_->pointer_map(),
-                                        instr_->left(),
-                                        instr_->right(),
-                                        Token::DIV);
-    }
-    virtual LInstruction* instr() { return instr_; }
-   private:
-    LDivI* instr_;
-  };
-
   if (instr->hydrogen()->HasPowerOf2Divisor()) {
     Register dividend = ToRegister(instr->left());
     int32_t divisor =
@@ -1498,40 +1483,32 @@ void LCodeGen::DoDivI(LDivI* instr) {
     __ bind(&left_not_min_int);
   }
 
-  Label done, deoptimize;
-  // Test for a few common cases first.
-  __ cmp(right, Operand(1));
-  __ mov(result, left, LeaveCC, eq);
-  __ b(eq, &done);
-  __ cmp(right, Operand(2));
-  __ tst(left, Operand(1), eq);
-  __ mov(result, Operand(left, ASR, 1), LeaveCC, eq);
-  __ b(eq, &done);
-  __ cmp(right, Operand(4));
-  __ tst(left, Operand(3), eq);
-  __ mov(result, Operand(left, ASR, 2), LeaveCC, eq);
-  __ b(eq, &done);
-  // Call the stub. The numbers in r0 and r1 have
-  // to be tagged to Smis. If that is not possible, deoptimize.
-  DeferredDivI* deferred = new(zone()) DeferredDivI(this, instr);
-  __ TrySmiTag(left, &deoptimize);
-  __ TrySmiTag(right, &deoptimize);
-  __ b(al, deferred->entry());
-  __ bind(deferred->exit());
-  // If the result in r0 is a Smi, untag it, else deoptimize.
-  __ JumpIfNotSmi(result, &deoptimize);
-  __ SmiUntag(result);
-  __ b(&done);
-  __ bind(&deoptimize);
-  DeoptimizeIf(al, instr->environment());
-  __ bind(&done);
+  if (CpuFeatures::IsSupported(SUDIV)) {
+    CpuFeatureScope scope(masm(), SUDIV);
+    __ sdiv(result, left, right);
+    // Compute remainder and deopt if it's not zero.
+    const Register remainder = scratch0();
+    __ mls(remainder, result, right, left);
+    __ cmp(remainder, Operand::Zero());
+    DeoptimizeIf(ne, instr->environment());
+  } else {
+    const DoubleRegister vleft = ToDoubleRegister(instr->temp());
+    const DoubleRegister vright = double_scratch0();
+    __ vmov(vleft.low(), left);
+    __ vmov(vright.low(), right);
+    __ vcvt_f64_s32(vleft, vleft.low());
+    __ vcvt_f64_s32(vright, vright.low());
+    __ vdiv(vleft, vleft, vright);  // vleft now contains the result.
+    // Convert back to integer32; deopt if exact conversion is not possible.
+    // Use vright as scratch register.
+    __ vcvt_s32_f64(vright.low(), vleft);
+    __ vmov(result, vright.low());
+    __ vcvt_f64_s32(vright, vright.low());
+    __ VFPCompareAndSetFlags(vleft, vright);
+    DeoptimizeIf(ne, instr->environment());
+  }
 }
@@ -1630,38 +1607,6 @@ void LCodeGen::DoMathFloorOfDiv(LMathFloorOfDiv* instr) {
 }
 
-void LCodeGen::DoDeferredBinaryOpStub(LPointerMap* pointer_map,
-                                      LOperand* left_argument,
-                                      LOperand* right_argument,
-                                      Token::Value op) {
-  Register left = ToRegister(left_argument);
-  Register right = ToRegister(right_argument);
-
-  PushSafepointRegistersScope scope(this, Safepoint::kWithRegistersAndDoubles);
-  // Move left to r1 and right to r0 for the stub call.
-  if (left.is(r1)) {
-    __ Move(r0, right);
-  } else if (left.is(r0) && right.is(r1)) {
-    __ Swap(r0, r1, r2);
-  } else if (left.is(r0)) {
-    ASSERT(!right.is(r1));
-    __ mov(r1, r0);
-    __ mov(r0, right);
-  } else {
-    ASSERT(!left.is(r0) && !right.is(r0));
-    __ mov(r0, right);
-    __ mov(r1, left);
-  }
-  BinaryOpStub stub(op, OVERWRITE_LEFT);
-  __ CallStub(&stub);
-  RecordSafepointWithRegistersAndDoubles(pointer_map,
-                                         0,
-                                         Safepoint::kNoLazyDeopt);
-  // Overwrite the stored value of r0 with the result of the stub.
-  __ StoreToSafepointRegistersAndDoublesSlot(r0, r0);
-}
-
 void LCodeGen::DoMulI(LMulI* instr) {
   Register scratch = scratch0();
   Register result = ToRegister(instr->result());
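On cores without SUDIV, the new DoDivI falls back to VFP: convert both
operands to double, divide, truncate the quotient to int32, and compare
the re-converted result against the original quotient; any mismatch
(inexact division, or a quotient outside int32 range) triggers a deopt.
A sketch of that check (plain C++, not V8 code; the explicit range guard
stands in for vcvt's saturating behavior):

    #include <cstdint>
    #include <optional>

    std::optional<int32_t> VfpStyleDiv(int32_t left, int32_t right) {
      double vleft = static_cast<double>(left);    // vcvt_f64_s32
      double vright = static_cast<double>(right);  // vcvt_f64_s32
      double quotient = vleft / vright;            // vdiv
      if (!(quotient >= -2147483648.0 && quotient <= 2147483647.0)) {
        return std::nullopt;  // out of int32 range (or NaN from 0/0)
      }
      int32_t truncated = static_cast<int32_t>(quotient);  // vcvt_s32_f64
      // VFPCompareAndSetFlags: deopt unless the round trip is exact.
      if (static_cast<double>(truncated) != quotient) return std::nullopt;
      return truncated;
    }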

src/arm/lithium-codegen-arm.h

@@ -138,10 +138,6 @@ class LCodeGen BASE_EMBEDDED {
   void FinishCode(Handle<Code> code);
 
   // Deferred code support.
-  void DoDeferredBinaryOpStub(LPointerMap* pointer_map,
-                              LOperand* left_argument,
-                              LOperand* right_argument,
-                              Token::Value op);
   void DoDeferredNumberTagD(LNumberTagD* instr);
 
   enum IntegerSignedness { SIGNED_INT32, UNSIGNED_INT32 };

src/hydrogen-instructions.cc

@@ -1513,6 +1513,11 @@ HValue* HUnaryMathOperation::Canonicalize() {
     // If the input is integer32 then we replace the floor instruction
     // with its input. This happens before the representation changes are
     // introduced.
+    // TODO(2205): The above comment is lying. All of this happens
+    // *after* representation changes are introduced. We should check
+    // for value->IsChange() and react accordingly if yes.
     if (value()->representation().IsInteger32()) return value();
 
 #if defined(V8_TARGET_ARCH_ARM) || defined(V8_TARGET_ARCH_IA32) || \

src/ia32/assembler-ia32.cc

@@ -2351,7 +2351,7 @@ void Assembler::movd(const Operand& dst, XMMRegister src) {
 
 void Assembler::extractps(Register dst, XMMRegister src, byte imm8) {
-  ASSERT(CpuFeatures::IsSupported(SSE4_1));
+  ASSERT(IsEnabled(SSE4_1));
   ASSERT(is_uint8(imm8));
   EnsureSpace ensure_space(this);
   EMIT(0x66);