Some assembler-level optimizations on ARM.

Review URL: https://chromiumcodereview.appspot.com/9223011

git-svn-id: http://v8.googlecode.com/svn/branches/bleeding_edge@10541 ce2b1a6d-e550-0410-aec6-3dcde31c8c00
yangguo@chromium.org 2012-01-27 16:54:22 +00:00
parent 74feaa6c3d
commit f2eda210d0
5 changed files with 83 additions and 79 deletions
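The hunks below are small peephole optimizations in the ARM code generators. The recurring patterns: a LoadRoot/cmp pair collapses into a single CompareRoot call, a JumpIfSmi followed by a separate SmiUntag collapses into the new UntagAndJumpIfSmi / UntagAndJumpIfNotSmi macros, a couple of branches are replaced by conditionally executed instructions, and a few loads are scheduled earlier so dependent instructions do not wait on them. As a condensed illustration of the first pattern, paraphrased from the RegExpExecStub hunk (not part of the diff; CompareRoot presumably still emits the same load-into-ip plus cmp, so the win is mainly source brevity):

  // Before: materialize the root value in ip, then compare against it.
  __ LoadRoot(ip, Heap::kFixedArrayMapRootIndex);
  __ cmp(r0, ip);
  __ b(ne, &runtime);

  // After: the same check expressed as one macro-assembler call.
  __ CompareRoot(r0, Heap::kFixedArrayMapRootIndex);
  __ b(ne, &runtime);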

--- changed file 1 of 5 ---

@@ -1,4 +1,4 @@
// Copyright 2011 the V8 project authors. All rights reserved.
// Copyright 2012 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
@@ -122,7 +122,6 @@ void FastNewClosureStub::Generate(MacroAssembler* masm) {
__ str(r1, FieldMemOperand(r0, JSFunction::kLiteralsOffset));
__ str(r4, FieldMemOperand(r0, JSFunction::kNextFunctionLinkOffset));
// Initialize the code pointer in the function to be the one
// found in the shared function info object.
__ ldr(r3, FieldMemOperand(r3, SharedFunctionInfo::kCodeOffset));
@@ -157,20 +156,18 @@ void FastNewContextStub::Generate(MacroAssembler* masm) {
__ ldr(r3, MemOperand(sp, 0));
// Set up the object header.
__ LoadRoot(r2, Heap::kFunctionContextMapRootIndex);
__ str(r2, FieldMemOperand(r0, HeapObject::kMapOffset));
__ LoadRoot(r1, Heap::kFunctionContextMapRootIndex);
__ mov(r2, Operand(Smi::FromInt(length)));
__ str(r2, FieldMemOperand(r0, FixedArray::kLengthOffset));
__ str(r1, FieldMemOperand(r0, HeapObject::kMapOffset));
// Set up the fixed slots.
// Set up the fixed slots, copy the global object from the previous context.
__ ldr(r2, MemOperand(cp, Context::SlotOffset(Context::GLOBAL_INDEX)));
__ mov(r1, Operand(Smi::FromInt(0)));
__ str(r3, MemOperand(r0, Context::SlotOffset(Context::CLOSURE_INDEX)));
__ str(cp, MemOperand(r0, Context::SlotOffset(Context::PREVIOUS_INDEX)));
__ str(r1, MemOperand(r0, Context::SlotOffset(Context::EXTENSION_INDEX)));
// Copy the global object from the previous context.
__ ldr(r1, MemOperand(cp, Context::SlotOffset(Context::GLOBAL_INDEX)));
__ str(r1, MemOperand(r0, Context::SlotOffset(Context::GLOBAL_INDEX)));
__ str(r2, MemOperand(r0, Context::SlotOffset(Context::GLOBAL_INDEX)));
// Initialize the rest of the slots to undefined.
__ LoadRoot(r1, Heap::kUndefinedValueRootIndex);
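In FastNewContextStub (and the analogous FastNewBlockContextStub hunk below), the load of the global object from the previous context is hoisted above the three unrelated slot stores, presumably so the store that needs it no longer waits on the load. Paraphrased from the hunk (not part of the diff):

  // Before: the global object is loaded immediately before it is stored.
  __ ldr(r1, MemOperand(cp, Context::SlotOffset(Context::GLOBAL_INDEX)));
  __ str(r1, MemOperand(r0, Context::SlotOffset(Context::GLOBAL_INDEX)));

  // After: the load is issued first and its result is consumed last,
  // giving it the three intervening stores' worth of time to complete.
  __ ldr(r2, MemOperand(cp, Context::SlotOffset(Context::GLOBAL_INDEX)));
  __ mov(r1, Operand(Smi::FromInt(0)));  // Extension slot value.
  __ str(r3, MemOperand(r0, Context::SlotOffset(Context::CLOSURE_INDEX)));
  __ str(cp, MemOperand(r0, Context::SlotOffset(Context::PREVIOUS_INDEX)));
  __ str(r1, MemOperand(r0, Context::SlotOffset(Context::EXTENSION_INDEX)));
  __ str(r2, MemOperand(r0, Context::SlotOffset(Context::GLOBAL_INDEX)));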
@@ -229,14 +226,12 @@ void FastNewBlockContextStub::Generate(MacroAssembler* masm) {
__ ldr(r3, ContextOperand(r3, Context::CLOSURE_INDEX));
__ bind(&after_sentinel);
// Set up the fixed slots.
// Set up the fixed slots, copy the global object from the previous context.
__ ldr(r2, ContextOperand(cp, Context::GLOBAL_INDEX));
__ str(r3, ContextOperand(r0, Context::CLOSURE_INDEX));
__ str(cp, ContextOperand(r0, Context::PREVIOUS_INDEX));
__ str(r1, ContextOperand(r0, Context::EXTENSION_INDEX));
// Copy the global object from the previous context.
__ ldr(r1, ContextOperand(cp, Context::GLOBAL_INDEX));
__ str(r1, ContextOperand(r0, Context::GLOBAL_INDEX));
__ str(r2, ContextOperand(r0, Context::GLOBAL_INDEX));
// Initialize the rest of the slots to the hole value.
__ LoadRoot(r1, Heap::kTheHoleValueRootIndex);
@@ -326,8 +321,7 @@ void FastCloneShallowArrayStub::Generate(MacroAssembler* masm) {
Label double_elements, check_fast_elements;
__ ldr(r0, FieldMemOperand(r3, JSArray::kElementsOffset));
__ ldr(r0, FieldMemOperand(r0, HeapObject::kMapOffset));
__ LoadRoot(ip, Heap::kFixedCOWArrayMapRootIndex);
__ cmp(r0, ip);
__ CompareRoot(r0, Heap::kFixedCOWArrayMapRootIndex);
__ b(ne, &check_fast_elements);
GenerateFastCloneShallowArrayCommon(masm, 0,
COPY_ON_WRITE_ELEMENTS, &slow_case);
@@ -336,8 +330,7 @@ void FastCloneShallowArrayStub::Generate(MacroAssembler* masm) {
__ Ret();
__ bind(&check_fast_elements);
__ LoadRoot(ip, Heap::kFixedArrayMapRootIndex);
__ cmp(r0, ip);
__ CompareRoot(r0, Heap::kFixedArrayMapRootIndex);
__ b(ne, &double_elements);
GenerateFastCloneShallowArrayCommon(masm, length_,
CLONE_ELEMENTS, &slow_case);
@@ -590,7 +583,9 @@ void FloatingPointHelper::LoadNumber(MacroAssembler* masm,
Label is_smi, done;
__ JumpIfSmi(object, &is_smi);
// Smi-check
__ UntagAndJumpIfSmi(scratch1, object, &is_smi);
// Heap number check
__ JumpIfNotHeapNumber(object, heap_number_map, scratch1, not_number);
// Handle loading a double from a heap number.
@@ -612,7 +607,6 @@ void FloatingPointHelper::LoadNumber(MacroAssembler* masm,
if (CpuFeatures::IsSupported(VFP3)) {
CpuFeatures::Scope scope(VFP3);
// Convert smi to double using VFP instructions.
__ SmiUntag(scratch1, object);
__ vmov(dst.high(), scratch1);
__ vcvt_f64_s32(dst, dst.high());
if (destination == kCoreRegisters) {
@@ -647,11 +641,10 @@ void FloatingPointHelper::ConvertNumberToInt32(MacroAssembler* masm,
Heap::kHeapNumberMapRootIndex,
"HeapNumberMap register clobbered.");
}
Label is_smi;
Label done;
Label not_in_int32_range;
__ JumpIfSmi(object, &is_smi);
__ UntagAndJumpIfSmi(dst, object, &done);
__ ldr(scratch1, FieldMemOperand(object, HeapNumber::kMapOffset));
__ cmp(scratch1, heap_number_map);
__ b(ne, not_number);
@@ -671,10 +664,6 @@ void FloatingPointHelper::ConvertNumberToInt32(MacroAssembler* masm,
scratch1,
scratch2,
scratch3);
__ jmp(&done);
__ bind(&is_smi);
__ SmiUntag(dst, object);
__ bind(&done);
}
@@ -847,10 +836,7 @@ void FloatingPointHelper::LoadNumberAsInt32(MacroAssembler* masm,
Label done;
// Untag the object into the destination register.
__ SmiUntag(dst, object);
// Just return if the object is a smi.
__ JumpIfSmi(object, &done);
__ UntagAndJumpIfSmi(dst, object, &done);
if (FLAG_debug_code) {
__ AbortIfNotRootValue(heap_number_map,
@@ -3310,8 +3296,7 @@ void TranscendentalCacheStub::Generate(MacroAssembler* masm) {
// Check if cache matches: Double value is stored in uint32_t[2] array.
__ ldm(ia, cache_entry, r4.bit() | r5.bit() | r6.bit());
__ cmp(r2, r4);
__ b(ne, &calculate);
__ cmp(r3, r5);
__ cmp(r3, r5, eq);
__ b(ne, &calculate);
// Cache hit. Load result, cleanup and return.
Counters* counters = masm->isolate()->counters();
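One of the two branches in the transcendental cache probe goes away by predicating the second comparison: cmp(r3, r5, eq) executes only if the first cmp set eq, and a skipped instruction leaves the flags alone, so the single b(ne, &calculate) is taken whenever either word of the cached key differs. Paraphrased (not part of the diff):

  // Before: compare and branch twice.
  __ cmp(r2, r4);
  __ b(ne, &calculate);
  __ cmp(r3, r5);
  __ b(ne, &calculate);

  // After: the second cmp is conditional on eq; if the first compare
  // already failed, it is skipped and the ne result survives to the branch.
  __ cmp(r2, r4);
  __ cmp(r3, r5, eq);
  __ b(ne, &calculate);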
@@ -3468,7 +3453,7 @@ void MathPowStub::Generate(MacroAssembler* masm) {
const Register scratch = r9;
const Register scratch2 = r7;
Label call_runtime, done, exponent_not_smi, int_exponent;
Label call_runtime, done, int_exponent;
if (exponent_type_ == ON_STACK) {
Label base_is_smi, unpack_exponent;
// The exponent and base are supplied as arguments on the stack.
@@ -3479,7 +3464,7 @@ void MathPowStub::Generate(MacroAssembler* masm) {
__ LoadRoot(heapnumbermap, Heap::kHeapNumberMapRootIndex);
__ JumpIfSmi(base, &base_is_smi);
__ UntagAndJumpIfSmi(scratch, base, &base_is_smi);
__ ldr(scratch, FieldMemOperand(base, JSObject::kMapOffset));
__ cmp(scratch, heapnumbermap);
__ b(ne, &call_runtime);
@@ -3488,16 +3473,12 @@ void MathPowStub::Generate(MacroAssembler* masm) {
__ jmp(&unpack_exponent);
__ bind(&base_is_smi);
__ SmiUntag(base);
__ vmov(single_scratch, base);
__ vmov(single_scratch, scratch);
__ vcvt_f64_s32(double_base, single_scratch);
__ bind(&unpack_exponent);
__ JumpIfNotSmi(exponent, &exponent_not_smi);
__ SmiUntag(exponent);
__ jmp(&int_exponent);
__ UntagAndJumpIfSmi(scratch, exponent, &int_exponent);
__ bind(&exponent_not_smi);
__ ldr(scratch, FieldMemOperand(exponent, JSObject::kMapOffset));
__ cmp(scratch, heapnumbermap);
__ b(ne, &call_runtime);
@@ -3505,11 +3486,8 @@ void MathPowStub::Generate(MacroAssembler* masm) {
FieldMemOperand(exponent, HeapNumber::kValueOffset));
} else if (exponent_type_ == TAGGED) {
// Base is already in double_base.
__ JumpIfNotSmi(exponent, &exponent_not_smi);
__ SmiUntag(exponent);
__ jmp(&int_exponent);
__ UntagAndJumpIfSmi(scratch, exponent, &int_exponent);
__ bind(&exponent_not_smi);
__ vldr(double_exponent,
FieldMemOperand(exponent, HeapNumber::kValueOffset));
}
@@ -3582,13 +3560,19 @@ void MathPowStub::Generate(MacroAssembler* masm) {
__ bind(&int_exponent_convert);
__ vcvt_u32_f64(single_scratch, double_exponent);
__ vmov(exponent, single_scratch);
__ vmov(scratch, single_scratch);
}
// Calculate power with integer exponent.
__ bind(&int_exponent);
__ mov(scratch, exponent); // Back up exponent.
// Get two copies of exponent in the registers scratch and exponent.
if (exponent_type_ == INTEGER) {
__ mov(scratch, exponent);
} else {
// Exponent has previously been stored into scratch as untagged integer.
__ mov(exponent, scratch);
}
__ vmov(double_scratch, double_base); // Back up base.
__ vmov(double_result, 1.0);
@@ -4098,11 +4082,9 @@ void InstanceofStub::Generate(MacroAssembler* masm) {
// real lookup and update the call site cache.
if (!HasCallSiteInlineCheck()) {
Label miss;
__ LoadRoot(ip, Heap::kInstanceofCacheFunctionRootIndex);
__ cmp(function, ip);
__ CompareRoot(function, Heap::kInstanceofCacheFunctionRootIndex);
__ b(ne, &miss);
__ LoadRoot(ip, Heap::kInstanceofCacheMapRootIndex);
__ cmp(map, ip);
__ CompareRoot(map, Heap::kInstanceofCacheMapRootIndex);
__ b(ne, &miss);
__ LoadRoot(r0, Heap::kInstanceofCacheAnswerRootIndex);
__ Ret(HasArgsInRegisters() ? 0 : 2);
@@ -4727,8 +4709,7 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
__ ldr(last_match_info_elements,
FieldMemOperand(r0, JSArray::kElementsOffset));
__ ldr(r0, FieldMemOperand(last_match_info_elements, HeapObject::kMapOffset));
__ LoadRoot(ip, Heap::kFixedArrayMapRootIndex);
__ cmp(r0, ip);
__ CompareRoot(r0, Heap::kFixedArrayMapRootIndex);
__ b(ne, &runtime);
// Check that the last match info has space for the capture registers and the
// additional information.
@@ -5082,11 +5063,11 @@ void RegExpConstructResultStub::Generate(MacroAssembler* masm) {
// Set input, index and length fields from arguments.
__ ldr(r1, MemOperand(sp, kPointerSize * 0));
__ ldr(r2, MemOperand(sp, kPointerSize * 1));
__ ldr(r6, MemOperand(sp, kPointerSize * 2));
__ str(r1, FieldMemOperand(r0, JSRegExpResult::kInputOffset));
__ ldr(r1, MemOperand(sp, kPointerSize * 1));
__ str(r1, FieldMemOperand(r0, JSRegExpResult::kIndexOffset));
__ ldr(r1, MemOperand(sp, kPointerSize * 2));
__ str(r1, FieldMemOperand(r0, JSArray::kLengthOffset));
__ str(r2, FieldMemOperand(r0, JSRegExpResult::kIndexOffset));
__ str(r6, FieldMemOperand(r0, JSArray::kLengthOffset));
// Fill out the elements FixedArray.
// r0: JSArray, tagged.
@@ -5436,8 +5417,7 @@ void StringCharFromCodeGenerator::GenerateFast(MacroAssembler* masm) {
STATIC_ASSERT(kSmiTag == 0);
__ add(result_, result_, Operand(code_, LSL, kPointerSizeLog2 - kSmiTagSize));
__ ldr(result_, FieldMemOperand(result_, FixedArray::kHeaderSize));
__ LoadRoot(ip, Heap::kUndefinedValueRootIndex);
__ cmp(result_, Operand(ip));
__ CompareRoot(result_, Heap::kUndefinedValueRootIndex);
__ b(eq, &slow_case_);
__ bind(&exit_);
}
@@ -5865,10 +5845,11 @@ void SubStringStub::Generate(MacroAssembler* masm) {
__ mov(r3, Operand(r3, ASR, 1), SetCC, cc);
// If either to or from had the smi tag bit set, then carry is set now.
__ b(cs, &runtime); // Either "from" or "to" is not a smi.
__ b(mi, &runtime); // From is negative.
// We want to bailout to runtime here if From is negative. In that case, the
// next instruction is not executed and we fall through to bailing out to
// runtime. pl is the opposite of mi.
// Both r2 and r3 are untagged integers.
__ sub(r2, r2, Operand(r3), SetCC);
__ sub(r2, r2, Operand(r3), SetCC, pl);
__ b(mi, &runtime); // Fail if from > to.
// Make sure first argument is a string.
@@ -5941,9 +5922,9 @@ void SubStringStub::Generate(MacroAssembler* masm) {
__ bind(&sliced_string);
// Sliced string. Fetch parent and correct start index by offset.
__ ldr(r5, FieldMemOperand(r0, SlicedString::kOffsetOffset));
__ add(r3, r3, Operand(r5, ASR, 1));
__ ldr(r4, FieldMemOperand(r0, SlicedString::kOffsetOffset));
__ ldr(r5, FieldMemOperand(r0, SlicedString::kParentOffset));
__ add(r3, r3, Operand(r4, ASR, 1)); // Add offset to index.
// Update instance type.
__ ldr(r1, FieldMemOperand(r5, HeapObject::kMapOffset));
__ ldrb(r1, FieldMemOperand(r1, Map::kInstanceTypeOffset));
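The sliced-string fix-up in SubStringStub (and the matching StringCharLoadGenerator hunk in the next file) now issues both loads before the add that consumes the offset, presumably to avoid a load-use stall; the offset moves to r4 so that r5 is free to hold the parent string. Paraphrased (not part of the diff):

  // Before: the add reads r5 in the instruction right after its load.
  __ ldr(r5, FieldMemOperand(r0, SlicedString::kOffsetOffset));
  __ add(r3, r3, Operand(r5, ASR, 1));
  __ ldr(r5, FieldMemOperand(r0, SlicedString::kParentOffset));

  // After: both loads are started first; the add runs once the offset
  // load has had an extra instruction's worth of time to complete.
  __ ldr(r4, FieldMemOperand(r0, SlicedString::kOffsetOffset));
  __ ldr(r5, FieldMemOperand(r0, SlicedString::kParentOffset));
  __ add(r3, r3, Operand(r4, ASR, 1));  // Add offset to index.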

--- changed file 2 of 5 ---

@@ -1,4 +1,4 @@
// Copyright 2011 the V8 project authors. All rights reserved.
// Copyright 2012 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
@@ -104,10 +104,10 @@ void ElementsTransitionGenerator::GenerateSmiOnlyToDouble(
__ add(lr, lr, Operand(r5, LSL, 2));
__ AllocateInNewSpace(lr, r6, r7, r9, &gc_required, NO_ALLOCATION_FLAGS);
// r6: destination FixedDoubleArray, not tagged as heap object
// Set destination FixedDoubleArray's length and map.
__ LoadRoot(r9, Heap::kFixedDoubleArrayMapRootIndex);
__ str(r9, MemOperand(r6, HeapObject::kMapOffset));
// Set destination FixedDoubleArray's length.
__ str(r5, MemOperand(r6, FixedDoubleArray::kLengthOffset));
__ str(r9, MemOperand(r6, HeapObject::kMapOffset));
// Update receiver's map.
__ str(r3, FieldMemOperand(r2, HeapObject::kMapOffset));
@@ -155,10 +155,9 @@ void ElementsTransitionGenerator::GenerateSmiOnlyToDouble(
__ bind(&loop);
__ ldr(r9, MemOperand(r3, 4, PostIndex));
// r9: current element
__ JumpIfNotSmi(r9, &convert_hole);
__ UntagAndJumpIfNotSmi(r9, r9, &convert_hole);
// Normal smi, convert to double and store.
__ SmiUntag(r9);
if (vfp3_supported) {
CpuFeatures::Scope scope(VFP3);
__ vmov(s0, r9);
@@ -181,6 +180,9 @@ void ElementsTransitionGenerator::GenerateSmiOnlyToDouble(
// Hole found, store the-hole NaN.
__ bind(&convert_hole);
if (FLAG_debug_code) {
// Restore a "smi-untagged" heap object.
__ SmiTag(r9);
__ orr(r9, r9, Operand(1));
__ CompareRoot(r9, Heap::kTheHoleValueRootIndex);
__ Assert(eq, "object found in smi-only array");
}
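A note on the debug-only hole check above: UntagAndJumpIfNotSmi has already shifted r9 right by one before jumping to convert_hole, so the tagged heap pointer has lost its low tag bit. Re-tagging (a left shift by one) and or-ing the bit back in rebuilds the original pointer before it is compared against the hole value. A sketch of the bit manipulation, assuming kSmiTagSize == 1 and kHeapObjectTag == 1:

  // r9 held a tagged heap pointer; the untag helper shifted it right once.
  __ SmiTag(r9);                 // Shift left again; low bit is now 0.
  __ orr(r9, r9, Operand(1));    // Re-set the heap-object tag bit.
  __ CompareRoot(r9, Heap::kTheHoleValueRootIndex);
  __ Assert(eq, "object found in smi-only array");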
@@ -208,9 +210,8 @@ void ElementsTransitionGenerator::GenerateDoubleToObject(
Label entry, loop, convert_hole, gc_required;
__ push(lr);
__ Push(r3, r2, r1, r0);
__ ldr(r4, FieldMemOperand(r2, JSObject::kElementsOffset));
__ Push(r3, r2, r1, r0);
__ ldr(r5, FieldMemOperand(r4, FixedArray::kLengthOffset));
// r4: source FixedDoubleArray
// r5: number of elements (smi-tagged)
@@ -220,10 +221,10 @@ void ElementsTransitionGenerator::GenerateDoubleToObject(
__ add(r0, r0, Operand(r5, LSL, 1));
__ AllocateInNewSpace(r0, r6, r7, r9, &gc_required, NO_ALLOCATION_FLAGS);
// r6: destination FixedArray, not tagged as heap object
// Set destination FixedDoubleArray's length and map.
__ LoadRoot(r9, Heap::kFixedArrayMapRootIndex);
__ str(r9, MemOperand(r6, HeapObject::kMapOffset));
// Set destination FixedDoubleArray's length.
__ str(r5, MemOperand(r6, FixedDoubleArray::kLengthOffset));
__ str(r9, MemOperand(r6, HeapObject::kMapOffset));
// Prepare for conversion loop.
__ add(r4, r4, Operand(FixedDoubleArray::kHeaderSize - kHeapObjectTag + 4));
@@ -325,8 +326,8 @@ void StringCharLoadGenerator::Generate(MacroAssembler* masm,
// Handle slices.
Label indirect_string_loaded;
__ ldr(result, FieldMemOperand(string, SlicedString::kOffsetOffset));
__ add(index, index, Operand(result, ASR, kSmiTagSize));
__ ldr(string, FieldMemOperand(string, SlicedString::kParentOffset));
__ add(index, index, Operand(result, ASR, kSmiTagSize));
__ jmp(&indirect_string_loaded);
// Handle cons strings.
@@ -336,8 +337,7 @@ void StringCharLoadGenerator::Generate(MacroAssembler* masm,
// the string.
__ bind(&cons_string);
__ ldr(result, FieldMemOperand(string, ConsString::kSecondOffset));
__ LoadRoot(ip, Heap::kEmptyStringRootIndex);
__ cmp(result, ip);
__ CompareRoot(result, Heap::kEmptyStringRootIndex);
__ b(ne, call_runtime);
// Get the first of the two strings and load its instance type.
__ ldr(string, FieldMemOperand(string, ConsString::kFirstOffset));

--- changed file 3 of 5 ---

@@ -3929,7 +3929,7 @@ void LCodeGen::EmitNumberUntagD(Register input_reg,
Label load_smi, heap_number, done;
// Smi check.
__ JumpIfSmi(input_reg, &load_smi);
__ UntagAndJumpIfSmi(scratch, input_reg, &load_smi);
// Heap number map check.
__ ldr(scratch, FieldMemOperand(input_reg, HeapObject::kMapOffset));
@@ -3968,7 +3968,7 @@ void LCodeGen::EmitNumberUntagD(Register input_reg,
// Smi to double register conversion
__ bind(&load_smi);
__ SmiUntag(scratch, input_reg); // Untag smi before converting to float.
// scratch: untagged value of input_reg
__ vmov(flt_scratch, scratch);
__ vcvt_f64_s32(result_reg, flt_scratch);
__ bind(&done);
@@ -4256,7 +4256,7 @@ void LCodeGen::DoClampTToUint8(LClampTToUint8* instr) {
Label is_smi, done, heap_number;
// Both smi and heap number cases are handled.
__ JumpIfSmi(input_reg, &is_smi);
__ UntagAndJumpIfSmi(result_reg, input_reg, &is_smi);
// Check for heap number
__ ldr(scratch, FieldMemOperand(input_reg, HeapObject::kMapOffset));
@@ -4279,7 +4279,6 @@ void LCodeGen::DoClampTToUint8(LClampTToUint8* instr) {
// smi
__ bind(&is_smi);
__ SmiUntag(result_reg, input_reg);
__ ClampUint8(result_reg, result_reg);
__ bind(&done);

--- changed file 4 of 5 ---

@@ -2965,6 +2965,22 @@ void MacroAssembler::JumpIfNotBothSmi(Register reg1,
}
void MacroAssembler::UntagAndJumpIfSmi(
Register dst, Register src, Label* smi_case) {
STATIC_ASSERT(kSmiTag == 0);
mov(dst, Operand(src, ASR, kSmiTagSize), SetCC);
b(cc, smi_case); // Shifter carry is not set for a smi.
}
void MacroAssembler::UntagAndJumpIfNotSmi(
Register dst, Register src, Label* non_smi_case) {
STATIC_ASSERT(kSmiTag == 0);
mov(dst, Operand(src, ASR, kSmiTagSize), SetCC);
b(cs, non_smi_case); // Shifter carry is set for a non-smi.
}
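These helpers rely on the ARM shifter: a shift by an immediate with the S bit set (the SetCC here) deposits the last bit shifted out, which is the smi tag bit, in the carry flag. Since kSmiTag == 0, carry clear (cc) means the value was a smi and carry set (cs) means it was not, and the same mov has already produced the untagged value in dst. A typical caller-side rewrite, paraphrasing the hunks above (not part of the diff):

  // Before: branch on the tag, then untag separately at the target.
  __ JumpIfSmi(object, &is_smi);
  // ... non-smi path ...
  __ bind(&is_smi);
  __ SmiUntag(dst, object);

  // After: one mov untags and sets the flags; the helper branches on carry.
  __ UntagAndJumpIfSmi(dst, object, &is_smi);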
void MacroAssembler::JumpIfEitherSmi(Register reg1,
Register reg2,
Label* on_either_smi) {

--- changed file 5 of 5 ---

@@ -1149,6 +1149,14 @@ class MacroAssembler: public Assembler {
mov(dst, Operand(src, ASR, kSmiTagSize), s);
}
// Untag the source value into destination and jump if source is a smi.
// Source and destination can be the same register.
void UntagAndJumpIfSmi(Register dst, Register src, Label* smi_case);
// Untag the source value into destination and jump if source is not a smi.
// Source and destination can be the same register.
void UntagAndJumpIfNotSmi(Register dst, Register src, Label* non_smi_case);
// Jump if the register contains a smi.
inline void JumpIfSmi(Register value, Label* smi_label) {
tst(value, Operand(kSmiTagMask));