MIPS: Some assembler-level optimizations on ARM.
Port r10541 (0aab14ba).

BUG=
TEST=

Review URL: https://chromiumcodereview.appspot.com/9296046
Patch from Daniel Kalmar <kalmard@homejinni.com>.

git-svn-id: http://v8.googlecode.com/svn/branches/bleeding_edge@10568 ce2b1a6d-e550-0410-aec6-3dcde31c8c00
parent ab5d47ef21
commit 6ab232b1f7
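The recurring change in this port is a new MacroAssembler helper, UntagAndJumpIfSmi (plus an UntagAndJumpIfNotSmi counterpart), defined near the end of the diff. It folds the smi untag into the branch delay slot of the smi check, so the untagging sra occupies a slot that would otherwise hold a nop. A minimal before/after sketch of the call-site pattern, taking the FloatingPointHelper::LoadNumberAsInt32 hunk below as the example:

    // Before: untag and branch as two separate macro calls.
    __ SmiUntag(dst, object);
    __ JumpIfSmi(object, &done);

    // After: a single helper; the untagging sra is emitted into the
    // branch delay slot, so dst is already untagged when &done is reached.
    __ UntagAndJumpIfSmi(dst, object, &done);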
src/mips/code-stubs-mips.cc
@@ -1,4 +1,4 @@
-// Copyright 2011 the V8 project authors. All rights reserved.
+// Copyright 2012 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
@@ -158,20 +158,18 @@ void FastNewContextStub::Generate(MacroAssembler* masm) {
   __ lw(a3, MemOperand(sp, 0));

   // Set up the object header.
-  __ LoadRoot(a2, Heap::kFunctionContextMapRootIndex);
-  __ sw(a2, FieldMemOperand(v0, HeapObject::kMapOffset));
+  __ LoadRoot(a1, Heap::kFunctionContextMapRootIndex);
   __ li(a2, Operand(Smi::FromInt(length)));
   __ sw(a2, FieldMemOperand(v0, FixedArray::kLengthOffset));
+  __ sw(a1, FieldMemOperand(v0, HeapObject::kMapOffset));

-  // Set up the fixed slots.
+  // Set up the fixed slots, copy the global object from the previous context.
+  __ lw(a2, MemOperand(cp, Context::SlotOffset(Context::GLOBAL_INDEX)));
   __ li(a1, Operand(Smi::FromInt(0)));
   __ sw(a3, MemOperand(v0, Context::SlotOffset(Context::CLOSURE_INDEX)));
   __ sw(cp, MemOperand(v0, Context::SlotOffset(Context::PREVIOUS_INDEX)));
   __ sw(a1, MemOperand(v0, Context::SlotOffset(Context::EXTENSION_INDEX)));
-
-  // Copy the global object from the previous context.
-  __ lw(a1, MemOperand(cp, Context::SlotOffset(Context::GLOBAL_INDEX)));
-  __ sw(a1, MemOperand(v0, Context::SlotOffset(Context::GLOBAL_INDEX)));
+  __ sw(a2, MemOperand(v0, Context::SlotOffset(Context::GLOBAL_INDEX)));

   // Initialize the rest of the slots to undefined.
   __ LoadRoot(a1, Heap::kUndefinedValueRootIndex);
@@ -229,14 +227,12 @@ void FastNewBlockContextStub::Generate(MacroAssembler* masm) {
   __ lw(a3, ContextOperand(a3, Context::CLOSURE_INDEX));
   __ bind(&after_sentinel);

-  // Set up the fixed slots.
+  // Set up the fixed slots, copy the global object from the previous context.
+  __ lw(a2, ContextOperand(cp, Context::GLOBAL_INDEX));
   __ sw(a3, ContextOperand(v0, Context::CLOSURE_INDEX));
   __ sw(cp, ContextOperand(v0, Context::PREVIOUS_INDEX));
   __ sw(a1, ContextOperand(v0, Context::EXTENSION_INDEX));
-
-  // Copy the global object from the previous context.
-  __ lw(a1, ContextOperand(cp, Context::GLOBAL_INDEX));
-  __ sw(a1, ContextOperand(v0, Context::GLOBAL_INDEX));
+  __ sw(a2, ContextOperand(v0, Context::GLOBAL_INDEX));

   // Initialize the rest of the slots to the hole value.
   __ LoadRoot(a1, Heap::kTheHoleValueRootIndex);
@@ -592,7 +588,9 @@ void FloatingPointHelper::LoadNumber(MacroAssembler* masm,

   Label is_smi, done;

-  __ JumpIfSmi(object, &is_smi);
+  // Smi-check
+  __ UntagAndJumpIfSmi(scratch1, object, &is_smi);
+  // Heap number check
   __ JumpIfNotHeapNumber(object, heap_number_map, scratch1, not_number);

   // Handle loading a double from a heap number.
@@ -619,7 +617,6 @@ void FloatingPointHelper::LoadNumber(MacroAssembler* masm,
   if (CpuFeatures::IsSupported(FPU)) {
     CpuFeatures::Scope scope(FPU);
     // Convert smi to double using FPU instructions.
-    __ SmiUntag(scratch1, object);
     __ mtc1(scratch1, dst);
     __ cvt_d_w(dst, dst);
     if (destination == kCoreRegisters) {
@@ -654,11 +651,10 @@ void FloatingPointHelper::ConvertNumberToInt32(MacroAssembler* masm,
               Heap::kHeapNumberMapRootIndex,
               "HeapNumberMap register clobbered.");
   }
-  Label is_smi;
   Label done;
   Label not_in_int32_range;

-  __ JumpIfSmi(object, &is_smi);
+  __ UntagAndJumpIfSmi(dst, object, &done);
   __ lw(scratch1, FieldMemOperand(object, HeapNumber::kMapOffset));
   __ Branch(not_number, ne, scratch1, Operand(heap_number_map));
   __ ConvertToInt32(object,
@@ -678,10 +674,6 @@ void FloatingPointHelper::ConvertNumberToInt32(MacroAssembler* masm,
                     scratch2,
                     scratch3);

-  __ jmp(&done);
-
-  __ bind(&is_smi);
-  __ SmiUntag(dst, object);
   __ bind(&done);
 }

@@ -863,10 +855,7 @@ void FloatingPointHelper::LoadNumberAsInt32(MacroAssembler* masm,

   Label done;

-  // Untag the object into the destination register.
-  __ SmiUntag(dst, object);
-  // Just return if the object is a smi.
-  __ JumpIfSmi(object, &done);
+  __ UntagAndJumpIfSmi(dst, object, &done);

   if (FLAG_debug_code) {
     __ AbortIfNotRootValue(heap_number_map,
@@ -3605,7 +3594,7 @@ void MathPowStub::Generate(MacroAssembler* masm) {
   const Register scratch = t5;
   const Register scratch2 = t3;

-  Label call_runtime, done, exponent_not_smi, int_exponent;
+  Label call_runtime, done, int_exponent;
   if (exponent_type_ == ON_STACK) {
     Label base_is_smi, unpack_exponent;
     // The exponent and base are supplied as arguments on the stack.
@@ -3616,7 +3605,7 @@ void MathPowStub::Generate(MacroAssembler* masm) {

     __ LoadRoot(heapnumbermap, Heap::kHeapNumberMapRootIndex);

-    __ JumpIfSmi(base, &base_is_smi);
+    __ UntagAndJumpIfSmi(scratch, base, &base_is_smi);
     __ lw(scratch, FieldMemOperand(base, JSObject::kMapOffset));
     __ Branch(&call_runtime, ne, scratch, Operand(heapnumbermap));

@@ -3624,27 +3613,20 @@ void MathPowStub::Generate(MacroAssembler* masm) {
     __ jmp(&unpack_exponent);

     __ bind(&base_is_smi);
-    __ SmiUntag(base);
-    __ mtc1(base, single_scratch);
+    __ mtc1(scratch, single_scratch);
     __ cvt_d_w(double_base, single_scratch);
     __ bind(&unpack_exponent);

-    __ JumpIfNotSmi(exponent, &exponent_not_smi);
-    __ SmiUntag(exponent);
-    __ jmp(&int_exponent);
+    __ UntagAndJumpIfSmi(scratch, exponent, &int_exponent);

-    __ bind(&exponent_not_smi);
     __ lw(scratch, FieldMemOperand(exponent, JSObject::kMapOffset));
     __ Branch(&call_runtime, ne, scratch, Operand(heapnumbermap));
     __ ldc1(double_exponent,
             FieldMemOperand(exponent, HeapNumber::kValueOffset));
   } else if (exponent_type_ == TAGGED) {
     // Base is already in double_base.
-    __ JumpIfNotSmi(exponent, &exponent_not_smi);
-    __ SmiUntag(exponent);
-    __ jmp(&int_exponent);
+    __ UntagAndJumpIfSmi(scratch, exponent, &int_exponent);

-    __ bind(&exponent_not_smi);
     __ ldc1(double_exponent,
             FieldMemOperand(exponent, HeapNumber::kValueOffset));
   }
@@ -3724,13 +3706,20 @@ void MathPowStub::Generate(MacroAssembler* masm) {
     __ jmp(&done);

     __ bind(&int_exponent_convert);
-    __ mfc1(exponent, single_scratch);
+    __ mfc1(scratch, single_scratch);
   }

   // Calculate power with integer exponent.
   __ bind(&int_exponent);

-  __ mov(scratch, exponent);  // Back up exponent.
+  // Get two copies of exponent in the registers scratch and exponent.
+  if (exponent_type_ == INTEGER) {
+    __ mov(scratch, exponent);
+  } else {
+    // Exponent has previously been stored into scratch as untagged integer.
+    __ mov(exponent, scratch);
+  }
+
   __ mov_d(double_scratch, double_base);  // Back up base.
   __ Move(double_result, 1.0);
@@ -5298,11 +5287,11 @@ void RegExpConstructResultStub::Generate(MacroAssembler* masm) {

   // Set input, index and length fields from arguments.
   __ lw(a1, MemOperand(sp, kPointerSize * 0));
+  __ lw(a2, MemOperand(sp, kPointerSize * 1));
+  __ lw(t2, MemOperand(sp, kPointerSize * 2));
   __ sw(a1, FieldMemOperand(v0, JSRegExpResult::kInputOffset));
-  __ lw(a1, MemOperand(sp, kPointerSize * 1));
-  __ sw(a1, FieldMemOperand(v0, JSRegExpResult::kIndexOffset));
-  __ lw(a1, MemOperand(sp, kPointerSize * 2));
-  __ sw(a1, FieldMemOperand(v0, JSArray::kLengthOffset));
+  __ sw(a2, FieldMemOperand(v0, JSRegExpResult::kIndexOffset));
+  __ sw(t2, FieldMemOperand(v0, JSArray::kLengthOffset));

   // Fill out the elements FixedArray.
   // v0: JSArray, tagged.
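A different micro-optimization appears in the hunk above: the three argument loads are issued into separate registers (a1, a2, t2) before the stores that consume them, where the old code cycled everything through a1. On in-order MIPS pipelines a load's result is generally not ready for the very next instruction, so each old lw/sw pair risked a load-use stall; batching the loads gives them time to complete. A schematic contrast, repeating lines from the hunk purely for illustration:

    // Old: each store immediately consumes the register just loaded.
    __ lw(a1, MemOperand(sp, kPointerSize * 1));
    __ sw(a1, FieldMemOperand(v0, JSRegExpResult::kIndexOffset));  // may stall

    // New: loads first, stores later, each value in its own register.
    __ lw(a2, MemOperand(sp, kPointerSize * 1));
    __ lw(t2, MemOperand(sp, kPointerSize * 2));
    // ... store of a1 in between ...
    __ sw(a2, FieldMemOperand(v0, JSRegExpResult::kIndexOffset));
    __ sw(t2, FieldMemOperand(v0, JSArray::kLengthOffset));

The sliced-string hunks later in this file and in codegen-mips.cc apply the same reordering, hoisting the parent-pointer load in between the offset load and its use.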
@@ -6069,10 +6058,8 @@ void SubStringStub::Generate(MacroAssembler* masm) {

   // Utilize delay slots. SmiUntag doesn't emit a jump, everything else is
   // safe in this case.
-  __ JumpIfSmi(a2, &runtime, at, USE_DELAY_SLOT);
-  __ SmiUntag(a2);
-  __ JumpIfSmi(a3, &runtime, at, USE_DELAY_SLOT);
-  __ SmiUntag(a3);
+  __ UntagAndJumpIfSmi(a2, a2, &runtime);
+  __ UntagAndJumpIfSmi(a3, a3, &runtime);

   // Both a2 and a3 are untagged integers.
@@ -6156,10 +6143,10 @@ void SubStringStub::Generate(MacroAssembler* masm) {

   __ bind(&sliced_string);
   // Sliced string. Fetch parent and correct start index by offset.
-  __ lw(t1, FieldMemOperand(v0, SlicedString::kOffsetOffset));
-  __ sra(t1, t1, 1);
-  __ Addu(a3, a3, t1);
+  __ lw(t0, FieldMemOperand(v0, SlicedString::kOffsetOffset));
   __ lw(t1, FieldMemOperand(v0, SlicedString::kParentOffset));
+  __ sra(t0, t0, 1);  // Add offset to index.
+  __ Addu(a3, a3, t0);
   // Update instance type.
   __ lw(a1, FieldMemOperand(t1, HeapObject::kMapOffset));
   __ lbu(a1, FieldMemOperand(a1, Map::kInstanceTypeOffset));
src/mips/codegen-mips.cc
@@ -1,4 +1,4 @@
-// Copyright 2011 the V8 project authors. All rights reserved.
+// Copyright 2012 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
@@ -105,10 +105,10 @@ void ElementsTransitionGenerator::GenerateSmiOnlyToDouble(
   __ Addu(scratch, scratch, FixedDoubleArray::kHeaderSize);
   __ AllocateInNewSpace(scratch, t2, t3, t5, &gc_required, NO_ALLOCATION_FLAGS);
   // t2: destination FixedDoubleArray, not tagged as heap object
+  // Set destination FixedDoubleArray's length and map.
   __ LoadRoot(t5, Heap::kFixedDoubleArrayMapRootIndex);
-  __ sw(t5, MemOperand(t2, HeapObject::kMapOffset));
-  // Set destination FixedDoubleArray's length.
   __ sw(t1, MemOperand(t2, FixedDoubleArray::kLengthOffset));
+  __ sw(t5, MemOperand(t2, HeapObject::kMapOffset));
   // Update receiver's map.

   __ sw(a3, FieldMemOperand(a2, HeapObject::kMapOffset));
@@ -159,10 +159,9 @@ void ElementsTransitionGenerator::GenerateSmiOnlyToDouble(
   __ lw(t5, MemOperand(a3));
   __ Addu(a3, a3, kIntSize);
   // t5: current element
-  __ JumpIfNotSmi(t5, &convert_hole);
+  __ UntagAndJumpIfNotSmi(t5, t5, &convert_hole);

   // Normal smi, convert to double and store.
-  __ SmiUntag(t5);
   if (fpu_supported) {
     CpuFeatures::Scope scope(FPU);
     __ mtc1(t5, f0);
@@ -187,6 +186,9 @@ void ElementsTransitionGenerator::GenerateSmiOnlyToDouble(
   // Hole found, store the-hole NaN.
   __ bind(&convert_hole);
   if (FLAG_debug_code) {
+    // Restore a "smi-untagged" heap object.
+    __ SmiTag(t5);
+    __ Or(t5, t5, Operand(1));
     __ LoadRoot(at, Heap::kTheHoleValueRootIndex);
     __ Assert(eq, "object found in smi-only array", at, Operand(t5));
   }
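This debug-only check works because UntagAndJumpIfNotSmi has already shifted the value: control reaches convert_hole with t5 holding the heap pointer arithmetically shifted right by one, and heap object pointers carry a low tag bit of 1, so SmiTag (a left shift by one) followed by Or with 1 reconstructs the original pointer before comparing it against the hole. A standalone sketch of that bit arithmetic (plain C++ with hypothetical values, not part of the commit):

    #include <cassert>
    #include <cstdint>

    int main() {
      // A tagged heap object pointer: low bit (kHeapObjectTag) is 1.
      uint32_t ptr = 0x2345ABCCu | 1u;
      // UntagAndJumpIfNotSmi left t5 = ptr >> 1 (arithmetic shift).
      uint32_t t5 = static_cast<uint32_t>(static_cast<int32_t>(ptr) >> 1);
      // __ SmiTag(t5) shifts left by one; __ Or(t5, t5, Operand(1))
      // restores the tag bit that the shift discarded.
      uint32_t restored = (t5 << 1) | 1u;
      assert(restored == ptr);  // original pointer, ready for comparison
      return 0;
    }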
@@ -225,10 +227,10 @@ void ElementsTransitionGenerator::GenerateDoubleToObject(
   __ Addu(a0, a0, FixedDoubleArray::kHeaderSize);
   __ AllocateInNewSpace(a0, t2, t3, t5, &gc_required, NO_ALLOCATION_FLAGS);
   // t2: destination FixedArray, not tagged as heap object
+  // Set destination FixedDoubleArray's length and map.
   __ LoadRoot(t5, Heap::kFixedArrayMapRootIndex);
-  __ sw(t5, MemOperand(t2, HeapObject::kMapOffset));
-  // Set destination FixedDoubleArray's length.
   __ sw(t1, MemOperand(t2, FixedDoubleArray::kLengthOffset));
+  __ sw(t5, MemOperand(t2, HeapObject::kMapOffset));

   // Prepare for conversion loop.
   __ Addu(t0, t0, Operand(FixedDoubleArray::kHeaderSize - kHeapObjectTag + 4));
@@ -333,9 +335,9 @@ void StringCharLoadGenerator::Generate(MacroAssembler* masm,
   // Handle slices.
   Label indirect_string_loaded;
   __ lw(result, FieldMemOperand(string, SlicedString::kOffsetOffset));
+  __ lw(string, FieldMemOperand(string, SlicedString::kParentOffset));
   __ sra(at, result, kSmiTagSize);
   __ Addu(index, index, at);
-  __ lw(string, FieldMemOperand(string, SlicedString::kParentOffset));
   __ jmp(&indirect_string_loaded);

   // Handle cons strings.
src/mips/lithium-codegen-mips.cc
@@ -3844,7 +3844,7 @@ void LCodeGen::EmitNumberUntagD(Register input_reg,
   Label load_smi, heap_number, done;

   // Smi check.
-  __ JumpIfSmi(input_reg, &load_smi);
+  __ UntagAndJumpIfSmi(scratch, input_reg, &load_smi);

   // Heap number map check.
   __ lw(scratch, FieldMemOperand(input_reg, HeapObject::kMapOffset));
@@ -3877,7 +3877,7 @@ void LCodeGen::EmitNumberUntagD(Register input_reg,

   // Smi to double register conversion
   __ bind(&load_smi);
-  __ SmiUntag(scratch, input_reg);  // Untag smi before converting to float.
+  // scratch: untagged value of input_reg
   __ mtc1(scratch, result_reg);
   __ cvt_d_w(result_reg, result_reg);
   __ bind(&done);
@@ -4160,7 +4160,7 @@ void LCodeGen::DoClampTToUint8(LClampTToUint8* instr) {
   Label is_smi, done, heap_number;

   // Both smi and heap number cases are handled.
-  __ JumpIfSmi(input_reg, &is_smi);
+  __ UntagAndJumpIfSmi(scratch, input_reg, &is_smi);

   // Check for heap number
   __ lw(scratch, FieldMemOperand(input_reg, HeapObject::kMapOffset));
@@ -4180,9 +4180,7 @@ void LCodeGen::DoClampTToUint8(LClampTToUint8* instr) {
   __ ClampDoubleToUint8(result_reg, double_scratch0(), temp_reg);
   __ jmp(&done);

-  // smi
   __ bind(&is_smi);
-  __ SmiUntag(scratch, input_reg);
   __ ClampUint8(result_reg, scratch);

   __ bind(&done);
src/mips/macro-assembler-mips.cc
@@ -4541,6 +4541,40 @@ void MacroAssembler::SmiTagCheckOverflow(Register dst,
 }


+void MacroAssembler::UntagAndJumpIfSmi(Register dst,
+                                       Register src,
+                                       Label* smi_case) {
+  JumpIfSmi(src, smi_case, at, USE_DELAY_SLOT);
+  SmiUntag(dst, src);
+}
+
+
+void MacroAssembler::UntagAndJumpIfNotSmi(Register dst,
+                                          Register src,
+                                          Label* non_smi_case) {
+  JumpIfNotSmi(src, non_smi_case, at, USE_DELAY_SLOT);
+  SmiUntag(dst, src);
+}
+
+
+void MacroAssembler::JumpIfSmi(Register value,
+                               Label* smi_label,
+                               Register scratch,
+                               BranchDelaySlot bd) {
+  ASSERT_EQ(0, kSmiTag);
+  andi(scratch, value, kSmiTagMask);
+  Branch(bd, smi_label, eq, scratch, Operand(zero_reg));
+}
+
+
+void MacroAssembler::JumpIfNotSmi(Register value,
+                                  Label* not_smi_label,
+                                  Register scratch,
+                                  BranchDelaySlot bd) {
+  ASSERT_EQ(0, kSmiTag);
+  andi(scratch, value, kSmiTagMask);
+  Branch(bd, not_smi_label, ne, scratch, Operand(zero_reg));
+}
+
+
 void MacroAssembler::JumpIfNotBothSmi(Register reg1,
                                       Register reg2,
                                       Label* on_not_both_smi) {
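The definitions above are the heart of the patch. USE_DELAY_SLOT asks Branch() to place the next emitted instruction in the branch delay slot, and on MIPS the delay-slot instruction executes whether or not the branch is taken. A sketch of what UntagAndJumpIfSmi boils down to, assuming V8's 32-bit smi encoding (kSmiTag == 0, kSmiTagMask == 1, kSmiTagSize == 1); the real encoding comes from the assembler calls above, this is illustration only:

    // andi  at, src, 1            // JumpIfSmi: isolate the smi tag bit
    // beq   at, zero_reg, smi_case
    // sra   dst, src, 1           // delay slot: executes on BOTH paths

Because the sra always executes, dst holds a meaningful value only on the taken (smi) path; on fall-through it contains a heap pointer shifted right by one, which callers must ignore. UntagAndJumpIfNotSmi mirrors this, with the fall-through path being the smi path.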
src/mips/macro-assembler-mips.h
@@ -1242,22 +1242,25 @@ class MacroAssembler: public Assembler {
     sra(dst, src, kSmiTagSize);
   }

+  // Untag the source value into destination and jump if source is a smi.
+  // Source and destination can be the same register.
+  void UntagAndJumpIfSmi(Register dst, Register src, Label* smi_case);
+
+  // Untag the source value into destination and jump if source is not a smi.
+  // Source and destination can be the same register.
+  void UntagAndJumpIfNotSmi(Register dst, Register src, Label* non_smi_case);
+
   // Jump if the register contains a smi.
-  inline void JumpIfSmi(Register value, Label* smi_label,
-                        Register scratch = at,
-                        BranchDelaySlot bd = PROTECT) {
-    ASSERT_EQ(0, kSmiTag);
-    andi(scratch, value, kSmiTagMask);
-    Branch(bd, smi_label, eq, scratch, Operand(zero_reg));
-  }
+  void JumpIfSmi(Register value,
+                 Label* smi_label,
+                 Register scratch = at,
+                 BranchDelaySlot bd = PROTECT);

   // Jump if the register contains a non-smi.
-  inline void JumpIfNotSmi(Register value, Label* not_smi_label,
-                           Register scratch = at) {
-    ASSERT_EQ(0, kSmiTag);
-    andi(scratch, value, kSmiTagMask);
-    Branch(not_smi_label, ne, scratch, Operand(zero_reg));
-  }
+  void JumpIfNotSmi(Register value,
+                    Label* not_smi_label,
+                    Register scratch = at,
+                    BranchDelaySlot bd = PROTECT);

   // Jump if either of the registers contain a non-smi.
   void JumpIfNotBothSmi(Register reg1, Register reg2, Label* on_not_both_smi);
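From a caller's point of view, the header now exposes the untag-and-branch pair as ordinary declarations, with both jump helpers taking an optional BranchDelaySlot argument. A hypothetical call-site sketch (register, label, and control flow are illustrative, not from the commit):

    // Bail out to 'slow' if a0 is not a smi. The untagging sra sits in
    // the branch delay slot and so executes on both paths, but only the
    // fall-through (smi) path may rely on the untagged value in a0.
    Label slow;
    __ UntagAndJumpIfNotSmi(a0, a0, &slow);
    // ...here a0 is an untagged integer value...

As the header comments note, source and destination may be the same register, which is how most call sites in this diff use the helpers.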