MIPS: Use the Lsa() macro/r6 instruction in existing code.

BUG=

Review URL: https://codereview.chromium.org/1608933003

Cr-Commit-Position: refs/heads/master@{#33390}
Author: balazs.kilvady, 2016-01-19 08:31:30 -08:00 (committed by Commit bot)
Parent: 68654b6476
Commit: d9af984e70
9 changed files with 108 additions and 212 deletions
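For context: Lsa ("left shift and add") computes rd = rt + (rs << sa), the base-plus-scaled-index pattern that every hunk below previously spelled as a two-instruction sll/addu pair. On MIPS32r6 this maps onto the single lsa instruction; elsewhere the macro falls back to the old pair. A minimal sketch of such a macro, assuming V8's MIPS macro-assembler conventions — the r6 guard, the sa encoding, and the default `at` scratch are assumptions here, not verbatim V8 code:

    // Sketch only: rd = rt + (rs << sa), with 1 <= sa <= 31.
    void MacroAssembler::Lsa(Register rd, Register rt, Register rs, uint8_t sa,
                             Register scratch /* = at */) {
      DCHECK(sa >= 1 && sa <= 31);
      if (IsMipsArchVariant(kMips32r6) && sa <= 4) {
        lsa(rd, rt, rs, sa - 1);  // r6 encodes shifts 1..4 in a 2-bit field
      } else {
        // Pre-r6 fallback: shift into a temp, then add. If rd aliases rt,
        // the shifted value has to go through the scratch register instead.
        Register tmp = rd.is(rt) ? scratch : rd;
        sll(tmp, rs, sa);
        Addu(rd, rt, tmp);
      }
    }

The rewrite therefore saves one instruction on r6 cores and is never worse on older ones.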

View File

@@ -1411,8 +1411,7 @@ void LCodeGen::DoMulI(LMulI* instr) {
         if (constant < 0) __ Subu(result, zero_reg, result);
       } else if (base::bits::IsPowerOfTwo32(constant_abs - 1)) {
         int32_t shift = WhichPowerOf2(constant_abs - 1);
-        __ sll(scratch, left, shift);
-        __ Addu(result, scratch, left);
+        __ Lsa(result, left, left, shift);
         // Correct the sign of the result if the constant is negative.
         if (constant < 0) __ Subu(result, zero_reg, result);
       } else if (base::bits::IsPowerOfTwo32(constant_abs + 1)) {
@@ -2578,8 +2577,7 @@ void LCodeGen::DoReturn(LReturn* instr) {
     Register reg = ToRegister(instr->parameter_count());
     // The argument count parameter is a smi
     __ SmiUntag(reg);
-    __ sll(at, reg, kPointerSizeLog2);
-    __ Addu(sp, sp, at);
+    __ Lsa(sp, sp, reg, kPointerSizeLog2);
   }
   __ Jump(ra);
@@ -2781,8 +2779,7 @@ void LCodeGen::DoAccessArgumentsAt(LAccessArgumentsAt* instr) {
       Register index = ToRegister(instr->index());
       __ li(at, Operand(const_length + 1));
       __ Subu(result, at, index);
-      __ sll(at, result, kPointerSizeLog2);
-      __ Addu(at, arguments, at);
+      __ Lsa(at, arguments, result, kPointerSizeLog2);
       __ lw(result, MemOperand(at));
     }
   } else if (instr->index()->IsConstantOperand()) {
@@ -2791,12 +2788,10 @@ void LCodeGen::DoAccessArgumentsAt(LAccessArgumentsAt* instr) {
     int loc = const_index - 1;
     if (loc != 0) {
       __ Subu(result, length, Operand(loc));
-      __ sll(at, result, kPointerSizeLog2);
-      __ Addu(at, arguments, at);
+      __ Lsa(at, arguments, result, kPointerSizeLog2);
       __ lw(result, MemOperand(at));
     } else {
-      __ sll(at, length, kPointerSizeLog2);
-      __ Addu(at, arguments, at);
+      __ Lsa(at, arguments, length, kPointerSizeLog2);
       __ lw(result, MemOperand(at));
     }
   } else {
@@ -2804,8 +2799,7 @@ void LCodeGen::DoAccessArgumentsAt(LAccessArgumentsAt* instr) {
     Register index = ToRegister(instr->index());
     __ Subu(result, length, index);
     __ Addu(result, result, 1);
-    __ sll(at, result, kPointerSizeLog2);
-    __ Addu(at, arguments, at);
+    __ Lsa(at, arguments, result, kPointerSizeLog2);
     __ lw(result, MemOperand(at));
   }
 }
@@ -2914,8 +2908,7 @@ void LCodeGen::DoLoadKeyedFixedDoubleArray(LLoadKeyed* instr) {
     key = ToRegister(instr->key());
     int shift_size = (instr->hydrogen()->key()->representation().IsSmi())
         ? (element_size_shift - kSmiTagSize) : element_size_shift;
-    __ sll(at, key, shift_size);
-    __ Addu(scratch, scratch, at);
+    __ Lsa(scratch, scratch, key, shift_size);
   }
   __ ldc1(result, MemOperand(scratch));
@@ -2946,11 +2939,9 @@ void LCodeGen::DoLoadKeyedFixedArray(LLoadKeyed* instr) {
     // during bound check elimination with the index argument to the bounds
    // check, which can be tagged, so that case must be handled here, too.
     if (instr->hydrogen()->key()->representation().IsSmi()) {
-      __ sll(scratch, key, kPointerSizeLog2 - kSmiTagSize);
-      __ addu(scratch, elements, scratch);
+      __ Lsa(scratch, elements, key, kPointerSizeLog2 - kSmiTagSize);
     } else {
-      __ sll(scratch, key, kPointerSizeLog2);
-      __ addu(scratch, elements, scratch);
+      __ Lsa(scratch, elements, key, kPointerSizeLog2);
     }
   }
   __ lw(result, MemOperand(store_base, offset));
@@ -3945,8 +3936,7 @@ void LCodeGen::DoStoreKeyedExternalArray(LStoreKeyed* instr) {
       address = external_pointer;
     }
   } else {
-    __ sll(address, key, shift_size);
-    __ Addu(address, external_pointer, address);
+    __ Lsa(address, external_pointer, key, shift_size);
   }
   if (elements_kind == FLOAT32_ELEMENTS) {
@@ -4063,11 +4053,9 @@ void LCodeGen::DoStoreKeyedFixedArray(LStoreKeyed* instr) {
     // during bound check elimination with the index argument to the bounds
     // check, which can be tagged, so that case must be handled here, too.
     if (instr->hydrogen()->key()->representation().IsSmi()) {
-      __ sll(scratch, key, kPointerSizeLog2 - kSmiTagSize);
-      __ addu(scratch, elements, scratch);
+      __ Lsa(scratch, elements, key, kPointerSizeLog2 - kSmiTagSize);
     } else {
-      __ sll(scratch, key, kPointerSizeLog2);
-      __ addu(scratch, elements, scratch);
+      __ Lsa(scratch, elements, key, kPointerSizeLog2);
     }
   }
   __ sw(value, MemOperand(store_base, offset));
@@ -4354,8 +4342,7 @@ void LCodeGen::DoStringCharFromCode(LStringCharFromCode* instr) {
   __ Branch(deferred->entry(), hi,
             char_code, Operand(String::kMaxOneByteCharCode));
   __ LoadRoot(result, Heap::kSingleCharacterStringCacheRootIndex);
-  __ sll(scratch, char_code, kPointerSizeLog2);
-  __ Addu(result, result, scratch);
+  __ Lsa(result, result, char_code, kPointerSizeLog2);
   __ lw(result, FieldMemOperand(result, FixedArray::kHeaderSize));
   __ LoadRoot(scratch, Heap::kUndefinedValueRootIndex);
   __ Branch(deferred->entry(), eq, result, Operand(scratch));
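The DoMulI hunk above is the one place where Lsa folds a strength-reduced multiply rather than an address computation: for constant_abs == 2^shift + 1, result = left + (left << shift). For example (illustrative values, not from the patch):

    // Multiply by 5 (= 2^2 + 1) in a single shift-add:
    __ Lsa(result, left, left, 2);  // result = left + (left << 2) = left * 5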


@@ -1140,8 +1140,7 @@ void FullCodeGenerator::VisitForInStatement(ForInStatement* stmt) {
   // Get the current entry of the array into register a3.
   __ lw(a2, MemOperand(sp, 2 * kPointerSize));
   __ Addu(a2, a2, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
-  __ sll(t0, a0, kPointerSizeLog2 - kSmiTagSize);
-  __ addu(t0, a2, t0);  // Array base + scaled (smi) index.
+  __ Lsa(t0, a2, a0, kPointerSizeLog2 - kSmiTagSize);
   __ lw(a3, MemOperand(t0));  // Current entry.

   // Get the expected map from the stack or a smi in the
@@ -3798,8 +3797,7 @@ void FullCodeGenerator::EmitFastOneByteArrayJoin(CallRuntime* expr) {
   __ mov(string_length, zero_reg);
   __ Addu(element,
           elements, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
-  __ sll(elements_end, array_length, kPointerSizeLog2);
-  __ Addu(elements_end, element, elements_end);
+  __ Lsa(elements_end, element, array_length, kPointerSizeLog2);
   // Loop condition: while (element < elements_end).
   // Live values in registers:
   //   elements: Fixed array of strings.
@@ -3876,8 +3874,7 @@ void FullCodeGenerator::EmitFastOneByteArrayJoin(CallRuntime* expr) {
   // Prepare for looping. Set up elements_end to end of the array. Set
   // result_pos to the position of the result where to write the first
   // character.
-  __ sll(elements_end, array_length, kPointerSizeLog2);
-  __ Addu(elements_end, element, elements_end);
+  __ Lsa(elements_end, element, array_length, kPointerSizeLog2);
   result_pos = array_length;  // End of live range for array_length.
   array_length = no_reg;
   __ Addu(result_pos,


@@ -230,8 +230,7 @@ static void GenerateFastArrayLoad(MacroAssembler* masm, Register receiver,
           Operand(FixedArray::kHeaderSize - kHeapObjectTag));
   // The key is a smi.
   STATIC_ASSERT(kSmiTag == 0 && kSmiTagSize < kPointerSizeLog2);
-  __ sll(at, key, kPointerSizeLog2 - kSmiTagSize);
-  __ addu(at, at, scratch1);
+  __ Lsa(at, scratch1, key, kPointerSizeLog2 - kSmiTagSize);
   __ lw(scratch2, MemOperand(at));

   __ LoadRoot(at, Heap::kTheHoleValueRootIndex);
@@ -491,8 +490,7 @@ static void KeyedStoreGenerateMegamorphicHelper(
   // there may be a callback on the element.
   Label holecheck_passed1;
   __ Addu(address, elements, FixedArray::kHeaderSize - kHeapObjectTag);
-  __ sll(at, key, kPointerSizeLog2 - kSmiTagSize);
-  __ addu(address, address, at);
+  __ Lsa(address, address, key, kPointerSizeLog2 - kSmiTagSize);
   __ lw(scratch, MemOperand(address));
   __ Branch(&holecheck_passed1, ne, scratch,
             Operand(masm->isolate()->factory()->the_hole_value()));
@@ -511,8 +509,7 @@ static void KeyedStoreGenerateMegamorphicHelper(
   }
   // It's irrelevant whether array is smi-only or not when writing a smi.
   __ Addu(address, elements, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
-  __ sll(scratch, key, kPointerSizeLog2 - kSmiTagSize);
-  __ Addu(address, address, scratch);
+  __ Lsa(address, address, key, kPointerSizeLog2 - kSmiTagSize);
   __ sw(value, MemOperand(address));
   __ Ret();
@@ -528,8 +525,7 @@ static void KeyedStoreGenerateMegamorphicHelper(
     __ sw(scratch, FieldMemOperand(receiver, JSArray::kLengthOffset));
   }
   __ Addu(address, elements, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
-  __ sll(scratch, key, kPointerSizeLog2 - kSmiTagSize);
-  __ Addu(address, address, scratch);
+  __ Lsa(address, address, key, kPointerSizeLog2 - kSmiTagSize);
   __ sw(value, MemOperand(address));
   // Update write barrier for the elements array address.
   __ mov(scratch, value);  // Preserve the value which is returned.
@@ -550,8 +546,7 @@ static void KeyedStoreGenerateMegamorphicHelper(
   // go to the runtime.
   __ Addu(address, elements, Operand(FixedDoubleArray::kHeaderSize +
                                      kHoleNanUpper32Offset - kHeapObjectTag));
-  __ sll(at, key, kPointerSizeLog2);
-  __ addu(address, address, at);
+  __ Lsa(address, address, key, kPointerSizeLog2);
   __ lw(scratch, MemOperand(address));
   __ Branch(&fast_double_without_map_check, ne, scratch,
             Operand(kHoleNanUpper32));


@@ -42,13 +42,11 @@ static void ProbeTable(Isolate* isolate, MacroAssembler* masm,
   scratch = no_reg;

   // Multiply by 3 because there are 3 fields per entry (name, code, map).
-  __ sll(offset_scratch, offset, 1);
-  __ Addu(offset_scratch, offset_scratch, offset);
+  __ Lsa(offset_scratch, offset, offset, 1);

   // Calculate the base address of the entry.
   __ li(base_addr, Operand(key_offset));
-  __ sll(at, offset_scratch, kPointerSizeLog2);
-  __ Addu(base_addr, base_addr, at);
+  __ Lsa(base_addr, base_addr, offset_scratch, kPointerSizeLog2);

   // Check that the key in the entry matches the name.
   __ lw(at, MemOperand(base_addr, 0));
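The first Lsa above is the same multiply idiom with rt == rs: offset + (offset << 1) == offset * 3, matching the three fields per entry; the second scales the entry index into a byte offset. In scalar terms (illustrative C++; key_offset_value is a stand-in for the immediate loaded by li, not a name from the stub):

    uint32_t entry = offset + (offset << 1);                       // offset * 3
    uint32_t base_addr = key_offset_value + (entry << kPointerSizeLog2);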


@@ -157,8 +157,7 @@ void Builtins::Generate_NumberConstructor(MacroAssembler* masm) {
   {
     __ Branch(USE_DELAY_SLOT, &no_arguments, eq, a0, Operand(zero_reg));
     __ Subu(a0, a0, Operand(1));
-    __ sll(a0, a0, kPointerSizeLog2);
-    __ Addu(sp, a0, sp);
+    __ Lsa(sp, sp, a0, kPointerSizeLog2);
     __ lw(a0, MemOperand(sp));
     __ Drop(2);
   }
@@ -194,8 +193,7 @@ void Builtins::Generate_NumberConstructor_ConstructStub(MacroAssembler* masm) {
   Label no_arguments, done;
   __ Branch(USE_DELAY_SLOT, &no_arguments, eq, a0, Operand(zero_reg));
   __ Subu(a0, a0, Operand(1));
-  __ sll(a0, a0, kPointerSizeLog2);
-  __ Addu(sp, a0, sp);
+  __ Lsa(sp, sp, a0, kPointerSizeLog2);
   __ lw(a0, MemOperand(sp));
   __ Drop(2);
   __ jmp(&done);
@@ -259,8 +257,7 @@ void Builtins::Generate_StringConstructor(MacroAssembler* masm) {
   {
     __ Branch(USE_DELAY_SLOT, &no_arguments, eq, a0, Operand(zero_reg));
     __ Subu(a0, a0, Operand(1));
-    __ sll(a0, a0, kPointerSizeLog2);
-    __ Addu(sp, a0, sp);
+    __ Lsa(sp, sp, a0, kPointerSizeLog2);
     __ lw(a0, MemOperand(sp));
     __ Drop(2);
   }
@@ -322,8 +319,7 @@ void Builtins::Generate_StringConstructor_ConstructStub(MacroAssembler* masm) {
   Label no_arguments, done;
   __ Branch(USE_DELAY_SLOT, &no_arguments, eq, a0, Operand(zero_reg));
   __ Subu(a0, a0, Operand(1));
-  __ sll(a0, a0, kPointerSizeLog2);
-  __ Addu(sp, a0, sp);
+  __ Lsa(sp, sp, a0, kPointerSizeLog2);
   __ lw(a0, MemOperand(sp));
   __ Drop(2);
   __ jmp(&done);
@@ -618,8 +614,7 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm,
     __ SmiTag(t4, a0);
     __ jmp(&entry);
     __ bind(&loop);
-    __ sll(t0, t4, kPointerSizeLog2 - kSmiTagSize);
-    __ Addu(t0, a2, Operand(t0));
+    __ Lsa(t0, a2, t4, kPointerSizeLog2 - kSmiTagSize);
     __ lw(t1, MemOperand(t0));
     __ push(t1);
     __ bind(&entry);
@@ -698,8 +693,7 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm,
       __ bind(&dont_throw);
     }

-    __ sll(t0, a1, kPointerSizeLog2 - 1);
-    __ Addu(sp, sp, t0);
+    __ Lsa(sp, sp, a1, kPointerSizeLog2 - 1);
     __ Addu(sp, sp, kPointerSize);
     if (create_implicit_receiver) {
       __ IncrementCounter(isolate->counters()->constructed_objects(), 1, a1, a2);
@@ -807,8 +801,7 @@ static void Generate_JSEntryTrampolineHelper(MacroAssembler* masm,
   // a3: argc
   // s0: argv, i.e. points to first arg
   Label loop, entry;
-  __ sll(t0, a3, kPointerSizeLog2);
-  __ addu(t2, s0, t0);
+  __ Lsa(t2, s0, a3, kPointerSizeLog2);
   __ b(&entry);
   __ nop();  // Branch delay slot nop.
   // t2 points past last arg.
@@ -965,8 +958,7 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
   __ Addu(a0, kInterpreterBytecodeArrayRegister,
           kInterpreterBytecodeOffsetRegister);
   __ lbu(a0, MemOperand(a0));
-  __ sll(at, a0, kPointerSizeLog2);
-  __ Addu(at, kInterpreterDispatchTableRegister, at);
+  __ Lsa(at, kInterpreterDispatchTableRegister, a0, kPointerSizeLog2);
   __ lw(at, MemOperand(at));

   // TODO(rmcilroy): Make dispatch table point to code entrys to avoid untagging
   // and header removal.
@@ -1119,8 +1111,7 @@ static void Generate_InterpreterNotifyDeoptimizedHelper(
   __ Addu(a1, kInterpreterBytecodeArrayRegister,
           kInterpreterBytecodeOffsetRegister);
   __ lbu(a1, MemOperand(a1));
-  __ sll(a1, a1, kPointerSizeLog2);
-  __ Addu(a1, kInterpreterDispatchTableRegister, a1);
+  __ Lsa(a1, kInterpreterDispatchTableRegister, a1, kPointerSizeLog2);
   __ lw(a1, MemOperand(a1));
   __ Addu(a1, a1, Operand(Code::kHeaderSize - kHeapObjectTag));
   __ Jump(a1);
@@ -1407,8 +1398,7 @@ void Builtins::Generate_HandleFastApiCall(MacroAssembler* masm) {

   // Do the compatible receiver check.
   Label receiver_check_failed;
-  __ sll(at, a0, kPointerSizeLog2);
-  __ Addu(t8, sp, at);
+  __ Lsa(t8, sp, a0, kPointerSizeLog2);
   __ lw(t0, MemOperand(t8));
   CompatibleReceiverCheck(masm, t0, t1, &receiver_check_failed);
@@ -1542,6 +1532,7 @@ void Builtins::Generate_FunctionPrototypeApply(MacroAssembler* masm) {
     Register scratch = t0;
     __ LoadRoot(a2, Heap::kUndefinedValueRootIndex);
     __ mov(a3, a2);
+    // Lsa() cannot be used here, as the scratch value is used later.
     __ sll(scratch, a0, kPointerSizeLog2);
     __ Addu(a0, sp, Operand(scratch));
     __ lw(a1, MemOperand(a0));  // receiver
@@ -1612,8 +1603,7 @@ void Builtins::Generate_FunctionPrototypeCall(MacroAssembler* masm) {

   // 2. Get the function to call (passed as receiver) from the stack.
   // a0: actual number of arguments
-  __ sll(at, a0, kPointerSizeLog2);
-  __ addu(at, sp, at);
+  __ Lsa(at, sp, a0, kPointerSizeLog2);
   __ lw(a1, MemOperand(at));

   // 3. Shift arguments and return address one slot down on the stack
@@ -1624,8 +1614,7 @@ void Builtins::Generate_FunctionPrototypeCall(MacroAssembler* masm) {
   {
     Label loop;
     // Calculate the copy start address (destination). Copy end address is sp.
-    __ sll(at, a0, kPointerSizeLog2);
-    __ addu(a2, sp, at);
+    __ Lsa(a2, sp, a0, kPointerSizeLog2);

     __ bind(&loop);
     __ lw(at, MemOperand(a2, -kPointerSize));
@@ -1725,6 +1714,7 @@ void Builtins::Generate_ReflectConstruct(MacroAssembler* masm) {
     Register scratch = t0;
     __ LoadRoot(a1, Heap::kUndefinedValueRootIndex);
     __ mov(a2, a1);
+    // Lsa() cannot be used here, as the scratch value is used later.
     __ sll(scratch, a0, kPointerSizeLog2);
     __ Addu(a0, sp, Operand(scratch));
     __ sw(a2, MemOperand(a0));  // receiver
@@ -1826,8 +1816,7 @@ static void LeaveArgumentsAdaptorFrame(MacroAssembler* masm) {
                              kPointerSize)));
   __ mov(sp, fp);
   __ MultiPop(fp.bit() | ra.bit());
-  __ sll(t0, a1, kPointerSizeLog2 - kSmiTagSize);
-  __ Addu(sp, sp, t0);
+  __ Lsa(sp, sp, a1, kPointerSizeLog2 - kSmiTagSize);
   // Adjust for the receiver.
   __ Addu(sp, sp, Operand(kPointerSize));
 }
@@ -1935,8 +1924,7 @@ void Builtins::Generate_Apply(MacroAssembler* masm) {
     Label done, loop;
     __ bind(&loop);
     __ Branch(&done, eq, t0, Operand(a2));
-    __ sll(at, t0, kPointerSizeLog2);
-    __ Addu(at, a0, at);
+    __ Lsa(at, a0, t0, kPointerSizeLog2);
     __ lw(at, FieldMemOperand(at, FixedArray::kHeaderSize));
     __ Push(at);
     __ Addu(t0, t0, Operand(1));
@@ -1999,8 +1987,7 @@ void Builtins::Generate_CallFunction(MacroAssembler* masm,
       __ LoadGlobalProxy(a3);
     } else {
       Label convert_to_object, convert_receiver;
-      __ sll(at, a0, kPointerSizeLog2);
-      __ addu(at, sp, at);
+      __ Lsa(at, sp, a0, kPointerSizeLog2);
       __ lw(a3, MemOperand(at));
       __ JumpIfSmi(a3, &convert_to_object);
       STATIC_ASSERT(LAST_JS_RECEIVER_TYPE == LAST_TYPE);
@@ -2036,8 +2023,7 @@ void Builtins::Generate_CallFunction(MacroAssembler* masm,
         __ lw(a2, FieldMemOperand(a1, JSFunction::kSharedFunctionInfoOffset));
         __ bind(&convert_receiver);
       }
-      __ sll(at, a0, kPointerSizeLog2);
-      __ addu(at, sp, at);
+      __ Lsa(at, sp, a0, kPointerSizeLog2);
       __ sw(a3, MemOperand(at));
     }
     __ bind(&done_convert);
@@ -2078,8 +2064,7 @@ void Builtins::Generate_CallBoundFunction(MacroAssembler* masm) {
   // Patch the receiver to [[BoundThis]].
   {
     __ lw(at, FieldMemOperand(a1, JSBoundFunction::kBoundThisOffset));
-    __ sll(t0, a0, kPointerSizeLog2);
-    __ addu(t0, t0, sp);
+    __ Lsa(t0, sp, a0, kPointerSizeLog2);
     __ sw(at, MemOperand(t0));
   }
@@ -2120,11 +2105,9 @@ void Builtins::Generate_CallBoundFunction(MacroAssembler* masm) {
     __ mov(t1, zero_reg);
     __ bind(&loop);
     __ Branch(&done_loop, gt, t1, Operand(a0));
-    __ sll(t2, t0, kPointerSizeLog2);
-    __ addu(t2, t2, sp);
+    __ Lsa(t2, sp, t0, kPointerSizeLog2);
     __ lw(at, MemOperand(t2));
-    __ sll(t2, t1, kPointerSizeLog2);
-    __ addu(t2, t2, sp);
+    __ Lsa(t2, sp, t1, kPointerSizeLog2);
     __ sw(at, MemOperand(t2));
     __ Addu(t0, t0, Operand(1));
     __ Addu(t1, t1, Operand(1));
@@ -2141,11 +2124,9 @@ void Builtins::Generate_CallBoundFunction(MacroAssembler* masm) {
     __ bind(&loop);
     __ Subu(t0, t0, Operand(1));
     __ Branch(&done_loop, lt, t0, Operand(zero_reg));
-    __ sll(t1, t0, kPointerSizeLog2);
-    __ addu(t1, t1, a2);
+    __ Lsa(t1, a2, t0, kPointerSizeLog2);
     __ lw(at, MemOperand(t1));
-    __ sll(t1, a0, kPointerSizeLog2);
-    __ addu(t1, t1, sp);
+    __ Lsa(t1, sp, a0, kPointerSizeLog2);
     __ sw(at, MemOperand(t1));
     __ Addu(a0, a0, Operand(1));
     __ Branch(&loop);
@@ -2196,8 +2177,7 @@ void Builtins::Generate_Call(MacroAssembler* masm, ConvertReceiverMode mode) {
   __ And(t1, t1, Operand(1 << Map::kIsCallable));
   __ Branch(&non_callable, eq, t1, Operand(zero_reg));

   // Overwrite the original receiver with the (original) target.
-  __ sll(at, a0, kPointerSizeLog2);
-  __ addu(at, sp, at);
+  __ Lsa(at, sp, a0, kPointerSizeLog2);
   __ sw(a1, MemOperand(at));
   // Let the "call_as_function_delegate" take care of the rest.
   __ LoadNativeContextSlot(Context::CALL_AS_FUNCTION_DELEGATE_INDEX, a1);
@@ -2284,11 +2264,9 @@ void Builtins::Generate_ConstructBoundFunction(MacroAssembler* masm) {
     __ mov(t1, zero_reg);
     __ bind(&loop);
     __ Branch(&done_loop, ge, t1, Operand(a0));
-    __ sll(t2, t0, kPointerSizeLog2);
-    __ addu(t2, t2, sp);
+    __ Lsa(t2, sp, t0, kPointerSizeLog2);
     __ lw(at, MemOperand(t2));
-    __ sll(t2, t1, kPointerSizeLog2);
-    __ addu(t2, t2, sp);
+    __ Lsa(t2, sp, t1, kPointerSizeLog2);
     __ sw(at, MemOperand(t2));
     __ Addu(t0, t0, Operand(1));
     __ Addu(t1, t1, Operand(1));
@@ -2305,11 +2283,9 @@ void Builtins::Generate_ConstructBoundFunction(MacroAssembler* masm) {
     __ bind(&loop);
     __ Subu(t0, t0, Operand(1));
     __ Branch(&done_loop, lt, t0, Operand(zero_reg));
-    __ sll(t1, t0, kPointerSizeLog2);
-    __ addu(t1, t1, a2);
+    __ Lsa(t1, a2, t0, kPointerSizeLog2);
     __ lw(at, MemOperand(t1));
-    __ sll(t1, a0, kPointerSizeLog2);
-    __ addu(t1, t1, sp);
+    __ Lsa(t1, sp, a0, kPointerSizeLog2);
     __ sw(at, MemOperand(t1));
     __ Addu(a0, a0, Operand(1));
     __ Branch(&loop);
@@ -2388,8 +2364,7 @@ void Builtins::Generate_Construct(MacroAssembler* masm) {
   // Called Construct on an exotic Object with a [[Construct]] internal method.
   {
     // Overwrite the original receiver with the (original) target.
-    __ sll(at, a0, kPointerSizeLog2);
-    __ addu(at, sp, at);
+    __ Lsa(at, sp, a0, kPointerSizeLog2);
     __ sw(a1, MemOperand(at));
     // Let the "call_as_constructor_delegate" take care of the rest.
     __ LoadNativeContextSlot(Context::CALL_AS_CONSTRUCTOR_DELEGATE_INDEX, a1);
@@ -2432,8 +2407,7 @@ void Builtins::Generate_ArgumentsAdaptorTrampoline(MacroAssembler* masm) {
     ArgumentAdaptorStackCheck(masm, &stack_overflow);

     // Calculate copy start address into a0 and copy end address into t1.
-    __ sll(a0, a0, kPointerSizeLog2 - kSmiTagSize);
-    __ Addu(a0, fp, a0);
+    __ Lsa(a0, fp, a0, kPointerSizeLog2 - kSmiTagSize);
     // Adjust for return address and receiver.
     __ Addu(a0, a0, Operand(2 * kPointerSize));
     // Compute copy end address.
@@ -2488,8 +2462,7 @@ void Builtins::Generate_ArgumentsAdaptorTrampoline(MacroAssembler* masm) {
     // a1: function
     // a2: expected number of arguments
     // a3: new target (passed through to callee)
-    __ sll(a0, a0, kPointerSizeLog2 - kSmiTagSize);
-    __ Addu(a0, fp, a0);
+    __ Lsa(a0, fp, a0, kPointerSizeLog2 - kSmiTagSize);
     // Adjust for return address and receiver.
     __ Addu(a0, a0, Operand(2 * kPointerSize));
     // Compute copy end address. Also adjust for return address.


@@ -1075,8 +1075,7 @@ void CEntryStub::Generate(MacroAssembler* masm) {
     __ mov(s1, a2);
   } else {
     // Compute the argv pointer in a callee-saved register.
-    __ sll(s1, a0, kPointerSizeLog2);
-    __ Addu(s1, sp, s1);
+    __ Lsa(s1, sp, a0, kPointerSizeLog2);
     __ Subu(s1, s1, kPointerSize);
   }
@@ -1612,8 +1611,7 @@ void ArgumentsAccessStub::GenerateReadElement(MacroAssembler* masm) {
   // Read the argument from the stack and return it.
   __ subu(a3, a0, a1);
-  __ sll(t3, a3, kPointerSizeLog2 - kSmiTagSize);
-  __ Addu(a3, fp, Operand(t3));
+  __ Lsa(a3, fp, a3, kPointerSizeLog2 - kSmiTagSize);
   __ Ret(USE_DELAY_SLOT);
   __ lw(v0, MemOperand(a3, kDisplacement));
@@ -1626,8 +1624,7 @@ void ArgumentsAccessStub::GenerateReadElement(MacroAssembler* masm) {
   // Read the argument from the adaptor frame and return it.
   __ subu(a3, a0, a1);
-  __ sll(t3, a3, kPointerSizeLog2 - kSmiTagSize);
-  __ Addu(a3, a2, Operand(t3));
+  __ Lsa(a3, a2, a3, kPointerSizeLog2 - kSmiTagSize);
   __ Ret(USE_DELAY_SLOT);
   __ lw(v0, MemOperand(a3, kDisplacement));
@@ -1657,8 +1654,7 @@ void ArgumentsAccessStub::GenerateNewSloppySlow(MacroAssembler* masm) {
   // Patch the arguments.length and the parameters pointer in the current frame.
   __ lw(a2, MemOperand(t0, ArgumentsAdaptorFrameConstants::kLengthOffset));
-  __ sll(t3, a2, 1);
-  __ Addu(t0, t0, Operand(t3));
+  __ Lsa(t0, t0, a2, 1);
   __ addiu(a3, t0, StandardFrameConstants::kCallerSPOffset);

   __ bind(&runtime);
@@ -1694,8 +1690,7 @@ void ArgumentsAccessStub::GenerateNewSloppyFast(MacroAssembler* masm) {
   // We have an adaptor frame. Patch the parameters pointer.
   __ bind(&adaptor_frame);
   __ lw(t1, MemOperand(t0, ArgumentsAdaptorFrameConstants::kLengthOffset));
-  __ sll(t6, t1, 1);
-  __ Addu(t0, t0, Operand(t6));
+  __ Lsa(t0, t0, t1, 1);
   __ Addu(a3, t0, Operand(StandardFrameConstants::kCallerSPOffset));

   // t1 = argument count (tagged)
@@ -1721,8 +1716,7 @@ void ArgumentsAccessStub::GenerateNewSloppyFast(MacroAssembler* masm) {
   __ bind(&param_map_size);

   // 2. Backing store.
-  __ sll(t6, t1, 1);
-  __ Addu(t5, t5, Operand(t6));
+  __ Lsa(t5, t5, t1, 1);
   __ Addu(t5, t5, Operand(FixedArray::kHeaderSize));

   // 3. Arguments object.
@@ -1798,8 +1792,7 @@ void ArgumentsAccessStub::GenerateNewSloppyFast(MacroAssembler* masm) {
   __ Addu(t1, t2, Operand(Smi::FromInt(2)));
   __ sw(t1, FieldMemOperand(t0, FixedArray::kLengthOffset));
   __ sw(cp, FieldMemOperand(t0, FixedArray::kHeaderSize + 0 * kPointerSize));
-  __ sll(t6, t2, 1);
-  __ Addu(t1, t0, Operand(t6));
+  __ Lsa(t1, t0, t2, 1);
   __ Addu(t1, t1, Operand(kParameterMapHeaderSize));
   __ sw(t1, FieldMemOperand(t0, FixedArray::kHeaderSize + 1 * kPointerSize));
@@ -1816,8 +1809,7 @@ void ArgumentsAccessStub::GenerateNewSloppyFast(MacroAssembler* masm) {
   __ Addu(t5, a2, Operand(Smi::FromInt(Context::MIN_CONTEXT_SLOTS)));
   __ Subu(t5, t5, Operand(t2));
   __ LoadRoot(t3, Heap::kTheHoleValueRootIndex);
-  __ sll(t6, t1, 1);
-  __ Addu(a1, t0, Operand(t6));
+  __ Lsa(a1, t0, t1, 1);
   __ Addu(a1, a1, Operand(kParameterMapHeaderSize));

   // a1 = address of backing store (tagged)
@@ -1862,8 +1854,7 @@ void ArgumentsAccessStub::GenerateNewSloppyFast(MacroAssembler* masm) {
   __ bind(&arguments_loop);
   __ Subu(a3, a3, Operand(kPointerSize));
   __ lw(t0, MemOperand(a3, 0));
-  __ sll(t6, t2, 1);
-  __ Addu(t5, a1, Operand(t6));
+  __ Lsa(t5, a1, t2, 1);
   __ sw(t0, FieldMemOperand(t5, FixedArray::kHeaderSize));
   __ Addu(t2, t2, Operand(Smi::FromInt(1)));
@@ -1922,8 +1913,7 @@ void ArgumentsAccessStub::GenerateNewStrict(MacroAssembler* masm) {
   // Patch the arguments.length and the parameters pointer.
   __ lw(a2, MemOperand(t0, ArgumentsAdaptorFrameConstants::kLengthOffset));
-  __ sll(at, a2, kPointerSizeLog2 - kSmiTagSize);
-  __ Addu(t0, t0, Operand(at));
+  __ Lsa(t0, t0, a2, kPointerSizeLog2 - kSmiTagSize);
   __ Addu(a3, t0, Operand(StandardFrameConstants::kCallerSPOffset));

   // Try the new space allocation. Start out with computing the size
@@ -2008,8 +1998,7 @@ void RestParamAccessStub::GenerateNew(MacroAssembler* masm) {
   // Patch the arguments.length and the parameters pointer.
   __ lw(a2, MemOperand(t0, ArgumentsAdaptorFrameConstants::kLengthOffset));
-  __ sll(t1, a2, kPointerSizeLog2 - kSmiTagSize);
-  __ Addu(a3, t0, Operand(t1));
+  __ Lsa(a3, t0, a2, kPointerSizeLog2 - kSmiTagSize);
   __ Addu(a3, a3, Operand(StandardFrameConstants::kCallerSPOffset));

   // Do the runtime call to allocate the arguments object.
@@ -2489,8 +2478,7 @@ static void GenerateRecordCallTarget(MacroAssembler* masm) {
             masm->isolate()->heap()->uninitialized_symbol());

   // Load the cache state into t2.
-  __ sll(t2, a3, kPointerSizeLog2 - kSmiTagSize);
-  __ Addu(t2, a2, Operand(t2));
+  __ Lsa(t2, a2, a3, kPointerSizeLog2 - kSmiTagSize);
   __ lw(t2, FieldMemOperand(t2, FixedArray::kHeaderSize));

   // A monomorphic cache hit or an already megamorphic state: invoke the
@@ -2534,8 +2522,7 @@ static void GenerateRecordCallTarget(MacroAssembler* masm) {
   // MegamorphicSentinel is an immortal immovable object (undefined) so no
   // write-barrier is needed.
   __ bind(&megamorphic);
-  __ sll(t2, a3, kPointerSizeLog2 - kSmiTagSize);
-  __ Addu(t2, a2, Operand(t2));
+  __ Lsa(t2, a2, a3, kPointerSizeLog2 - kSmiTagSize);
   __ LoadRoot(at, Heap::kmegamorphic_symbolRootIndex);
   __ sw(at, FieldMemOperand(t2, FixedArray::kHeaderSize));
   __ jmp(&done);
@@ -2575,8 +2562,7 @@ void CallConstructStub::Generate(MacroAssembler* masm) {
   GenerateRecordCallTarget(masm);

-  __ sll(at, a3, kPointerSizeLog2 - kSmiTagSize);
-  __ Addu(t1, a2, at);
+  __ Lsa(t1, a2, a3, kPointerSizeLog2 - kSmiTagSize);
   Label feedback_register_initialized;
   // Put the AllocationSite from the feedback vector into a2, or undefined.
   __ lw(a2, FieldMemOperand(t1, FixedArray::kHeaderSize));
@@ -2615,8 +2601,7 @@ void CallICStub::HandleArrayCase(MacroAssembler* masm, Label* miss) {
   __ li(a0, Operand(arg_count()));

   // Increment the call count for monomorphic function calls.
-  __ sll(at, a3, kPointerSizeLog2 - kSmiTagSize);
-  __ Addu(at, a2, Operand(at));
+  __ Lsa(at, a2, a3, kPointerSizeLog2 - kSmiTagSize);
   __ lw(a3, FieldMemOperand(at, FixedArray::kHeaderSize + kPointerSize));
   __ Addu(a3, a3, Operand(Smi::FromInt(CallICNexus::kCallCountIncrement)));
   __ sw(a3, FieldMemOperand(at, FixedArray::kHeaderSize + kPointerSize));
@@ -2637,8 +2622,7 @@ void CallICStub::Generate(MacroAssembler* masm) {
   ParameterCount actual(argc);

   // The checks. First, does r1 match the recorded monomorphic target?
-  __ sll(t0, a3, kPointerSizeLog2 - kSmiTagSize);
-  __ Addu(t0, a2, Operand(t0));
+  __ Lsa(t0, a2, a3, kPointerSizeLog2 - kSmiTagSize);
   __ lw(t0, FieldMemOperand(t0, FixedArray::kHeaderSize));

   // We don't know that we have a weak cell. We might have a private symbol
@@ -2663,8 +2647,7 @@ void CallICStub::Generate(MacroAssembler* masm) {
   __ JumpIfSmi(a1, &extra_checks_or_miss);

   // Increment the call count for monomorphic function calls.
-  __ sll(at, a3, kPointerSizeLog2 - kSmiTagSize);
-  __ Addu(at, a2, Operand(at));
+  __ Lsa(at, a2, a3, kPointerSizeLog2 - kSmiTagSize);
   __ lw(a3, FieldMemOperand(at, FixedArray::kHeaderSize + kPointerSize));
   __ Addu(a3, a3, Operand(Smi::FromInt(CallICNexus::kCallCountIncrement)));
   __ sw(a3, FieldMemOperand(at, FixedArray::kHeaderSize + kPointerSize));
@@ -2704,8 +2687,7 @@ void CallICStub::Generate(MacroAssembler* masm) {
   __ AssertNotSmi(t0);
   __ GetObjectType(t0, t1, t1);
   __ Branch(&miss, ne, t1, Operand(JS_FUNCTION_TYPE));
-  __ sll(t0, a3, kPointerSizeLog2 - kSmiTagSize);
-  __ Addu(t0, a2, Operand(t0));
+  __ Lsa(t0, a2, a3, kPointerSizeLog2 - kSmiTagSize);
   __ LoadRoot(at, Heap::kmegamorphic_symbolRootIndex);
   __ sw(at, FieldMemOperand(t0, FixedArray::kHeaderSize));
@@ -2736,8 +2718,7 @@ void CallICStub::Generate(MacroAssembler* masm) {
   __ Branch(&miss, ne, t0, Operand(t1));

   // Initialize the call counter.
-  __ sll(at, a3, kPointerSizeLog2 - kSmiTagSize);
-  __ Addu(at, a2, Operand(at));
+  __ Lsa(at, a2, a3, kPointerSizeLog2 - kSmiTagSize);
   __ li(t0, Operand(Smi::FromInt(CallICNexus::kCallCountIncrement)));
   __ sw(t0, FieldMemOperand(at, FixedArray::kHeaderSize + kPointerSize));
@@ -2901,8 +2882,7 @@ void StringCharFromCodeGenerator::GenerateFast(MacroAssembler* masm) {
   __ LoadRoot(result_, Heap::kSingleCharacterStringCacheRootIndex);
   // At this point code register contains smi tagged one-byte char code.
   STATIC_ASSERT(kSmiTag == 0);
-  __ sll(t0, code_, kPointerSizeLog2 - kSmiTagSize);
-  __ Addu(result_, result_, t0);
+  __ Lsa(result_, result_, code_, kPointerSizeLog2 - kSmiTagSize);
   __ lw(result_, FieldMemOperand(result_, FixedArray::kHeaderSize));
   __ LoadRoot(t0, Heap::kUndefinedValueRootIndex);
   __ Branch(&slow_case_, eq, result_, Operand(t0));
@@ -3159,8 +3139,7 @@ void SubStringStub::Generate(MacroAssembler* masm) {
     // Locate first character of substring to copy.
     STATIC_ASSERT(kSmiTagSize == 1 && kSmiTag == 0);
-    __ sll(t0, a3, 1);
-    __ Addu(t1, t1, t0);
+    __ Lsa(t1, t1, a3, 1);
     // Locate first character of result.
     __ Addu(a1, v0, Operand(SeqTwoByteString::kHeaderSize - kHeapObjectTag));
@@ -3895,15 +3874,13 @@ void NameDictionaryLookupStub::GenerateNegativeLookup(MacroAssembler* masm,
     // Scale the index by multiplying by the entry size.
     STATIC_ASSERT(NameDictionary::kEntrySize == 3);
-    __ sll(at, index, 1);
-    __ Addu(index, index, at);
+    __ Lsa(index, index, index, 1);

     Register entity_name = scratch0;
     // Having undefined at this place means the name is not contained.
     STATIC_ASSERT(kSmiTagSize == 1);
     Register tmp = properties;
-    __ sll(scratch0, index, 1);
-    __ Addu(tmp, properties, scratch0);
+    __ Lsa(tmp, properties, index, 1);
     __ lw(entity_name, FieldMemOperand(tmp, kElementsStartOffset));

     DCHECK(!tmp.is(entity_name));
@@ -3993,12 +3970,10 @@ void NameDictionaryLookupStub::GeneratePositiveLookup(MacroAssembler* masm,
     STATIC_ASSERT(NameDictionary::kEntrySize == 3);
     // scratch2 = scratch2 * 3.
-    __ sll(at, scratch2, 1);
-    __ Addu(scratch2, scratch2, at);
+    __ Lsa(scratch2, scratch2, scratch2, 1);

     // Check if the key is identical to the name.
-    __ sll(at, scratch2, 2);
-    __ Addu(scratch2, elements, at);
+    __ Lsa(scratch2, elements, scratch2, 2);
     __ lw(at, FieldMemOperand(scratch2, kElementsStartOffset));
     __ Branch(done, eq, name, Operand(at));
   }
@@ -4080,13 +4055,11 @@ void NameDictionaryLookupStub::Generate(MacroAssembler* masm) {
     STATIC_ASSERT(NameDictionary::kEntrySize == 3);
     // index *= 3.
     __ mov(at, index);
-    __ sll(index, index, 1);
-    __ Addu(index, index, at);
+    __ Lsa(index, index, index, 1);

     STATIC_ASSERT(kSmiTagSize == 1);
-    __ sll(index, index, 2);
-    __ Addu(index, index, dictionary);
+    __ Lsa(index, dictionary, index, 2);

     __ lw(entry_key, FieldMemOperand(index, kElementsStartOffset));

     // Having undefined at this place means the name is not contained.
@@ -4410,8 +4383,7 @@ static void HandleArrayCases(MacroAssembler* masm, Register feedback,
   //   aka feedback                         scratch2
   // also need receiver_map
   // use cached_map (scratch1) to look in the weak map values.
-  __ sll(at, length, kPointerSizeLog2 - kSmiTagSize);
-  __ Addu(too_far, feedback, Operand(at));
+  __ Lsa(too_far, feedback, length, kPointerSizeLog2 - kSmiTagSize);
   __ Addu(too_far, too_far, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
   __ Addu(pointer_reg, feedback,
           Operand(FixedArray::OffsetOfElementAt(2) - kHeapObjectTag));
@@ -4447,8 +4419,7 @@ static void HandleMonomorphicCase(MacroAssembler* masm, Register receiver,
   __ Branch(try_array, ne, cached_map, Operand(receiver_map));
   Register handler = feedback;

-  __ sll(at, slot, kPointerSizeLog2 - kSmiTagSize);
-  __ Addu(handler, vector, Operand(at));
+  __ Lsa(handler, vector, slot, kPointerSizeLog2 - kSmiTagSize);
   __ lw(handler,
         FieldMemOperand(handler, FixedArray::kHeaderSize + kPointerSize));
   __ Addu(t9, handler, Operand(Code::kHeaderSize - kHeapObjectTag));
@@ -4465,8 +4436,7 @@ void LoadICStub::GenerateImpl(MacroAssembler* masm, bool in_frame) {
   Register receiver_map = t1;
   Register scratch1 = t4;

-  __ sll(at, slot, kPointerSizeLog2 - kSmiTagSize);
-  __ Addu(feedback, vector, Operand(at));
+  __ Lsa(feedback, vector, slot, kPointerSizeLog2 - kSmiTagSize);
   __ lw(feedback, FieldMemOperand(feedback, FixedArray::kHeaderSize));

   // Try to quickly handle the monomorphic case without knowing for sure
@@ -4521,8 +4491,7 @@ void KeyedLoadICStub::GenerateImpl(MacroAssembler* masm, bool in_frame) {
   Register receiver_map = t1;
   Register scratch1 = t4;

-  __ sll(at, slot, kPointerSizeLog2 - kSmiTagSize);
-  __ Addu(feedback, vector, Operand(at));
+  __ Lsa(feedback, vector, slot, kPointerSizeLog2 - kSmiTagSize);
   __ lw(feedback, FieldMemOperand(feedback, FixedArray::kHeaderSize));

   // Try to quickly handle the monomorphic case without knowing for sure
@@ -4558,8 +4527,7 @@ void KeyedLoadICStub::GenerateImpl(MacroAssembler* masm, bool in_frame) {
   __ Branch(&miss, ne, key, Operand(feedback));

   // If the name comparison succeeded, we know we have a fixed array with
   // at least one map/handler pair.
-  __ sll(at, slot, kPointerSizeLog2 - kSmiTagSize);
-  __ Addu(feedback, vector, Operand(at));
+  __ Lsa(feedback, vector, slot, kPointerSizeLog2 - kSmiTagSize);
   __ lw(feedback,
         FieldMemOperand(feedback, FixedArray::kHeaderSize + kPointerSize));
   HandleArrayCases(masm, feedback, receiver_map, scratch1, t5, false, &miss);
@@ -4607,8 +4575,7 @@ void VectorStoreICStub::GenerateImpl(MacroAssembler* masm, bool in_frame) {
   Register receiver_map = t2;
   Register scratch1 = t5;

-  __ sll(scratch1, slot, kPointerSizeLog2 - kSmiTagSize);
-  __ Addu(feedback, vector, Operand(scratch1));
+  __ Lsa(feedback, vector, slot, kPointerSizeLog2 - kSmiTagSize);
   __ lw(feedback, FieldMemOperand(feedback, FixedArray::kHeaderSize));

   // Try to quickly handle the monomorphic case without knowing for sure
@@ -4680,8 +4647,7 @@ static void HandlePolymorphicStoreCase(MacroAssembler* masm, Register feedback,
   //   aka feedback                         scratch2
   // also need receiver_map
   // use cached_map (scratch1) to look in the weak map values.
-  __ sll(scratch1, too_far, kPointerSizeLog2 - kSmiTagSize);
-  __ Addu(too_far, feedback, Operand(scratch1));
+  __ Lsa(too_far, feedback, too_far, kPointerSizeLog2 - kSmiTagSize);
   __ Addu(too_far, too_far, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
   __ Addu(pointer_reg, feedback,
           Operand(FixedArray::OffsetOfElementAt(0) - kHeapObjectTag));
@@ -4730,8 +4696,7 @@ void VectorKeyedStoreICStub::GenerateImpl(MacroAssembler* masm, bool in_frame) {
   Register receiver_map = t2;
   Register scratch1 = t5;

-  __ sll(scratch1, slot, kPointerSizeLog2 - kSmiTagSize);
-  __ Addu(feedback, vector, Operand(scratch1));
+  __ Lsa(feedback, vector, slot, kPointerSizeLog2 - kSmiTagSize);
   __ lw(feedback, FieldMemOperand(feedback, FixedArray::kHeaderSize));

   // Try to quickly handle the monomorphic case without knowing for sure
@@ -4770,8 +4735,7 @@ void VectorKeyedStoreICStub::GenerateImpl(MacroAssembler* masm, bool in_frame) {
   __ Branch(&miss, ne, key, Operand(feedback));

   // If the name comparison succeeded, we know we have a fixed array with
   // at least one map/handler pair.
-  __ sll(scratch1, slot, kPointerSizeLog2 - kSmiTagSize);
-  __ Addu(feedback, vector, Operand(scratch1));
+  __ Lsa(feedback, vector, slot, kPointerSizeLog2 - kSmiTagSize);
   __ lw(feedback,
         FieldMemOperand(feedback, FixedArray::kHeaderSize + kPointerSize));
   HandleArrayCases(masm, feedback, receiver_map, scratch1, scratch2, false,
@@ -5078,8 +5042,7 @@ void ArrayConstructorStub::Generate(MacroAssembler* masm) {
   switch (argument_count()) {
     case ANY:
     case MORE_THAN_ONE:
-      __ sll(at, a0, kPointerSizeLog2);
-      __ addu(at, sp, at);
+      __ Lsa(at, sp, a0, kPointerSizeLog2);
       __ sw(a1, MemOperand(at));
       __ li(at, Operand(3));
       __ addu(a0, a0, at);
@@ -5185,8 +5148,7 @@ void LoadGlobalViaContextStub::Generate(MacroAssembler* masm) {
   }

   // Load the PropertyCell value at the specified slot.
-  __ sll(at, slot_reg, kPointerSizeLog2);
-  __ Addu(at, at, Operand(context_reg));
+  __ Lsa(at, context_reg, slot_reg, kPointerSizeLog2);
   __ lw(result_reg, ContextMemOperand(at, 0));
   __ lw(result_reg, FieldMemOperand(result_reg, PropertyCell::kValueOffset));
@@ -5224,8 +5186,7 @@ void StoreGlobalViaContextStub::Generate(MacroAssembler* masm) {
   }

   // Load the PropertyCell at the specified slot.
-  __ sll(at, slot_reg, kPointerSizeLog2);
-  __ Addu(at, at, Operand(context_reg));
+  __ Lsa(at, context_reg, slot_reg, kPointerSizeLog2);
   __ lw(cell_reg, ContextMemOperand(at, 0));

   // Load PropertyDetails for the cell (actually only the cell_type and kind).


@@ -767,8 +767,7 @@ void ElementsTransitionGenerator::GenerateSmiToDouble(
   __ Addu(scratch1, elements,
           Operand(FixedArray::kHeaderSize - kHeapObjectTag));
   __ Addu(scratch3, array, Operand(FixedDoubleArray::kHeaderSize));
-  __ sll(at, length, 2);
-  __ Addu(array_end, scratch3, at);
+  __ Lsa(array_end, scratch3, length, 2);

   // Repurpose registers no longer in use.
   Register hole_lower = elements;
@@ -899,8 +898,7 @@ void ElementsTransitionGenerator::GenerateDoubleToObject(
                                FixedDoubleArray::kHeaderSize - kHeapObjectTag
                                + Register::kExponentOffset));
   __ Addu(dst_elements, array, Operand(FixedArray::kHeaderSize));
-  __ sll(dst_end, dst_end, 1);
-  __ Addu(dst_end, dst_elements, dst_end);
+  __ Lsa(dst_end, dst_elements, dst_end, 1);

   // Allocating heap numbers in the loop below can fail and cause a jump to
   // gc_required. We can't leave a partly initialized FixedArray behind,
@@ -1082,8 +1080,7 @@ void StringCharLoadGenerator::Generate(MacroAssembler* masm,
   __ And(at, result, Operand(kStringEncodingMask));
   __ Branch(&one_byte, ne, at, Operand(zero_reg));
   // Two-byte string.
-  __ sll(at, index, 1);
-  __ Addu(at, string, at);
+  __ Lsa(at, string, index, 1);
   __ lhu(result, MemOperand(at));
   __ jmp(&done);
   __ bind(&one_byte);
@@ -1156,8 +1153,7 @@ void MathExpGenerator::EmitMathExp(MacroAssembler* masm,
   // Must not call ExpConstant() after overwriting temp3!
   __ li(temp3, Operand(ExternalReference::math_exp_log_table()));
-  __ sll(at, temp2, 3);
-  __ Addu(temp3, temp3, Operand(at));
+  __ Lsa(temp3, temp3, temp2, 3);
   __ lw(temp2, MemOperand(temp3, Register::kMantissaOffset));
   __ lw(temp3, MemOperand(temp3, Register::kExponentOffset));
   // The first word loaded is the lower number register.


@@ -268,8 +268,7 @@ void Deoptimizer::TableEntryGenerator::Generate() {
   // a1 = one past the last FrameDescription**.
   __ lw(a1, MemOperand(a0, Deoptimizer::output_count_offset()));
   __ lw(t0, MemOperand(a0, Deoptimizer::output_offset()));  // t0 is output_.
-  __ sll(a1, a1, kPointerSizeLog2);  // Count to offset.
-  __ addu(a1, t0, a1);  // a1 = one past the last FrameDescription**.
+  __ Lsa(a1, t0, a1, kPointerSizeLog2);
   __ BranchShort(&outer_loop_header);
   __ bind(&outer_push_loop);
   // Inner loop state: a2 = current FrameDescription*, a3 = loop index.


@@ -499,16 +499,14 @@ void MacroAssembler::GetNumberHash(Register reg0, Register scratch) {
   //
   // hash = ~hash + (hash << 15);
   nor(scratch, reg0, zero_reg);
-  sll(at, reg0, 15);
-  addu(reg0, scratch, at);
+  Lsa(reg0, scratch, reg0, 15);

   // hash = hash ^ (hash >> 12);
   srl(at, reg0, 12);
   xor_(reg0, reg0, at);

   // hash = hash + (hash << 2);
-  sll(at, reg0, 2);
-  addu(reg0, reg0, at);
+  Lsa(reg0, reg0, reg0, 2);

   // hash = hash ^ (hash >> 4);
   srl(at, reg0, 4);
@@ -516,8 +514,7 @@ void MacroAssembler::GetNumberHash(Register reg0, Register scratch) {

   // hash = hash * 2057;
   sll(scratch, reg0, 11);
-  sll(at, reg0, 3);
-  addu(reg0, reg0, at);
+  Lsa(reg0, reg0, reg0, 3);
   addu(reg0, reg0, scratch);

   // hash = hash ^ (hash >> 16);
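Pieced together from the comments, the mix GetNumberHash implements is the following (plain C++ for reference; each Lsa realizes one shift-add):

    uint32_t hash = key;
    hash = ~hash + (hash << 15);  // nor + Lsa(reg0, scratch, reg0, 15)
    hash = hash ^ (hash >> 12);
    hash = hash + (hash << 2);    // Lsa(reg0, reg0, reg0, 2)
    hash = hash ^ (hash >> 4);
    hash = hash * 2057;           // hash + (hash << 3) + (hash << 11)
    hash = hash ^ (hash >> 16);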
@@ -577,12 +574,10 @@ void MacroAssembler::LoadFromNumberDictionary(Label* miss,
     // Scale the index by multiplying by the element size.
     DCHECK(SeededNumberDictionary::kEntrySize == 3);
-    sll(at, reg2, 1);  // 2x.
-    addu(reg2, reg2, at);  // reg2 = reg2 * 3.
+    Lsa(reg2, reg2, reg2, 1);  // reg2 = reg2 * 3.

     // Check if the key is identical to the name.
-    sll(at, reg2, kPointerSizeLog2);
-    addu(reg2, elements, at);
+    Lsa(reg2, elements, reg2, kPointerSizeLog2);

     lw(at, FieldMemOperand(reg2, SeededNumberDictionary::kElementsStartOffset));
     if (i != kNumberDictionaryProbes - 1) {
@@ -3412,8 +3407,7 @@ void MacroAssembler::Allocate(Register object_size, Register result,
   // to calculate the new top. Object size may be in words so a shift is
   // required to get the number of bytes.
   if ((flags & SIZE_IN_WORDS) != 0) {
-    sll(result_end, object_size, kPointerSizeLog2);
-    Addu(result_end, result, result_end);
+    Lsa(result_end, result, object_size, kPointerSizeLog2);
   } else {
     Addu(result_end, result, Operand(object_size));
   }
@@ -3775,8 +3769,7 @@ void MacroAssembler::StoreNumberToDoubleElements(Register value_reg,
   lw(mantissa_reg, FieldMemOperand(value_reg, HeapNumber::kMantissaOffset));

   bind(&have_double_value);
-  sll(scratch1, key_reg, kDoubleSizeLog2 - kSmiTagSize);
-  Addu(scratch1, scratch1, elements_reg);
+  Lsa(scratch1, elements_reg, key_reg, kDoubleSizeLog2 - kSmiTagSize);
   sw(mantissa_reg,
      FieldMemOperand(scratch1, FixedDoubleArray::kHeaderSize - elements_offset
                                + kHoleNanLower32Offset));
@@ -3802,8 +3795,7 @@ void MacroAssembler::StoreNumberToDoubleElements(Register value_reg,
   Addu(scratch1, elements_reg,
        Operand(FixedDoubleArray::kHeaderSize - kHeapObjectTag -
                elements_offset));
-  sll(scratch2, key_reg, kDoubleSizeLog2 - kSmiTagSize);
-  Addu(scratch1, scratch1, scratch2);
+  Lsa(scratch1, scratch1, key_reg, kDoubleSizeLog2 - kSmiTagSize);
   // scratch1 is now effective address of the double element

   Register untagged_value = scratch2;
@@ -4945,8 +4937,7 @@ void MacroAssembler::LeaveExitFrame(bool save_doubles, Register argument_count,
     if (argument_count_is_length) {
       addu(sp, sp, argument_count);
     } else {
-      sll(t8, argument_count, kPointerSizeLog2);
-      addu(sp, sp, t8);
+      Lsa(sp, sp, argument_count, kPointerSizeLog2, t8);
     }
   }
@@ -5473,8 +5464,7 @@ void MacroAssembler::GetMarkBits(Register addr_reg,
   Ext(mask_reg, addr_reg, kPointerSizeLog2, Bitmap::kBitsPerCellLog2);
   const int kLowBits = kPointerSizeLog2 + Bitmap::kBitsPerCellLog2;
   Ext(t8, addr_reg, kLowBits, kPageSizeBits - kLowBits);
-  sll(t8, t8, kPointerSizeLog2);
-  Addu(bitmap_reg, bitmap_reg, t8);
+  Lsa(bitmap_reg, bitmap_reg, t8, kPointerSizeLog2, t8);
   li(t8, Operand(1));
   sllv(mask_reg, t8, mask_reg);
 }
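The last two hunks use the five-argument form of Lsa. When the destination aliases the addend (rd == rt, as with sp above), the pre-r6 expansion cannot build the shifted value in rd, so the caller supplies an explicit scratch; t8 is free at both call sites. Assuming the expansion sketched earlier, on pre-r6 cores this emits exactly the pair it replaces:

    __ Lsa(sp, sp, argument_count, kPointerSizeLog2, t8);
    // pre-r6:
    //   sll(t8, argument_count, kPointerSizeLog2);
    //   Addu(sp, sp, t8);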