diff --git a/src/builtins/ppc/builtins-ppc.cc b/src/builtins/ppc/builtins-ppc.cc
index 315b664210..84078342b2 100644
--- a/src/builtins/ppc/builtins-ppc.cc
+++ b/src/builtins/ppc/builtins-ppc.cc
@@ -871,7 +871,8 @@ static void TailCallOptimizedCodeSlot(MacroAssembler* masm,
       r0);
   __ LoadS32(
       scratch,
-      FieldMemOperand(scratch, CodeDataContainer::kKindSpecificFlagsOffset));
+      FieldMemOperand(scratch, CodeDataContainer::kKindSpecificFlagsOffset),
+      r0);
   __ TestBit(scratch, Code::kMarkedForDeoptimizationBit, r0);
   __ bne(&heal_optimized_code_slot, cr0);
 
@@ -1170,7 +1171,8 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
   __ LoadS32(r8,
              FieldMemOperand(
                  kInterpreterBytecodeArrayRegister,
-                 BytecodeArray::kIncomingNewTargetOrGeneratorRegisterOffset));
+                 BytecodeArray::kIncomingNewTargetOrGeneratorRegisterOffset),
+             r0);
   __ cmpi(r8, Operand::Zero());
   __ beq(&no_incoming_new_target_or_generator_register);
   __ ShiftLeftImm(r8, r8, Operand(kSystemPointerSizeLog2));
diff --git a/src/codegen/ppc/macro-assembler-ppc.cc b/src/codegen/ppc/macro-assembler-ppc.cc
index 92277a90e4..bac684c5a5 100644
--- a/src/codegen/ppc/macro-assembler-ppc.cc
+++ b/src/codegen/ppc/macro-assembler-ppc.cc
@@ -2766,56 +2766,21 @@ void TurboAssembler::StoreU64WithUpdate(Register src, const MemOperand& mem,
 
 void TurboAssembler::LoadS32(Register dst, const MemOperand& mem,
                              Register scratch) {
-  int offset = mem.offset();
-
-  if (!is_int16(offset)) {
-    CHECK(scratch != no_reg);
-    mov(scratch, Operand(offset));
-    lwax(dst, MemOperand(mem.ra(), scratch));
-  } else {
-    int misaligned = (offset & 3);
-    if (misaligned) {
-      // adjust base to conform to offset alignment requirements
-      // Todo: enhance to use scratch if dst is unsuitable
-      CHECK(dst != r0);
-      addi(dst, mem.ra(), Operand((offset & 3) - 4));
-      lwa(dst, MemOperand(dst, (offset & ~3) + 4));
-    } else {
-      lwa(dst, mem);
-    }
-  }
+  GenerateMemoryOperationWithAlign(dst, mem, lwa, lwax);
 }
 
 // Variable length depending on whether offset fits into immediate field
 // MemOperand currently only supports d-form
 void TurboAssembler::LoadU32(Register dst, const MemOperand& mem,
                              Register scratch) {
-  Register base = mem.ra();
-  int offset = mem.offset();
-
-  if (!is_int16(offset)) {
-    CHECK(scratch != no_reg);
-    mov(scratch, Operand(offset));
-    lwzx(dst, MemOperand(base, scratch));
-  } else {
-    // lwz can handle offset misalign
-    lwz(dst, mem);
-  }
+  GenerateMemoryOperation(dst, mem, lwz, lwzx);
 }
 
 // Variable length depending on whether offset fits into immediate field
 // MemOperand current only supports d-form
 void TurboAssembler::StoreU32(Register src, const MemOperand& mem,
                               Register scratch) {
-  Register base = mem.ra();
-  int offset = mem.offset();
-
-  if (!is_int16(offset)) {
-    LoadIntLiteral(scratch, offset);
-    stwx(src, MemOperand(base, scratch));
-  } else {
-    stw(src, mem);
-  }
+  GenerateMemoryOperation(src, mem, stw, stwx);
 }
 
 void TurboAssembler::LoadS16(Register dst, const MemOperand& mem,
@@ -3253,7 +3218,7 @@ void TurboAssembler::LoadCodeObjectEntry(Register destination,
   // Check whether the Code object is an off-heap trampoline. If so, call its
   // (off-heap) entry point directly without going through the (on-heap)
   // trampoline. Otherwise, just call the Code object as always.
-  LoadS32(scratch, FieldMemOperand(code_object, Code::kFlagsOffset));
+  LoadS32(scratch, FieldMemOperand(code_object, Code::kFlagsOffset), r0);
   mov(r0, Operand(Code::IsOffHeapTrampoline::kMask));
   and_(r0, scratch, r0, SetRC);
   bne(&if_code_is_off_heap, cr0);
@@ -3266,7 +3231,8 @@ void TurboAssembler::LoadCodeObjectEntry(Register destination,
   // An off-heap trampoline, the entry point is loaded from the builtin entry
   // table.
   bind(&if_code_is_off_heap);
-  LoadS32(scratch, FieldMemOperand(code_object, Code::kBuiltinIndexOffset));
+  LoadS32(scratch, FieldMemOperand(code_object, Code::kBuiltinIndexOffset),
+          r0);
   ShiftLeftImm(destination, scratch, Operand(kSystemPointerSizeLog2));
   add(destination, destination, kRootRegister);
   LoadU64(destination,
diff --git a/src/compiler/backend/ppc/code-generator-ppc.cc b/src/compiler/backend/ppc/code-generator-ppc.cc
index ab2506d2ff..363f1f8013 100644
--- a/src/compiler/backend/ppc/code-generator-ppc.cc
+++ b/src/compiler/backend/ppc/code-generator-ppc.cc
@@ -829,7 +829,8 @@ void CodeGenerator::BailoutIfDeoptimized() {
   __ LoadTaggedPointerField(
       r11, MemOperand(kJavaScriptCallCodeStartRegister, offset), r0);
   __ LoadS32(r11,
-             FieldMemOperand(r11, CodeDataContainer::kKindSpecificFlagsOffset));
+             FieldMemOperand(r11, CodeDataContainer::kKindSpecificFlagsOffset),
+             r0);
   __ TestBit(r11, Code::kMarkedForDeoptimizationBit);
   __ Jump(BUILTIN_CODE(isolate(), CompileLazyDeoptimizedCode),
           RelocInfo::CODE_TARGET, ne, cr0);
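
Note (editorial commentary, not part of the patch): the three hand-rolled
function bodies deleted above all encode the same decision: does the offset
fit in a signed 16-bit displacement, and, for the DS-form lwa, is it 4-byte
aligned? If not, the offset must be materialized into a scratch register and
the indexed (x-form) instruction used instead, which is why every LoadS32
call site in this patch now passes r0 as an explicit scratch register. The
sketch below is a minimal standalone illustration of that selection logic;
the enum and function names are invented for the example and are not the
actual GenerateMemoryOperation/GenerateMemoryOperationWithAlign expansions.

#include <cassert>
#include <cstdint>

// Which PPC encoding a 32-bit load/store with a given offset can use.
enum class Form {
  kDForm,   // 16-bit signed displacement: lwz, stw
  kDSForm,  // 16-bit signed displacement with low two bits zero: lwa
  kXForm    // indexed; offset lives in a scratch register: lwzx, lwax, stwx
};

// D-form instructions (lwz/stw): only the immediate range matters.
Form SelectForm(int64_t offset) {
  return (offset >= INT16_MIN && offset <= INT16_MAX) ? Form::kDForm
                                                      : Form::kXForm;
}

// DS-form instructions (lwa): the displacement must also be a multiple of
// 4; a misaligned offset falls back to the indexed form and so consumes the
// caller-provided scratch register.
Form SelectFormWithAlign(int64_t offset) {
  if (offset < INT16_MIN || offset > INT16_MAX) return Form::kXForm;
  return (offset & 3) == 0 ? Form::kDSForm : Form::kXForm;
}

int main() {
  assert(SelectFormWithAlign(0x7ffc) == Form::kDSForm);  // in range, aligned
  assert(SelectFormWithAlign(0x7ffe) == Form::kXForm);   // misaligned
  assert(SelectForm(0x12340) == Form::kXForm);           // out of range
  return 0;
}

The old LoadS32 handled a misaligned offset by borrowing dst as a temporary
base register (hence the removed CHECK(dst != r0) and the addi/lwa pair);
the refactored helper appears to route that case through the scratch
register instead, trading the special case for the new r0 argument at each
call site.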