MIPS: Move ldc1/sdc1 to macro-assembler.
For MIPS32, the ldc1 and sdc1 instructions are moved into the macro-assembler and renamed Ldc1 and Sdc1. They belong in the macro-assembler because they emit two or three instructions.

TEST=test/cctest/test-assembler-mips, test/cctest/test-code-stubs-mips, test/cctest/test-macro-assembler-mips
BUG=
Review-Url: https://codereview.chromium.org/2751973002
Cr-Commit-Position: refs/heads/master@{#43977}
parent 3214ccf39b
commit 47da8de250
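As background for the "two or three instructions" remark, here is a minimal sketch of what the new Ldc1 expands to on the FP64/FPXX path when the offset fits in 16 bits; the calls are taken from the MacroAssembler::Ldc1 implementation added further down in this diff, and fd/src stand for the caller's operands:

// Rough expansion of __ Ldc1(fd, src) on the FP64/FPXX path:
lwc1(fd, MemOperand(src.rm(), src.offset() + Register::kMantissaOffset));  // low 32 bits
lw(at, MemOperand(src.rm(), src.offset() + Register::kExponentOffset));    // high 32 bits
mthc1(at, fd);                                                             // merge high word into fd
// Three machine instructions instead of a single ldc1; Sdc1 is the mirror
// image using swc1/mfhc1/sw. Callers simply switch from __ ldc1(...) to
// __ Ldc1(...), as the hunks below show.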
@@ -1472,7 +1472,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
break;
}
case kMipsLdc1:
__ ldc1(i.OutputDoubleRegister(), i.MemoryOperand());
__ Ldc1(i.OutputDoubleRegister(), i.MemoryOperand());
break;
case kMipsUldc1:
__ Uldc1(i.OutputDoubleRegister(), i.MemoryOperand(), kScratchReg);
@@ -1482,7 +1482,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
if (ft.is(kDoubleRegZero) && !__ IsDoubleZeroRegSet()) {
__ Move(kDoubleRegZero, 0.0);
}
__ sdc1(ft, i.MemoryOperand());
__ Sdc1(ft, i.MemoryOperand());
break;
}
case kMipsUsdc1: {
@@ -1495,7 +1495,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
}
case kMipsPush:
if (instr->InputAt(0)->IsFPRegister()) {
__ sdc1(i.InputDoubleRegister(0), MemOperand(sp, -kDoubleSize));
__ Sdc1(i.InputDoubleRegister(0), MemOperand(sp, -kDoubleSize));
__ Subu(sp, sp, Operand(kDoubleSize));
frame_access_state()->IncreaseSPDelta(kDoubleSize / kPointerSize);
} else {
@@ -1512,7 +1512,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
if (instr->InputAt(0)->IsFPRegister()) {
LocationOperand* op = LocationOperand::cast(instr->InputAt(0));
if (op->representation() == MachineRepresentation::kFloat64) {
__ sdc1(i.InputDoubleRegister(0), MemOperand(sp, i.InputInt32(1)));
__ Sdc1(i.InputDoubleRegister(0), MemOperand(sp, i.InputInt32(1)));
} else {
DCHECK_EQ(MachineRepresentation::kFloat32, op->representation());
__ swc1(i.InputSingleRegister(0), MemOperand(sp, i.InputInt32(1)));
@@ -1545,7 +1545,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
ASSEMBLE_CHECKED_LOAD_FLOAT(Single, lwc1);
break;
case kCheckedLoadFloat64:
ASSEMBLE_CHECKED_LOAD_FLOAT(Double, ldc1);
ASSEMBLE_CHECKED_LOAD_FLOAT(Double, Ldc1);
break;
case kCheckedStoreWord8:
ASSEMBLE_CHECKED_STORE_INTEGER(sb);
@@ -1560,7 +1560,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
ASSEMBLE_CHECKED_STORE_FLOAT(Single, swc1);
break;
case kCheckedStoreFloat64:
ASSEMBLE_CHECKED_STORE_FLOAT(Double, sdc1);
ASSEMBLE_CHECKED_STORE_FLOAT(Double, Sdc1);
break;
case kCheckedLoadWord64:
case kCheckedStoreWord64:
@@ -2222,7 +2222,7 @@ void CodeGenerator::AssembleMove(InstructionOperand* source,
: kScratchDoubleReg;
__ Move(dst, src.ToFloat64());
if (destination->IsFPStackSlot()) {
__ sdc1(dst, g.ToMemOperand(destination));
__ Sdc1(dst, g.ToMemOperand(destination));
}
}
} else if (source->IsFPRegister()) {
@@ -2235,7 +2235,7 @@ void CodeGenerator::AssembleMove(InstructionOperand* source,
MachineRepresentation rep =
LocationOperand::cast(source)->representation();
if (rep == MachineRepresentation::kFloat64) {
__ sdc1(src, g.ToMemOperand(destination));
__ Sdc1(src, g.ToMemOperand(destination));
} else if (rep == MachineRepresentation::kFloat32) {
__ swc1(src, g.ToMemOperand(destination));
} else {
@@ -2249,7 +2249,7 @@ void CodeGenerator::AssembleMove(InstructionOperand* source,
MachineRepresentation rep = LocationOperand::cast(source)->representation();
if (destination->IsFPRegister()) {
if (rep == MachineRepresentation::kFloat64) {
__ ldc1(g.ToDoubleRegister(destination), src);
__ Ldc1(g.ToDoubleRegister(destination), src);
} else if (rep == MachineRepresentation::kFloat32) {
__ lwc1(g.ToDoubleRegister(destination), src);
} else {
@@ -2259,8 +2259,8 @@ void CodeGenerator::AssembleMove(InstructionOperand* source,
} else {
FPURegister temp = kScratchDoubleReg;
if (rep == MachineRepresentation::kFloat64) {
__ ldc1(temp, src);
__ sdc1(temp, g.ToMemOperand(destination));
__ Ldc1(temp, src);
__ Sdc1(temp, g.ToMemOperand(destination));
} else if (rep == MachineRepresentation::kFloat32) {
__ lwc1(temp, src);
__ swc1(temp, g.ToMemOperand(destination));
@@ -2321,8 +2321,8 @@ void CodeGenerator::AssembleSwap(InstructionOperand* source,
LocationOperand::cast(source)->representation();
if (rep == MachineRepresentation::kFloat64) {
__ Move(temp, src);
__ ldc1(src, dst);
__ sdc1(temp, dst);
__ Ldc1(src, dst);
__ Sdc1(temp, dst);
} else if (rep == MachineRepresentation::kFloat32) {
__ Move(temp, src);
__ lwc1(src, dst);
@@ -2342,12 +2342,12 @@ void CodeGenerator::AssembleSwap(InstructionOperand* source,
if (rep == MachineRepresentation::kFloat64) {
MemOperand src1(src0.rm(), src0.offset() + kIntSize);
MemOperand dst1(dst0.rm(), dst0.offset() + kIntSize);
__ ldc1(temp_1, dst0); // Save destination in temp_1.
__ Ldc1(temp_1, dst0); // Save destination in temp_1.
__ lw(temp_0, src0); // Then use temp_0 to copy source to destination.
__ sw(temp_0, dst0);
__ lw(temp_0, src1);
__ sw(temp_0, dst1);
__ sdc1(temp_1, src0);
__ Sdc1(temp_1, src0);
} else if (rep == MachineRepresentation::kFloat32) {
__ lwc1(temp_1, dst0); // Save destination in temp_1.
__ lw(temp_0, src0); // Then use temp_0 to copy source to destination.
@ -115,7 +115,7 @@ void LCodeGen::SaveCallerDoubles() {
|
||||
BitVector* doubles = chunk()->allocated_double_registers();
|
||||
BitVector::Iterator save_iterator(doubles);
|
||||
while (!save_iterator.Done()) {
|
||||
__ sdc1(DoubleRegister::from_code(save_iterator.Current()),
|
||||
__ Sdc1(DoubleRegister::from_code(save_iterator.Current()),
|
||||
MemOperand(sp, count * kDoubleSize));
|
||||
save_iterator.Advance();
|
||||
count++;
|
||||
@ -131,7 +131,7 @@ void LCodeGen::RestoreCallerDoubles() {
|
||||
BitVector::Iterator save_iterator(doubles);
|
||||
int count = 0;
|
||||
while (!save_iterator.Done()) {
|
||||
__ ldc1(DoubleRegister::from_code(save_iterator.Current()),
|
||||
__ Ldc1(DoubleRegister::from_code(save_iterator.Current()),
|
||||
MemOperand(sp, count * kDoubleSize));
|
||||
save_iterator.Advance();
|
||||
count++;
|
||||
@ -471,7 +471,7 @@ DoubleRegister LCodeGen::EmitLoadDoubleRegister(LOperand* op,
|
||||
}
|
||||
} else if (op->IsStackSlot()) {
|
||||
MemOperand mem_op = ToMemOperand(op);
|
||||
__ ldc1(dbl_scratch, mem_op);
|
||||
__ Ldc1(dbl_scratch, mem_op);
|
||||
return dbl_scratch;
|
||||
}
|
||||
UNREACHABLE();
|
||||
@ -1948,7 +1948,7 @@ void LCodeGen::DoBranch(LBranch* instr) {
|
||||
} else if (type.IsHeapNumber()) {
|
||||
DCHECK(!info()->IsStub());
|
||||
DoubleRegister dbl_scratch = double_scratch0();
|
||||
__ ldc1(dbl_scratch, FieldMemOperand(reg, HeapNumber::kValueOffset));
|
||||
__ Ldc1(dbl_scratch, FieldMemOperand(reg, HeapNumber::kValueOffset));
|
||||
// Test the double value. Zero and NaN are false.
|
||||
EmitBranchF(instr, ogl, dbl_scratch, kDoubleRegZero);
|
||||
} else if (type.IsString()) {
|
||||
@ -2030,7 +2030,7 @@ void LCodeGen::DoBranch(LBranch* instr) {
|
||||
Label not_heap_number;
|
||||
__ LoadRoot(at, Heap::kHeapNumberMapRootIndex);
|
||||
__ Branch(¬_heap_number, ne, map, Operand(at));
|
||||
__ ldc1(dbl_scratch, FieldMemOperand(reg, HeapNumber::kValueOffset));
|
||||
__ Ldc1(dbl_scratch, FieldMemOperand(reg, HeapNumber::kValueOffset));
|
||||
__ BranchF(instr->TrueLabel(chunk_), instr->FalseLabel(chunk_),
|
||||
ne, dbl_scratch, kDoubleRegZero);
|
||||
// Falls through if dbl_scratch == 0.
|
||||
@ -2480,7 +2480,7 @@ void LCodeGen::DoLoadNamedField(LLoadNamedField* instr) {
|
||||
|
||||
if (instr->hydrogen()->representation().IsDouble()) {
|
||||
DoubleRegister result = ToDoubleRegister(instr->result());
|
||||
__ ldc1(result, FieldMemOperand(object, offset));
|
||||
__ Ldc1(result, FieldMemOperand(object, offset));
|
||||
return;
|
||||
}
|
||||
|
||||
@ -2598,7 +2598,7 @@ void LCodeGen::DoLoadKeyedExternalArray(LLoadKeyed* instr) {
|
||||
__ lwc1(result, MemOperand(scratch0(), base_offset));
|
||||
__ cvt_d_s(result, result);
|
||||
} else { // i.e. elements_kind == EXTERNAL_DOUBLE_ELEMENTS
|
||||
__ ldc1(result, MemOperand(scratch0(), base_offset));
|
||||
__ Ldc1(result, MemOperand(scratch0(), base_offset));
|
||||
}
|
||||
} else {
|
||||
Register result = ToRegister(instr->result());
|
||||
@ -2676,7 +2676,7 @@ void LCodeGen::DoLoadKeyedFixedDoubleArray(LLoadKeyed* instr) {
|
||||
__ Lsa(scratch, scratch, key, shift_size);
|
||||
}
|
||||
|
||||
__ ldc1(result, MemOperand(scratch));
|
||||
__ Ldc1(result, MemOperand(scratch));
|
||||
|
||||
if (instr->hydrogen()->RequiresHoleCheck()) {
|
||||
__ lw(scratch, MemOperand(scratch, kHoleNanUpper32Offset));
|
||||
@ -3603,7 +3603,7 @@ void LCodeGen::DoStoreNamedField(LStoreNamedField* instr) {
|
||||
DCHECK(!instr->hydrogen()->has_transition());
|
||||
DCHECK(!instr->hydrogen()->NeedsWriteBarrier());
|
||||
DoubleRegister value = ToDoubleRegister(instr->value());
|
||||
__ sdc1(value, FieldMemOperand(object, offset));
|
||||
__ Sdc1(value, FieldMemOperand(object, offset));
|
||||
return;
|
||||
}
|
||||
|
||||
@ -3721,7 +3721,7 @@ void LCodeGen::DoStoreKeyedExternalArray(LStoreKeyed* instr) {
|
||||
__ cvt_s_d(double_scratch0(), value);
|
||||
__ swc1(double_scratch0(), MemOperand(address, base_offset));
|
||||
} else { // Storing doubles, not floats.
|
||||
__ sdc1(value, MemOperand(address, base_offset));
|
||||
__ Sdc1(value, MemOperand(address, base_offset));
|
||||
}
|
||||
} else {
|
||||
Register value(ToRegister(instr->value()));
|
||||
@ -3801,14 +3801,14 @@ void LCodeGen::DoStoreKeyedFixedDoubleArray(LStoreKeyed* instr) {
|
||||
// Only load canonical NaN if the comparison above set the overflow.
|
||||
__ bind(&is_nan);
|
||||
__ LoadRoot(scratch_1, Heap::kNanValueRootIndex);
|
||||
__ ldc1(double_scratch,
|
||||
__ Ldc1(double_scratch,
|
||||
FieldMemOperand(scratch_1, HeapNumber::kValueOffset));
|
||||
__ sdc1(double_scratch, MemOperand(scratch, 0));
|
||||
__ Sdc1(double_scratch, MemOperand(scratch, 0));
|
||||
__ Branch(&done);
|
||||
}
|
||||
|
||||
__ bind(¬_nan);
|
||||
__ sdc1(value, MemOperand(scratch, 0));
|
||||
__ Sdc1(value, MemOperand(scratch, 0));
|
||||
__ bind(&done);
|
||||
}
|
||||
|
||||
@ -4281,7 +4281,7 @@ void LCodeGen::DoDeferredNumberTagIU(LInstruction* instr,
|
||||
// Done. Put the value in dbl_scratch into the value of the allocated heap
|
||||
// number.
|
||||
__ bind(&done);
|
||||
__ sdc1(dbl_scratch, FieldMemOperand(dst, HeapNumber::kValueOffset));
|
||||
__ Sdc1(dbl_scratch, FieldMemOperand(dst, HeapNumber::kValueOffset));
|
||||
}
|
||||
|
||||
|
||||
@ -4311,7 +4311,7 @@ void LCodeGen::DoNumberTagD(LNumberTagD* instr) {
|
||||
__ Branch(deferred->entry());
|
||||
}
|
||||
__ bind(deferred->exit());
|
||||
__ sdc1(input_reg, FieldMemOperand(reg, HeapNumber::kValueOffset));
|
||||
__ Sdc1(input_reg, FieldMemOperand(reg, HeapNumber::kValueOffset));
|
||||
// Now that we have finished with the object's real address tag it
|
||||
}
|
||||
|
||||
@ -4392,7 +4392,7 @@ void LCodeGen::EmitNumberUntagD(LNumberUntagD* instr, Register input_reg,
|
||||
Operand(at));
|
||||
}
|
||||
// Load heap number.
|
||||
__ ldc1(result_reg, FieldMemOperand(input_reg, HeapNumber::kValueOffset));
|
||||
__ Ldc1(result_reg, FieldMemOperand(input_reg, HeapNumber::kValueOffset));
|
||||
if (deoptimize_on_minus_zero) {
|
||||
__ mfc1(at, result_reg.low());
|
||||
__ Branch(&done, ne, at, Operand(zero_reg));
|
||||
@ -4408,7 +4408,7 @@ void LCodeGen::EmitNumberUntagD(LNumberUntagD* instr, Register input_reg,
|
||||
DeoptimizeIf(ne, instr, DeoptimizeReason::kNotAHeapNumberUndefined,
|
||||
input_reg, Operand(at));
|
||||
__ LoadRoot(scratch, Heap::kNanValueRootIndex);
|
||||
__ ldc1(result_reg, FieldMemOperand(scratch, HeapNumber::kValueOffset));
|
||||
__ Ldc1(result_reg, FieldMemOperand(scratch, HeapNumber::kValueOffset));
|
||||
__ Branch(&done);
|
||||
}
|
||||
} else {
|
||||
@ -4457,7 +4457,7 @@ void LCodeGen::DoDeferredTaggedToI(LTaggedToI* instr) {
|
||||
Operand(at));
|
||||
|
||||
// Load the double value.
|
||||
__ ldc1(double_scratch,
|
||||
__ Ldc1(double_scratch,
|
||||
FieldMemOperand(input_reg, HeapNumber::kValueOffset));
|
||||
|
||||
Register except_flag = scratch2;
|
||||
@ -4822,8 +4822,8 @@ void LCodeGen::DoClampTToUint8(LClampTToUint8* instr) {
|
||||
|
||||
// Heap number
|
||||
__ bind(&heap_number);
|
||||
__ ldc1(double_scratch0(), FieldMemOperand(input_reg,
|
||||
HeapNumber::kValueOffset));
|
||||
__ Ldc1(double_scratch0(),
|
||||
FieldMemOperand(input_reg, HeapNumber::kValueOffset));
|
||||
__ ClampDoubleToUint8(result_reg, double_scratch0(), temp_reg);
|
||||
__ jmp(&done);
|
||||
|
||||
|
@ -150,7 +150,7 @@ void LGapResolver::BreakCycle(int index) {
|
||||
} else if (source->IsDoubleRegister()) {
|
||||
__ mov_d(kLithiumScratchDouble, cgen_->ToDoubleRegister(source));
|
||||
} else if (source->IsDoubleStackSlot()) {
|
||||
__ ldc1(kLithiumScratchDouble, cgen_->ToMemOperand(source));
|
||||
__ Ldc1(kLithiumScratchDouble, cgen_->ToMemOperand(source));
|
||||
} else {
|
||||
UNREACHABLE();
|
||||
}
|
||||
@ -172,8 +172,7 @@ void LGapResolver::RestoreValue() {
|
||||
__ mov_d(cgen_->ToDoubleRegister(saved_destination_),
|
||||
kLithiumScratchDouble);
|
||||
} else if (saved_destination_->IsDoubleStackSlot()) {
|
||||
__ sdc1(kLithiumScratchDouble,
|
||||
cgen_->ToMemOperand(saved_destination_));
|
||||
__ Sdc1(kLithiumScratchDouble, cgen_->ToMemOperand(saved_destination_));
|
||||
} else {
|
||||
UNREACHABLE();
|
||||
}
|
||||
@ -259,13 +258,13 @@ void LGapResolver::EmitMove(int index) {
|
||||
} else {
|
||||
DCHECK(destination->IsDoubleStackSlot());
|
||||
MemOperand destination_operand = cgen_->ToMemOperand(destination);
|
||||
__ sdc1(source_register, destination_operand);
|
||||
__ Sdc1(source_register, destination_operand);
|
||||
}
|
||||
|
||||
} else if (source->IsDoubleStackSlot()) {
|
||||
MemOperand source_operand = cgen_->ToMemOperand(source);
|
||||
if (destination->IsDoubleRegister()) {
|
||||
__ ldc1(cgen_->ToDoubleRegister(destination), source_operand);
|
||||
__ Ldc1(cgen_->ToDoubleRegister(destination), source_operand);
|
||||
} else {
|
||||
DCHECK(destination->IsDoubleStackSlot());
|
||||
MemOperand destination_operand = cgen_->ToMemOperand(destination);
|
||||
@ -281,8 +280,8 @@ void LGapResolver::EmitMove(int index) {
|
||||
__ lw(kLithiumScratchReg, source_high_operand);
|
||||
__ sw(kLithiumScratchReg, destination_high_operand);
|
||||
} else {
|
||||
__ ldc1(kLithiumScratchDouble, source_operand);
|
||||
__ sdc1(kLithiumScratchDouble, destination_operand);
|
||||
__ Ldc1(kLithiumScratchDouble, source_operand);
|
||||
__ Sdc1(kLithiumScratchDouble, destination_operand);
|
||||
}
|
||||
}
|
||||
} else {
|
||||
|
@ -2219,44 +2219,6 @@ void Assembler::lwc1(FPURegister fd, const MemOperand& src) {
|
||||
}
|
||||
|
||||
|
||||
void Assembler::ldc1(FPURegister fd, const MemOperand& src) {
|
||||
// Workaround for non-8-byte alignment of HeapNumber, convert 64-bit
|
||||
// load to two 32-bit loads.
|
||||
if (IsFp32Mode()) { // fp32 mode.
|
||||
if (is_int16(src.offset_) && is_int16(src.offset_ + kIntSize)) {
|
||||
GenInstrImmediate(LWC1, src.rm(), fd,
|
||||
src.offset_ + Register::kMantissaOffset);
|
||||
FPURegister nextfpreg;
|
||||
nextfpreg.setcode(fd.code() + 1);
|
||||
GenInstrImmediate(LWC1, src.rm(), nextfpreg,
|
||||
src.offset_ + Register::kExponentOffset);
|
||||
} else { // Offset > 16 bits, use multiple instructions to load.
|
||||
int32_t off16 = LoadUpperOffsetForTwoMemoryAccesses(src);
|
||||
GenInstrImmediate(LWC1, at, fd, off16 + Register::kMantissaOffset);
|
||||
FPURegister nextfpreg;
|
||||
nextfpreg.setcode(fd.code() + 1);
|
||||
GenInstrImmediate(LWC1, at, nextfpreg, off16 + Register::kExponentOffset);
|
||||
}
|
||||
} else {
|
||||
DCHECK(IsFp64Mode() || IsFpxxMode());
|
||||
// Currently we support FPXX and FP64 on Mips32r2 and Mips32r6
|
||||
DCHECK(IsMipsArchVariant(kMips32r2) || IsMipsArchVariant(kMips32r6));
|
||||
if (is_int16(src.offset_) && is_int16(src.offset_ + kIntSize)) {
|
||||
GenInstrImmediate(LWC1, src.rm(), fd,
|
||||
src.offset_ + Register::kMantissaOffset);
|
||||
GenInstrImmediate(LW, src.rm(), at,
|
||||
src.offset_ + Register::kExponentOffset);
|
||||
mthc1(at, fd);
|
||||
} else { // Offset > 16 bits, use multiple instructions to load.
|
||||
int32_t off16 = LoadUpperOffsetForTwoMemoryAccesses(src);
|
||||
GenInstrImmediate(LWC1, at, fd, off16 + Register::kMantissaOffset);
|
||||
GenInstrImmediate(LW, at, at, off16 + Register::kExponentOffset);
|
||||
mthc1(at, fd);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
void Assembler::swc1(FPURegister fd, const MemOperand& src) {
|
||||
if (is_int16(src.offset_)) {
|
||||
GenInstrImmediate(SWC1, src.rm(), fd, src.offset_);
|
||||
@ -2267,46 +2229,6 @@ void Assembler::swc1(FPURegister fd, const MemOperand& src) {
|
||||
}
|
||||
|
||||
|
||||
void Assembler::sdc1(FPURegister fd, const MemOperand& src) {
|
||||
// Workaround for non-8-byte alignment of HeapNumber, convert 64-bit
|
||||
// store to two 32-bit stores.
|
||||
DCHECK(!src.rm().is(at));
|
||||
DCHECK(!src.rm().is(t8));
|
||||
if (IsFp32Mode()) { // fp32 mode.
|
||||
if (is_int16(src.offset_) && is_int16(src.offset_ + kIntSize)) {
|
||||
GenInstrImmediate(SWC1, src.rm(), fd,
|
||||
src.offset_ + Register::kMantissaOffset);
|
||||
FPURegister nextfpreg;
|
||||
nextfpreg.setcode(fd.code() + 1);
|
||||
GenInstrImmediate(SWC1, src.rm(), nextfpreg,
|
||||
src.offset_ + Register::kExponentOffset);
|
||||
} else { // Offset > 16 bits, use multiple instructions to load.
|
||||
int32_t off16 = LoadUpperOffsetForTwoMemoryAccesses(src);
|
||||
GenInstrImmediate(SWC1, at, fd, off16 + Register::kMantissaOffset);
|
||||
FPURegister nextfpreg;
|
||||
nextfpreg.setcode(fd.code() + 1);
|
||||
GenInstrImmediate(SWC1, at, nextfpreg, off16 + Register::kExponentOffset);
|
||||
}
|
||||
} else {
|
||||
DCHECK(IsFp64Mode() || IsFpxxMode());
|
||||
// Currently we support FPXX and FP64 on Mips32r2 and Mips32r6
|
||||
DCHECK(IsMipsArchVariant(kMips32r2) || IsMipsArchVariant(kMips32r6));
|
||||
if (is_int16(src.offset_) && is_int16(src.offset_ + kIntSize)) {
|
||||
GenInstrImmediate(SWC1, src.rm(), fd,
|
||||
src.offset_ + Register::kMantissaOffset);
|
||||
mfhc1(at, fd);
|
||||
GenInstrImmediate(SW, src.rm(), at,
|
||||
src.offset_ + Register::kExponentOffset);
|
||||
} else { // Offset > 16 bits, use multiple instructions to load.
|
||||
int32_t off16 = LoadUpperOffsetForTwoMemoryAccesses(src);
|
||||
GenInstrImmediate(SWC1, at, fd, off16 + Register::kMantissaOffset);
|
||||
mfhc1(t8, fd);
|
||||
GenInstrImmediate(SW, at, t8, off16 + Register::kExponentOffset);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
void Assembler::mtc1(Register rt, FPURegister fs) {
|
||||
GenInstrRegister(COP1, MTC1, rt, fs, f0);
|
||||
}
|
||||
|
@ -859,10 +859,7 @@ class Assembler : public AssemblerBase {
|
||||
|
||||
// Load, store, and move.
|
||||
void lwc1(FPURegister fd, const MemOperand& src);
|
||||
void ldc1(FPURegister fd, const MemOperand& src);
|
||||
|
||||
void swc1(FPURegister fs, const MemOperand& dst);
|
||||
void sdc1(FPURegister fs, const MemOperand& dst);
|
||||
|
||||
void mtc1(Register rt, FPURegister fs);
|
||||
void mthc1(Register rt, FPURegister fs);
|
||||
|
@ -93,7 +93,7 @@ void DoubleToIStub::Generate(MacroAssembler* masm) {
|
||||
|
||||
if (!skip_fastpath()) {
|
||||
// Load double input.
|
||||
__ ldc1(double_scratch, MemOperand(input_reg, double_offset));
|
||||
__ Ldc1(double_scratch, MemOperand(input_reg, double_offset));
|
||||
|
||||
// Clear cumulative exception flags and save the FCSR.
|
||||
__ cfc1(scratch2, FCSR);
|
||||
@ -347,7 +347,7 @@ static void EmitSmiNonsmiComparison(MacroAssembler* masm,
|
||||
__ sra(at, rhs, kSmiTagSize);
|
||||
__ mtc1(at, f14);
|
||||
__ cvt_d_w(f14, f14);
|
||||
__ ldc1(f12, FieldMemOperand(lhs, HeapNumber::kValueOffset));
|
||||
__ Ldc1(f12, FieldMemOperand(lhs, HeapNumber::kValueOffset));
|
||||
|
||||
// We now have both loaded as doubles.
|
||||
__ jmp(both_loaded_as_doubles);
|
||||
@ -371,7 +371,7 @@ static void EmitSmiNonsmiComparison(MacroAssembler* masm,
|
||||
__ sra(at, lhs, kSmiTagSize);
|
||||
__ mtc1(at, f12);
|
||||
__ cvt_d_w(f12, f12);
|
||||
__ ldc1(f14, FieldMemOperand(rhs, HeapNumber::kValueOffset));
|
||||
__ Ldc1(f14, FieldMemOperand(rhs, HeapNumber::kValueOffset));
|
||||
// Fall through to both_loaded_as_doubles.
|
||||
}
|
||||
|
||||
@ -428,8 +428,8 @@ static void EmitCheckForTwoHeapNumbers(MacroAssembler* masm,
|
||||
|
||||
// Both are heap numbers. Load them up then jump to the code we have
|
||||
// for that.
|
||||
__ ldc1(f12, FieldMemOperand(lhs, HeapNumber::kValueOffset));
|
||||
__ ldc1(f14, FieldMemOperand(rhs, HeapNumber::kValueOffset));
|
||||
__ Ldc1(f12, FieldMemOperand(lhs, HeapNumber::kValueOffset));
|
||||
__ Ldc1(f14, FieldMemOperand(rhs, HeapNumber::kValueOffset));
|
||||
|
||||
__ jmp(both_loaded_as_doubles);
|
||||
}
|
||||
@ -763,7 +763,7 @@ void MathPowStub::Generate(MacroAssembler* masm) {
|
||||
// Base is already in double_base.
|
||||
__ UntagAndJumpIfSmi(scratch, exponent, &int_exponent);
|
||||
|
||||
__ ldc1(double_exponent,
|
||||
__ Ldc1(double_exponent,
|
||||
FieldMemOperand(exponent, HeapNumber::kValueOffset));
|
||||
}
|
||||
|
||||
@ -1805,7 +1805,7 @@ void CompareICStub::GenerateNumbers(MacroAssembler* masm) {
|
||||
__ CheckMap(a0, a2, Heap::kHeapNumberMapRootIndex, &maybe_undefined1,
|
||||
DONT_DO_SMI_CHECK);
|
||||
__ Subu(a2, a0, Operand(kHeapObjectTag));
|
||||
__ ldc1(f2, MemOperand(a2, HeapNumber::kValueOffset));
|
||||
__ Ldc1(f2, MemOperand(a2, HeapNumber::kValueOffset));
|
||||
__ Branch(&left);
|
||||
__ bind(&right_smi);
|
||||
__ SmiUntag(a2, a0); // Can't clobber a0 yet.
|
||||
@ -1818,7 +1818,7 @@ void CompareICStub::GenerateNumbers(MacroAssembler* masm) {
|
||||
__ CheckMap(a1, a2, Heap::kHeapNumberMapRootIndex, &maybe_undefined2,
|
||||
DONT_DO_SMI_CHECK);
|
||||
__ Subu(a2, a1, Operand(kHeapObjectTag));
|
||||
__ ldc1(f0, MemOperand(a2, HeapNumber::kValueOffset));
|
||||
__ Ldc1(f0, MemOperand(a2, HeapNumber::kValueOffset));
|
||||
__ Branch(&done);
|
||||
__ bind(&left_smi);
|
||||
__ SmiUntag(a2, a1); // Can't clobber a1 yet.
|
||||
|
@ -122,7 +122,7 @@ void Deoptimizer::TableEntryGenerator::Generate() {
|
||||
int code = config->GetAllocatableDoubleCode(i);
|
||||
const DoubleRegister fpu_reg = DoubleRegister::from_code(code);
|
||||
int offset = code * kDoubleSize;
|
||||
__ sdc1(fpu_reg, MemOperand(sp, offset));
|
||||
__ Sdc1(fpu_reg, MemOperand(sp, offset));
|
||||
}
|
||||
|
||||
// Push saved_regs (needed to populate FrameDescription::registers_).
|
||||
@ -199,8 +199,8 @@ void Deoptimizer::TableEntryGenerator::Generate() {
|
||||
int code = config->GetAllocatableDoubleCode(i);
|
||||
int dst_offset = code * kDoubleSize + double_regs_offset;
|
||||
int src_offset = code * kDoubleSize + kNumberOfRegisters * kPointerSize;
|
||||
__ ldc1(f0, MemOperand(sp, src_offset));
|
||||
__ sdc1(f0, MemOperand(a1, dst_offset));
|
||||
__ Ldc1(f0, MemOperand(sp, src_offset));
|
||||
__ Sdc1(f0, MemOperand(a1, dst_offset));
|
||||
}
|
||||
|
||||
// Remove the bailout id and the saved registers from the stack.
|
||||
@ -270,7 +270,7 @@ void Deoptimizer::TableEntryGenerator::Generate() {
|
||||
int code = config->GetAllocatableDoubleCode(i);
|
||||
const DoubleRegister fpu_reg = DoubleRegister::from_code(code);
|
||||
int src_offset = code * kDoubleSize + double_regs_offset;
|
||||
__ ldc1(fpu_reg, MemOperand(a1, src_offset));
|
||||
__ Ldc1(fpu_reg, MemOperand(a1, src_offset));
|
||||
}
|
||||
|
||||
// Push state, pc, and continuation from the last output frame.
|
||||
|
@ -1291,7 +1291,7 @@ void MacroAssembler::Uldc1(FPURegister fd, const MemOperand& rs,
|
||||
Register scratch) {
|
||||
DCHECK(!scratch.is(at));
|
||||
if (IsMipsArchVariant(kMips32r6)) {
|
||||
ldc1(fd, rs);
|
||||
Ldc1(fd, rs);
|
||||
} else {
|
||||
DCHECK(IsMipsArchVariant(kMips32r2) || IsMipsArchVariant(kMips32r1) ||
|
||||
IsMipsArchVariant(kLoongson));
|
||||
@ -1306,7 +1306,7 @@ void MacroAssembler::Usdc1(FPURegister fd, const MemOperand& rs,
|
||||
Register scratch) {
|
||||
DCHECK(!scratch.is(at));
|
||||
if (IsMipsArchVariant(kMips32r6)) {
|
||||
sdc1(fd, rs);
|
||||
Sdc1(fd, rs);
|
||||
} else {
|
||||
DCHECK(IsMipsArchVariant(kMips32r2) || IsMipsArchVariant(kMips32r1) ||
|
||||
IsMipsArchVariant(kLoongson));
|
||||
@ -1317,6 +1317,75 @@ void MacroAssembler::Usdc1(FPURegister fd, const MemOperand& rs,
|
||||
}
|
||||
}
|
||||
|
||||
void MacroAssembler::Ldc1(FPURegister fd, const MemOperand& src) {
|
||||
// Workaround for non-8-byte alignment of HeapNumber, convert 64-bit
|
||||
// load to two 32-bit loads.
|
||||
if (IsFp32Mode()) { // fp32 mode.
|
||||
if (is_int16(src.offset()) && is_int16(src.offset() + kIntSize)) {
|
||||
lwc1(fd, MemOperand(src.rm(), src.offset() + Register::kMantissaOffset));
|
||||
FPURegister nextfpreg;
|
||||
nextfpreg.setcode(fd.code() + 1);
|
||||
lwc1(nextfpreg,
|
||||
MemOperand(src.rm(), src.offset() + Register::kExponentOffset));
|
||||
} else { // Offset > 16 bits, use multiple instructions to load.
|
||||
int32_t off16 = LoadUpperOffsetForTwoMemoryAccesses(src);
|
||||
lwc1(fd, MemOperand(at, off16 + Register::kMantissaOffset));
|
||||
FPURegister nextfpreg;
|
||||
nextfpreg.setcode(fd.code() + 1);
|
||||
lwc1(nextfpreg, MemOperand(at, off16 + Register::kExponentOffset));
|
||||
}
|
||||
} else {
|
||||
DCHECK(IsFp64Mode() || IsFpxxMode());
|
||||
// Currently we support FPXX and FP64 on Mips32r2 and Mips32r6
|
||||
DCHECK(IsMipsArchVariant(kMips32r2) || IsMipsArchVariant(kMips32r6));
|
||||
if (is_int16(src.offset()) && is_int16(src.offset() + kIntSize)) {
|
||||
lwc1(fd, MemOperand(src.rm(), src.offset() + Register::kMantissaOffset));
|
||||
lw(at, MemOperand(src.rm(), src.offset() + Register::kExponentOffset));
|
||||
mthc1(at, fd);
|
||||
} else { // Offset > 16 bits, use multiple instructions to load.
|
||||
int32_t off16 = LoadUpperOffsetForTwoMemoryAccesses(src);
|
||||
lwc1(fd, MemOperand(at, off16 + Register::kMantissaOffset));
|
||||
lw(at, MemOperand(at, off16 + Register::kExponentOffset));
|
||||
mthc1(at, fd);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
void MacroAssembler::Sdc1(FPURegister fd, const MemOperand& src) {
|
||||
// Workaround for non-8-byte alignment of HeapNumber, convert 64-bit
|
||||
// store to two 32-bit stores.
|
||||
DCHECK(!src.rm().is(at));
|
||||
DCHECK(!src.rm().is(t8));
|
||||
if (IsFp32Mode()) { // fp32 mode.
|
||||
if (is_int16(src.offset()) && is_int16(src.offset() + kIntSize)) {
|
||||
swc1(fd, MemOperand(src.rm(), src.offset() + Register::kMantissaOffset));
|
||||
FPURegister nextfpreg;
|
||||
nextfpreg.setcode(fd.code() + 1);
|
||||
swc1(nextfpreg,
|
||||
MemOperand(src.rm(), src.offset() + Register::kExponentOffset));
|
||||
} else { // Offset > 16 bits, use multiple instructions to load.
|
||||
int32_t off16 = LoadUpperOffsetForTwoMemoryAccesses(src);
|
||||
swc1(fd, MemOperand(at, off16 + Register::kMantissaOffset));
|
||||
FPURegister nextfpreg;
|
||||
nextfpreg.setcode(fd.code() + 1);
|
||||
swc1(nextfpreg, MemOperand(at, off16 + Register::kExponentOffset));
|
||||
}
|
||||
} else {
|
||||
DCHECK(IsFp64Mode() || IsFpxxMode());
|
||||
// Currently we support FPXX and FP64 on Mips32r2 and Mips32r6
|
||||
DCHECK(IsMipsArchVariant(kMips32r2) || IsMipsArchVariant(kMips32r6));
|
||||
if (is_int16(src.offset()) && is_int16(src.offset() + kIntSize)) {
|
||||
swc1(fd, MemOperand(src.rm(), src.offset() + Register::kMantissaOffset));
|
||||
mfhc1(at, fd);
|
||||
sw(at, MemOperand(src.rm(), src.offset() + Register::kExponentOffset));
|
||||
} else { // Offset > 16 bits, use multiple instructions to load.
|
||||
int32_t off16 = LoadUpperOffsetForTwoMemoryAccesses(src);
|
||||
swc1(fd, MemOperand(at, off16 + Register::kMantissaOffset));
|
||||
mfhc1(t8, fd);
|
||||
sw(t8, MemOperand(at, off16 + Register::kExponentOffset));
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
void MacroAssembler::li(Register dst, Handle<Object> value, LiFlags mode) {
|
||||
li(dst, Operand(value), mode);
|
||||
@ -1412,7 +1481,7 @@ void MacroAssembler::MultiPushFPU(RegList regs) {
|
||||
for (int16_t i = kNumRegisters - 1; i >= 0; i--) {
|
||||
if ((regs & (1 << i)) != 0) {
|
||||
stack_offset -= kDoubleSize;
|
||||
sdc1(FPURegister::from_code(i), MemOperand(sp, stack_offset));
|
||||
Sdc1(FPURegister::from_code(i), MemOperand(sp, stack_offset));
|
||||
}
|
||||
}
|
||||
}
|
||||
@ -1426,7 +1495,7 @@ void MacroAssembler::MultiPushReversedFPU(RegList regs) {
|
||||
for (int16_t i = 0; i < kNumRegisters; i++) {
|
||||
if ((regs & (1 << i)) != 0) {
|
||||
stack_offset -= kDoubleSize;
|
||||
sdc1(FPURegister::from_code(i), MemOperand(sp, stack_offset));
|
||||
Sdc1(FPURegister::from_code(i), MemOperand(sp, stack_offset));
|
||||
}
|
||||
}
|
||||
}
|
||||
@ -1437,7 +1506,7 @@ void MacroAssembler::MultiPopFPU(RegList regs) {
|
||||
|
||||
for (int16_t i = 0; i < kNumRegisters; i++) {
|
||||
if ((regs & (1 << i)) != 0) {
|
||||
ldc1(FPURegister::from_code(i), MemOperand(sp, stack_offset));
|
||||
Ldc1(FPURegister::from_code(i), MemOperand(sp, stack_offset));
|
||||
stack_offset += kDoubleSize;
|
||||
}
|
||||
}
|
||||
@ -1450,7 +1519,7 @@ void MacroAssembler::MultiPopReversedFPU(RegList regs) {
|
||||
|
||||
for (int16_t i = kNumRegisters - 1; i >= 0; i--) {
|
||||
if ((regs & (1 << i)) != 0) {
|
||||
ldc1(FPURegister::from_code(i), MemOperand(sp, stack_offset));
|
||||
Ldc1(FPURegister::from_code(i), MemOperand(sp, stack_offset));
|
||||
stack_offset += kDoubleSize;
|
||||
}
|
||||
}
|
||||
@ -2489,7 +2558,7 @@ void MacroAssembler::TruncateDoubleToI(Register result,
|
||||
// If we fell through then inline version didn't succeed - call stub instead.
|
||||
push(ra);
|
||||
Subu(sp, sp, Operand(kDoubleSize)); // Put input on stack.
|
||||
sdc1(double_input, MemOperand(sp, 0));
|
||||
Sdc1(double_input, MemOperand(sp, 0));
|
||||
|
||||
DoubleToIStub stub(isolate(), sp, result, 0, true, true);
|
||||
CallStub(&stub);
|
||||
@ -2506,7 +2575,7 @@ void MacroAssembler::TruncateHeapNumberToI(Register result, Register object) {
|
||||
DoubleRegister double_scratch = f12;
|
||||
DCHECK(!result.is(object));
|
||||
|
||||
ldc1(double_scratch,
|
||||
Ldc1(double_scratch,
|
||||
MemOperand(object, HeapNumber::kValueOffset - kHeapObjectTag));
|
||||
TryInlineTruncateDoubleToI(result, double_scratch, &done);
|
||||
|
||||
@ -4239,7 +4308,7 @@ void MacroAssembler::AllocateHeapNumberWithValue(Register result,
|
||||
Label* gc_required) {
|
||||
LoadRoot(t8, Heap::kHeapNumberMapRootIndex);
|
||||
AllocateHeapNumber(result, scratch1, scratch2, t8, gc_required);
|
||||
sdc1(value, FieldMemOperand(result, HeapNumber::kValueOffset));
|
||||
Sdc1(value, FieldMemOperand(result, HeapNumber::kValueOffset));
|
||||
}
|
||||
|
||||
|
||||
@ -4791,7 +4860,7 @@ void MacroAssembler::ObjectToDoubleFPURegister(Register object,
|
||||
And(exponent, exponent, mask_reg);
|
||||
Branch(not_number, eq, exponent, Operand(mask_reg));
|
||||
}
|
||||
ldc1(result, FieldMemOperand(object, HeapNumber::kValueOffset));
|
||||
Ldc1(result, FieldMemOperand(object, HeapNumber::kValueOffset));
|
||||
bind(&done);
|
||||
}
|
||||
|
||||
@ -5418,7 +5487,7 @@ void MacroAssembler::EnterExitFrame(bool save_doubles, int stack_space,
|
||||
// Remember: we only need to save every 2nd double FPU value.
|
||||
for (int i = 0; i < FPURegister::kMaxNumRegisters; i+=2) {
|
||||
FPURegister reg = FPURegister::from_code(i);
|
||||
sdc1(reg, MemOperand(sp, i * kDoubleSize));
|
||||
Sdc1(reg, MemOperand(sp, i * kDoubleSize));
|
||||
}
|
||||
}
|
||||
|
||||
@ -5448,7 +5517,7 @@ void MacroAssembler::LeaveExitFrame(bool save_doubles, Register argument_count,
|
||||
lw(t8, MemOperand(fp, ExitFrameConstants::kSPOffset));
|
||||
for (int i = 0; i < FPURegister::kMaxNumRegisters; i+=2) {
|
||||
FPURegister reg = FPURegister::from_code(i);
|
||||
ldc1(reg, MemOperand(t8, i * kDoubleSize + kPointerSize));
|
||||
Ldc1(reg, MemOperand(t8, i * kDoubleSize + kPointerSize));
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -673,6 +673,9 @@ class MacroAssembler: public Assembler {
|
||||
void Uldc1(FPURegister fd, const MemOperand& rs, Register scratch);
|
||||
void Usdc1(FPURegister fd, const MemOperand& rs, Register scratch);
|
||||
|
||||
void Ldc1(FPURegister fd, const MemOperand& src);
|
||||
void Sdc1(FPURegister fs, const MemOperand& dst);
|
||||
|
||||
// Load int32 in the rd register.
|
||||
void li(Register rd, Operand j, LiFlags mode = OPTIMIZE_SIZE);
|
||||
inline void li(Register rd, int32_t j, LiFlags mode = OPTIMIZE_SIZE) {
|
||||
|
@ -279,36 +279,36 @@ TEST(MIPS3) {
|
||||
Label L, C;
|
||||
|
||||
// Double precision floating point instructions.
|
||||
__ ldc1(f4, MemOperand(a0, offsetof(T, a)) );
|
||||
__ ldc1(f6, MemOperand(a0, offsetof(T, b)) );
|
||||
__ Ldc1(f4, MemOperand(a0, offsetof(T, a)));
|
||||
__ Ldc1(f6, MemOperand(a0, offsetof(T, b)));
|
||||
__ add_d(f8, f4, f6);
|
||||
__ sdc1(f8, MemOperand(a0, offsetof(T, c)) ); // c = a + b.
|
||||
__ Sdc1(f8, MemOperand(a0, offsetof(T, c))); // c = a + b.
|
||||
|
||||
__ mov_d(f10, f8); // c
|
||||
__ neg_d(f12, f6); // -b
|
||||
__ sub_d(f10, f10, f12);
|
||||
__ sdc1(f10, MemOperand(a0, offsetof(T, d)) ); // d = c - (-b).
|
||||
__ Sdc1(f10, MemOperand(a0, offsetof(T, d))); // d = c - (-b).
|
||||
|
||||
__ sdc1(f4, MemOperand(a0, offsetof(T, b)) ); // b = a.
|
||||
__ Sdc1(f4, MemOperand(a0, offsetof(T, b))); // b = a.
|
||||
|
||||
__ li(t0, 120);
|
||||
__ mtc1(t0, f14);
|
||||
__ cvt_d_w(f14, f14); // f14 = 120.0.
|
||||
__ mul_d(f10, f10, f14);
|
||||
__ sdc1(f10, MemOperand(a0, offsetof(T, e)) ); // e = d * 120 = 1.8066e16.
|
||||
__ Sdc1(f10, MemOperand(a0, offsetof(T, e))); // e = d * 120 = 1.8066e16.
|
||||
|
||||
__ div_d(f12, f10, f4);
|
||||
__ sdc1(f12, MemOperand(a0, offsetof(T, f)) ); // f = e / a = 120.44.
|
||||
__ Sdc1(f12, MemOperand(a0, offsetof(T, f))); // f = e / a = 120.44.
|
||||
|
||||
__ sqrt_d(f14, f12);
|
||||
__ sdc1(f14, MemOperand(a0, offsetof(T, g)) );
|
||||
__ Sdc1(f14, MemOperand(a0, offsetof(T, g)));
|
||||
// g = sqrt(f) = 10.97451593465515908537
|
||||
|
||||
if (IsMipsArchVariant(kMips32r2)) {
|
||||
__ ldc1(f4, MemOperand(a0, offsetof(T, h)) );
|
||||
__ ldc1(f6, MemOperand(a0, offsetof(T, i)) );
|
||||
__ Ldc1(f4, MemOperand(a0, offsetof(T, h)));
|
||||
__ Ldc1(f6, MemOperand(a0, offsetof(T, i)));
|
||||
__ madd_d(f14, f6, f4, f6);
|
||||
__ sdc1(f14, MemOperand(a0, offsetof(T, h)) );
|
||||
__ Sdc1(f14, MemOperand(a0, offsetof(T, h)));
|
||||
}
|
||||
|
||||
// Single precision floating point instructions.
|
||||
@ -404,11 +404,11 @@ TEST(MIPS4) {
|
||||
} T;
|
||||
T t;
|
||||
|
||||
Assembler assm(isolate, NULL, 0);
|
||||
MacroAssembler assm(isolate, NULL, 0, v8::internal::CodeObjectRequired::kYes);
|
||||
Label L, C;
|
||||
|
||||
__ ldc1(f4, MemOperand(a0, offsetof(T, a)) );
|
||||
__ ldc1(f6, MemOperand(a0, offsetof(T, b)) );
|
||||
__ Ldc1(f4, MemOperand(a0, offsetof(T, a)));
|
||||
__ Ldc1(f6, MemOperand(a0, offsetof(T, b)));
|
||||
|
||||
// Swap f4 and f6, by using four integer registers, t0-t3.
|
||||
if (IsFp32Mode()) {
|
||||
@ -436,8 +436,8 @@ TEST(MIPS4) {
|
||||
}
|
||||
|
||||
// Store the swapped f4 and f5 back to memory.
|
||||
__ sdc1(f4, MemOperand(a0, offsetof(T, a)) );
|
||||
__ sdc1(f6, MemOperand(a0, offsetof(T, c)) );
|
||||
__ Sdc1(f4, MemOperand(a0, offsetof(T, a)));
|
||||
__ Sdc1(f6, MemOperand(a0, offsetof(T, c)));
|
||||
|
||||
__ jr(ra);
|
||||
__ nop();
|
||||
@ -473,12 +473,12 @@ TEST(MIPS5) {
|
||||
} T;
|
||||
T t;
|
||||
|
||||
Assembler assm(isolate, NULL, 0);
|
||||
MacroAssembler assm(isolate, NULL, 0, v8::internal::CodeObjectRequired::kYes);
|
||||
Label L, C;
|
||||
|
||||
// Load all structure elements to registers.
|
||||
__ ldc1(f4, MemOperand(a0, offsetof(T, a)) );
|
||||
__ ldc1(f6, MemOperand(a0, offsetof(T, b)) );
|
||||
__ Ldc1(f4, MemOperand(a0, offsetof(T, a)));
|
||||
__ Ldc1(f6, MemOperand(a0, offsetof(T, b)));
|
||||
__ lw(t0, MemOperand(a0, offsetof(T, i)) );
|
||||
__ lw(t1, MemOperand(a0, offsetof(T, j)) );
|
||||
|
||||
@ -495,12 +495,12 @@ TEST(MIPS5) {
|
||||
// Convert int in original i (t0) to double in a.
|
||||
__ mtc1(t0, f12);
|
||||
__ cvt_d_w(f0, f12);
|
||||
__ sdc1(f0, MemOperand(a0, offsetof(T, a)) );
|
||||
__ Sdc1(f0, MemOperand(a0, offsetof(T, a)));
|
||||
|
||||
// Convert int in original j (t1) to double in b.
|
||||
__ mtc1(t1, f14);
|
||||
__ cvt_d_w(f2, f14);
|
||||
__ sdc1(f2, MemOperand(a0, offsetof(T, b)) );
|
||||
__ Sdc1(f2, MemOperand(a0, offsetof(T, b)));
|
||||
|
||||
__ jr(ra);
|
||||
__ nop();
|
||||
@ -626,8 +626,8 @@ TEST(MIPS7) {
|
||||
MacroAssembler assm(isolate, NULL, 0, v8::internal::CodeObjectRequired::kYes);
|
||||
Label neither_is_nan, less_than, outa_here;
|
||||
|
||||
__ ldc1(f4, MemOperand(a0, offsetof(T, a)) );
|
||||
__ ldc1(f6, MemOperand(a0, offsetof(T, b)) );
|
||||
__ Ldc1(f4, MemOperand(a0, offsetof(T, a)));
|
||||
__ Ldc1(f6, MemOperand(a0, offsetof(T, b)));
|
||||
if (!IsMipsArchVariant(kMips32r6)) {
|
||||
__ c(UN, D, f4, f6);
|
||||
__ bc1f(&neither_is_nan);
|
||||
@ -835,14 +835,14 @@ TEST(MIPS10) {
|
||||
} T;
|
||||
T t;
|
||||
|
||||
Assembler assm(isolate, NULL, 0);
|
||||
MacroAssembler assm(isolate, NULL, 0, v8::internal::CodeObjectRequired::kYes);
|
||||
Label L, C;
|
||||
|
||||
if (IsMipsArchVariant(kMips32r1) || IsMipsArchVariant(kLoongson)) return;
|
||||
|
||||
// Load all structure elements to registers.
|
||||
// (f0, f1) = a (fp32), f0 = a (fp64)
|
||||
__ ldc1(f0, MemOperand(a0, offsetof(T, a)));
|
||||
__ Ldc1(f0, MemOperand(a0, offsetof(T, a)));
|
||||
|
||||
__ mfc1(t0, f0); // t0 = f0(31..0)
|
||||
__ mfhc1(t1, f0); // t1 = sign_extend(f0(63..32))
|
||||
@ -858,7 +858,7 @@ TEST(MIPS10) {
|
||||
__ lw(t0, MemOperand(a0, offsetof(T, b_word)));
|
||||
__ mtc1(t0, f8); // f8 has a 32-bits word.
|
||||
__ cvt_d_w(f10, f8);
|
||||
__ sdc1(f10, MemOperand(a0, offsetof(T, b)));
|
||||
__ Sdc1(f10, MemOperand(a0, offsetof(T, b)));
|
||||
|
||||
__ jr(ra);
|
||||
__ nop();
|
||||
@ -1156,14 +1156,14 @@ TEST(MIPS13) {
|
||||
|
||||
__ sw(t0, MemOperand(a0, offsetof(T, cvt_small_in)));
|
||||
__ Cvt_d_uw(f10, t0, f4);
|
||||
__ sdc1(f10, MemOperand(a0, offsetof(T, cvt_small_out)));
|
||||
__ Sdc1(f10, MemOperand(a0, offsetof(T, cvt_small_out)));
|
||||
|
||||
__ Trunc_uw_d(f10, f10, f4);
|
||||
__ swc1(f10, MemOperand(a0, offsetof(T, trunc_small_out)));
|
||||
|
||||
__ sw(t0, MemOperand(a0, offsetof(T, cvt_big_in)));
|
||||
__ Cvt_d_uw(f8, t0, f4);
|
||||
__ sdc1(f8, MemOperand(a0, offsetof(T, cvt_big_out)));
|
||||
__ Sdc1(f8, MemOperand(a0, offsetof(T, cvt_big_out)));
|
||||
|
||||
__ Trunc_uw_d(f8, f8, f4);
|
||||
__ swc1(f8, MemOperand(a0, offsetof(T, trunc_big_out)));
|
||||
@ -1236,48 +1236,48 @@ TEST(MIPS14) {
|
||||
__ cfc1(a1, FCSR);
|
||||
// Disable FPU exceptions.
|
||||
__ ctc1(zero_reg, FCSR);
|
||||
#define RUN_ROUND_TEST(x) \
|
||||
__ cfc1(t0, FCSR);\
|
||||
__ sw(t0, MemOperand(a0, offsetof(T, x##_isNaN2008))); \
|
||||
__ ldc1(f0, MemOperand(a0, offsetof(T, round_up_in))); \
|
||||
__ x##_w_d(f0, f0); \
|
||||
__ swc1(f0, MemOperand(a0, offsetof(T, x##_up_out))); \
|
||||
\
|
||||
__ ldc1(f0, MemOperand(a0, offsetof(T, round_down_in))); \
|
||||
__ x##_w_d(f0, f0); \
|
||||
__ swc1(f0, MemOperand(a0, offsetof(T, x##_down_out))); \
|
||||
\
|
||||
__ ldc1(f0, MemOperand(a0, offsetof(T, neg_round_up_in))); \
|
||||
__ x##_w_d(f0, f0); \
|
||||
__ swc1(f0, MemOperand(a0, offsetof(T, neg_##x##_up_out))); \
|
||||
\
|
||||
__ ldc1(f0, MemOperand(a0, offsetof(T, neg_round_down_in))); \
|
||||
__ x##_w_d(f0, f0); \
|
||||
#define RUN_ROUND_TEST(x) \
|
||||
__ cfc1(t0, FCSR); \
|
||||
__ sw(t0, MemOperand(a0, offsetof(T, x##_isNaN2008))); \
|
||||
__ Ldc1(f0, MemOperand(a0, offsetof(T, round_up_in))); \
|
||||
__ x##_w_d(f0, f0); \
|
||||
__ swc1(f0, MemOperand(a0, offsetof(T, x##_up_out))); \
|
||||
\
|
||||
__ Ldc1(f0, MemOperand(a0, offsetof(T, round_down_in))); \
|
||||
__ x##_w_d(f0, f0); \
|
||||
__ swc1(f0, MemOperand(a0, offsetof(T, x##_down_out))); \
|
||||
\
|
||||
__ Ldc1(f0, MemOperand(a0, offsetof(T, neg_round_up_in))); \
|
||||
__ x##_w_d(f0, f0); \
|
||||
__ swc1(f0, MemOperand(a0, offsetof(T, neg_##x##_up_out))); \
|
||||
\
|
||||
__ Ldc1(f0, MemOperand(a0, offsetof(T, neg_round_down_in))); \
|
||||
__ x##_w_d(f0, f0); \
|
||||
__ swc1(f0, MemOperand(a0, offsetof(T, neg_##x##_down_out))); \
|
||||
\
|
||||
__ ldc1(f0, MemOperand(a0, offsetof(T, err1_in))); \
|
||||
__ ctc1(zero_reg, FCSR); \
|
||||
__ x##_w_d(f0, f0); \
|
||||
__ cfc1(a2, FCSR); \
|
||||
__ sw(a2, MemOperand(a0, offsetof(T, x##_err1_out))); \
|
||||
\
|
||||
__ ldc1(f0, MemOperand(a0, offsetof(T, err2_in))); \
|
||||
__ ctc1(zero_reg, FCSR); \
|
||||
__ x##_w_d(f0, f0); \
|
||||
__ cfc1(a2, FCSR); \
|
||||
__ sw(a2, MemOperand(a0, offsetof(T, x##_err2_out))); \
|
||||
\
|
||||
__ ldc1(f0, MemOperand(a0, offsetof(T, err3_in))); \
|
||||
__ ctc1(zero_reg, FCSR); \
|
||||
__ x##_w_d(f0, f0); \
|
||||
__ cfc1(a2, FCSR); \
|
||||
__ sw(a2, MemOperand(a0, offsetof(T, x##_err3_out))); \
|
||||
\
|
||||
__ ldc1(f0, MemOperand(a0, offsetof(T, err4_in))); \
|
||||
__ ctc1(zero_reg, FCSR); \
|
||||
__ x##_w_d(f0, f0); \
|
||||
__ cfc1(a2, FCSR); \
|
||||
__ sw(a2, MemOperand(a0, offsetof(T, x##_err4_out))); \
|
||||
\
|
||||
__ Ldc1(f0, MemOperand(a0, offsetof(T, err1_in))); \
|
||||
__ ctc1(zero_reg, FCSR); \
|
||||
__ x##_w_d(f0, f0); \
|
||||
__ cfc1(a2, FCSR); \
|
||||
__ sw(a2, MemOperand(a0, offsetof(T, x##_err1_out))); \
|
||||
\
|
||||
__ Ldc1(f0, MemOperand(a0, offsetof(T, err2_in))); \
|
||||
__ ctc1(zero_reg, FCSR); \
|
||||
__ x##_w_d(f0, f0); \
|
||||
__ cfc1(a2, FCSR); \
|
||||
__ sw(a2, MemOperand(a0, offsetof(T, x##_err2_out))); \
|
||||
\
|
||||
__ Ldc1(f0, MemOperand(a0, offsetof(T, err3_in))); \
|
||||
__ ctc1(zero_reg, FCSR); \
|
||||
__ x##_w_d(f0, f0); \
|
||||
__ cfc1(a2, FCSR); \
|
||||
__ sw(a2, MemOperand(a0, offsetof(T, x##_err3_out))); \
|
||||
\
|
||||
__ Ldc1(f0, MemOperand(a0, offsetof(T, err4_in))); \
|
||||
__ ctc1(zero_reg, FCSR); \
|
||||
__ x##_w_d(f0, f0); \
|
||||
__ cfc1(a2, FCSR); \
|
||||
__ sw(a2, MemOperand(a0, offsetof(T, x##_err4_out))); \
|
||||
__ swc1(f0, MemOperand(a0, offsetof(T, x##_invalid_result)));
|
||||
|
||||
RUN_ROUND_TEST(round)
|
||||
@ -1384,16 +1384,16 @@ TEST(seleqz_selnez) {
|
||||
__ selnez(t3, t1, t1); // t3 = 1
|
||||
__ sw(t3, MemOperand(a0, offsetof(Test, d))); // d = 1
|
||||
// Floating point part of test.
|
||||
__ ldc1(f0, MemOperand(a0, offsetof(Test, e)) ); // src
|
||||
__ ldc1(f2, MemOperand(a0, offsetof(Test, f)) ); // test
|
||||
__ Ldc1(f0, MemOperand(a0, offsetof(Test, e))); // src
|
||||
__ Ldc1(f2, MemOperand(a0, offsetof(Test, f))); // test
|
||||
__ lwc1(f8, MemOperand(a0, offsetof(Test, i)) ); // src
|
||||
__ lwc1(f10, MemOperand(a0, offsetof(Test, j)) ); // test
|
||||
__ seleqz_d(f4, f0, f2);
|
||||
__ selnez_d(f6, f0, f2);
|
||||
__ seleqz_s(f12, f8, f10);
|
||||
__ selnez_s(f14, f8, f10);
|
||||
__ sdc1(f4, MemOperand(a0, offsetof(Test, g)) ); // src
|
||||
__ sdc1(f6, MemOperand(a0, offsetof(Test, h)) ); // src
|
||||
__ Sdc1(f4, MemOperand(a0, offsetof(Test, g))); // src
|
||||
__ Sdc1(f6, MemOperand(a0, offsetof(Test, h))); // src
|
||||
__ swc1(f12, MemOperand(a0, offsetof(Test, k)) ); // src
|
||||
__ swc1(f14, MemOperand(a0, offsetof(Test, l)) ); // src
|
||||
__ jr(ra);
|
||||
@ -1498,16 +1498,16 @@ TEST(min_max) {
|
||||
float outputsfmax[kTableLength] = {3.0, 3.0, 3.0, 3.0, 0.0, 0.0, finf,
|
||||
finf, finf, finf, finf, finf, fnan};
|
||||
|
||||
__ ldc1(f4, MemOperand(a0, offsetof(TestFloat, a)));
|
||||
__ ldc1(f8, MemOperand(a0, offsetof(TestFloat, b)));
|
||||
__ Ldc1(f4, MemOperand(a0, offsetof(TestFloat, a)));
|
||||
__ Ldc1(f8, MemOperand(a0, offsetof(TestFloat, b)));
|
||||
__ lwc1(f2, MemOperand(a0, offsetof(TestFloat, e)));
|
||||
__ lwc1(f6, MemOperand(a0, offsetof(TestFloat, f)));
|
||||
__ min_d(f10, f4, f8);
|
||||
__ max_d(f12, f4, f8);
|
||||
__ min_s(f14, f2, f6);
|
||||
__ max_s(f16, f2, f6);
|
||||
__ sdc1(f10, MemOperand(a0, offsetof(TestFloat, c)));
|
||||
__ sdc1(f12, MemOperand(a0, offsetof(TestFloat, d)));
|
||||
__ Sdc1(f10, MemOperand(a0, offsetof(TestFloat, c)));
|
||||
__ Sdc1(f12, MemOperand(a0, offsetof(TestFloat, d)));
|
||||
__ swc1(f14, MemOperand(a0, offsetof(TestFloat, g)));
|
||||
__ swc1(f16, MemOperand(a0, offsetof(TestFloat, h)));
|
||||
__ jr(ra);
|
||||
@ -1614,12 +1614,12 @@ TEST(rint_d) {
|
||||
int fcsr_inputs[4] =
|
||||
{kRoundToNearest, kRoundToZero, kRoundToPlusInf, kRoundToMinusInf};
|
||||
double* outputs[4] = {outputs_RN, outputs_RZ, outputs_RP, outputs_RM};
|
||||
__ ldc1(f4, MemOperand(a0, offsetof(TestFloat, a)) );
|
||||
__ Ldc1(f4, MemOperand(a0, offsetof(TestFloat, a)));
|
||||
__ lw(t0, MemOperand(a0, offsetof(TestFloat, fcsr)) );
|
||||
__ cfc1(t1, FCSR);
|
||||
__ ctc1(t0, FCSR);
|
||||
__ rint_d(f8, f4);
|
||||
__ sdc1(f8, MemOperand(a0, offsetof(TestFloat, b)) );
|
||||
__ Sdc1(f8, MemOperand(a0, offsetof(TestFloat, b)));
|
||||
__ ctc1(t1, FCSR);
|
||||
__ jr(ra);
|
||||
__ nop();
|
||||
@ -1660,15 +1660,15 @@ TEST(sel) {
|
||||
} Test;
|
||||
|
||||
Test test;
|
||||
__ ldc1(f0, MemOperand(a0, offsetof(Test, dd)) ); // test
|
||||
__ ldc1(f2, MemOperand(a0, offsetof(Test, ds)) ); // src1
|
||||
__ ldc1(f4, MemOperand(a0, offsetof(Test, dt)) ); // src2
|
||||
__ Ldc1(f0, MemOperand(a0, offsetof(Test, dd))); // test
|
||||
__ Ldc1(f2, MemOperand(a0, offsetof(Test, ds))); // src1
|
||||
__ Ldc1(f4, MemOperand(a0, offsetof(Test, dt))); // src2
|
||||
__ lwc1(f6, MemOperand(a0, offsetof(Test, fd)) ); // test
|
||||
__ lwc1(f8, MemOperand(a0, offsetof(Test, fs)) ); // src1
|
||||
__ lwc1(f10, MemOperand(a0, offsetof(Test, ft)) ); // src2
|
||||
__ sel_d(f0, f2, f4);
|
||||
__ sel_s(f6, f8, f10);
|
||||
__ sdc1(f0, MemOperand(a0, offsetof(Test, dd)) );
|
||||
__ Sdc1(f0, MemOperand(a0, offsetof(Test, dd)));
|
||||
__ swc1(f6, MemOperand(a0, offsetof(Test, fd)) );
|
||||
__ jr(ra);
|
||||
__ nop();
|
||||
@ -1850,7 +1850,7 @@ TEST(Cvt_d_uw) {
|
||||
|
||||
__ lw(t1, MemOperand(a0, offsetof(TestStruct, input)));
|
||||
__ Cvt_d_uw(f4, t1, f6);
|
||||
__ sdc1(f4, MemOperand(a0, offsetof(TestStruct, output)));
|
||||
__ Sdc1(f4, MemOperand(a0, offsetof(TestStruct, output)));
|
||||
__ jr(ra);
|
||||
__ nop();
|
||||
|
||||
@ -1921,8 +1921,8 @@ TEST(mina_maxa) {
|
||||
5.3, 5.3, 6.1, -10.0, 9.8, 9.8, -10.0, 9.8, 9.8, -10.0, -11.2, -9.8,
|
||||
3.0, 3.0, 0.0, 0.0, finf, finf, finf, finf, finf, finf, fnan};
|
||||
|
||||
__ ldc1(f2, MemOperand(a0, offsetof(TestFloat, a)) );
|
||||
__ ldc1(f4, MemOperand(a0, offsetof(TestFloat, b)) );
|
||||
__ Ldc1(f2, MemOperand(a0, offsetof(TestFloat, a)));
|
||||
__ Ldc1(f4, MemOperand(a0, offsetof(TestFloat, b)));
|
||||
__ lwc1(f8, MemOperand(a0, offsetof(TestFloat, c)) );
|
||||
__ lwc1(f10, MemOperand(a0, offsetof(TestFloat, d)) );
|
||||
__ mina_d(f6, f2, f4);
|
||||
@ -1930,9 +1930,9 @@ TEST(mina_maxa) {
|
||||
__ maxa_d(f14, f2, f4);
|
||||
__ maxa_s(f16, f8, f10);
|
||||
__ swc1(f12, MemOperand(a0, offsetof(TestFloat, resf)) );
|
||||
__ sdc1(f6, MemOperand(a0, offsetof(TestFloat, resd)) );
|
||||
__ Sdc1(f6, MemOperand(a0, offsetof(TestFloat, resd)));
|
||||
__ swc1(f16, MemOperand(a0, offsetof(TestFloat, resf1)) );
|
||||
__ sdc1(f14, MemOperand(a0, offsetof(TestFloat, resd1)) );
|
||||
__ Sdc1(f14, MemOperand(a0, offsetof(TestFloat, resd1)));
|
||||
__ jr(ra);
|
||||
__ nop();
|
||||
|
||||
@ -2008,12 +2008,12 @@ TEST(trunc_l) {
|
||||
|
||||
__ cfc1(t1, FCSR);
|
||||
__ sw(t1, MemOperand(a0, offsetof(Test, isNaN2008)));
|
||||
__ ldc1(f4, MemOperand(a0, offsetof(Test, a)) );
|
||||
__ Ldc1(f4, MemOperand(a0, offsetof(Test, a)));
|
||||
__ lwc1(f6, MemOperand(a0, offsetof(Test, b)) );
|
||||
__ trunc_l_d(f8, f4);
|
||||
__ trunc_l_s(f10, f6);
|
||||
__ sdc1(f8, MemOperand(a0, offsetof(Test, c)) );
|
||||
__ sdc1(f10, MemOperand(a0, offsetof(Test, d)) );
|
||||
__ Sdc1(f8, MemOperand(a0, offsetof(Test, c)));
|
||||
__ Sdc1(f10, MemOperand(a0, offsetof(Test, d)));
|
||||
__ jr(ra);
|
||||
__ nop();
|
||||
Test test;
|
||||
@ -2076,25 +2076,25 @@ TEST(movz_movn) {
|
||||
5.3, -5.3, 5.3, -2.9
|
||||
};
|
||||
|
||||
__ ldc1(f2, MemOperand(a0, offsetof(TestFloat, a)) );
|
||||
__ Ldc1(f2, MemOperand(a0, offsetof(TestFloat, a)));
|
||||
__ lwc1(f6, MemOperand(a0, offsetof(TestFloat, c)) );
|
||||
__ lw(t0, MemOperand(a0, offsetof(TestFloat, rt)) );
|
||||
__ Move(f12, 0.0);
|
||||
__ Move(f10, 0.0);
|
||||
__ Move(f16, 0.0);
|
||||
__ Move(f14, 0.0);
|
||||
__ sdc1(f12, MemOperand(a0, offsetof(TestFloat, bold)) );
|
||||
__ Sdc1(f12, MemOperand(a0, offsetof(TestFloat, bold)));
|
||||
__ swc1(f10, MemOperand(a0, offsetof(TestFloat, dold)) );
|
||||
__ sdc1(f16, MemOperand(a0, offsetof(TestFloat, bold1)) );
|
||||
__ Sdc1(f16, MemOperand(a0, offsetof(TestFloat, bold1)));
|
||||
__ swc1(f14, MemOperand(a0, offsetof(TestFloat, dold1)) );
|
||||
__ movz_s(f10, f6, t0);
|
||||
__ movz_d(f12, f2, t0);
|
||||
__ movn_s(f14, f6, t0);
|
||||
__ movn_d(f16, f2, t0);
|
||||
__ swc1(f10, MemOperand(a0, offsetof(TestFloat, d)) );
|
||||
__ sdc1(f12, MemOperand(a0, offsetof(TestFloat, b)) );
|
||||
__ Sdc1(f12, MemOperand(a0, offsetof(TestFloat, b)));
|
||||
__ swc1(f14, MemOperand(a0, offsetof(TestFloat, d1)) );
|
||||
__ sdc1(f16, MemOperand(a0, offsetof(TestFloat, b1)) );
|
||||
__ Sdc1(f16, MemOperand(a0, offsetof(TestFloat, b1)));
|
||||
__ jr(ra);
|
||||
__ nop();
|
||||
|
||||
@ -2176,7 +2176,7 @@ TEST(movt_movd) {
|
||||
HandleScope scope(isolate);
|
||||
MacroAssembler assm(isolate, NULL, 0,
|
||||
v8::internal::CodeObjectRequired::kYes);
|
||||
__ ldc1(f2, MemOperand(a0, offsetof(TestFloat, srcd)) );
|
||||
__ Ldc1(f2, MemOperand(a0, offsetof(TestFloat, srcd)));
|
||||
__ lwc1(f4, MemOperand(a0, offsetof(TestFloat, srcf)) );
|
||||
__ lw(t1, MemOperand(a0, offsetof(TestFloat, fcsr)) );
|
||||
__ cfc1(t0, FCSR);
|
||||
@ -2184,18 +2184,18 @@ TEST(movt_movd) {
|
||||
__ li(t2, 0x0);
|
||||
__ mtc1(t2, f12);
|
||||
__ mtc1(t2, f10);
|
||||
__ sdc1(f10, MemOperand(a0, offsetof(TestFloat, dstdold)) );
|
||||
__ Sdc1(f10, MemOperand(a0, offsetof(TestFloat, dstdold)));
|
||||
__ swc1(f12, MemOperand(a0, offsetof(TestFloat, dstfold)) );
|
||||
__ movt_s(f12, f4, test.cc);
|
||||
__ movt_d(f10, f2, test.cc);
|
||||
__ swc1(f12, MemOperand(a0, offsetof(TestFloat, dstf)) );
|
||||
__ sdc1(f10, MemOperand(a0, offsetof(TestFloat, dstd)) );
|
||||
__ sdc1(f10, MemOperand(a0, offsetof(TestFloat, dstdold1)) );
|
||||
__ Sdc1(f10, MemOperand(a0, offsetof(TestFloat, dstd)));
|
||||
__ Sdc1(f10, MemOperand(a0, offsetof(TestFloat, dstdold1)));
|
||||
__ swc1(f12, MemOperand(a0, offsetof(TestFloat, dstfold1)) );
|
||||
__ movf_s(f12, f4, test.cc);
|
||||
__ movf_d(f10, f2, test.cc);
|
||||
__ swc1(f12, MemOperand(a0, offsetof(TestFloat, dstf1)) );
|
||||
__ sdc1(f10, MemOperand(a0, offsetof(TestFloat, dstd1)) );
|
||||
__ Sdc1(f10, MemOperand(a0, offsetof(TestFloat, dstd1)));
|
||||
__ ctc1(t0, FCSR);
|
||||
__ jr(ra);
|
||||
__ nop();
|
||||
@ -2275,7 +2275,7 @@ TEST(cvt_w_d) {
|
||||
int fcsr_inputs[4] =
|
||||
{kRoundToNearest, kRoundToZero, kRoundToPlusInf, kRoundToMinusInf};
|
||||
double* outputs[4] = {outputs_RN, outputs_RZ, outputs_RP, outputs_RM};
|
||||
__ ldc1(f4, MemOperand(a0, offsetof(Test, a)) );
|
||||
__ Ldc1(f4, MemOperand(a0, offsetof(Test, a)));
|
||||
__ lw(t0, MemOperand(a0, offsetof(Test, fcsr)) );
|
||||
__ cfc1(t1, FCSR);
|
||||
__ ctc1(t0, FCSR);
|
||||
@ -2343,7 +2343,7 @@ TEST(trunc_w) {
|
||||
|
||||
__ cfc1(t1, FCSR);
|
||||
__ sw(t1, MemOperand(a0, offsetof(Test, isNaN2008)));
|
||||
__ ldc1(f4, MemOperand(a0, offsetof(Test, a)) );
|
||||
__ Ldc1(f4, MemOperand(a0, offsetof(Test, a)));
|
||||
__ lwc1(f6, MemOperand(a0, offsetof(Test, b)) );
|
||||
__ trunc_w_d(f8, f4);
|
||||
__ trunc_w_s(f10, f6);
|
||||
@ -2412,7 +2412,7 @@ TEST(round_w) {
|
||||
|
||||
__ cfc1(t1, FCSR);
|
||||
__ sw(t1, MemOperand(a0, offsetof(Test, isNaN2008)));
|
||||
__ ldc1(f4, MemOperand(a0, offsetof(Test, a)) );
|
||||
__ Ldc1(f4, MemOperand(a0, offsetof(Test, a)));
|
||||
__ lwc1(f6, MemOperand(a0, offsetof(Test, b)) );
|
||||
__ round_w_d(f8, f4);
|
||||
__ round_w_s(f10, f6);
|
||||
@ -2484,12 +2484,12 @@ TEST(round_l) {
|
||||
|
||||
__ cfc1(t1, FCSR);
|
||||
__ sw(t1, MemOperand(a0, offsetof(Test, isNaN2008)));
|
||||
__ ldc1(f4, MemOperand(a0, offsetof(Test, a)) );
|
||||
__ Ldc1(f4, MemOperand(a0, offsetof(Test, a)));
|
||||
__ lwc1(f6, MemOperand(a0, offsetof(Test, b)) );
|
||||
__ round_l_d(f8, f4);
|
||||
__ round_l_s(f10, f6);
|
||||
__ sdc1(f8, MemOperand(a0, offsetof(Test, c)) );
|
||||
__ sdc1(f10, MemOperand(a0, offsetof(Test, d)) );
|
||||
__ Sdc1(f8, MemOperand(a0, offsetof(Test, c)));
|
||||
__ Sdc1(f10, MemOperand(a0, offsetof(Test, d)));
|
||||
__ jr(ra);
|
||||
__ nop();
|
||||
Test test;
|
||||
@ -2557,12 +2557,12 @@ TEST(sub) {
|
||||
};
|
||||
__ lwc1(f2, MemOperand(a0, offsetof(TestFloat, a)) );
|
||||
__ lwc1(f4, MemOperand(a0, offsetof(TestFloat, b)) );
|
||||
__ ldc1(f8, MemOperand(a0, offsetof(TestFloat, c)) );
|
||||
__ ldc1(f10, MemOperand(a0, offsetof(TestFloat, d)) );
|
||||
__ Ldc1(f8, MemOperand(a0, offsetof(TestFloat, c)));
|
||||
__ Ldc1(f10, MemOperand(a0, offsetof(TestFloat, d)));
|
||||
__ sub_s(f6, f2, f4);
|
||||
__ sub_d(f12, f8, f10);
|
||||
__ swc1(f6, MemOperand(a0, offsetof(TestFloat, resultS)) );
|
||||
__ sdc1(f12, MemOperand(a0, offsetof(TestFloat, resultD)) );
|
||||
__ Sdc1(f12, MemOperand(a0, offsetof(TestFloat, resultD)));
|
||||
__ jr(ra);
|
||||
__ nop();
|
||||
|
||||
@ -2623,7 +2623,7 @@ TEST(sqrt_rsqrt_recip) {
|
||||
|
||||
|
||||
__ lwc1(f2, MemOperand(a0, offsetof(TestFloat, a)) );
|
||||
__ ldc1(f8, MemOperand(a0, offsetof(TestFloat, c)) );
|
||||
__ Ldc1(f8, MemOperand(a0, offsetof(TestFloat, c)));
|
||||
__ sqrt_s(f6, f2);
|
||||
__ sqrt_d(f12, f8);
|
||||
|
||||
@ -2634,13 +2634,13 @@ TEST(sqrt_rsqrt_recip) {
|
||||
__ recip_s(f4, f2);
|
||||
}
|
||||
__ swc1(f6, MemOperand(a0, offsetof(TestFloat, resultS)) );
|
||||
__ sdc1(f12, MemOperand(a0, offsetof(TestFloat, resultD)) );
|
||||
__ Sdc1(f12, MemOperand(a0, offsetof(TestFloat, resultD)));
|
||||
|
||||
if (IsMipsArchVariant(kMips32r2) || IsMipsArchVariant(kMips32r6)) {
|
||||
__ swc1(f16, MemOperand(a0, offsetof(TestFloat, resultS1)) );
|
||||
__ sdc1(f14, MemOperand(a0, offsetof(TestFloat, resultD1)) );
|
||||
__ Sdc1(f14, MemOperand(a0, offsetof(TestFloat, resultD1)));
|
||||
__ swc1(f4, MemOperand(a0, offsetof(TestFloat, resultS2)) );
|
||||
__ sdc1(f18, MemOperand(a0, offsetof(TestFloat, resultD2)) );
|
||||
__ Sdc1(f18, MemOperand(a0, offsetof(TestFloat, resultD2)));
|
||||
}
|
||||
__ jr(ra);
|
||||
__ nop();
|
||||
@ -2717,11 +2717,11 @@ TEST(neg) {
0.0, -4.0, 2.0
};
__ lwc1(f2, MemOperand(a0, offsetof(TestFloat, a)) );
__ ldc1(f8, MemOperand(a0, offsetof(TestFloat, c)) );
__ Ldc1(f8, MemOperand(a0, offsetof(TestFloat, c)));
__ neg_s(f6, f2);
__ neg_d(f12, f8);
__ swc1(f6, MemOperand(a0, offsetof(TestFloat, resultS)) );
__ sdc1(f12, MemOperand(a0, offsetof(TestFloat, resultD)) );
__ Sdc1(f12, MemOperand(a0, offsetof(TestFloat, resultD)));
__ jr(ra);
__ nop();

@ -2773,12 +2773,12 @@ TEST(mul) {

__ lwc1(f2, MemOperand(a0, offsetof(TestFloat, a)) );
__ lwc1(f4, MemOperand(a0, offsetof(TestFloat, b)) );
__ ldc1(f6, MemOperand(a0, offsetof(TestFloat, c)) );
__ ldc1(f8, MemOperand(a0, offsetof(TestFloat, d)) );
__ Ldc1(f6, MemOperand(a0, offsetof(TestFloat, c)));
__ Ldc1(f8, MemOperand(a0, offsetof(TestFloat, d)));
__ mul_s(f10, f2, f4);
__ mul_d(f12, f6, f8);
__ swc1(f10, MemOperand(a0, offsetof(TestFloat, resultS)) );
__ sdc1(f12, MemOperand(a0, offsetof(TestFloat, resultD)) );
__ Sdc1(f12, MemOperand(a0, offsetof(TestFloat, resultD)));
__ jr(ra);
__ nop();

@ -2828,12 +2828,12 @@ TEST(mov) {
5.3, -5.3, 5.3, -2.9
};

__ ldc1(f4, MemOperand(a0, offsetof(TestFloat, a)) );
__ Ldc1(f4, MemOperand(a0, offsetof(TestFloat, a)));
__ lwc1(f6, MemOperand(a0, offsetof(TestFloat, c)) );
__ mov_s(f8, f6);
__ mov_d(f10, f4);
__ swc1(f8, MemOperand(a0, offsetof(TestFloat, d)) );
__ sdc1(f10, MemOperand(a0, offsetof(TestFloat, b)) );
__ Sdc1(f10, MemOperand(a0, offsetof(TestFloat, b)));

__ jr(ra);
__ nop();
@ -2896,7 +2896,7 @@ TEST(floor_w) {

__ cfc1(t1, FCSR);
__ sw(t1, MemOperand(a0, offsetof(Test, isNaN2008)));
__ ldc1(f4, MemOperand(a0, offsetof(Test, a)) );
__ Ldc1(f4, MemOperand(a0, offsetof(Test, a)));
__ lwc1(f6, MemOperand(a0, offsetof(Test, b)) );
__ floor_w_d(f8, f4);
__ floor_w_s(f10, f6);
@ -2968,12 +2968,12 @@ TEST(floor_l) {

__ cfc1(t1, FCSR);
__ sw(t1, MemOperand(a0, offsetof(Test, isNaN2008)));
__ ldc1(f4, MemOperand(a0, offsetof(Test, a)) );
__ Ldc1(f4, MemOperand(a0, offsetof(Test, a)));
__ lwc1(f6, MemOperand(a0, offsetof(Test, b)) );
__ floor_l_d(f8, f4);
__ floor_l_s(f10, f6);
__ sdc1(f8, MemOperand(a0, offsetof(Test, c)) );
__ sdc1(f10, MemOperand(a0, offsetof(Test, d)) );
__ Sdc1(f8, MemOperand(a0, offsetof(Test, c)));
__ Sdc1(f10, MemOperand(a0, offsetof(Test, d)));
__ jr(ra);
__ nop();
Test test;
@ -3040,7 +3040,7 @@ TEST(ceil_w) {

__ cfc1(t1, FCSR);
__ sw(t1, MemOperand(a0, offsetof(Test, isNaN2008)));
__ ldc1(f4, MemOperand(a0, offsetof(Test, a)) );
__ Ldc1(f4, MemOperand(a0, offsetof(Test, a)));
__ lwc1(f6, MemOperand(a0, offsetof(Test, b)) );
__ ceil_w_d(f8, f4);
__ ceil_w_s(f10, f6);
@ -3112,12 +3112,12 @@ TEST(ceil_l) {

__ cfc1(t1, FCSR);
__ sw(t1, MemOperand(a0, offsetof(Test, isNaN2008)));
__ ldc1(f4, MemOperand(a0, offsetof(Test, a)) );
__ Ldc1(f4, MemOperand(a0, offsetof(Test, a)));
__ lwc1(f6, MemOperand(a0, offsetof(Test, b)) );
__ ceil_l_d(f8, f4);
__ ceil_l_s(f10, f6);
__ sdc1(f8, MemOperand(a0, offsetof(Test, c)) );
__ sdc1(f10, MemOperand(a0, offsetof(Test, d)) );
__ Sdc1(f8, MemOperand(a0, offsetof(Test, c)));
__ Sdc1(f10, MemOperand(a0, offsetof(Test, d)));
__ jr(ra);
__ nop();
Test test;
@ -3442,45 +3442,45 @@ TEST(class_fmt) {
MacroAssembler assm(isolate, NULL, 0,
v8::internal::CodeObjectRequired::kYes);

__ ldc1(f4, MemOperand(a0, offsetof(T, dSignalingNan)));
__ Ldc1(f4, MemOperand(a0, offsetof(T, dSignalingNan)));
__ class_d(f6, f4);
__ sdc1(f6, MemOperand(a0, offsetof(T, dSignalingNan)));
__ Sdc1(f6, MemOperand(a0, offsetof(T, dSignalingNan)));

__ ldc1(f4, MemOperand(a0, offsetof(T, dQuietNan)));
__ Ldc1(f4, MemOperand(a0, offsetof(T, dQuietNan)));
__ class_d(f6, f4);
__ sdc1(f6, MemOperand(a0, offsetof(T, dQuietNan)));
__ Sdc1(f6, MemOperand(a0, offsetof(T, dQuietNan)));

__ ldc1(f4, MemOperand(a0, offsetof(T, dNegInf)));
__ Ldc1(f4, MemOperand(a0, offsetof(T, dNegInf)));
__ class_d(f6, f4);
__ sdc1(f6, MemOperand(a0, offsetof(T, dNegInf)));
__ Sdc1(f6, MemOperand(a0, offsetof(T, dNegInf)));

__ ldc1(f4, MemOperand(a0, offsetof(T, dNegNorm)));
__ Ldc1(f4, MemOperand(a0, offsetof(T, dNegNorm)));
__ class_d(f6, f4);
__ sdc1(f6, MemOperand(a0, offsetof(T, dNegNorm)));
__ Sdc1(f6, MemOperand(a0, offsetof(T, dNegNorm)));

__ ldc1(f4, MemOperand(a0, offsetof(T, dNegSubnorm)));
__ Ldc1(f4, MemOperand(a0, offsetof(T, dNegSubnorm)));
__ class_d(f6, f4);
__ sdc1(f6, MemOperand(a0, offsetof(T, dNegSubnorm)));
__ Sdc1(f6, MemOperand(a0, offsetof(T, dNegSubnorm)));

__ ldc1(f4, MemOperand(a0, offsetof(T, dNegZero)));
__ Ldc1(f4, MemOperand(a0, offsetof(T, dNegZero)));
__ class_d(f6, f4);
__ sdc1(f6, MemOperand(a0, offsetof(T, dNegZero)));
__ Sdc1(f6, MemOperand(a0, offsetof(T, dNegZero)));

__ ldc1(f4, MemOperand(a0, offsetof(T, dPosInf)));
__ Ldc1(f4, MemOperand(a0, offsetof(T, dPosInf)));
__ class_d(f6, f4);
__ sdc1(f6, MemOperand(a0, offsetof(T, dPosInf)));
__ Sdc1(f6, MemOperand(a0, offsetof(T, dPosInf)));

__ ldc1(f4, MemOperand(a0, offsetof(T, dPosNorm)));
__ Ldc1(f4, MemOperand(a0, offsetof(T, dPosNorm)));
__ class_d(f6, f4);
__ sdc1(f6, MemOperand(a0, offsetof(T, dPosNorm)));
__ Sdc1(f6, MemOperand(a0, offsetof(T, dPosNorm)));

__ ldc1(f4, MemOperand(a0, offsetof(T, dPosSubnorm)));
__ Ldc1(f4, MemOperand(a0, offsetof(T, dPosSubnorm)));
__ class_d(f6, f4);
__ sdc1(f6, MemOperand(a0, offsetof(T, dPosSubnorm)));
__ Sdc1(f6, MemOperand(a0, offsetof(T, dPosSubnorm)));

__ ldc1(f4, MemOperand(a0, offsetof(T, dPosZero)));
__ Ldc1(f4, MemOperand(a0, offsetof(T, dPosZero)));
__ class_d(f6, f4);
__ sdc1(f6, MemOperand(a0, offsetof(T, dPosZero)));
__ Sdc1(f6, MemOperand(a0, offsetof(T, dPosZero)));

// Testing instruction CLASS.S
__ lwc1(f4, MemOperand(a0, offsetof(T, fSignalingNan)));
@ -3604,9 +3604,9 @@ TEST(ABS) {
// Disable FPU exceptions.
__ ctc1(zero_reg, FCSR);

__ ldc1(f4, MemOperand(a0, offsetof(TestFloat, a)));
__ Ldc1(f4, MemOperand(a0, offsetof(TestFloat, a)));
__ abs_d(f10, f4);
__ sdc1(f10, MemOperand(a0, offsetof(TestFloat, a)));
__ Sdc1(f10, MemOperand(a0, offsetof(TestFloat, a)));

__ lwc1(f4, MemOperand(a0, offsetof(TestFloat, b)));
__ abs_s(f10, f4);
@ -3698,10 +3698,10 @@ TEST(ADD_FMT) {

TestFloat test;

__ ldc1(f4, MemOperand(a0, offsetof(TestFloat, a)));
__ ldc1(f8, MemOperand(a0, offsetof(TestFloat, b)));
__ Ldc1(f4, MemOperand(a0, offsetof(TestFloat, a)));
__ Ldc1(f8, MemOperand(a0, offsetof(TestFloat, b)));
__ add_d(f10, f8, f4);
__ sdc1(f10, MemOperand(a0, offsetof(TestFloat, c)));
__ Sdc1(f10, MemOperand(a0, offsetof(TestFloat, c)));

__ lwc1(f4, MemOperand(a0, offsetof(TestFloat, fa)));
__ lwc1(f8, MemOperand(a0, offsetof(TestFloat, fb)));
@ -3785,8 +3785,8 @@ TEST(C_COND_FMT) {

__ li(t1, 1);

__ ldc1(f4, MemOperand(a0, offsetof(TestFloat, dOp1)));
__ ldc1(f6, MemOperand(a0, offsetof(TestFloat, dOp2)));
__ Ldc1(f4, MemOperand(a0, offsetof(TestFloat, dOp1)));
__ Ldc1(f6, MemOperand(a0, offsetof(TestFloat, dOp2)));

__ lwc1(f14, MemOperand(a0, offsetof(TestFloat, fOp1)));
__ lwc1(f16, MemOperand(a0, offsetof(TestFloat, fOp2)));
@ -4003,65 +4003,65 @@ TEST(CMP_COND_FMT) {

__ li(t1, 1);

__ ldc1(f4, MemOperand(a0, offsetof(TestFloat, dOp1)));
__ ldc1(f6, MemOperand(a0, offsetof(TestFloat, dOp2)));
__ Ldc1(f4, MemOperand(a0, offsetof(TestFloat, dOp1)));
__ Ldc1(f6, MemOperand(a0, offsetof(TestFloat, dOp2)));

__ lwc1(f14, MemOperand(a0, offsetof(TestFloat, fOp1)));
__ lwc1(f16, MemOperand(a0, offsetof(TestFloat, fOp2)));

__ cmp_d(F, f2, f4, f6);
__ cmp_s(F, f12, f14, f16);
__ sdc1(f2, MemOperand(a0, offsetof(TestFloat, dF)) );
__ Sdc1(f2, MemOperand(a0, offsetof(TestFloat, dF)));
__ swc1(f12, MemOperand(a0, offsetof(TestFloat, fF)) );

__ cmp_d(UN, f2, f4, f6);
__ cmp_s(UN, f12, f14, f16);
__ sdc1(f2, MemOperand(a0, offsetof(TestFloat, dUn)) );
__ Sdc1(f2, MemOperand(a0, offsetof(TestFloat, dUn)));
__ swc1(f12, MemOperand(a0, offsetof(TestFloat, fUn)) );

__ cmp_d(EQ, f2, f4, f6);
__ cmp_s(EQ, f12, f14, f16);
__ sdc1(f2, MemOperand(a0, offsetof(TestFloat, dEq)) );
__ Sdc1(f2, MemOperand(a0, offsetof(TestFloat, dEq)));
__ swc1(f12, MemOperand(a0, offsetof(TestFloat, fEq)) );

__ cmp_d(UEQ, f2, f4, f6);
__ cmp_s(UEQ, f12, f14, f16);
__ sdc1(f2, MemOperand(a0, offsetof(TestFloat, dUeq)) );
__ Sdc1(f2, MemOperand(a0, offsetof(TestFloat, dUeq)));
__ swc1(f12, MemOperand(a0, offsetof(TestFloat, fUeq)) );

__ cmp_d(LT, f2, f4, f6);
__ cmp_s(LT, f12, f14, f16);
__ sdc1(f2, MemOperand(a0, offsetof(TestFloat, dOlt)) );
__ Sdc1(f2, MemOperand(a0, offsetof(TestFloat, dOlt)));
__ swc1(f12, MemOperand(a0, offsetof(TestFloat, fOlt)) );

__ cmp_d(ULT, f2, f4, f6);
__ cmp_s(ULT, f12, f14, f16);
__ sdc1(f2, MemOperand(a0, offsetof(TestFloat, dUlt)) );
__ Sdc1(f2, MemOperand(a0, offsetof(TestFloat, dUlt)));
__ swc1(f12, MemOperand(a0, offsetof(TestFloat, fUlt)) );

__ cmp_d(LE, f2, f4, f6);
__ cmp_s(LE, f12, f14, f16);
__ sdc1(f2, MemOperand(a0, offsetof(TestFloat, dOle)) );
__ Sdc1(f2, MemOperand(a0, offsetof(TestFloat, dOle)));
__ swc1(f12, MemOperand(a0, offsetof(TestFloat, fOle)) );

__ cmp_d(ULE, f2, f4, f6);
__ cmp_s(ULE, f12, f14, f16);
__ sdc1(f2, MemOperand(a0, offsetof(TestFloat, dUle)) );
__ Sdc1(f2, MemOperand(a0, offsetof(TestFloat, dUle)));
__ swc1(f12, MemOperand(a0, offsetof(TestFloat, fUle)) );

__ cmp_d(ORD, f2, f4, f6);
__ cmp_s(ORD, f12, f14, f16);
__ sdc1(f2, MemOperand(a0, offsetof(TestFloat, dOr)) );
__ Sdc1(f2, MemOperand(a0, offsetof(TestFloat, dOr)));
__ swc1(f12, MemOperand(a0, offsetof(TestFloat, fOr)) );

__ cmp_d(UNE, f2, f4, f6);
__ cmp_s(UNE, f12, f14, f16);
__ sdc1(f2, MemOperand(a0, offsetof(TestFloat, dUne)) );
__ Sdc1(f2, MemOperand(a0, offsetof(TestFloat, dUne)));
__ swc1(f12, MemOperand(a0, offsetof(TestFloat, fUne)) );

__ cmp_d(NE, f2, f4, f6);
__ cmp_s(NE, f12, f14, f16);
__ sdc1(f2, MemOperand(a0, offsetof(TestFloat, dNe)) );
__ Sdc1(f2, MemOperand(a0, offsetof(TestFloat, dNe)));
__ swc1(f12, MemOperand(a0, offsetof(TestFloat, fNe)) );

__ jr(ra);
@ -4225,27 +4225,27 @@ TEST(CVT) {
__ nop(); \
__ z##c1(f0, MemOperand(a0, offsetof(TestFloat, x##_out)));

GENERATE_CVT_TEST(cvt_d_s, lw, sd)
GENERATE_CVT_TEST(cvt_d_w, lw, sd)
GENERATE_CVT_TEST(cvt_d_s, lw, Sd)
GENERATE_CVT_TEST(cvt_d_w, lw, Sd)
if ((IsMipsArchVariant(kMips32r2) || IsMipsArchVariant(kMips32r6)) &&
IsFp64Mode()) {
GENERATE_CVT_TEST(cvt_d_l, ld, sd)
GENERATE_CVT_TEST(cvt_d_l, Ld, Sd)
}

if (IsFp64Mode()) {
GENERATE_CVT_TEST(cvt_l_s, lw, sd)
GENERATE_CVT_TEST(cvt_l_d, ld, sd)
GENERATE_CVT_TEST(cvt_l_s, lw, Sd)
GENERATE_CVT_TEST(cvt_l_d, Ld, Sd)
}

GENERATE_CVT_TEST(cvt_s_d, ld, sw)
GENERATE_CVT_TEST(cvt_s_d, Ld, sw)
GENERATE_CVT_TEST(cvt_s_w, lw, sw)
if ((IsMipsArchVariant(kMips32r2) || IsMipsArchVariant(kMips32r6)) &&
IsFp64Mode()) {
GENERATE_CVT_TEST(cvt_s_l, ld, sw)
GENERATE_CVT_TEST(cvt_s_l, Ld, sw)
}

GENERATE_CVT_TEST(cvt_w_s, lw, sw)
GENERATE_CVT_TEST(cvt_w_d, ld, sw)
GENERATE_CVT_TEST(cvt_w_d, Ld, sw)

// Restore FCSR.
__ ctc1(a1, FCSR);
@ -4447,11 +4447,11 @@ TEST(DIV_FMT) {
// Disable FPU exceptions.
__ ctc1(zero_reg, FCSR);

__ ldc1(f4, MemOperand(a0, offsetof(Test, dOp1)) );
__ ldc1(f2, MemOperand(a0, offsetof(Test, dOp2)) );
__ Ldc1(f4, MemOperand(a0, offsetof(Test, dOp1)));
__ Ldc1(f2, MemOperand(a0, offsetof(Test, dOp2)));
__ nop();
__ div_d(f6, f4, f2);
__ sdc1(f6, MemOperand(a0, offsetof(Test, dRes)) );
__ Sdc1(f6, MemOperand(a0, offsetof(Test, dRes)));

__ lwc1(f4, MemOperand(a0, offsetof(Test, fOp1)) );
__ lwc1(f2, MemOperand(a0, offsetof(Test, fOp2)) );
@ -5428,10 +5428,10 @@ void helper_madd_msub_maddf_msubf(F func) {
__ lwc1(f8, MemOperand(a0, offsetof(TestCaseMaddMsub<T>, ft)));
__ lwc1(f16, MemOperand(a0, offsetof(TestCaseMaddMsub<T>, fr)));
} else if (std::is_same<T, double>::value) {
__ ldc1(f4, MemOperand(a0, offsetof(TestCaseMaddMsub<T>, fr)));
__ ldc1(f6, MemOperand(a0, offsetof(TestCaseMaddMsub<T>, fs)));
__ ldc1(f8, MemOperand(a0, offsetof(TestCaseMaddMsub<T>, ft)));
__ ldc1(f16, MemOperand(a0, offsetof(TestCaseMaddMsub<T>, fr)));
__ Ldc1(f4, MemOperand(a0, offsetof(TestCaseMaddMsub<T>, fr)));
__ Ldc1(f6, MemOperand(a0, offsetof(TestCaseMaddMsub<T>, fs)));
__ Ldc1(f8, MemOperand(a0, offsetof(TestCaseMaddMsub<T>, ft)));
__ Ldc1(f16, MemOperand(a0, offsetof(TestCaseMaddMsub<T>, fr)));
} else {
UNREACHABLE();
}
@ -5487,9 +5487,9 @@ TEST(madd_msub_d) {
if (!IsMipsArchVariant(kMips32r2)) return;
helper_madd_msub_maddf_msubf<double>([](MacroAssembler& assm) {
__ madd_d(f10, f4, f6, f8);
__ sdc1(f10, MemOperand(a0, offsetof(TestCaseMaddMsub<double>, fd_add)));
__ Sdc1(f10, MemOperand(a0, offsetof(TestCaseMaddMsub<double>, fd_add)));
__ msub_d(f16, f4, f6, f8);
__ sdc1(f16, MemOperand(a0, offsetof(TestCaseMaddMsub<double>, fd_sub)));
__ Sdc1(f16, MemOperand(a0, offsetof(TestCaseMaddMsub<double>, fd_sub)));
});
}

@ -5507,9 +5507,9 @@ TEST(maddf_msubf_d) {
if (!IsMipsArchVariant(kMips32r6)) return;
helper_madd_msub_maddf_msubf<double>([](MacroAssembler& assm) {
__ maddf_d(f4, f6, f8);
__ sdc1(f4, MemOperand(a0, offsetof(TestCaseMaddMsub<double>, fd_add)));
__ Sdc1(f4, MemOperand(a0, offsetof(TestCaseMaddMsub<double>, fd_add)));
__ msubf_d(f16, f6, f8);
__ sdc1(f16, MemOperand(a0, offsetof(TestCaseMaddMsub<double>, fd_sub)));
__ Sdc1(f16, MemOperand(a0, offsetof(TestCaseMaddMsub<double>, fd_sub)));
});
}

@ -75,7 +75,7 @@ ConvertDToIFunc MakeConvertDToIFuncTrampoline(Isolate* isolate,
}
// Push the double argument.
__ Subu(sp, sp, Operand(kDoubleSize));
__ sdc1(f12, MemOperand(sp));
__ Sdc1(f12, MemOperand(sp));
__ Move(source_reg, sp);

// Save registers make sure they don't get clobbered.
@ -94,11 +94,11 @@ ConvertDToIFunc MakeConvertDToIFuncTrampoline(Isolate* isolate,

// Re-push the double argument.
__ Subu(sp, sp, Operand(kDoubleSize));
__ sdc1(f12, MemOperand(sp));
__ Sdc1(f12, MemOperand(sp));

// Call through to the actual stub
if (inline_fastpath) {
__ ldc1(f12, MemOperand(source_reg));
__ Ldc1(f12, MemOperand(source_reg));
__ TryInlineTruncateDoubleToI(destination_reg, f12, &done);
if (destination_reg.is(source_reg) && !source_reg.is(sp)) {
// Restore clobbered source_reg.
@ -1009,7 +1009,7 @@ TEST(min_max_nan) {
auto handle_dnan = [masm](FPURegister dst, Label* nan, Label* back) {
__ bind(nan);
__ LoadRoot(at, Heap::kNanValueRootIndex);
__ ldc1(dst, FieldMemOperand(at, HeapNumber::kValueOffset));
__ Ldc1(dst, FieldMemOperand(at, HeapNumber::kValueOffset));
__ Branch(back);
};

@ -1024,8 +1024,8 @@ TEST(min_max_nan) {

__ push(s6);
__ InitializeRootRegister();
__ ldc1(f4, MemOperand(a0, offsetof(TestFloat, a)));
__ ldc1(f8, MemOperand(a0, offsetof(TestFloat, b)));
__ Ldc1(f4, MemOperand(a0, offsetof(TestFloat, a)));
__ Ldc1(f8, MemOperand(a0, offsetof(TestFloat, b)));
__ lwc1(f2, MemOperand(a0, offsetof(TestFloat, e)));
__ lwc1(f6, MemOperand(a0, offsetof(TestFloat, f)));
__ Float64Min(f10, f4, f8, &handle_mind_nan);
@ -1036,8 +1036,8 @@ TEST(min_max_nan) {
__ bind(&back_mins_nan);
__ Float32Max(f16, f2, f6, &handle_maxs_nan);
__ bind(&back_maxs_nan);
__ sdc1(f10, MemOperand(a0, offsetof(TestFloat, c)));
__ sdc1(f12, MemOperand(a0, offsetof(TestFloat, d)));
__ Sdc1(f10, MemOperand(a0, offsetof(TestFloat, c)));
__ Sdc1(f12, MemOperand(a0, offsetof(TestFloat, d)));
__ swc1(f14, MemOperand(a0, offsetof(TestFloat, g)));
__ swc1(f16, MemOperand(a0, offsetof(TestFloat, h)));
__ pop(s6);
@ -1521,11 +1521,11 @@ static ::F4 GenerateMacroFloat64MinMax(MacroAssembler* masm) {
Label done_max_abc, done_max_aab, done_max_aba;

#define FLOAT_MIN_MAX(fminmax, res, x, y, done, ool, res_field) \
__ ldc1(x, MemOperand(a0, offsetof(Inputs, src1_))); \
__ ldc1(y, MemOperand(a0, offsetof(Inputs, src2_))); \
__ Ldc1(x, MemOperand(a0, offsetof(Inputs, src1_))); \
__ Ldc1(y, MemOperand(a0, offsetof(Inputs, src2_))); \
__ fminmax(res, x, y, &ool); \
__ bind(&done); \
__ sdc1(a, MemOperand(a1, offsetof(Results, res_field)))
__ Sdc1(a, MemOperand(a1, offsetof(Results, res_field)))

// a = min(b, c);
FLOAT_MIN_MAX(Float64Min, a, b, c, done_min_abc, ool_min_abc, min_abc_);
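
Note: the commit message explains that Ldc1/Sdc1 live in the macro-assembler because a 64-bit FPU load or store on MIPS32 has to be split into two or three 32-bit instructions, depending on the FPU register model. The sketch below only illustrates that expansion for the load side; it assumes the usual V8 MIPS helpers (IsFp32Mode, lwc1/lw/mthc1, Register::kMantissaOffset/kExponentOffset, the at scratch register) and simplified offset handling, and is not the exact implementation added by this patch.

// Illustrative sketch (not the real MacroAssembler::Ldc1): split a 64-bit
// FPU load into 32-bit accesses on MIPS32.
void MacroAssembler::Ldc1(FPURegister fd, const MemOperand& src) {
  if (IsFp32Mode()) {
    // FR=0: a double occupies an even/odd single-precision register pair,
    // so load the two 32-bit halves with two lwc1 instructions.
    lwc1(fd, MemOperand(src.rm(), src.offset() + Register::kMantissaOffset));
    FPURegister next;
    next.setcode(fd.code() + 1);
    lwc1(next, MemOperand(src.rm(), src.offset() + Register::kExponentOffset));
  } else {
    // FR=1 (fp64/fpxx): load the low word directly, then insert the high
    // word through the integer scratch register: lwc1 + lw + mthc1.
    lwc1(fd, MemOperand(src.rm(), src.offset() + Register::kMantissaOffset));
    lw(at, MemOperand(src.rm(), src.offset() + Register::kExponentOffset));
    mthc1(at, fd);
  }
}

Sdc1 mirrors this on the store side (swc1/sw/mfhc1), which is why both helpers emit multiple instructions and belong in the macro-assembler rather than the assembler proper.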