From e0401f3f712353e53d936aeaaa8437aaea9a1c56 Mon Sep 17 00:00:00 2001 From: "dusan.milosavljevic@imgtec.com" Date: Tue, 29 Jul 2014 18:02:26 +0000 Subject: [PATCH] MIPS64: Add support for architecture revision 6. TEST= BUG= R=jkummerow@chromium.org, paul.lind@imgtec.com Review URL: https://codereview.chromium.org/426863006 git-svn-id: https://v8.googlecode.com/svn/branches/bleeding_edge@22681 ce2b1a6d-e550-0410-aec6-3dcde31c8c00 --- build/toolchain.gypi | 17 +- src/mips64/assembler-mips64.cc | 514 ++++++++++++++++++++++++++- src/mips64/assembler-mips64.h | 160 ++++++++- src/mips64/code-stubs-mips64.cc | 31 +- src/mips64/constants-mips64.cc | 4 + src/mips64/constants-mips64.h | 102 +++++- src/mips64/disasm-mips64.cc | 452 ++++++++++++++++++++--- src/mips64/full-codegen-mips64.cc | 19 +- src/mips64/lithium-codegen-mips64.cc | 48 +-- src/mips64/lithium-mips64.cc | 4 +- src/mips64/lithium-mips64.h | 6 +- src/mips64/macro-assembler-mips64.cc | 360 +++++++++++++------ src/mips64/macro-assembler-mips64.h | 4 + src/mips64/simulator-mips64.cc | 239 ++++++++++--- test/cctest/test-api.cc | 4 +- test/cctest/test-assembler-mips64.cc | 245 +++++++------ test/cctest/test-disasm-mips64.cc | 331 ++++++++++++----- test/mjsunit/mjsunit.status | 3 + 18 files changed, 2033 insertions(+), 510 deletions(-) diff --git a/build/toolchain.gypi b/build/toolchain.gypi index 929461351b..496cef0dbd 100644 --- a/build/toolchain.gypi +++ b/build/toolchain.gypi @@ -376,6 +376,14 @@ 'cflags': ['-msoft-float'], 'ldflags': ['-msoft-float'], }], + ['mips_arch_variant=="r6"', { + 'cflags': ['-mips64r6', '-mabi=64', '-Wa,-mips64r6'], + 'ldflags': [ + '-mips64r6', '-mabi=64', + '-Wl,--dynamic-linker=$(LDSO_PATH)', + '-Wl,--rpath=$(LD_R_PATH)', + ], + }], ['mips_arch_variant=="r2"', { 'cflags': ['-mips64r2', '-mabi=64', '-Wa,-mips64r2'], 'ldflags': [ @@ -384,9 +392,6 @@ '-Wl,--rpath=$(LD_R_PATH)', ], }], - ['mips_arch_variant=="loongson"', { - 'cflags': ['-mips3', '-Wa,-mips3'], - }], ], }], ], @@ -406,12 +411,12 @@ '__mips_soft_float=1' ], }], + ['mips_arch_variant=="r6"', { + 'defines': ['_MIPS_ARCH_MIPS64R6',], + }], ['mips_arch_variant=="r2"', { 'defines': ['_MIPS_ARCH_MIPS64R2',], }], - ['mips_arch_variant=="loongson"', { - 'defines': ['_MIPS_ARCH_LOONGSON',], - }], ], }], # v8_target_arch=="mips64el" ['v8_target_arch=="x64"', { diff --git a/src/mips64/assembler-mips64.cc b/src/mips64/assembler-mips64.cc index 2796c95b37..48cb90a0e7 100644 --- a/src/mips64/assembler-mips64.cc +++ b/src/mips64/assembler-mips64.cc @@ -485,7 +485,9 @@ bool Assembler::IsBranch(Instr instr) { opcode == BGTZL || (opcode == REGIMM && (rt_field == BLTZ || rt_field == BGEZ || rt_field == BLTZAL || rt_field == BGEZAL)) || - (opcode == COP1 && rs_field == BC1); // Coprocessor branch. + (opcode == COP1 && rs_field == BC1) || // Coprocessor branch. + (opcode == COP1 && rs_field == BC1EQZ) || + (opcode == COP1 && rs_field == BC1NEZ); } @@ -969,7 +971,6 @@ void Assembler::GenInstrJump(Opcode opcode, // Returns the next free trampoline entry. 
int32_t Assembler::get_trampoline_entry(int32_t pos) { int32_t trampoline_entry = kInvalidSlotPos; - if (!internal_trampoline_exception_) { if (trampoline_.start() > pos) { trampoline_entry = trampoline_.take_slot(); @@ -985,7 +986,6 @@ int32_t Assembler::get_trampoline_entry(int32_t pos) { uint64_t Assembler::jump_address(Label* L) { int64_t target_pos; - if (L->is_bound()) { target_pos = L->pos(); } else { @@ -1007,7 +1007,6 @@ uint64_t Assembler::jump_address(Label* L) { int32_t Assembler::branch_offset(Label* L, bool jump_elimination_allowed) { int32_t target_pos; - if (L->is_bound()) { target_pos = L->pos(); } else { @@ -1032,6 +1031,86 @@ int32_t Assembler::branch_offset(Label* L, bool jump_elimination_allowed) { } + +int32_t Assembler::branch_offset_compact(Label* L, + bool jump_elimination_allowed) { + int32_t target_pos; + if (L->is_bound()) { + target_pos = L->pos(); + } else { + if (L->is_linked()) { + target_pos = L->pos(); + L->link_to(pc_offset()); + } else { + L->link_to(pc_offset()); + if (!trampoline_emitted_) { + unbound_labels_count_++; + next_buffer_check_ -= kTrampolineSlotsSize; + } + return kEndOfChain; + } + } + + int32_t offset = target_pos - pc_offset(); + ASSERT((offset & 3) == 0); + ASSERT(is_int16(offset >> 2)); + + return offset; +} + + +int32_t Assembler::branch_offset21(Label* L, bool jump_elimination_allowed) { + int32_t target_pos; + if (L->is_bound()) { + target_pos = L->pos(); + } else { + if (L->is_linked()) { + target_pos = L->pos(); + L->link_to(pc_offset()); + } else { + L->link_to(pc_offset()); + if (!trampoline_emitted_) { + unbound_labels_count_++; + next_buffer_check_ -= kTrampolineSlotsSize; + } + return kEndOfChain; + } + } + + int32_t offset = target_pos - (pc_offset() + kBranchPCOffset); + ASSERT((offset & 3) == 0); + ASSERT(((offset >> 2) & 0xFFE00000) == 0); // Offset fits in 21 bits. + + return offset; +} + + +int32_t Assembler::branch_offset21_compact(Label* L, + bool jump_elimination_allowed) { + int32_t target_pos; + if (L->is_bound()) { + target_pos = L->pos(); + } else { + if (L->is_linked()) { + target_pos = L->pos(); + L->link_to(pc_offset()); + } else { + L->link_to(pc_offset()); + if (!trampoline_emitted_) { + unbound_labels_count_++; + next_buffer_check_ -= kTrampolineSlotsSize; + } + return kEndOfChain; + } + } + + int32_t offset = target_pos - pc_offset(); + ASSERT((offset & 3) == 0); + ASSERT(((offset >> 2) & 0xFFE00000) == 0); // Offset fits in 21 bits.
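+ // Range-check example: a branch target 0x100 bytes ahead gives offset = 0x100 and offset >> 2 = 0x40, which fits the 21-bit immediate; the mask 0xFFE00000 covers bits 31..21 of the shifted offset, exactly the bits that must be clear.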
+ + return offset; +} + + void Assembler::label_at_put(Label* L, int at_offset) { int target_pos; if (L->is_bound()) { @@ -1085,7 +1164,33 @@ void Assembler::bgez(Register rs, int16_t offset) { } +void Assembler::bgezc(Register rt, int16_t offset) { + ASSERT(kArchVariant == kMips64r6); + ASSERT(!(rt.is(zero_reg))); + GenInstrImmediate(BLEZL, rt, rt, offset); +} + + +void Assembler::bgeuc(Register rs, Register rt, int16_t offset) { + ASSERT(kArchVariant == kMips64r6); + ASSERT(!(rs.is(zero_reg))); + ASSERT(!(rt.is(zero_reg))); + ASSERT(rs.code() != rt.code()); + GenInstrImmediate(BLEZ, rs, rt, offset); +} + + +void Assembler::bgec(Register rs, Register rt, int16_t offset) { + ASSERT(kArchVariant == kMips64r6); + ASSERT(!(rs.is(zero_reg))); + ASSERT(!(rt.is(zero_reg))); + ASSERT(rs.code() != rt.code()); + GenInstrImmediate(BLEZL, rs, rt, offset); +} + + void Assembler::bgezal(Register rs, int16_t offset) { + ASSERT(kArchVariant != kMips64r6 || rs.is(zero_reg)); BlockTrampolinePoolScope block_trampoline_pool(this); positions_recorder()->WriteRecordedPositions(); GenInstrImmediate(REGIMM, rs, BGEZAL, offset); @@ -1100,6 +1205,13 @@ void Assembler::bgtz(Register rs, int16_t offset) { } +void Assembler::bgtzc(Register rt, int16_t offset) { + ASSERT(kArchVariant == kMips64r6); + ASSERT(!(rt.is(zero_reg))); + GenInstrImmediate(BGTZL, zero_reg, rt, offset); +} + + void Assembler::blez(Register rs, int16_t offset) { BlockTrampolinePoolScope block_trampoline_pool(this); GenInstrImmediate(BLEZ, rs, zero_reg, offset); @@ -1107,6 +1219,38 @@ void Assembler::blez(Register rs, int16_t offset) { } +void Assembler::blezc(Register rt, int16_t offset) { + ASSERT(kArchVariant == kMips64r6); + ASSERT(!(rt.is(zero_reg))); + GenInstrImmediate(BLEZL, zero_reg, rt, offset); +} + + +void Assembler::bltzc(Register rt, int16_t offset) { + ASSERT(kArchVariant == kMips64r6); + ASSERT(!(rt.is(zero_reg))); + GenInstrImmediate(BGTZL, rt, rt, offset); +} + + +void Assembler::bltuc(Register rs, Register rt, int16_t offset) { + ASSERT(kArchVariant == kMips64r6); + ASSERT(!(rs.is(zero_reg))); + ASSERT(!(rt.is(zero_reg))); + ASSERT(rs.code() != rt.code()); + GenInstrImmediate(BGTZ, rs, rt, offset); +} + + +void Assembler::bltc(Register rs, Register rt, int16_t offset) { + ASSERT(kArchVariant == kMips64r6); + ASSERT(!(rs.is(zero_reg))); + ASSERT(!(rt.is(zero_reg))); + ASSERT(rs.code() != rt.code()); + GenInstrImmediate(BGTZL, rs, rt, offset); +} + + void Assembler::bltz(Register rs, int16_t offset) { BlockTrampolinePoolScope block_trampoline_pool(this); GenInstrImmediate(REGIMM, rs, BLTZ, offset); @@ -1115,6 +1259,7 @@ void Assembler::bltz(Register rs, int16_t offset) { void Assembler::bltzal(Register rs, int16_t offset) { + ASSERT(kArchVariant != kMips64r6 || rs.is(zero_reg)); BlockTrampolinePoolScope block_trampoline_pool(this); positions_recorder()->WriteRecordedPositions(); GenInstrImmediate(REGIMM, rs, BLTZAL, offset); @@ -1129,6 +1274,101 @@ void Assembler::bne(Register rs, Register rt, int16_t offset) { } +void Assembler::bovc(Register rs, Register rt, int16_t offset) { + ASSERT(kArchVariant == kMips64r6); + ASSERT(!(rs.is(zero_reg))); + ASSERT(rs.code() >= rt.code()); + GenInstrImmediate(ADDI, rs, rt, offset); +} + + +void Assembler::bnvc(Register rs, Register rt, int16_t offset) { + ASSERT(kArchVariant == kMips64r6); + ASSERT(!(rs.is(zero_reg))); + ASSERT(rs.code() >= rt.code()); + GenInstrImmediate(DADDI, rs, rt, offset); +} + + +void Assembler::blezalc(Register rt, int16_t offset) { + ASSERT(kArchVariant == 
kMips64r6); + ASSERT(!(rt.is(zero_reg))); + GenInstrImmediate(BLEZ, zero_reg, rt, offset); +} + + +void Assembler::bgezalc(Register rt, int16_t offset) { + ASSERT(kArchVariant == kMips64r6); + ASSERT(!(rt.is(zero_reg))); + GenInstrImmediate(BLEZ, rt, rt, offset); +} + + +void Assembler::bgezall(Register rs, int16_t offset) { + ASSERT(kArchVariant == kMips64r6); + ASSERT(!(rs.is(zero_reg))); + GenInstrImmediate(REGIMM, rs, BGEZALL, offset); +} + + +void Assembler::bltzalc(Register rt, int16_t offset) { + ASSERT(kArchVariant == kMips64r6); + ASSERT(!(rt.is(zero_reg))); + GenInstrImmediate(BGTZ, rt, rt, offset); +} + + +void Assembler::bgtzalc(Register rt, int16_t offset) { + ASSERT(kArchVariant == kMips64r6); + ASSERT(!(rt.is(zero_reg))); + GenInstrImmediate(BGTZ, zero_reg, rt, offset); +} + + +void Assembler::beqzalc(Register rt, int16_t offset) { + ASSERT(kArchVariant == kMips64r6); + ASSERT(!(rt.is(zero_reg))); + GenInstrImmediate(ADDI, zero_reg, rt, offset); +} + + +void Assembler::bnezalc(Register rt, int16_t offset) { + ASSERT(kArchVariant == kMips64r6); + ASSERT(!(rt.is(zero_reg))); + GenInstrImmediate(DADDI, zero_reg, rt, offset); +} + + +void Assembler::beqc(Register rs, Register rt, int16_t offset) { + ASSERT(kArchVariant == kMips64r6); + ASSERT(rs.code() < rt.code()); + GenInstrImmediate(ADDI, rs, rt, offset); +} + + +void Assembler::beqzc(Register rs, int32_t offset) { + ASSERT(kArchVariant == kMips64r6); + ASSERT(!(rs.is(zero_reg))); + Instr instr = BEQZC | (rs.code() << kRsShift) | offset; + emit(instr); +} + + +void Assembler::bnec(Register rs, Register rt, int16_t offset) { + ASSERT(kArchVariant == kMips64r6); + ASSERT(rs.code() < rt.code()); + GenInstrImmediate(DADDI, rs, rt, offset); +} + + +void Assembler::bnezc(Register rs, int32_t offset) { + ASSERT(kArchVariant == kMips64r6); + ASSERT(!(rs.is(zero_reg))); + Instr instr = BNEZC | (rs.code() << kRsShift) | offset; + emit(instr); +} + + void Assembler::j(int64_t target) { #if DEBUG // Get pc of delay slot. @@ -1142,12 +1382,16 @@ void Assembler::j(int64_t target) { void Assembler::jr(Register rs) { - BlockTrampolinePoolScope block_trampoline_pool(this); - if (rs.is(ra)) { - positions_recorder()->WriteRecordedPositions(); + if (kArchVariant != kMips64r6) { + BlockTrampolinePoolScope block_trampoline_pool(this); + if (rs.is(ra)) { + positions_recorder()->WriteRecordedPositions(); + } + GenInstrRegister(SPECIAL, rs, zero_reg, zero_reg, 0, JR); + BlockTrampolinePoolFor(1); // For associated delay slot. + } else { + jalr(rs, zero_reg); } - GenInstrRegister(SPECIAL, rs, zero_reg, zero_reg, 0, JR); - BlockTrampolinePoolFor(1); // For associated delay slot. 
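+ // r6 has no JR encoding; jalr with zero_reg as the link register discards the return address and so behaves exactly like the removed jr.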
} @@ -1218,16 +1462,64 @@ void Assembler::subu(Register rd, Register rs, Register rt) { void Assembler::mul(Register rd, Register rs, Register rt) { - GenInstrRegister(SPECIAL2, rs, rt, rd, 0, MUL); + if (kArchVariant == kMips64r6) { + GenInstrRegister(SPECIAL, rs, rt, rd, MUL_OP, MUL_MUH); + } else { + GenInstrRegister(SPECIAL2, rs, rt, rd, 0, MUL); + } +} + + +void Assembler::muh(Register rd, Register rs, Register rt) { + ASSERT(kArchVariant == kMips64r6); + GenInstrRegister(SPECIAL, rs, rt, rd, MUH_OP, MUL_MUH); +} + + +void Assembler::mulu(Register rd, Register rs, Register rt) { + ASSERT(kArchVariant == kMips64r6); + GenInstrRegister(SPECIAL, rs, rt, rd, MUL_OP, MUL_MUH_U); +} + + +void Assembler::muhu(Register rd, Register rs, Register rt) { + ASSERT(kArchVariant == kMips64r6); + GenInstrRegister(SPECIAL, rs, rt, rd, MUH_OP, MUL_MUH_U); +} + + +void Assembler::dmul(Register rd, Register rs, Register rt) { + ASSERT(kArchVariant == kMips64r6); + GenInstrRegister(SPECIAL, rs, rt, rd, MUL_OP, D_MUL_MUH); +} + + +void Assembler::dmuh(Register rd, Register rs, Register rt) { + ASSERT(kArchVariant == kMips64r6); + GenInstrRegister(SPECIAL, rs, rt, rd, MUH_OP, D_MUL_MUH); +} + + +void Assembler::dmulu(Register rd, Register rs, Register rt) { + ASSERT(kArchVariant == kMips64r6); + GenInstrRegister(SPECIAL, rs, rt, rd, MUL_OP, D_MUL_MUH_U); +} + + +void Assembler::dmuhu(Register rd, Register rs, Register rt) { + ASSERT(kArchVariant == kMips64r6); + GenInstrRegister(SPECIAL, rs, rt, rd, MUH_OP, D_MUL_MUH_U); } void Assembler::mult(Register rs, Register rt) { + ASSERT(kArchVariant != kMips64r6); GenInstrRegister(SPECIAL, rs, rt, zero_reg, 0, MULT); } void Assembler::multu(Register rs, Register rt) { + ASSERT(kArchVariant != kMips64r6); GenInstrRegister(SPECIAL, rs, rt, zero_reg, 0, MULTU); } @@ -1242,11 +1534,35 @@ void Assembler::div(Register rs, Register rt) { } +void Assembler::div(Register rd, Register rs, Register rt) { + ASSERT(kArchVariant == kMips64r6); + GenInstrRegister(SPECIAL, rs, rt, rd, DIV_OP, DIV_MOD); +} + + +void Assembler::mod(Register rd, Register rs, Register rt) { + ASSERT(kArchVariant == kMips64r6); + GenInstrRegister(SPECIAL, rs, rt, rd, MOD_OP, DIV_MOD); +} + + void Assembler::divu(Register rs, Register rt) { GenInstrRegister(SPECIAL, rs, rt, zero_reg, 0, DIVU); } +void Assembler::divu(Register rd, Register rs, Register rt) { + ASSERT(kArchVariant == kMips64r6); + GenInstrRegister(SPECIAL, rs, rt, rd, DIV_OP, DIV_MOD_U); +} + + +void Assembler::modu(Register rd, Register rs, Register rt) { + ASSERT(kArchVariant == kMips64r6); + GenInstrRegister(SPECIAL, rs, rt, rd, MOD_OP, DIV_MOD_U); +} + + void Assembler::daddu(Register rd, Register rs, Register rt) { GenInstrRegister(SPECIAL, rs, rt, rd, 0, DADDU); } @@ -1272,11 +1588,35 @@ void Assembler::ddiv(Register rs, Register rt) { } +void Assembler::ddiv(Register rd, Register rs, Register rt) { + ASSERT(kArchVariant == kMips64r6); + GenInstrRegister(SPECIAL, rs, rt, rd, DIV_OP, D_DIV_MOD); +} + + +void Assembler::dmod(Register rd, Register rs, Register rt) { + ASSERT(kArchVariant == kMips64r6); + GenInstrRegister(SPECIAL, rs, rt, rd, MOD_OP, D_DIV_MOD); +} + + void Assembler::ddivu(Register rs, Register rt) { GenInstrRegister(SPECIAL, rs, rt, zero_reg, 0, DDIVU); } +void Assembler::ddivu(Register rd, Register rs, Register rt) { + ASSERT(kArchVariant == kMips64r6); + GenInstrRegister(SPECIAL, rs, rt, rd, DIV_OP, D_DIV_MOD_U); +} + + +void Assembler::dmodu(Register rd, Register rs, Register rt) { + ASSERT(kArchVariant == 
kMips64r6); + GenInstrRegister(SPECIAL, rs, rt, rd, MOD_OP, D_DIV_MOD_U); +} + + // Logical. void Assembler::and_(Register rd, Register rs, Register rt) { @@ -1566,6 +1906,32 @@ void Assembler::lui(Register rd, int32_t j) { } + +void Assembler::aui(Register rs, Register rt, int32_t j) { + // This instruction uses the same opcode as 'lui'. The difference in + // encoding is that 'lui' has the zero register in the rs field. + ASSERT(is_uint16(j)); + GenInstrImmediate(LUI, rs, rt, j); +} + + +void Assembler::daui(Register rs, Register rt, int32_t j) { + ASSERT(is_uint16(j)); + GenInstrImmediate(DAUI, rs, rt, j); +} + + +void Assembler::dahi(Register rs, int32_t j) { + ASSERT(is_uint16(j)); + GenInstrImmediate(REGIMM, rs, DAHI, j); +} + + +void Assembler::dati(Register rs, int32_t j) { + ASSERT(is_uint16(j)); + GenInstrImmediate(REGIMM, rs, DATI, j); +} + + void Assembler::ldl(Register rd, const MemOperand& rs) { GenInstrImmediate(LDL, rs.rm(), rd, rs.offset_); } @@ -1747,17 +2113,73 @@ void Assembler::movf(Register rd, Register rs, uint16_t cc) { } + +void Assembler::sel(SecondaryField fmt, FPURegister fd, + FPURegister ft, FPURegister fs, uint8_t sel) { + ASSERT(kArchVariant == kMips64r6); + ASSERT((fmt == D) || (fmt == S)); + + Instr instr = COP1 | fmt << kRsShift | ft.code() << kFtShift | + fs.code() << kFsShift | fd.code() << kFdShift | SEL; + emit(instr); +} + + +// GPR. +void Assembler::seleqz(Register rs, Register rt, Register rd) { + ASSERT(kArchVariant == kMips64r6); + GenInstrRegister(SPECIAL, rs, rt, rd, 0, SELEQZ_S); +} + + +// FPR. +void Assembler::seleqz(SecondaryField fmt, FPURegister fd, + FPURegister ft, FPURegister fs) { + ASSERT(kArchVariant == kMips64r6); + ASSERT((fmt == D) || (fmt == S)); + + Instr instr = COP1 | fmt << kRsShift | ft.code() << kFtShift | + fs.code() << kFsShift | fd.code() << kFdShift | SELEQZ_C; + emit(instr); +} + + +// GPR. +void Assembler::selnez(Register rs, Register rt, Register rd) { + ASSERT(kArchVariant == kMips64r6); + GenInstrRegister(SPECIAL, rs, rt, rd, 0, SELNEZ_S); +} + + +// FPR. +void Assembler::selnez(SecondaryField fmt, FPURegister fd, + FPURegister ft, FPURegister fs) { + ASSERT(kArchVariant == kMips64r6); + ASSERT((fmt == D) || (fmt == S)); + + Instr instr = COP1 | fmt << kRsShift | ft.code() << kFtShift | + fs.code() << kFsShift | fd.code() << kFdShift | SELNEZ_C; + emit(instr); +} + + // Bit twiddling. void Assembler::clz(Register rd, Register rs) { - // Clz instr requires same GPR number in 'rd' and 'rt' fields. - GenInstrRegister(SPECIAL2, rs, rd, rd, 0, CLZ); + if (kArchVariant != kMips64r6) { + // Clz instr requires same GPR number in 'rd' and 'rt' fields. + GenInstrRegister(SPECIAL2, rs, rd, rd, 0, CLZ); + } else { + GenInstrRegister(SPECIAL, rs, zero_reg, rd, 1, CLZ_R6); + } } void Assembler::ins_(Register rt, Register rs, uint16_t pos, uint16_t size) { // Should be called via MacroAssembler::Ins. // Ins instr has 'rt' field as dest, and two uint5: msb, lsb. - ASSERT(kArchVariant == kMips64r2); + ASSERT((kArchVariant == kMips64r2) || (kArchVariant == kMips64r6)); GenInstrRegister(SPECIAL3, rs, rt, pos + size - 1, pos, INS); } @@ -1765,13 +2187,12 @@ void Assembler::ins_(Register rt, Register rs, uint16_t pos, uint16_t size) { void Assembler::ext_(Register rt, Register rs, uint16_t pos, uint16_t size) { // Should be called via MacroAssembler::Ext. // Ext instr has 'rt' field as dest, and two uint5: msb, lsb.
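+ // Encoding example: ext_(t0, t1, 3, 4) extracts bits 6..3 of t1 into t0; the rd slot carries msbd = size - 1 = 3 and the sa slot carries lsb = pos = 3.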
- ASSERT(kArchVariant == kMips64r2); + ASSERT(kArchVariant == kMips64r2 || kArchVariant == kMips64r6); GenInstrRegister(SPECIAL3, rs, rt, size - 1, pos, EXT); } void Assembler::pref(int32_t hint, const MemOperand& rs) { - ASSERT(kArchVariant != kLoongson); ASSERT(is_uint5(hint) && is_uint16(rs.offset_)); Instr instr = PREF | (rs.rm().code() << kRsShift) | (hint << kRtShift) | (rs.offset_); @@ -1870,7 +2291,6 @@ void Assembler::mul_d(FPURegister fd, FPURegister fs, FPURegister ft) { void Assembler::madd_d(FPURegister fd, FPURegister fr, FPURegister fs, FPURegister ft) { - ASSERT(kArchVariant != kLoongson); GenInstrRegister(COP1X, fr, ft, fs, fd, MADD_D); } @@ -2006,6 +2426,38 @@ void Assembler::ceil_l_d(FPURegister fd, FPURegister fs) { } +void Assembler::min(SecondaryField fmt, FPURegister fd, FPURegister ft, + FPURegister fs) { + ASSERT(kArchVariant == kMips64r6); + ASSERT((fmt == D) || (fmt == S)); + GenInstrRegister(COP1, fmt, ft, fs, fd, MIN); +} + + +void Assembler::mina(SecondaryField fmt, FPURegister fd, FPURegister ft, + FPURegister fs) { + ASSERT(kArchVariant == kMips64r6); + ASSERT((fmt == D) || (fmt == S)); + GenInstrRegister(COP1, fmt, ft, fs, fd, MINA); +} + + +void Assembler::max(SecondaryField fmt, FPURegister fd, FPURegister ft, + FPURegister fs) { + ASSERT(kArchVariant == kMips64r6); + ASSERT((fmt == D) || (fmt == S)); + GenInstrRegister(COP1, fmt, ft, fs, fd, MAX); +} + + +void Assembler::maxa(SecondaryField fmt, FPURegister fd, FPURegister ft, + FPURegister fs) { + ASSERT(kArchVariant == kMips64r6); + ASSERT((fmt == D) || (fmt == S)); + GenInstrRegister(COP1, fmt, ft, fs, fd, MAXA); +} + + void Assembler::cvt_s_w(FPURegister fd, FPURegister fs) { GenInstrRegister(COP1, W, f0, fs, fd, CVT_S_W); } @@ -2038,12 +2490,38 @@ void Assembler::cvt_d_s(FPURegister fd, FPURegister fs) { } -// Conditions. +// Conditions for >= MIPSr6. +void Assembler::cmp(FPUCondition cond, SecondaryField fmt, + FPURegister fd, FPURegister fs, FPURegister ft) { + ASSERT(kArchVariant == kMips64r6); + ASSERT((fmt & ~(31 << kRsShift)) == 0); + Instr instr = COP1 | fmt | ft.code() << kFtShift | + fs.code() << kFsShift | fd.code() << kFdShift | (0 << 5) | cond; + emit(instr); +} + + +void Assembler::bc1eqz(int16_t offset, FPURegister ft) { + ASSERT(kArchVariant == kMips64r6); + Instr instr = COP1 | BC1EQZ | ft.code() << kFtShift | (offset & kImm16Mask); + emit(instr); +} + + +void Assembler::bc1nez(int16_t offset, FPURegister ft) { + ASSERT(kArchVariant == kMips64r6); + Instr instr = COP1 | BC1NEZ | ft.code() << kFtShift | (offset & kImm16Mask); + emit(instr); +} + + +// Conditions for < MIPSr6. void Assembler::c(FPUCondition cond, SecondaryField fmt, FPURegister fs, FPURegister ft, uint16_t cc) { + ASSERT(kArchVariant != kMips64r6); ASSERT(is_uint3(cc)); ASSERT((fmt & ~(31 << kRsShift)) == 0); - Instr instr = COP1 | fmt | ft.code() << 16 | fs.code() << kFsShift + Instr instr = COP1 | fmt | ft.code() << kFtShift | fs.code() << kFsShift | cc << 8 | 3 << 4 | cond; emit(instr); } diff --git a/src/mips64/assembler-mips64.h b/src/mips64/assembler-mips64.h index 395ab77d8a..2345bf5afd 100644 --- a/src/mips64/assembler-mips64.h +++ b/src/mips64/assembler-mips64.h @@ -457,11 +457,20 @@ class Assembler : public AssemblerBase { // position. Links the label to the current position if it is still unbound. // Manages the jump elimination optimization if the second parameter is true. 
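+ // The _compact variants measure the offset from the branch itself, since compact (r6) branches have no delay slot; the plain variants measure from the delay-slot position. The 21-bit variants serve beqzc/bnezc.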
int32_t branch_offset(Label* L, bool jump_elimination_allowed); + int32_t branch_offset_compact(Label* L, bool jump_elimination_allowed); + int32_t branch_offset21(Label* L, bool jump_elimination_allowed); + int32_t branch_offset21_compact(Label* L, bool jump_elimination_allowed); int32_t shifted_branch_offset(Label* L, bool jump_elimination_allowed) { int32_t o = branch_offset(L, jump_elimination_allowed); ASSERT((o & 3) == 0); // Assert the offset is aligned. return o >> 2; } + int32_t shifted_branch_offset_compact(Label* L, + bool jump_elimination_allowed) { + int32_t o = branch_offset_compact(L, jump_elimination_allowed); + ASSERT((o & 3) == 0); // Assert the offset is aligned. + return o >> 2; + } uint64_t jump_address(Label* L); // Puts a labels target address at the given position. @@ -617,15 +626,100 @@ class Assembler : public AssemblerBase { beq(rs, rt, branch_offset(L, false) >> 2); } void bgez(Register rs, int16_t offset); + void bgezc(Register rt, int16_t offset); + void bgezc(Register rt, Label* L) { + bgezc(rt, branch_offset_compact(L, false)>>2); + } + void bgeuc(Register rs, Register rt, int16_t offset); + void bgeuc(Register rs, Register rt, Label* L) { + bgeuc(rs, rt, branch_offset_compact(L, false)>>2); + } + void bgec(Register rs, Register rt, int16_t offset); + void bgec(Register rs, Register rt, Label* L) { + bgec(rs, rt, branch_offset_compact(L, false)>>2); + } void bgezal(Register rs, int16_t offset); + void bgezalc(Register rt, int16_t offset); + void bgezalc(Register rt, Label* L) { + bgezalc(rt, branch_offset_compact(L, false)>>2); + } + void bgezall(Register rs, int16_t offset); + void bgezall(Register rs, Label* L) { + bgezall(rs, branch_offset(L, false)>>2); + } void bgtz(Register rs, int16_t offset); + void bgtzc(Register rt, int16_t offset); + void bgtzc(Register rt, Label* L) { + bgtzc(rt, branch_offset_compact(L, false)>>2); + } void blez(Register rs, int16_t offset); + void blezc(Register rt, int16_t offset); + void blezc(Register rt, Label* L) { + blezc(rt, branch_offset_compact(L, false)>>2); + } void bltz(Register rs, int16_t offset); + void bltzc(Register rt, int16_t offset); + void bltzc(Register rt, Label* L) { + bltzc(rt, branch_offset_compact(L, false)>>2); + } + void bltuc(Register rs, Register rt, int16_t offset); + void bltuc(Register rs, Register rt, Label* L) { + bltuc(rs, rt, branch_offset_compact(L, false)>>2); + } + void bltc(Register rs, Register rt, int16_t offset); + void bltc(Register rs, Register rt, Label* L) { + bltc(rs, rt, branch_offset_compact(L, false)>>2); + } + void bltzal(Register rs, int16_t offset); + void blezalc(Register rt, int16_t offset); + void blezalc(Register rt, Label* L) { + blezalc(rt, branch_offset_compact(L, false)>>2); + } + void bltzalc(Register rt, int16_t offset); + void bltzalc(Register rt, Label* L) { + bltzalc(rt, branch_offset_compact(L, false)>>2); + } + void bgtzalc(Register rt, int16_t offset); + void bgtzalc(Register rt, Label* L) { + bgtzalc(rt, branch_offset_compact(L, false)>>2); + } + void beqzalc(Register rt, int16_t offset); + void beqzalc(Register rt, Label* L) { + beqzalc(rt, branch_offset_compact(L, false)>>2); + } + void beqc(Register rs, Register rt, int16_t offset); + void beqc(Register rs, Register rt, Label* L) { + beqc(rs, rt, branch_offset_compact(L, false)>>2); + } + void beqzc(Register rs, int32_t offset); + void beqzc(Register rs, Label* L) { + beqzc(rs, branch_offset21_compact(L, false)>>2); + } + void bnezalc(Register rt, int16_t offset); + void bnezalc(Register rt, Label* L) { 
+ bnezalc(rt, branch_offset_compact(L, false)>>2); + } + void bnec(Register rs, Register rt, int16_t offset); + void bnec(Register rs, Register rt, Label* L) { + bnec(rs, rt, branch_offset_compact(L, false)>>2); + } + void bnezc(Register rt, int32_t offset); + void bnezc(Register rt, Label* L) { + bnezc(rt, branch_offset21_compact(L, false)>>2); + } void bne(Register rs, Register rt, int16_t offset); void bne(Register rs, Register rt, Label* L) { bne(rs, rt, branch_offset(L, false)>>2); } + void bovc(Register rs, Register rt, int16_t offset); + void bovc(Register rs, Register rt, Label* L) { + bovc(rs, rt, branch_offset_compact(L, false)>>2); + } + void bnvc(Register rs, Register rt, int16_t offset); + void bnvc(Register rs, Register rt, Label* L) { + bnvc(rs, rt, branch_offset_compact(L, false)>>2); + } // Never use the int16_t b(l)cond version with a branch offset // instead of using the Label* version. @@ -644,17 +738,34 @@ class Assembler : public AssemblerBase { // Arithmetic. void addu(Register rd, Register rs, Register rt); void subu(Register rd, Register rs, Register rt); - void mult(Register rs, Register rt); - void multu(Register rs, Register rt); + void div(Register rs, Register rt); void divu(Register rs, Register rt); + void ddiv(Register rs, Register rt); + void ddivu(Register rs, Register rt); + void div(Register rd, Register rs, Register rt); + void divu(Register rd, Register rs, Register rt); + void ddiv(Register rd, Register rs, Register rt); + void ddivu(Register rd, Register rs, Register rt); + void mod(Register rd, Register rs, Register rt); + void modu(Register rd, Register rs, Register rt); + void dmod(Register rd, Register rs, Register rt); + void dmodu(Register rd, Register rs, Register rt); + void mul(Register rd, Register rs, Register rt); + void muh(Register rd, Register rs, Register rt); + void mulu(Register rd, Register rs, Register rt); + void muhu(Register rd, Register rs, Register rt); + void mult(Register rs, Register rt); + void multu(Register rs, Register rt); + void dmul(Register rd, Register rs, Register rt); + void dmuh(Register rd, Register rs, Register rt); + void dmulu(Register rd, Register rs, Register rt); + void dmuhu(Register rd, Register rs, Register rt); void daddu(Register rd, Register rs, Register rt); void dsubu(Register rd, Register rs, Register rt); void dmult(Register rs, Register rt); void dmultu(Register rs, Register rt); - void ddiv(Register rs, Register rt); - void ddivu(Register rs, Register rt); void addiu(Register rd, Register rs, int32_t j); void daddiu(Register rd, Register rs, int32_t j); @@ -669,6 +780,10 @@ class Assembler : public AssemblerBase { void ori(Register rd, Register rs, int32_t j); void xori(Register rd, Register rs, int32_t j); void lui(Register rd, int32_t j); + void aui(Register rs, Register rt, int32_t j); + void daui(Register rs, Register rt, int32_t j); + void dahi(Register rs, int32_t j); + void dati(Register rs, int32_t j); // Shifts. 
// Please note: sll(zero_reg, zero_reg, x) instructions are reserved as nop @@ -751,6 +866,15 @@ class Assembler : public AssemblerBase { void movt(Register rd, Register rs, uint16_t cc = 0); void movf(Register rd, Register rs, uint16_t cc = 0); + void sel(SecondaryField fmt, FPURegister fd, FPURegister ft, + FPURegister fs, uint8_t sel); + void seleqz(Register rs, Register rt, Register rd); + void seleqz(SecondaryField fmt, FPURegister fd, FPURegister ft, + FPURegister fs); + void selnez(Register rs, Register rt, Register rd); + void selnez(SecondaryField fmt, FPURegister fd, FPURegister ft, + FPURegister fs); + // Bit twiddling. void clz(Register rd, Register rs); void ins_(Register rt, Register rs, uint16_t pos, uint16_t size); @@ -810,6 +934,11 @@ class Assembler : public AssemblerBase { void ceil_l_s(FPURegister fd, FPURegister fs); void ceil_l_d(FPURegister fd, FPURegister fs); + void min(SecondaryField fmt, FPURegister fd, FPURegister ft, FPURegister fs); + void mina(SecondaryField fmt, FPURegister fd, FPURegister ft, FPURegister fs); + void max(SecondaryField fmt, FPURegister fd, FPURegister ft, FPURegister fs); + void maxa(SecondaryField fmt, FPURegister fd, FPURegister ft, FPURegister fs); + void cvt_s_w(FPURegister fd, FPURegister fs); void cvt_s_l(FPURegister fd, FPURegister fs); void cvt_s_d(FPURegister fd, FPURegister fs); @@ -818,14 +947,31 @@ class Assembler : public AssemblerBase { void cvt_d_l(FPURegister fd, FPURegister fs); void cvt_d_s(FPURegister fd, FPURegister fs); - // Conditions and branches. + // Conditions and branches for MIPSr6. + void cmp(FPUCondition cond, SecondaryField fmt, + FPURegister fd, FPURegister ft, FPURegister fs); + + void bc1eqz(int16_t offset, FPURegister ft); + void bc1eqz(Label* L, FPURegister ft) { + bc1eqz(branch_offset(L, false)>>2, ft); + } + void bc1nez(int16_t offset, FPURegister ft); + void bc1nez(Label* L, FPURegister ft) { + bc1nez(branch_offset(L, false)>>2, ft); + } + + // Conditions and branches for non-MIPSr6. void c(FPUCondition cond, SecondaryField fmt, FPURegister ft, FPURegister fs, uint16_t cc = 0); void bc1f(int16_t offset, uint16_t cc = 0); - void bc1f(Label* L, uint16_t cc = 0) { bc1f(branch_offset(L, false)>>2, cc); } + void bc1f(Label* L, uint16_t cc = 0) { + bc1f(branch_offset(L, false)>>2, cc); + } void bc1t(int16_t offset, uint16_t cc = 0); - void bc1t(Label* L, uint16_t cc = 0) { bc1t(branch_offset(L, false)>>2, cc); } + void bc1t(Label* L, uint16_t cc = 0) { + bc1t(branch_offset(L, false)>>2, cc); + } void fcmp(FPURegister src1, const double src2, FPUCondition cond); // Check the code size generated from label to here. diff --git a/src/mips64/code-stubs-mips64.cc b/src/mips64/code-stubs-mips64.cc index 6205e98cdd..f8b34cbd60 100644 --- a/src/mips64/code-stubs-mips64.cc +++ b/src/mips64/code-stubs-mips64.cc @@ -1016,17 +1016,28 @@ void ICCompareStub::GenerateGeneric(MacroAssembler* masm) { // Check if LESS condition is satisfied. If true, move conditionally // result to v0. - __ c(OLT, D, f12, f14); - __ Movt(v0, a4); - // Use previous check to store conditionally to v0 oposite condition - // (GREATER). If rhs is equal to lhs, this will be corrected in next - // check. - __ Movf(v0, a5); - // Check if EQUAL condition is satisfied. If true, move conditionally - // result to v0. - __ c(EQ, D, f12, f14); - __ Movt(v0, a6); + if (kArchVariant != kMips64r6) { + __ c(OLT, D, f12, f14); + __ Movt(v0, a4); + // Use previous check to store conditionally to v0 opposite condition + // (GREATER).
If rhs is equal to lhs, this will be corrected in next + // check. + __ Movf(v0, a5); + // Check if EQUAL condition is satisfied. If true, move conditionally + // result to v0. + __ c(EQ, D, f12, f14); + __ Movt(v0, a6); + } else { + Label skip; + __ BranchF(USE_DELAY_SLOT, &skip, NULL, lt, f12, f14); + __ mov(v0, a4); // Return LESS as result. + __ BranchF(USE_DELAY_SLOT, &skip, NULL, eq, f12, f14); + __ mov(v0, a6); // Return EQUAL as result. + + __ mov(v0, a5); // Return GREATER as result. + __ bind(&skip); + } __ Ret(); __ bind(&nan); diff --git a/src/mips64/constants-mips64.cc b/src/mips64/constants-mips64.cc index 04f4bbc52f..dfd62430c2 100644 --- a/src/mips64/constants-mips64.cc +++ b/src/mips64/constants-mips64.cc @@ -295,6 +295,8 @@ Instruction::Type Instruction::InstructionType() const { case COP1: // Coprocessor instructions. switch (RsFieldRawNoAssert()) { case BC1: // Branch on coprocessor condition. + case BC1EQZ: + case BC1NEZ: return kImmediateType; default: return kRegisterType; @@ -322,6 +324,8 @@ Instruction::Type Instruction::InstructionType() const { case BNEL: case BLEZL: case BGTZL: + case BEQZC: + case BNEZC: case LB: case LH: case LWL: diff --git a/src/mips64/constants-mips64.h b/src/mips64/constants-mips64.h index 38e5aa3dd1..d3dd31da58 100644 --- a/src/mips64/constants-mips64.h +++ b/src/mips64/constants-mips64.h @@ -17,21 +17,17 @@ #define UNSUPPORTED_MIPS() v8::internal::PrintF("Unsupported instruction.\n") enum ArchVariants { - kMips32r2, - kMips32r1, - kLoongson, - kMips64r2 + kMips64r2, + kMips64r6 }; #ifdef _MIPS_ARCH_MIPS64R2 static const ArchVariants kArchVariant = kMips64r2; -#elif _MIPS_ARCH_LOONGSON -// The loongson flag refers to the LOONGSON architectures based on MIPS-III, -// which predates (and is a subset of) the mips32r2 and r1 architectures. - static const ArchVariants kArchVariant = kLoongson; +#elif _MIPS_ARCH_MIPS64R6 + static const ArchVariants kArchVariant = kMips64r6; #else - static const ArchVariants kArchVariant = kMips64r1; + static const ArchVariants kArchVariant = kMips64r2; #endif @@ -228,6 +224,8 @@ const int kLuiShift = 16; const int kImm16Shift = 0; const int kImm16Bits = 16; +const int kImm21Shift = 0; +const int kImm21Bits = 21; const int kImm26Shift = 0; const int kImm26Bits = 26; const int kImm28Shift = 0; @@ -295,15 +293,17 @@ enum Opcode { ANDI = ((1 << 3) + 4) << kOpcodeShift, ORI = ((1 << 3) + 5) << kOpcodeShift, XORI = ((1 << 3) + 6) << kOpcodeShift, - LUI = ((1 << 3) + 7) << kOpcodeShift, + LUI = ((1 << 3) + 7) << kOpcodeShift, // LUI/AUI family. + DAUI = ((3 << 3) + 5) << kOpcodeShift, + BEQC = ((2 << 3) + 0) << kOpcodeShift, COP1 = ((2 << 3) + 1) << kOpcodeShift, // Coprocessor 1 class. BEQL = ((2 << 3) + 4) << kOpcodeShift, BNEL = ((2 << 3) + 5) << kOpcodeShift, BLEZL = ((2 << 3) + 6) << kOpcodeShift, BGTZL = ((2 << 3) + 7) << kOpcodeShift, - DADDI = ((3 << 3) + 0) << kOpcodeShift, + DADDI = ((3 << 3) + 0) << kOpcodeShift, // This is also BNEC. 
DADDIU = ((3 << 3) + 1) << kOpcodeShift, LDL = ((3 << 3) + 2) << kOpcodeShift, LDR = ((3 << 3) + 3) << kOpcodeShift, @@ -330,6 +330,7 @@ enum Opcode { LWC1 = ((6 << 3) + 1) << kOpcodeShift, LLD = ((6 << 3) + 4) << kOpcodeShift, LDC1 = ((6 << 3) + 5) << kOpcodeShift, + BEQZC = ((6 << 3) + 6) << kOpcodeShift, LD = ((6 << 3) + 7) << kOpcodeShift, PREF = ((6 << 3) + 3) << kOpcodeShift, @@ -337,6 +338,7 @@ enum Opcode { SWC1 = ((7 << 3) + 1) << kOpcodeShift, SCD = ((7 << 3) + 4) << kOpcodeShift, SDC1 = ((7 << 3) + 5) << kOpcodeShift, + BNEZC = ((7 << 3) + 6) << kOpcodeShift, SD = ((7 << 3) + 7) << kOpcodeShift, COP1X = ((1 << 4) + 3) << kOpcodeShift @@ -359,6 +361,8 @@ enum SecondaryField { BREAK = ((1 << 3) + 5), MFHI = ((2 << 3) + 0), + CLZ_R6 = ((2 << 3) + 0), + CLO_R6 = ((2 << 3) + 1), MFLO = ((2 << 3) + 2), DSLLV = ((2 << 3) + 4), DSRLV = ((2 << 3) + 6), @@ -394,7 +398,9 @@ enum SecondaryField { TLT = ((6 << 3) + 2), TLTU = ((6 << 3) + 3), TEQ = ((6 << 3) + 4), + SELEQZ_S = ((6 << 3) + 5), TNE = ((6 << 3) + 6), + SELNEZ_S = ((6 << 3) + 7), DSLL = ((7 << 3) + 0), DSRL = ((7 << 3) + 2), @@ -402,6 +408,23 @@ enum SecondaryField { DSLL32 = ((7 << 3) + 4), DSRL32 = ((7 << 3) + 6), DSRA32 = ((7 << 3) + 7), + + // Multiply integers in r6. + MUL_MUH = ((3 << 3) + 0), // MUL, MUH. + MUL_MUH_U = ((3 << 3) + 1), // MUL_U, MUH_U. + D_MUL_MUH = ((7 << 2) + 0), // DMUL, DMUH. + D_MUL_MUH_U = ((7 << 2) + 1), // DMUL_U, DMUH_U. + + MUL_OP = ((0 << 3) + 2), + MUH_OP = ((0 << 3) + 3), + DIV_OP = ((0 << 3) + 2), + MOD_OP = ((0 << 3) + 3), + + DIV_MOD = ((3 << 3) + 2), + DIV_MOD_U = ((3 << 3) + 3), + D_DIV_MOD = ((3 << 3) + 6), + D_DIV_MOD_U = ((3 << 3) + 7), + // drotr in special4? // SPECIAL2 Encoding of Function Field. @@ -426,6 +449,9 @@ enum SecondaryField { BGEZ = ((0 << 3) + 1) << 16, BLTZAL = ((2 << 3) + 0) << 16, BGEZAL = ((2 << 3) + 1) << 16, + BGEZALL = ((2 << 3) + 3) << 16, + DAHI = ((0 << 3) + 6) << 16, + DATI = ((3 << 3) + 6) << 16, // COP1 Encoding of rs Field. MFC1 = ((0 << 3) + 0) << 21, @@ -472,6 +498,10 @@ enum SecondaryField { TRUNC_W_D = ((1 << 3) + 5), CEIL_W_D = ((1 << 3) + 6), FLOOR_W_D = ((1 << 3) + 7), + MIN = ((3 << 3) + 4), + MINA = ((3 << 3) + 5), + MAX = ((3 << 3) + 6), + MAXA = ((3 << 3) + 7), CVT_S_D = ((4 << 3) + 0), CVT_W_D = ((4 << 3) + 4), CVT_L_D = ((4 << 3) + 5), @@ -488,6 +518,47 @@ enum SecondaryField { CVT_D_W = ((4 << 3) + 1), CVT_S_L = ((4 << 3) + 0), CVT_D_L = ((4 << 3) + 1), + BC1EQZ = ((2 << 2) + 1) << 21, + BC1NEZ = ((3 << 2) + 1) << 21, + // COP1 CMP positive predicates Bit 5..4 = 00. + CMP_AF = ((0 << 3) + 0), + CMP_UN = ((0 << 3) + 1), + CMP_EQ = ((0 << 3) + 2), + CMP_UEQ = ((0 << 3) + 3), + CMP_LT = ((0 << 3) + 4), + CMP_ULT = ((0 << 3) + 5), + CMP_LE = ((0 << 3) + 6), + CMP_ULE = ((0 << 3) + 7), + CMP_SAF = ((1 << 3) + 0), + CMP_SUN = ((1 << 3) + 1), + CMP_SEQ = ((1 << 3) + 2), + CMP_SUEQ = ((1 << 3) + 3), + CMP_SSLT = ((1 << 3) + 4), + CMP_SSULT = ((1 << 3) + 5), + CMP_SLE = ((1 << 3) + 6), + CMP_SULE = ((1 << 3) + 7), + // COP1 CMP negative predicates Bit 5..4 = 01. + CMP_AT = ((2 << 3) + 0), // Reserved, not implemented. + CMP_OR = ((2 << 3) + 1), + CMP_UNE = ((2 << 3) + 2), + CMP_NE = ((2 << 3) + 3), + CMP_UGE = ((2 << 3) + 4), // Reserved, not implemented. + CMP_OGE = ((2 << 3) + 5), // Reserved, not implemented. + CMP_UGT = ((2 << 3) + 6), // Reserved, not implemented. + CMP_OGT = ((2 << 3) + 7), // Reserved, not implemented. + CMP_SAT = ((3 << 3) + 0), // Reserved, not implemented. 
+ CMP_SOR = ((3 << 3) + 1), + CMP_SUNE = ((3 << 3) + 2), + CMP_SNE = ((3 << 3) + 3), + CMP_SUGE = ((3 << 3) + 4), // Reserved, not implemented. + CMP_SOGE = ((3 << 3) + 5), // Reserved, not implemented. + CMP_SUGT = ((3 << 3) + 6), // Reserved, not implemented. + CMP_SOGT = ((3 << 3) + 7), // Reserved, not implemented. + + SEL = ((2 << 3) + 0), + SELEQZ_C = ((2 << 3) + 4), // COP1 on FPR registers. + SELNEZ_C = ((2 << 3) + 7), // COP1 on FPR registers. + // COP1 Encoding of Function Field When rs=PS. // COP1X Encoding of Function Field. MADD_D = ((4 << 3) + 1), @@ -497,9 +568,9 @@ enum SecondaryField { // ----- Emulated conditions. -// On MIPS we use this enum to abstract from conditionnal branch instructions. +// On MIPS we use this enum to abstract from conditional branch instructions. // The 'U' prefix is used to specify unsigned comparisons. -// Oppposite conditions must be paired as odd/even numbers +// Opposite conditions must be paired as odd/even numbers // because 'NegateCondition' function flips LSB to negate condition. enum Condition { // Any value < 0 is considered no_condition. @@ -833,6 +904,11 @@ class Instruction { return Bits(kImm16Shift + kImm16Bits - 1, kImm16Shift); } + inline int32_t Imm21Value() const { + ASSERT(InstructionType() == kImmediateType); + return Bits(kImm21Shift + kImm21Bits - 1, kImm21Shift); + } + inline int32_t Imm26Value() const { ASSERT(InstructionType() == kJumpType); return Bits(kImm26Shift + kImm26Bits - 1, kImm26Shift); diff --git a/src/mips64/disasm-mips64.cc b/src/mips64/disasm-mips64.cc index 9b9cd348f7..f2aaa58bef 100644 --- a/src/mips64/disasm-mips64.cc +++ b/src/mips64/disasm-mips64.cc @@ -86,6 +86,7 @@ class Decoder { void PrintUImm16(Instruction* instr); void PrintSImm16(Instruction* instr); void PrintXImm16(Instruction* instr); + void PrintXImm21(Instruction* instr); void PrintXImm26(Instruction* instr); void PrintCode(Instruction* instr); // For break and trap instructions. // Printing of instruction name. @@ -247,6 +248,13 @@ void Decoder::PrintXImm16(Instruction* instr) { } +// Print 21-bit immediate value. +void Decoder::PrintXImm21(Instruction* instr) { + uint32_t imm = instr->Imm21Value(); + out_buffer_pos_ += SNPrintF(out_buffer_ + out_buffer_pos_, "0x%x", imm); +} + + // Print 26-bit immediate value. void Decoder::PrintXImm26(Instruction* instr) { uint32_t imm = instr->Imm26Value() << kImmFieldShift; @@ -361,7 +369,11 @@ int Decoder::FormatOption(Instruction* instr, const char* format) { PrintXImm16(instr); } return 6; - } else { + } else if (format[3] == '2' && format[4] == '1') { + ASSERT(STRING_STARTS_WITH(format, "imm21x")); + PrintXImm21(instr); + return 6; + } else if (format[3] == '2' && format[4] == '6') { ASSERT(STRING_STARTS_WITH(format, "imm26x")); PrintXImm26(instr); return 6; @@ -466,9 +478,6 @@ int Decoder::DecodeTypeRegister(Instruction* instr) { switch (instr->OpcodeFieldRaw()) { case COP1: // Coprocessor instructions. switch (instr->RsFieldRaw()) { - case BC1: // bc1 handled in DecodeTypeImmediate. - UNREACHABLE(); - break; case MFC1: Format(instr, "mfc1 'rt, 'fs"); break; @@ -582,14 +591,8 @@ int Decoder::DecodeTypeRegister(Instruction* instr) { break; } break; - case S: - UNIMPLEMENTED_MIPS(); - break; case W: switch (instr->FunctionFieldRaw()) { - case CVT_S_W: // Convert word to float (single). - Format(instr, "cvt.s.w 'fd, 'fs"); - break; case CVT_D_W: // Convert word to double. 
Format(instr, "cvt.d.w 'fd, 'fs"); break; @@ -605,13 +608,40 @@ int Decoder::DecodeTypeRegister(Instruction* instr) { case CVT_S_L: Format(instr, "cvt.s.l 'fd, 'fs"); break; + case CMP_UN: + Format(instr, "cmp.un.d 'fd, 'fs, 'ft"); + break; + case CMP_EQ: + Format(instr, "cmp.eq.d 'fd, 'fs, 'ft"); + break; + case CMP_UEQ: + Format(instr, "cmp.ueq.d 'fd, 'fs, 'ft"); + break; + case CMP_LT: + Format(instr, "cmp.lt.d 'fd, 'fs, 'ft"); + break; + case CMP_ULT: + Format(instr, "cmp.ult.d 'fd, 'fs, 'ft"); + break; + case CMP_LE: + Format(instr, "cmp.le.d 'fd, 'fs, 'ft"); + break; + case CMP_ULE: + Format(instr, "cmp.ule.d 'fd, 'fs, 'ft"); + break; + case CMP_OR: + Format(instr, "cmp.or.d 'fd, 'fs, 'ft"); + break; + case CMP_UNE: + Format(instr, "cmp.une.d 'fd, 'fs, 'ft"); + break; + case CMP_NE: + Format(instr, "cmp.ne.d 'fd, 'fs, 'ft"); + break; default: UNREACHABLE(); } break; - case PS: - UNIMPLEMENTED_MIPS(); - break; default: UNREACHABLE(); } @@ -634,13 +664,24 @@ int Decoder::DecodeTypeRegister(Instruction* instr) { Format(instr, "jalr 'rs"); break; case SLL: - if ( 0x0 == static_cast(instr->InstructionBits())) + if (0x0 == static_cast(instr->InstructionBits())) Format(instr, "nop"); else Format(instr, "sll 'rd, 'rt, 'sa"); break; case DSLL: - Format(instr, "dsll 'rd, 'rt, 'sa"); + Format(instr, "dsll 'rd, 'rt, 'sa"); + break; + case D_MUL_MUH: // Equals to DMUL. + if (kArchVariant != kMips64r6) { + Format(instr, "dmult 'rs, 'rt"); + } else { + if (instr->SaValue() == MUL_OP) { + Format(instr, "dmul 'rd, 'rs, 'rt"); + } else { + Format(instr, "dmuh 'rd, 'rs, 'rt"); + } + } break; case DSLL32: Format(instr, "dsll32 'rd, 'rt, 'sa"); @@ -714,34 +755,98 @@ int Decoder::DecodeTypeRegister(Instruction* instr) { Format(instr, "dsrav 'rd, 'rt, 'rs"); break; case MFHI: - Format(instr, "mfhi 'rd"); + if (instr->Bits(25, 16) == 0) { + Format(instr, "mfhi 'rd"); + } else { + if ((instr->FunctionFieldRaw() == CLZ_R6) + && (instr->FdValue() == 1)) { + Format(instr, "clz 'rd, 'rs"); + } else if ((instr->FunctionFieldRaw() == CLO_R6) + && (instr->FdValue() == 1)) { + Format(instr, "clo 'rd, 'rs"); + } + } break; case MFLO: Format(instr, "mflo 'rd"); break; - case MULT: - Format(instr, "mult 'rs, 'rt"); + case D_MUL_MUH_U: // Equals to DMULTU. + if (kArchVariant != kMips64r6) { + Format(instr, "dmultu 'rs, 'rt"); + } else { + if (instr->SaValue() == MUL_OP) { + Format(instr, "dmulu 'rd, 'rs, 'rt"); + } else { + Format(instr, "dmuhu 'rd, 'rs, 'rt"); + } + } break; - case DMULT: - Format(instr, "dmult 'rs, 'rt"); + case MULT: // @Mips64r6 == MUL_MUH. + if (kArchVariant != kMips64r6) { + Format(instr, "mult 'rs, 'rt"); + } else { + if (instr->SaValue() == MUL_OP) { + Format(instr, "mul 'rd, 'rs, 'rt"); + } else { + Format(instr, "muh 'rd, 'rs, 'rt"); + } + } break; - case MULTU: - Format(instr, "multu 'rs, 'rt"); + case MULTU: // @Mips64r6 == MUL_MUH_U. + if (kArchVariant != kMips64r6) { + Format(instr, "multu 'rs, 'rt"); + } else { + if (instr->SaValue() == MUL_OP) { + Format(instr, "mulu 'rd, 'rs, 'rt"); + } else { + Format(instr, "muhu 'rd, 'rs, 'rt"); + } + } + break; - case DMULTU: - Format(instr, "dmultu 'rs, 'rt"); + case DIV: // @Mips64r6 == DIV_MOD. + if (kArchVariant != kMips64r6) { + Format(instr, "div 'rs, 'rt"); + } else { + if (instr->SaValue() == DIV_OP) { + Format(instr, "div 'rd, 'rs, 'rt"); + } else { + Format(instr, "mod 'rd, 'rs, 'rt"); + } + } break; - case DIV: - Format(instr, "div 'rs, 'rt"); + case DDIV: // @Mips64r6 == D_DIV_MOD. 
+ if (kArchVariant != kMips64r6) { + Format(instr, "ddiv 'rs, 'rt"); + } else { + if (instr->SaValue() == DIV_OP) { + Format(instr, "ddiv 'rd, 'rs, 'rt"); + } else { + Format(instr, "dmod 'rd, 'rs, 'rt"); + } + } break; - case DDIV: - Format(instr, "ddiv 'rs, 'rt"); + case DIVU: // @Mips64r6 == DIV_MOD_U. + if (kArchVariant != kMips64r6) { + Format(instr, "divu 'rs, 'rt"); + } else { + if (instr->SaValue() == DIV_OP) { + Format(instr, "divu 'rd, 'rs, 'rt"); + } else { + Format(instr, "modu 'rd, 'rs, 'rt"); + } + } break; - case DIVU: - Format(instr, "divu 'rs, 'rt"); - break; - case DDIVU: - Format(instr, "ddivu 'rs, 'rt"); + case DDIVU: // @Mips64r6 == D_DIV_MOD_U. + if (kArchVariant != kMips64r6) { + Format(instr, "ddivu 'rs, 'rt"); + } else { + if (instr->SaValue() == DIV_OP) { + Format(instr, "ddivu 'rd, 'rs, 'rt"); + } else { + Format(instr, "dmodu 'rd, 'rs, 'rt"); + } + } break; case ADD: Format(instr, "add 'rd, 'rs, 'rt"); @@ -824,6 +929,12 @@ int Decoder::DecodeTypeRegister(Instruction* instr) { Format(instr, "movf 'rd, 'rs, 'bc"); } break; + case SELEQZ_S: + Format(instr, "seleqz 'rd, 'rs, 'rt"); + break; + case SELNEZ_S: + Format(instr, "selnez 'rd, 'rs, 'rt"); + break; default: UNREACHABLE(); } @@ -834,7 +945,9 @@ int Decoder::DecodeTypeRegister(Instruction* instr) { Format(instr, "mul 'rd, 'rs, 'rt"); break; case CLZ: - Format(instr, "clz 'rd, 'rs"); + if (kArchVariant != kMips64r6) { + Format(instr, "clz 'rd, 'rs"); + } break; default: UNREACHABLE(); @@ -843,19 +956,11 @@ int Decoder::DecodeTypeRegister(Instruction* instr) { case SPECIAL3: switch (instr->FunctionFieldRaw()) { case INS: { - if (kArchVariant == kMips64r2) { - Format(instr, "ins 'rt, 'rs, 'sa, 'ss2"); - } else { - Unknown(instr); - } + Format(instr, "ins 'rt, 'rs, 'sa, 'ss2"); break; } case EXT: { - if (kArchVariant == kMips64r2) { - Format(instr, "ext 'rt, 'rs, 'sa, 'ss1"); - } else { - Unknown(instr); - } + Format(instr, "ext 'rt, 'rs, 'sa, 'ss1"); break; } default: @@ -871,7 +976,6 @@ int Decoder::DecodeTypeRegister(Instruction* instr) { void Decoder::DecodeTypeImmediate(Instruction* instr) { switch (instr->OpcodeFieldRaw()) { - // ------------- REGIMM class. case COP1: switch (instr->RsFieldRaw()) { case BC1: @@ -881,10 +985,150 @@ void Decoder::DecodeTypeImmediate(Instruction* instr) { Format(instr, "bc1f 'bc, 'imm16u"); } break; + case BC1EQZ: + Format(instr, "bc1eqz 'ft, 'imm16u"); + break; + case BC1NEZ: + Format(instr, "bc1nez 'ft, 'imm16u"); + break; + case W: // CMP.S instruction. + switch (instr->FunctionValue()) { + case CMP_AF: + Format(instr, "cmp.af.S 'ft, 'fs, 'fd"); + break; + case CMP_UN: + Format(instr, "cmp.un.S 'ft, 'fs, 'fd"); + break; + case CMP_EQ: + Format(instr, "cmp.eq.S 'ft, 'fs, 'fd"); + break; + case CMP_UEQ: + Format(instr, "cmp.ueq.S 'ft, 'fs, 'fd"); + break; + case CMP_LT: + Format(instr, "cmp.lt.S 'ft, 'fs, 'fd"); + break; + case CMP_ULT: + Format(instr, "cmp.ult.S 'ft, 'fs, 'fd"); + break; + case CMP_LE: + Format(instr, "cmp.le.S 'ft, 'fs, 'fd"); + break; + case CMP_ULE: + Format(instr, "cmp.ule.S 'ft, 'fs, 'fd"); + break; + case CMP_OR: + Format(instr, "cmp.or.S 'ft, 'fs, 'fd"); + break; + case CMP_UNE: + Format(instr, "cmp.une.S 'ft, 'fs, 'fd"); + break; + case CMP_NE: + Format(instr, "cmp.ne.S 'ft, 'fs, 'fd"); + break; + default: + UNREACHABLE(); + } + break; + case L: // CMP.D instruction. 
+ switch (instr->FunctionValue()) { + case CMP_AF: + Format(instr, "cmp.af.D 'ft, 'fs, 'fd"); + break; + case CMP_UN: + Format(instr, "cmp.un.D 'ft, 'fs, 'fd"); + break; + case CMP_EQ: + Format(instr, "cmp.eq.D 'ft, 'fs, 'fd"); + break; + case CMP_UEQ: + Format(instr, "cmp.ueq.D 'ft, 'fs, 'fd"); + break; + case CMP_LT: + Format(instr, "cmp.lt.D 'ft, 'fs, 'fd"); + break; + case CMP_ULT: + Format(instr, "cmp.ult.D 'ft, 'fs, 'fd"); + break; + case CMP_LE: + Format(instr, "cmp.le.D 'ft, 'fs, 'fd"); + break; + case CMP_ULE: + Format(instr, "cmp.ule.D 'ft, 'fs, 'fd"); + break; + case CMP_OR: + Format(instr, "cmp.or.D 'ft, 'fs, 'fd"); + break; + case CMP_UNE: + Format(instr, "cmp.une.D 'ft, 'fs, 'fd"); + break; + case CMP_NE: + Format(instr, "cmp.ne.D 'ft, 'fs, 'fd"); + break; + default: + UNREACHABLE(); + } + break; + case S: + switch (instr->FunctionValue()) { + case SEL: + Format(instr, "sel.S 'ft, 'fs, 'fd"); + break; + case SELEQZ_C: + Format(instr, "seleqz.S 'ft, 'fs, 'fd"); + break; + case SELNEZ_C: + Format(instr, "selnez.S 'ft, 'fs, 'fd"); + break; + case MIN: + Format(instr, "min.S 'ft, 'fs, 'fd"); + break; + case MINA: + Format(instr, "mina.S 'ft, 'fs, 'fd"); + break; + case MAX: + Format(instr, "max.S 'ft, 'fs, 'fd"); + break; + case MAXA: + Format(instr, "maxa.S 'ft, 'fs, 'fd"); + break; + default: + UNREACHABLE(); + } + break; + case D: + switch (instr->FunctionValue()) { + case SEL: + Format(instr, "sel.D 'ft, 'fs, 'fd"); + break; + case SELEQZ_C: + Format(instr, "seleqz.D 'ft, 'fs, 'fd"); + break; + case SELNEZ_C: + Format(instr, "selnez.D 'ft, 'fs, 'fd"); + break; + case MIN: + Format(instr, "min.D 'ft, 'fs, 'fd"); + break; + case MINA: + Format(instr, "mina.D 'ft, 'fs, 'fd"); + break; + case MAX: + Format(instr, "max.D 'ft, 'fs, 'fd"); + break; + case MAXA: + Format(instr, "maxa.D 'ft, 'fs, 'fd"); + break; + default: + UNREACHABLE(); + } + break; default: UNREACHABLE(); } + break; // Case COP1. + // ------------- REGIMM class. 
case REGIMM: switch (instr->RtFieldRaw()) { case BLTZ: @@ -899,6 +1143,15 @@ void Decoder::DecodeTypeImmediate(Instruction* instr) { case BGEZAL: Format(instr, "bgezal 'rs, 'imm16u"); break; + case BGEZALL: + Format(instr, "bgezall 'rs, 'imm16u"); + break; + case DAHI: + Format(instr, "dahi 'rs, 'imm16u"); + break; + case DATI: + Format(instr, "dati 'rs, 'imm16u"); + break; default: UNREACHABLE(); } @@ -911,17 +1164,105 @@ void Decoder::DecodeTypeImmediate(Instruction* instr) { Format(instr, "bne 'rs, 'rt, 'imm16u"); break; case BLEZ: - Format(instr, "blez 'rs, 'imm16u"); + if ((instr->RtFieldRaw() == 0) + && (instr->RsFieldRaw() != 0)) { + Format(instr, "blez 'rs, 'imm16u"); + } else if ((instr->RtFieldRaw() != instr->RsFieldRaw()) + && (instr->RsFieldRaw() != 0) && (instr->RtFieldRaw() != 0)) { + Format(instr, "bgeuc 'rs, 'rt, 'imm16u"); + } else if ((instr->RtFieldRaw() == instr->RsFieldRaw()) + && (instr->RtFieldRaw() != 0)) { + Format(instr, "bgezalc 'rs, 'imm16u"); + } else if ((instr->RsFieldRaw() == 0) + && (instr->RtFieldRaw() != 0)) { + Format(instr, "blezalc 'rs, 'imm16u"); + } else { + UNREACHABLE(); + } break; case BGTZ: - Format(instr, "bgtz 'rs, 'imm16u"); + if ((instr->RtFieldRaw() == 0) + && (instr->RsFieldRaw() != 0)) { + Format(instr, "bgtz 'rs, 'imm16u"); + } else if ((instr->RtFieldRaw() != instr->RsFieldRaw()) + && (instr->RsFieldRaw() != 0) && (instr->RtFieldRaw() != 0)) { + Format(instr, "bltuc 'rs, 'rt, 'imm16u"); + } else if ((instr->RtFieldRaw() == instr->RsFieldRaw()) + && (instr->RtFieldRaw() != 0)) { + Format(instr, "bltzalc 'rt, 'imm16u"); + } else if ((instr->RsFieldRaw() == 0) + && (instr->RtFieldRaw() != 0)) { + Format(instr, "bgtzalc 'rt, 'imm16u"); + } else { + UNREACHABLE(); + } + break; + case BLEZL: + if ((instr->RtFieldRaw() == instr->RsFieldRaw()) + && (instr->RtFieldRaw() != 0)) { + Format(instr, "bgezc 'rt, 'imm16u"); + } else if ((instr->RtFieldRaw() != instr->RsFieldRaw()) + && (instr->RsFieldRaw() != 0) && (instr->RtFieldRaw() != 0)) { + Format(instr, "bgec 'rs, 'rt, 'imm16u"); + } else if ((instr->RsFieldRaw() == 0) + && (instr->RtFieldRaw() != 0)) { + Format(instr, "blezc 'rt, 'imm16u"); + } else { + UNREACHABLE(); + } + break; + case BGTZL: + if ((instr->RtFieldRaw() == instr->RsFieldRaw()) + && (instr->RtFieldRaw() != 0)) { + Format(instr, "bltzc 'rt, 'imm16u"); + } else if ((instr->RtFieldRaw() != instr->RsFieldRaw()) + && (instr->RsFieldRaw() != 0) && (instr->RtFieldRaw() != 0)) { + Format(instr, "bltc 'rs, 'rt, 'imm16u"); + } else if ((instr->RsFieldRaw() == 0) + && (instr->RtFieldRaw() != 0)) { + Format(instr, "bgtzc 'rt, 'imm16u"); + } else { + UNREACHABLE(); + } + break; + case BEQZC: + if (instr->RsFieldRaw() != 0) { + Format(instr, "beqzc 'rs, 'imm21x"); + } + break; + case BNEZC: + if (instr->RsFieldRaw() != 0) { + Format(instr, "bnezc 'rs, 'imm21x"); + } break; // ------------- Arithmetic instructions. case ADDI: - Format(instr, "addi 'rt, 'rs, 'imm16s"); + if (kArchVariant != kMips64r6) { + Format(instr, "addi 'rt, 'rs, 'imm16s"); + } else { + // Check if BOVC or BEQC instruction. + if (instr->RsFieldRaw() >= instr->RtFieldRaw()) { + Format(instr, "bovc 'rs, 'rt, 'imm16s"); + } else if (instr->RsFieldRaw() < instr->RtFieldRaw()) { + Format(instr, "beqc 'rs, 'rt, 'imm16s"); + } else { + UNREACHABLE(); + } + } break; case DADDI: - Format(instr, "daddi 'rt, 'rs, 'imm16s"); + if (kArchVariant != kMips64r6) { + Format(instr, "daddi 'rt, 'rs, 'imm16s"); + } else { + // Check if BNVC or BNEC instruction. 
+ if (instr->RsFieldRaw() >= instr->RtFieldRaw()) { + Format(instr, "bnvc 'rs, 'rt, 'imm16s"); + } else if (instr->RsFieldRaw() < instr->RtFieldRaw()) { + Format(instr, "bnec 'rs, 'rt, 'imm16s"); + } else { + UNREACHABLE(); + } + } break; case ADDIU: Format(instr, "addiu 'rt, 'rs, 'imm16s"); break; @@ -945,7 +1286,18 @@ void Decoder::DecodeTypeImmediate(Instruction* instr) { Format(instr, "xori 'rt, 'rs, 'imm16x"); break; case LUI: - Format(instr, "lui 'rt, 'imm16x"); + if (kArchVariant != kMips64r6) { + Format(instr, "lui 'rt, 'imm16x"); + } else { + if (instr->RsValue() != 0) { + Format(instr, "aui 'rt, 'rs, 'imm16x"); + } else { + Format(instr, "lui 'rt, 'imm16x"); + } + } + break; + case DAUI: + Format(instr, "daui 'rt, 'rs, 'imm16x"); break; // ------------- Memory instructions. case LB: diff --git a/src/mips64/full-codegen-mips64.cc b/src/mips64/full-codegen-mips64.cc index 212d917e39..efdcdfb33c 100644 --- a/src/mips64/full-codegen-mips64.cc +++ b/src/mips64/full-codegen-mips64.cc @@ -2364,14 +2364,12 @@ void FullCodeGenerator::EmitInlineSmiBinaryOp(BinaryOperation* expr, __ BranchOnOverflow(&stub_call, scratch1); break; case Token::MUL: { - __ SmiUntag(scratch1, right); - __ Dmult(left, scratch1); - __ mflo(scratch1); - __ mfhi(scratch2); - __ dsra32(scratch1, scratch1, 31); - __ Branch(&stub_call, ne, scratch1, Operand(scratch2)); - __ mflo(v0); - __ Branch(&done, ne, v0, Operand(zero_reg)); + __ Dmulh(v0, left, right); + __ dsra32(scratch2, v0, 0); + __ sra(scratch1, v0, 31); + __ Branch(USE_DELAY_SLOT, &stub_call, ne, scratch2, Operand(scratch1)); + __ SmiTag(v0); + __ Branch(USE_DELAY_SLOT, &done, ne, v0, Operand(zero_reg)); __ Daddu(scratch2, right, left); __ Branch(&stub_call, lt, scratch2, Operand(zero_reg)); ASSERT(Smi::FromInt(0) == 0); @@ -3943,12 +3941,11 @@ void FullCodeGenerator::EmitFastAsciiArrayJoin(CallRuntime* expr) { __ ld(scratch1, FieldMemOperand(separator, SeqOneByteString::kLengthOffset)); __ Dsubu(string_length, string_length, Operand(scratch1)); __ SmiUntag(scratch1); - __ Dmult(array_length, scratch1); + __ Dmul(scratch2, array_length, scratch1); // Check for smi overflow. No overflow if higher 33 bits of 64-bit result are // zero. - __ mfhi(scratch2); + __ dsra32(scratch1, scratch2, 0); - __ Branch(&bailout, ne, scratch2, Operand(zero_reg)); + __ Branch(&bailout, ne, scratch1, Operand(zero_reg)); - __ mflo(scratch2); __ SmiUntag(string_length); __ AdduAndCheckForOverflow(string_length, string_length, scratch2, scratch3); __ BranchOnOverflow(&bailout, scratch3); diff --git a/src/mips64/lithium-codegen-mips64.cc b/src/mips64/lithium-codegen-mips64.cc index 4a625610d8..d1bdacd680 100644 --- a/src/mips64/lithium-codegen-mips64.cc +++ b/src/mips64/lithium-codegen-mips64.cc @@ -1119,7 +1119,7 @@ void LCodeGen::DoModI(LModI* instr) { const Register result_reg = ToRegister(instr->result()); // div runs in the background while we check for special cases. - __ ddiv(left_reg, right_reg); + __ Dmod(result_reg, left_reg, right_reg); Label done; // Check for x % 0, we have to deopt in this case because we can't return a @@ -1144,8 +1144,7 @@ void LCodeGen::DoModI(LModI* instr) { } // If we care about -0, test if the dividend is <0 and the result is 0.
- __ Branch(USE_DELAY_SLOT, &done, ge, left_reg, Operand(zero_reg)); - __ mfhi(result_reg); + __ Branch(&done, ge, left_reg, Operand(zero_reg)); if (hmod->CheckFlag(HValue::kBailoutOnMinusZero)) { DeoptimizeIf(eq, instr->environment(), result_reg, Operand(zero_reg)); @@ -1235,7 +1234,7 @@ void LCodeGen::DoDivI(LDivI* instr) { // On MIPS div is asynchronous - it will run in the background while we // check for special cases. - __ ddiv(dividend, divisor); + __ Ddiv(result, dividend, divisor); // Check for x / 0. if (hdiv->CheckFlag(HValue::kCanBeDivByZero)) { @@ -1260,11 +1259,14 @@ void LCodeGen::DoDivI(LDivI* instr) { } if (!hdiv->CheckFlag(HValue::kAllUsesTruncatingToInt32)) { - __ mfhi(result); - DeoptimizeIf(ne, instr->environment(), result, Operand(zero_reg)); - __ mflo(result); - } else { - __ mflo(result); + // Calculate remainder. + Register remainder = ToRegister(instr->temp()); + if (kArchVariant != kMips64r6) { + __ mfhi(remainder); + } else { + __ dmod(remainder, dividend, divisor); + } + DeoptimizeIf(ne, instr->environment(), remainder, Operand(zero_reg)); } } @@ -1391,7 +1393,7 @@ void LCodeGen::DoFlooringDivI(LFlooringDivI* instr) { // On MIPS div is asynchronous - it will run in the background while we // check for special cases. - __ ddiv(dividend, divisor); + __ Ddiv(result, dividend, divisor); // Check for x / 0. if (hdiv->CheckFlag(HValue::kCanBeDivByZero)) { @@ -1418,8 +1420,11 @@ void LCodeGen::DoFlooringDivI(LFlooringDivI* instr) { // We performed a truncating division. Correct the result if necessary. Label done; Register remainder = scratch0(); - __ mfhi(remainder); - __ mflo(result); + if (kArchVariant != kMips64r6) { + __ mfhi(remainder); + } else { + __ dmod(remainder, dividend, divisor); + } __ Branch(&done, eq, remainder, Operand(zero_reg), USE_DELAY_SLOT); __ Xor(remainder, remainder, Operand(divisor)); __ Branch(&done, ge, remainder, Operand(zero_reg)); @@ -1507,21 +1512,16 @@ void LCodeGen::DoMulI(LMulI* instr) { if (overflow) { // hi:lo = left * right. if (instr->hydrogen()->representation().IsSmi()) { - __ SmiUntag(result, left); - __ dmult(result, right); - __ mfhi(scratch); - __ mflo(result); + __ Dmulh(result, left, right); } else { - __ dmult(left, right); - __ mfhi(scratch); - __ mflo(result); + __ Dmul(result, left, right); + } + __ dsra32(scratch, result, 0); + __ sra(at, result, 31); + if (instr->hydrogen()->representation().IsSmi()) { + __ SmiTag(result); } - __ dsra32(at, result, 31); DeoptimizeIf(ne, instr->environment(), scratch, Operand(at)); - if (!instr->hydrogen()->representation().IsSmi()) { - DeoptimizeIf(gt, instr->environment(), result, Operand(kMaxInt)); - DeoptimizeIf(lt, instr->environment(), result, Operand(kMinInt)); - } } else { if (instr->hydrogen()->representation().IsSmi()) { __ SmiUntag(result, left); diff --git a/src/mips64/lithium-mips64.cc b/src/mips64/lithium-mips64.cc index 1f5f578477..326ac8a794 100644 --- a/src/mips64/lithium-mips64.cc +++ b/src/mips64/lithium-mips64.cc @@ -1307,8 +1307,10 @@ LInstruction* LChunkBuilder::DoDivI(HDiv* instr) { ASSERT(instr->right()->representation().Equals(instr->representation())); LOperand* dividend = UseRegister(instr->left()); LOperand* divisor = UseRegister(instr->right()); + LOperand* temp = instr->CheckFlag(HInstruction::kAllUsesTruncatingToInt32) + ? 
NULL : TempRegister(); LInstruction* result = - DefineAsRegister(new(zone()) LDivI(dividend, divisor)); + DefineAsRegister(new(zone()) LDivI(dividend, divisor, temp)); if (instr->CheckFlag(HValue::kCanBeDivByZero) || instr->CheckFlag(HValue::kBailoutOnMinusZero) || (instr->CheckFlag(HValue::kCanOverflow) && diff --git a/src/mips64/lithium-mips64.h b/src/mips64/lithium-mips64.h index f58f0da22d..37f8efa5e2 100644 --- a/src/mips64/lithium-mips64.h +++ b/src/mips64/lithium-mips64.h @@ -689,15 +689,17 @@ class LDivByConstI V8_FINAL : public LTemplateInstruction<1, 1, 0> { }; -class LDivI V8_FINAL : public LTemplateInstruction<1, 2, 0> { +class LDivI V8_FINAL : public LTemplateInstruction<1, 2, 1> { public: - LDivI(LOperand* dividend, LOperand* divisor) { + LDivI(LOperand* dividend, LOperand* divisor, LOperand* temp) { inputs_[0] = dividend; inputs_[1] = divisor; + temps_[0] = temp; } LOperand* dividend() { return inputs_[0]; } LOperand* divisor() { return inputs_[1]; } + LOperand* temp() { return temps_[0]; } DECLARE_CONCRETE_INSTRUCTION(DivI, "div-i") DECLARE_HYDROGEN_ACCESSOR(BinaryOperation) diff --git a/src/mips64/macro-assembler-mips64.cc b/src/mips64/macro-assembler-mips64.cc index dea3433086..bf5a6647ce 100644 --- a/src/mips64/macro-assembler-mips64.cc +++ b/src/mips64/macro-assembler-mips64.cc @@ -676,21 +676,33 @@ void MacroAssembler::Dsubu(Register rd, Register rs, const Operand& rt) { void MacroAssembler::Mul(Register rd, Register rs, const Operand& rt) { if (rt.is_reg()) { - if (kArchVariant == kLoongson) { + mul(rd, rs, rt.rm()); + } else { + // li handles the relocation. + ASSERT(!rs.is(at)); + li(at, rt); + mul(rd, rs, at); + } +} + + +void MacroAssembler::Mulh(Register rd, Register rs, const Operand& rt) { + if (rt.is_reg()) { + if (kArchVariant != kMips64r6) { mult(rs, rt.rm()); - mflo(rd); + mfhi(rd); } else { - mul(rd, rs, rt.rm()); + muh(rd, rs, rt.rm()); } } else { // li handles the relocation. ASSERT(!rs.is(at)); li(at, rt); - if (kArchVariant == kLoongson) { + if (kArchVariant != kMips64r6) { mult(rs, at); - mflo(rd); + mfhi(rd); } else { - mul(rd, rs, at); + muh(rd, rs, at); } } } @@ -698,12 +710,9 @@ void MacroAssembler::Mul(Register rd, Register rs, const Operand& rt) { void MacroAssembler::Dmul(Register rd, Register rs, const Operand& rt) { if (rt.is_reg()) { - if (kArchVariant == kLoongson) { - dmult(rs, rt.rm()); - mflo(rd); + if (kArchVariant == kMips64r6) { + dmul(rd, rs, rt.rm()); } else { - // TODO(yuyin): - // dmul(rd, rs, rt.rm()); dmult(rs, rt.rm()); mflo(rd); } @@ -711,12 +720,9 @@ void MacroAssembler::Dmul(Register rd, Register rs, const Operand& rt) { // li handles the relocation. ASSERT(!rs.is(at)); li(at, rt); - if (kArchVariant == kLoongson) { - dmult(rs, at); - mflo(rd); + if (kArchVariant == kMips64r6) { + dmul(rd, rs, at); } else { - // TODO(yuyin): - // dmul(rd, rs, at); dmult(rs, at); mflo(rd); } @@ -724,6 +730,28 @@ void MacroAssembler::Dmul(Register rd, Register rs, const Operand& rt) { } +void MacroAssembler::Dmulh(Register rd, Register rs, const Operand& rt) { + if (rt.is_reg()) { + if (kArchVariant == kMips64r6) { + dmuh(rd, rs, rt.rm()); + } else { + dmult(rs, rt.rm()); + mfhi(rd); + } + } else { + // li handles the relocation. 
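The ASSERT that follows is the invariant behind every immediate-operand macro in this file: the immediate is first materialized into the assembler temporary at, so rs must never itself be at. A stripped-down model of that dispatch (stub emitters, not V8's API):

    #include <cassert>
    #include <cstdint>
    #include <cstdio>

    enum Reg { kAt = 1, kA0 = 4, kA1 = 5 };
    struct Operand { bool is_reg; Reg rm; int64_t imm; };

    // Stubs standing in for the real single-instruction emitters.
    void li(Reg rd, int64_t imm) { printf("li   r%d, %lld\n", rd, (long long)imm); }
    void dmuh(Reg rd, Reg rs, Reg rt) { printf("dmuh r%d, r%d, r%d\n", rd, rs, rt); }

    void Dmulh(Reg rd, Reg rs, const Operand& rt) {
      if (rt.is_reg) {
        dmuh(rd, rs, rt.rm);
      } else {
        assert(rs != kAt);  // at is about to be clobbered by the immediate.
        li(kAt, rt.imm);
        dmuh(rd, rs, kAt);
      }
    }

    int main() { Dmulh(kA0, kA1, Operand{false, kAt, 42}); }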
+ ASSERT(!rs.is(at)); + li(at, rt); + if (kArchVariant == kMips64r6) { + dmuh(rd, rs, at); + } else { + dmult(rs, at); + mfhi(rd); + } + } +} + + void MacroAssembler::Mult(Register rs, const Operand& rt) { if (rt.is_reg()) { mult(rs, rt.rm()); @@ -796,6 +824,31 @@ void MacroAssembler::Ddiv(Register rs, const Operand& rt) { } +void MacroAssembler::Ddiv(Register rd, Register rs, const Operand& rt) { + if (kArchVariant != kMips64r6) { + if (rt.is_reg()) { + ddiv(rs, rt.rm()); + mflo(rd); + } else { + // li handles the relocation. + ASSERT(!rs.is(at)); + li(at, rt); + ddiv(rs, at); + mflo(rd); + } + } else { + if (rt.is_reg()) { + ddiv(rd, rs, rt.rm()); + } else { + // li handles the relocation. + ASSERT(!rs.is(at)); + li(at, rt); + ddiv(rd, rs, at); + } + } +} + + void MacroAssembler::Divu(Register rs, const Operand& rt) { if (rt.is_reg()) { divu(rs, rt.rm()); @@ -820,6 +873,31 @@ void MacroAssembler::Ddivu(Register rs, const Operand& rt) { } +void MacroAssembler::Dmod(Register rd, Register rs, const Operand& rt) { + if (kArchVariant != kMips64r6) { + if (rt.is_reg()) { + ddiv(rs, rt.rm()); + mfhi(rd); + } else { + // li handles the relocation. + ASSERT(!rs.is(at)); + li(at, rt); + ddiv(rs, at); + mfhi(rd); + } + } else { + if (rt.is_reg()) { + dmod(rd, rs, rt.rm()); + } else { + // li handles the relocation. + ASSERT(!rs.is(at)); + li(at, rt); + dmod(rd, rs, at); + } + } +} + + void MacroAssembler::And(Register rd, Register rs, const Operand& rt) { if (rt.is_reg()) { and_(rd, rs, rt.rm()); @@ -957,11 +1035,7 @@ void MacroAssembler::Dror(Register rd, Register rs, const Operand& rt) { void MacroAssembler::Pref(int32_t hint, const MemOperand& rs) { - if (kArchVariant == kLoongson) { - lw(zero_reg, rs); - } else { pref(hint, rs); - } } @@ -1385,49 +1459,102 @@ void MacroAssembler::BranchF(Label* target, ASSERT(nan || target); // Check for unordered (NaN) cases. if (nan) { - c(UN, D, cmp1, cmp2); - bc1t(nan); + if (kArchVariant != kMips64r6) { + c(UN, D, cmp1, cmp2); + bc1t(nan); + } else { + // Use f31 for comparison result. It has to be unavailable to lithium + // register allocator. + ASSERT(!cmp1.is(f31) && !cmp2.is(f31)); + cmp(UN, L, f31, cmp1, cmp2); + bc1nez(nan, f31); + } } - if (target) { - // Here NaN cases were either handled by this function or are assumed to - // have been handled by the caller. - // Unsigned conditions are treated as their signed counterpart. - switch (cc) { - case lt: - c(OLT, D, cmp1, cmp2); - bc1t(target); - break; - case gt: - c(ULE, D, cmp1, cmp2); - bc1f(target); - break; - case ge: - c(ULT, D, cmp1, cmp2); - bc1f(target); - break; - case le: - c(OLE, D, cmp1, cmp2); - bc1t(target); - break; - case eq: - c(EQ, D, cmp1, cmp2); - bc1t(target); - break; - case ueq: - c(UEQ, D, cmp1, cmp2); - bc1t(target); - break; - case ne: - c(EQ, D, cmp1, cmp2); - bc1f(target); - break; - case nue: - c(UEQ, D, cmp1, cmp2); - bc1f(target); - break; - default: - CHECK(0); + if (kArchVariant != kMips64r6) { + if (target) { + // Here NaN cases were either handled by this function or are assumed to + // have been handled by the caller. 
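For reference, the pre-r6 switch that follows maps each Condition onto a c.cond.fmt compare plus bc1t/bc1f on the single FP condition bit; conditions without a direct cc (gt, ge, ne, nue) branch on the complement of the opposite test. A host-side model of that truth table (IEEE doubles assumed; not V8 code):

    #include <cmath>
    #include <cstdio>

    enum Cond { kLt, kGt, kGe, kLe, kEq };

    bool FpBranchTaken(Cond cc, double a, double b) {
      bool un = std::isnan(a) || std::isnan(b);
      switch (cc) {
        case kLt: return !un && a < b;     // c(OLT, ...) then bc1t
        case kGt: return !(un || a <= b);  // c(ULE, ...) then bc1f
        case kGe: return !(un || a < b);   // c(ULT, ...) then bc1f
        case kLe: return !un && a <= b;    // c(OLE, ...) then bc1t
        case kEq: return !un && a == b;    // c(EQ, ...)  then bc1t
      }
      return false;
    }

    int main() {
      // NaN makes every ordered branch fall through, including gt.
      printf("%d %d\n", FpBranchTaken(kGt, 2.0, 1.0), FpBranchTaken(kGt, NAN, 1.0));
    }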
+ switch (cc) { + case lt: + c(OLT, D, cmp1, cmp2); + bc1t(target); + break; + case gt: + c(ULE, D, cmp1, cmp2); + bc1f(target); + break; + case ge: + c(ULT, D, cmp1, cmp2); + bc1f(target); + break; + case le: + c(OLE, D, cmp1, cmp2); + bc1t(target); + break; + case eq: + c(EQ, D, cmp1, cmp2); + bc1t(target); + break; + case ueq: + c(UEQ, D, cmp1, cmp2); + bc1t(target); + break; + case ne: + c(EQ, D, cmp1, cmp2); + bc1f(target); + break; + case nue: + c(UEQ, D, cmp1, cmp2); + bc1f(target); + break; + default: + CHECK(0); + } + } + } else { + if (target) { + // Here NaN cases were either handled by this function or are assumed to + // have been handled by the caller. + // Unsigned conditions are treated as their signed counterpart. + // Use f31 for comparison result, it is valid in fp64 (FR = 1) mode. + ASSERT(!cmp1.is(f31) && !cmp2.is(f31)); + switch (cc) { + case lt: + cmp(OLT, L, f31, cmp1, cmp2); + bc1nez(target, f31); + break; + case gt: + cmp(ULE, L, f31, cmp1, cmp2); + bc1eqz(target, f31); + break; + case ge: + cmp(ULT, L, f31, cmp1, cmp2); + bc1eqz(target, f31); + break; + case le: + cmp(OLE, L, f31, cmp1, cmp2); + bc1nez(target, f31); + break; + case eq: + cmp(EQ, L, f31, cmp1, cmp2); + bc1nez(target, f31); + break; + case ueq: + cmp(UEQ, L, f31, cmp1, cmp2); + bc1nez(target, f31); + break; + case ne: + cmp(EQ, L, f31, cmp1, cmp2); + bc1eqz(target, f31); + break; + case nue: + cmp(UEQ, L, f31, cmp1, cmp2); + bc1eqz(target, f31); + break; + default: + CHECK(0); + } } } @@ -1471,7 +1598,7 @@ void MacroAssembler::Move(FPURegister dst, double imm) { void MacroAssembler::Movz(Register rd, Register rs, Register rt) { - if (kArchVariant == kLoongson) { + if (kArchVariant == kMips64r6) { Label done; Branch(&done, ne, rt, Operand(zero_reg)); mov(rd, rs); @@ -1483,7 +1610,7 @@ void MacroAssembler::Movz(Register rd, Register rs, Register rt) { void MacroAssembler::Movn(Register rd, Register rs, Register rt) { - if (kArchVariant == kLoongson) { + if (kArchVariant == kMips64r6) { Label done; Branch(&done, eq, rt, Operand(zero_reg)); mov(rd, rs); @@ -2372,48 +2499,64 @@ void MacroAssembler::BranchAndLinkShort(int16_t offset, Condition cond, // Signed comparison. case greater: + // rs > rt slt(scratch, r2, rs); - daddiu(scratch, scratch, -1); - bgezal(scratch, offset); + beq(scratch, zero_reg, 2); + nop(); + bal(offset); break; case greater_equal: + // rs >= rt slt(scratch, rs, r2); - daddiu(scratch, scratch, -1); - bltzal(scratch, offset); + bne(scratch, zero_reg, 2); + nop(); + bal(offset); break; case less: + // rs < r2 slt(scratch, rs, r2); - daddiu(scratch, scratch, -1); - bgezal(scratch, offset); + bne(scratch, zero_reg, 2); + nop(); + bal(offset); break; case less_equal: + // rs <= r2 slt(scratch, r2, rs); - daddiu(scratch, scratch, -1); - bltzal(scratch, offset); + bne(scratch, zero_reg, 2); + nop(); + bal(offset); break; + // Unsigned comparison. 
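The unsigned cases below repeat the pattern just used for the signed ones. The rewrite is needed because, as I read the r6 spec, BLTZAL/BGEZAL are dropped (only the rs = zero form survives as NAL/BAL), so the old slt; daddiu -1; bgezal trick becomes slt plus a two-instruction skip over an unconditional bal. Both sequences link under exactly the same condition; a host-side check of that equivalence (illustrative only):

    #include <cstdio>

    // Old: scratch = (rt < rs); scratch -= 1; link if scratch >= 0.
    bool LinksOld(long rs, long rt) { return ((rt < rs ? 1 : 0) - 1) >= 0; }
    // New: scratch = (rt < rs); skip the bal when scratch == 0.
    bool LinksNew(long rs, long rt) { return (rt < rs ? 1 : 0) != 0; }

    int main() {
      for (long rs = 0; rs < 4; ++rs)  // prints matching 0/1 pairs
        printf("rs=%ld: %d %d\n", rs, LinksOld(rs, 2), LinksNew(rs, 2));
    }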
case Ugreater: + // rs > rt sltu(scratch, r2, rs); - daddiu(scratch, scratch, -1); - bgezal(scratch, offset); + beq(scratch, zero_reg, 2); + nop(); + bal(offset); break; case Ugreater_equal: + // rs >= rt sltu(scratch, rs, r2); - daddiu(scratch, scratch, -1); - bltzal(scratch, offset); + bne(scratch, zero_reg, 2); + nop(); + bal(offset); break; case Uless: + // rs < r2 sltu(scratch, rs, r2); - daddiu(scratch, scratch, -1); - bgezal(scratch, offset); + bne(scratch, zero_reg, 2); + nop(); + bal(offset); break; case Uless_equal: + // rs <= r2 sltu(scratch, r2, rs); - daddiu(scratch, scratch, -1); - bltzal(scratch, offset); + bne(scratch, zero_reg, 2); + nop(); + bal(offset); break; - default: UNREACHABLE(); } @@ -2470,54 +2613,71 @@ void MacroAssembler::BranchAndLinkShort(Label* L, Condition cond, Register rs, // Signed comparison. case greater: + // rs > rt slt(scratch, r2, rs); - daddiu(scratch, scratch, -1); + beq(scratch, zero_reg, 2); + nop(); offset = shifted_branch_offset(L, false); - bgezal(scratch, offset); + bal(offset); break; case greater_equal: + // rs >= rt slt(scratch, rs, r2); - daddiu(scratch, scratch, -1); + bne(scratch, zero_reg, 2); + nop(); offset = shifted_branch_offset(L, false); - bltzal(scratch, offset); + bal(offset); break; case less: + // rs < r2 slt(scratch, rs, r2); - daddiu(scratch, scratch, -1); + bne(scratch, zero_reg, 2); + nop(); offset = shifted_branch_offset(L, false); - bgezal(scratch, offset); + bal(offset); break; case less_equal: + // rs <= r2 slt(scratch, r2, rs); - daddiu(scratch, scratch, -1); + bne(scratch, zero_reg, 2); + nop(); offset = shifted_branch_offset(L, false); - bltzal(scratch, offset); + bal(offset); break; + // Unsigned comparison. case Ugreater: + // rs > rt sltu(scratch, r2, rs); - daddiu(scratch, scratch, -1); + beq(scratch, zero_reg, 2); + nop(); offset = shifted_branch_offset(L, false); - bgezal(scratch, offset); + bal(offset); break; case Ugreater_equal: + // rs >= rt sltu(scratch, rs, r2); - daddiu(scratch, scratch, -1); + bne(scratch, zero_reg, 2); + nop(); offset = shifted_branch_offset(L, false); - bltzal(scratch, offset); + bal(offset); break; case Uless: + // rs < r2 sltu(scratch, rs, r2); - daddiu(scratch, scratch, -1); + bne(scratch, zero_reg, 2); + nop(); offset = shifted_branch_offset(L, false); - bgezal(scratch, offset); + bal(offset); break; case Uless_equal: + // rs <= r2 sltu(scratch, r2, rs); - daddiu(scratch, scratch, -1); + bne(scratch, zero_reg, 2); + nop(); offset = shifted_branch_offset(L, false); - bltzal(scratch, offset); + bal(offset); break; default: @@ -5456,10 +5616,7 @@ void MacroAssembler::CheckPageFlag( int mask, Condition cc, Label* condition_met) { - // TODO(plind): Fix li() so we can use constant embedded inside And(). 
- // And(scratch, object, Operand(~Page::kPageAlignmentMask)); - li(at, Operand(~Page::kPageAlignmentMask), CONSTANT_SIZE); // plind HACK - And(scratch, object, at); + And(scratch, object, Operand(~Page::kPageAlignmentMask)); ld(scratch, MemOperand(scratch, MemoryChunk::kFlagsOffset)); And(scratch, scratch, Operand(mask)); Branch(condition_met, cc, scratch, Operand(zero_reg)); @@ -5933,8 +6090,7 @@ void MacroAssembler::TruncatingDiv(Register result, ASSERT(!result.is(at)); MultiplierAndShift ms(divisor); li(at, Operand(ms.multiplier())); - Mult(dividend, Operand(at)); - mfhi(result); + Mulh(result, dividend, Operand(at)); if (divisor > 0 && ms.multiplier() < 0) { Addu(result, result, Operand(dividend)); } diff --git a/src/mips64/macro-assembler-mips64.h b/src/mips64/macro-assembler-mips64.h index 3ba646343a..d2207624cb 100644 --- a/src/mips64/macro-assembler-mips64.h +++ b/src/mips64/macro-assembler-mips64.h @@ -606,10 +606,14 @@ class MacroAssembler: public Assembler { DEFINE_INSTRUCTION(Addu); DEFINE_INSTRUCTION(Daddu); + DEFINE_INSTRUCTION(Ddiv); DEFINE_INSTRUCTION(Subu); DEFINE_INSTRUCTION(Dsubu); + DEFINE_INSTRUCTION(Dmod); DEFINE_INSTRUCTION(Mul); + DEFINE_INSTRUCTION(Mulh); DEFINE_INSTRUCTION(Dmul); + DEFINE_INSTRUCTION(Dmulh); DEFINE_INSTRUCTION2(Mult); DEFINE_INSTRUCTION2(Dmult); DEFINE_INSTRUCTION2(Multu); diff --git a/src/mips64/simulator-mips64.cc b/src/mips64/simulator-mips64.cc index 86e8b91464..f4cad547bb 100644 --- a/src/mips64/simulator-mips64.cc +++ b/src/mips64/simulator-mips64.cc @@ -1955,9 +1955,6 @@ void Simulator::ConfigureTypeRegister(Instruction* instr, switch (op) { case COP1: // Coprocessor instructions. switch (instr->RsFieldRaw()) { - case BC1: // Handled in DecodeTypeImmed, should never come here. - UNREACHABLE(); - break; case CFC1: // At the moment only FCSR is supported. ASSERT(fs_reg == kFCSRRegister); @@ -1976,8 +1973,6 @@ void Simulator::ConfigureTypeRegister(Instruction* instr, case MTC1: case DMTC1: case MTHC1: - // Do the store in the execution step. - break; case S: case D: case W: @@ -1986,7 +1981,8 @@ void Simulator::ConfigureTypeRegister(Instruction* instr, // Do everything in the execution step. break; default: - UNIMPLEMENTED_MIPS(); + // BC1 BC1EQZ BC1NEZ handled in DecodeTypeImmed, should never come here. + UNREACHABLE(); } break; case COP1X: @@ -2071,13 +2067,23 @@ void Simulator::ConfigureTypeRegister(Instruction* instr, case DSRAV: *alu_out = rt >> rs; break; - case MFHI: - *alu_out = get_register(HI); + case MFHI: // MFHI == CLZ on R6. + if (kArchVariant != kMips64r6) { + ASSERT(instr->SaValue() == 0); + *alu_out = get_register(HI); + } else { + // MIPS spec: If no bits were set in GPR rs, the result written to + // GPR rd is 32. + // GCC __builtin_clz: If input is 0, the result is undefined. + ASSERT(instr->SaValue() == 1); + *alu_out = + rs_u == 0 ? 32 : CompilerIntrinsics::CountLeadingZeros(rs_u); + } break; case MFLO: *alu_out = get_register(LO); break; - case MULT: + case MULT: // MULT == D_MUL_MUH. // TODO(plind) - Unify MULT/DMULT with single set of 64-bit HI/Lo // regs. // TODO(plind) - make the 32-bit MULT ops conform to spec regarding @@ -2088,9 +2094,23 @@ void Simulator::ConfigureTypeRegister(Instruction* instr, case MULTU: *u64hilo = static_cast(rs_u) * static_cast(rt_u); break; - case DMULT: - *i128resultH = MultiplyHighSigned(rs, rt); - *i128resultL = rs * rt; + case DMULT: // DMULT == D_MUL_MUH. 
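As the comment above says, r6 folds MUL/MUH into the legacy MULT/DMULT function codes, with the otherwise-zero sa field selecting the half of the product (the disassembler tests later in this patch pin sa = 2 for MUL_OP and sa = 3 for MUH_OP). A standalone decoder over two of those test words:

    #include <cstdint>
    #include <cstdio>

    void DecodeMulSlot(uint32_t instr) {
      uint32_t sa = (instr >> 6) & 0x1f;
      uint32_t function = instr & 0x3f;
      if (function == 0x18)  // legacy MULT function code
        printf("%08x -> %s\n", instr, sa == 2 ? "mul" : sa == 3 ? "muh" : "?");
    }

    int main() {
      DecodeMulSlot(0x00a62098);  // mul a0, a1, a2 (sa = 2)
      DecodeMulSlot(0x00a620d8);  // muh a0, a1, a2 (sa = 3)
    }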
+ if (kArchVariant != kMips64r6) { + *i128resultH = MultiplyHighSigned(rs, rt); + *i128resultL = rs * rt; + } else { + switch (instr->SaValue()) { + case MUL_OP: + *i128resultL = rs * rt; + break; + case MUH_OP: + *i128resultH = MultiplyHighSigned(rs, rt); + break; + default: + UNIMPLEMENTED_MIPS(); + break; + } + } break; case DMULTU: UNIMPLEMENTED_MIPS(); @@ -2295,6 +2315,8 @@ void Simulator::DecodeTypeRegister(Instruction* instr) { case COP1: switch (instr->RsFieldRaw()) { case BC1: // Branch on coprocessor condition. + case BC1EQZ: + case BC1NEZ: UNREACHABLE(); break; case CFC1: @@ -2328,20 +2350,9 @@ void Simulator::DecodeTypeRegister(Instruction* instr) { f = get_fpu_register_float(fs_reg); set_fpu_register_double(fd_reg, static_cast(f)); break; - case CVT_W_S: - case CVT_L_S: - case TRUNC_W_S: - case TRUNC_L_S: - case ROUND_W_S: - case ROUND_L_S: - case FLOOR_W_S: - case FLOOR_L_S: - case CEIL_W_S: - case CEIL_L_S: - case CVT_PS_S: - UNIMPLEMENTED_MIPS(); - break; default: + // CVT_W_S CVT_L_S TRUNC_W_S ROUND_W_S ROUND_L_S FLOOR_W_S FLOOR_L_S + // CEIL_W_S CEIL_L_S CVT_PS_S are unimplemented. UNREACHABLE(); } break; @@ -2514,25 +2525,77 @@ void Simulator::DecodeTypeRegister(Instruction* instr) { alu_out = get_fpu_register_signed_word(fs_reg); set_fpu_register_double(fd_reg, static_cast(alu_out)); break; - default: + default: // Mips64r6 CMP.S instructions unimplemented. UNREACHABLE(); } break; case L: + fs = get_fpu_register_double(fs_reg); + ft = get_fpu_register_double(ft_reg); switch (instr->FunctionFieldRaw()) { - case CVT_D_L: // Mips32r2 instruction. - i64 = get_fpu_register(fs_reg); - set_fpu_register_double(fd_reg, static_cast(i64)); - break; + case CVT_D_L: // Mips32r2 instruction. + i64 = get_fpu_register(fs_reg); + set_fpu_register_double(fd_reg, static_cast(i64)); + break; case CVT_S_L: UNIMPLEMENTED_MIPS(); break; - default: + case CMP_AF: // Mips64r6 CMP.D instructions. + UNIMPLEMENTED_MIPS(); + break; + case CMP_UN: + if (std::isnan(fs) || std::isnan(ft)) { + set_fpu_register(fd_reg, -1); + } else { + set_fpu_register(fd_reg, 0); + } + break; + case CMP_EQ: + if (fs == ft) { + set_fpu_register(fd_reg, -1); + } else { + set_fpu_register(fd_reg, 0); + } + break; + case CMP_UEQ: + if ((fs == ft) || (std::isnan(fs) || std::isnan(ft))) { + set_fpu_register(fd_reg, -1); + } else { + set_fpu_register(fd_reg, 0); + } + break; + case CMP_LT: + if (fs < ft) { + set_fpu_register(fd_reg, -1); + } else { + set_fpu_register(fd_reg, 0); + } + break; + case CMP_ULT: + if ((fs < ft) || (std::isnan(fs) || std::isnan(ft))) { + set_fpu_register(fd_reg, -1); + } else { + set_fpu_register(fd_reg, 0); + } + break; + case CMP_LE: + if (fs <= ft) { + set_fpu_register(fd_reg, -1); + } else { + set_fpu_register(fd_reg, 0); + } + break; + case CMP_ULE: + if ((fs <= ft) || (std::isnan(fs) || std::isnan(ft))) { + set_fpu_register(fd_reg, -1); + } else { + set_fpu_register(fd_reg, 0); + } + break; + default: // CMP_OR CMP_UNE CMP_NE UNIMPLEMENTED UNREACHABLE(); } break; - case PS: - break; default: UNREACHABLE(); } @@ -2572,32 +2635,91 @@ void Simulator::DecodeTypeRegister(Instruction* instr) { } // Instructions using HI and LO registers. 
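The i128resultH consumed by the DMULT case below comes from MultiplyHighSigned, which is outside this patch; a portable equivalent, assuming a compiler with __int128 (GCC/Clang):

    #include <cstdint>
    #include <cstdio>

    int64_t MultiplyHighSigned64(int64_t a, int64_t b) {
      return (int64_t)(((__int128)a * (__int128)b) >> 64);
    }

    int main() {
      // 2 * INT64_MAX = 2^64 - 2: high word 0, low word 0xff...fe.
      printf("%lld\n", (long long)MultiplyHighSigned64(INT64_MAX, 2));  // 0
      // 2 * INT64_MIN = -2^64: high word -1, low word 0.
      printf("%lld\n", (long long)MultiplyHighSigned64(INT64_MIN, 2));  // -1
    }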
case MULT: - set_register(LO, static_cast(i64hilo & 0xffffffff)); - set_register(HI, static_cast(i64hilo >> 32)); + if (kArchVariant != kMips64r6) { + set_register(LO, static_cast(i64hilo & 0xffffffff)); + set_register(HI, static_cast(i64hilo >> 32)); + } else { + switch (instr->SaValue()) { + case MUL_OP: + set_register(rd_reg, + static_cast(i64hilo & 0xffffffff)); + break; + case MUH_OP: + set_register(rd_reg, static_cast(i64hilo >> 32)); + break; + default: + UNIMPLEMENTED_MIPS(); + break; + } + } break; case MULTU: set_register(LO, static_cast(u64hilo & 0xffffffff)); set_register(HI, static_cast(u64hilo >> 32)); break; - case DMULT: - set_register(LO, static_cast(i128resultL)); - set_register(HI, static_cast(i128resultH)); + case DMULT: // DMULT == D_MUL_MUH. + if (kArchVariant != kMips64r6) { + set_register(LO, static_cast(i128resultL)); + set_register(HI, static_cast(i128resultH)); + } else { + switch (instr->SaValue()) { + case MUL_OP: + set_register(rd_reg, static_cast(i128resultL)); + break; + case MUH_OP: + set_register(rd_reg, static_cast(i128resultH)); + break; + default: + UNIMPLEMENTED_MIPS(); + break; + } + } break; case DMULTU: UNIMPLEMENTED_MIPS(); break; + case DSLL: + set_register(rd_reg, alu_out); + break; case DIV: case DDIV: - // Divide by zero and overflow was not checked in the configuration - // step - div and divu do not raise exceptions. On division by 0 - // the result will be UNPREDICTABLE. On overflow (INT_MIN/-1), - // return INT_MIN which is what the hardware does. - if (rs == INT_MIN && rt == -1) { - set_register(LO, INT_MIN); - set_register(HI, 0); - } else if (rt != 0) { - set_register(LO, rs / rt); - set_register(HI, rs % rt); + switch (kArchVariant) { + case kMips64r2: + // Divide by zero and overflow was not checked in the + // configuration step - div and divu do not raise exceptions. On + // division by 0 the result will be UNPREDICTABLE. On overflow + // (INT_MIN/-1), return INT_MIN which is what the hardware does. + if (rs == INT_MIN && rt == -1) { + set_register(LO, INT_MIN); + set_register(HI, 0); + } else if (rt != 0) { + set_register(LO, rs / rt); + set_register(HI, rs % rt); + } + break; + case kMips64r6: + switch (instr->SaValue()) { + case DIV_OP: + if (rs == INT_MIN && rt == -1) { + set_register(rd_reg, INT_MIN); + } else if (rt != 0) { + set_register(rd_reg, rs / rt); + } + break; + case MOD_OP: + if (rs == INT_MIN && rt == -1) { + set_register(rd_reg, 0); + } else if (rt != 0) { + set_register(rd_reg, rs % rt); + } + break; + default: + UNIMPLEMENTED_MIPS(); + break; + } + break; + default: + break; } break; case DIVU: @@ -2696,6 +2818,7 @@ void Simulator::DecodeTypeImmediate(Instruction* instr) { int16_t imm16 = instr->Imm16Value(); int32_t ft_reg = instr->FtValue(); // Destination register. + int64_t ft = get_fpu_register(ft_reg); // Zero extended immediate. uint32_t oe_imm16 = 0xffff & imm16; @@ -2742,6 +2865,26 @@ void Simulator::DecodeTypeImmediate(Instruction* instr) { next_pc = current_pc + kBranchReturnOffset; } break; + case BC1EQZ: + do_branch = (ft & 0x1) ? false : true; + execute_branch_delay_instruction = true; + // Set next_pc. + if (do_branch) { + next_pc = current_pc + (imm16 << 2) + Instruction::kInstrSize; + } else { + next_pc = current_pc + kBranchReturnOffset; + } + break; + case BC1NEZ: + do_branch = (ft & 0x1) ? true : false; + execute_branch_delay_instruction = true; + // Set next_pc. 
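The next_pc computation below follows the usual MIPS branch arithmetic: a taken branch targets the delay-slot address plus the shifted 16-bit offset, while an untaken one resumes past the delay slot. A standalone model (kInstrSize = 4 and kBranchReturnOffset = 2 * kInstrSize assumed, matching the rest of the simulator):

    #include <cstdint>
    #include <cstdio>

    int64_t NextPc(int64_t pc, int16_t imm16, bool taken) {
      const int64_t kInstrSize = 4;
      const int64_t kBranchReturnOffset = 2 * kInstrSize;  // skip the delay slot
      return taken ? pc + (imm16 << 2) + kInstrSize : pc + kBranchReturnOffset;
    }

    int main() {
      printf("%llx\n", (unsigned long long)NextPc(0x1000, 4, true));   // 1014
      printf("%llx\n", (unsigned long long)NextPc(0x1000, -1, true));  // 1000
      printf("%llx\n", (unsigned long long)NextPc(0x1000, 4, false));  // 1008
    }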
+ if (do_branch) { + next_pc = current_pc + (imm16 << 2) + Instruction::kInstrSize; + } else { + next_pc = current_pc + kBranchReturnOffset; + } + break; default: UNREACHABLE(); } diff --git a/test/cctest/test-api.cc b/test/cctest/test-api.cc index 95ec479634..d174b307cb 100644 --- a/test/cctest/test-api.cc +++ b/test/cctest/test-api.cc @@ -18064,7 +18064,7 @@ THREADED_TEST(QuietSignalingNaNs) { uint64_t stored_bits = DoubleToBits(stored_number); // Check if quiet nan (bits 51..62 all set). #if (defined(V8_TARGET_ARCH_MIPS) || defined(V8_TARGET_ARCH_MIPS64)) && \ - !defined(USE_SIMULATOR) + !defined(_MIPS_ARCH_MIPS64R6) && !defined(USE_SIMULATOR) // Most significant fraction bit for quiet nan is set to 0 // on MIPS architecture. Allowed by IEEE-754. CHECK_EQ(0xffe, static_cast((stored_bits >> 51) & 0xfff)); @@ -18085,7 +18085,7 @@ THREADED_TEST(QuietSignalingNaNs) { uint64_t stored_bits = DoubleToBits(stored_date); // Check if quiet nan (bits 51..62 all set). #if (defined(V8_TARGET_ARCH_MIPS) || defined(V8_TARGET_ARCH_MIPS64)) && \ - !defined(USE_SIMULATOR) + !defined(_MIPS_ARCH_MIPS64R6) && !defined(USE_SIMULATOR) // Most significant fraction bit for quiet nan is set to 0 // on MIPS architecture. Allowed by IEEE-754. CHECK_EQ(0xffe, static_cast((stored_bits >> 51) & 0xfff)); diff --git a/test/cctest/test-assembler-mips64.cc b/test/cctest/test-assembler-mips64.cc index bd17595ef1..4e9238930a 100644 --- a/test/cctest/test-assembler-mips64.cc +++ b/test/cctest/test-assembler-mips64.cc @@ -557,21 +557,27 @@ TEST(MIPS7) { __ ldc1(f4, MemOperand(a0, OFFSET_OF(T, a)) ); __ ldc1(f6, MemOperand(a0, OFFSET_OF(T, b)) ); - __ c(UN, D, f4, f6); - __ bc1f(&neither_is_nan); + if (kArchVariant != kMips64r6) { + __ c(UN, D, f4, f6); + __ bc1f(&neither_is_nan); + } else { + __ cmp(UN, L, f2, f4, f6); + __ bc1eqz(&neither_is_nan, f2); + } __ nop(); __ sw(zero_reg, MemOperand(a0, OFFSET_OF(T, result)) ); __ Branch(&outa_here); __ bind(&neither_is_nan); - if (kArchVariant == kLoongson) { - __ c(OLT, D, f6, f4); - __ bc1t(&less_than); + if (kArchVariant == kMips64r6) { + __ cmp(OLT, L, f2, f6, f4); + __ bc1nez(&less_than, f2); } else { __ c(OLT, D, f6, f4, 2); __ bc1t(&less_than, 2); } + __ nop(); __ sw(zero_reg, MemOperand(a0, OFFSET_OF(T, result)) ); __ Branch(&outa_here); @@ -832,144 +838,147 @@ TEST(MIPS10) { TEST(MIPS11) { - // Test LWL, LWR, SWL and SWR instructions. - CcTest::InitializeVM(); - Isolate* isolate = CcTest::i_isolate(); - HandleScope scope(isolate); + // Do not run test on MIPS64r6, as these instructions are removed. + if (kArchVariant != kMips64r6) { + // Test LWL, LWR, SWL and SWR instructions. + CcTest::InitializeVM(); + Isolate* isolate = CcTest::i_isolate(); + HandleScope scope(isolate); - typedef struct { - int32_t reg_init; - int32_t mem_init; - int32_t lwl_0; - int32_t lwl_1; - int32_t lwl_2; - int32_t lwl_3; - int32_t lwr_0; - int32_t lwr_1; - int32_t lwr_2; - int32_t lwr_3; - int32_t swl_0; - int32_t swl_1; - int32_t swl_2; - int32_t swl_3; - int32_t swr_0; - int32_t swr_1; - int32_t swr_2; - int32_t swr_3; - } T; - T t; + typedef struct { + int32_t reg_init; + int32_t mem_init; + int32_t lwl_0; + int32_t lwl_1; + int32_t lwl_2; + int32_t lwl_3; + int32_t lwr_0; + int32_t lwr_1; + int32_t lwr_2; + int32_t lwr_3; + int32_t swl_0; + int32_t swl_1; + int32_t swl_2; + int32_t swl_3; + int32_t swr_0; + int32_t swr_1; + int32_t swr_2; + int32_t swr_3; + } T; + T t; - Assembler assm(isolate, NULL, 0); + Assembler assm(isolate, NULL, 0); - // Test all combinations of LWL and vAddr. 
- __ lw(a4, MemOperand(a0, OFFSET_OF(T, reg_init)) ); - __ lwl(a4, MemOperand(a0, OFFSET_OF(T, mem_init)) ); - __ sw(a4, MemOperand(a0, OFFSET_OF(T, lwl_0)) ); + // Test all combinations of LWL and vAddr. + __ lw(a4, MemOperand(a0, OFFSET_OF(T, reg_init)) ); + __ lwl(a4, MemOperand(a0, OFFSET_OF(T, mem_init)) ); + __ sw(a4, MemOperand(a0, OFFSET_OF(T, lwl_0)) ); - __ lw(a5, MemOperand(a0, OFFSET_OF(T, reg_init)) ); - __ lwl(a5, MemOperand(a0, OFFSET_OF(T, mem_init) + 1) ); - __ sw(a5, MemOperand(a0, OFFSET_OF(T, lwl_1)) ); + __ lw(a5, MemOperand(a0, OFFSET_OF(T, reg_init)) ); + __ lwl(a5, MemOperand(a0, OFFSET_OF(T, mem_init) + 1) ); + __ sw(a5, MemOperand(a0, OFFSET_OF(T, lwl_1)) ); - __ lw(a6, MemOperand(a0, OFFSET_OF(T, reg_init)) ); - __ lwl(a6, MemOperand(a0, OFFSET_OF(T, mem_init) + 2) ); - __ sw(a6, MemOperand(a0, OFFSET_OF(T, lwl_2)) ); + __ lw(a6, MemOperand(a0, OFFSET_OF(T, reg_init)) ); + __ lwl(a6, MemOperand(a0, OFFSET_OF(T, mem_init) + 2) ); + __ sw(a6, MemOperand(a0, OFFSET_OF(T, lwl_2)) ); - __ lw(a7, MemOperand(a0, OFFSET_OF(T, reg_init)) ); - __ lwl(a7, MemOperand(a0, OFFSET_OF(T, mem_init) + 3) ); - __ sw(a7, MemOperand(a0, OFFSET_OF(T, lwl_3)) ); + __ lw(a7, MemOperand(a0, OFFSET_OF(T, reg_init)) ); + __ lwl(a7, MemOperand(a0, OFFSET_OF(T, mem_init) + 3) ); + __ sw(a7, MemOperand(a0, OFFSET_OF(T, lwl_3)) ); - // Test all combinations of LWR and vAddr. - __ lw(a4, MemOperand(a0, OFFSET_OF(T, reg_init)) ); - __ lwr(a4, MemOperand(a0, OFFSET_OF(T, mem_init)) ); - __ sw(a4, MemOperand(a0, OFFSET_OF(T, lwr_0)) ); + // Test all combinations of LWR and vAddr. + __ lw(a4, MemOperand(a0, OFFSET_OF(T, reg_init)) ); + __ lwr(a4, MemOperand(a0, OFFSET_OF(T, mem_init)) ); + __ sw(a4, MemOperand(a0, OFFSET_OF(T, lwr_0)) ); - __ lw(a5, MemOperand(a0, OFFSET_OF(T, reg_init)) ); - __ lwr(a5, MemOperand(a0, OFFSET_OF(T, mem_init) + 1) ); - __ sw(a5, MemOperand(a0, OFFSET_OF(T, lwr_1)) ); + __ lw(a5, MemOperand(a0, OFFSET_OF(T, reg_init)) ); + __ lwr(a5, MemOperand(a0, OFFSET_OF(T, mem_init) + 1) ); + __ sw(a5, MemOperand(a0, OFFSET_OF(T, lwr_1)) ); - __ lw(a6, MemOperand(a0, OFFSET_OF(T, reg_init)) ); - __ lwr(a6, MemOperand(a0, OFFSET_OF(T, mem_init) + 2) ); - __ sw(a6, MemOperand(a0, OFFSET_OF(T, lwr_2)) ); + __ lw(a6, MemOperand(a0, OFFSET_OF(T, reg_init)) ); + __ lwr(a6, MemOperand(a0, OFFSET_OF(T, mem_init) + 2) ); + __ sw(a6, MemOperand(a0, OFFSET_OF(T, lwr_2)) ); - __ lw(a7, MemOperand(a0, OFFSET_OF(T, reg_init)) ); - __ lwr(a7, MemOperand(a0, OFFSET_OF(T, mem_init) + 3) ); - __ sw(a7, MemOperand(a0, OFFSET_OF(T, lwr_3)) ); + __ lw(a7, MemOperand(a0, OFFSET_OF(T, reg_init)) ); + __ lwr(a7, MemOperand(a0, OFFSET_OF(T, mem_init) + 3) ); + __ sw(a7, MemOperand(a0, OFFSET_OF(T, lwr_3)) ); - // Test all combinations of SWL and vAddr. - __ lw(a4, MemOperand(a0, OFFSET_OF(T, mem_init)) ); - __ sw(a4, MemOperand(a0, OFFSET_OF(T, swl_0)) ); - __ lw(a4, MemOperand(a0, OFFSET_OF(T, reg_init)) ); - __ swl(a4, MemOperand(a0, OFFSET_OF(T, swl_0)) ); + // Test all combinations of SWL and vAddr. 
+ __ lw(a4, MemOperand(a0, OFFSET_OF(T, mem_init)) ); + __ sw(a4, MemOperand(a0, OFFSET_OF(T, swl_0)) ); + __ lw(a4, MemOperand(a0, OFFSET_OF(T, reg_init)) ); + __ swl(a4, MemOperand(a0, OFFSET_OF(T, swl_0)) ); - __ lw(a5, MemOperand(a0, OFFSET_OF(T, mem_init)) ); - __ sw(a5, MemOperand(a0, OFFSET_OF(T, swl_1)) ); - __ lw(a5, MemOperand(a0, OFFSET_OF(T, reg_init)) ); - __ swl(a5, MemOperand(a0, OFFSET_OF(T, swl_1) + 1) ); + __ lw(a5, MemOperand(a0, OFFSET_OF(T, mem_init)) ); + __ sw(a5, MemOperand(a0, OFFSET_OF(T, swl_1)) ); + __ lw(a5, MemOperand(a0, OFFSET_OF(T, reg_init)) ); + __ swl(a5, MemOperand(a0, OFFSET_OF(T, swl_1) + 1) ); - __ lw(a6, MemOperand(a0, OFFSET_OF(T, mem_init)) ); - __ sw(a6, MemOperand(a0, OFFSET_OF(T, swl_2)) ); - __ lw(a6, MemOperand(a0, OFFSET_OF(T, reg_init)) ); - __ swl(a6, MemOperand(a0, OFFSET_OF(T, swl_2) + 2) ); + __ lw(a6, MemOperand(a0, OFFSET_OF(T, mem_init)) ); + __ sw(a6, MemOperand(a0, OFFSET_OF(T, swl_2)) ); + __ lw(a6, MemOperand(a0, OFFSET_OF(T, reg_init)) ); + __ swl(a6, MemOperand(a0, OFFSET_OF(T, swl_2) + 2) ); - __ lw(a7, MemOperand(a0, OFFSET_OF(T, mem_init)) ); - __ sw(a7, MemOperand(a0, OFFSET_OF(T, swl_3)) ); - __ lw(a7, MemOperand(a0, OFFSET_OF(T, reg_init)) ); - __ swl(a7, MemOperand(a0, OFFSET_OF(T, swl_3) + 3) ); + __ lw(a7, MemOperand(a0, OFFSET_OF(T, mem_init)) ); + __ sw(a7, MemOperand(a0, OFFSET_OF(T, swl_3)) ); + __ lw(a7, MemOperand(a0, OFFSET_OF(T, reg_init)) ); + __ swl(a7, MemOperand(a0, OFFSET_OF(T, swl_3) + 3) ); - // Test all combinations of SWR and vAddr. - __ lw(a4, MemOperand(a0, OFFSET_OF(T, mem_init)) ); - __ sw(a4, MemOperand(a0, OFFSET_OF(T, swr_0)) ); - __ lw(a4, MemOperand(a0, OFFSET_OF(T, reg_init)) ); - __ swr(a4, MemOperand(a0, OFFSET_OF(T, swr_0)) ); + // Test all combinations of SWR and vAddr. 
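The SWR block below, like the LWL/LWR/SWL blocks above, is sensitive to byte order; the CHECK_EQ constants at the end of the test are the little-endian results (mips64el). A host-side model that reproduces the lwl_* expectations under that assumption:

    #include <cstdint>
    #include <cstdio>

    // Little-endian LWL with address offset n merges the low n+1 memory bytes
    // into the high end of the register.
    uint32_t LwlLE(uint32_t reg, uint32_t mem, int n) {
      int shift = 8 * (3 - n);
      uint64_t keep = (1ull << shift) - 1;  // low register bytes that survive
      return (uint32_t)(((uint64_t)mem << shift) | (reg & keep));
    }

    int main() {
      // reg_init = 0xaabbccdd, mem_init = 0x11223344, offsets 0..3:
      for (int n = 0; n < 4; ++n)  // 44bbccdd 3344ccdd 223344dd 11223344
        printf("%08x\n", LwlLE(0xaabbccdd, 0x11223344, n));
    }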
+ __ lw(a4, MemOperand(a0, OFFSET_OF(T, mem_init)) ); + __ sw(a4, MemOperand(a0, OFFSET_OF(T, swr_0)) ); + __ lw(a4, MemOperand(a0, OFFSET_OF(T, reg_init)) ); + __ swr(a4, MemOperand(a0, OFFSET_OF(T, swr_0)) ); - __ lw(a5, MemOperand(a0, OFFSET_OF(T, mem_init)) ); - __ sw(a5, MemOperand(a0, OFFSET_OF(T, swr_1)) ); - __ lw(a5, MemOperand(a0, OFFSET_OF(T, reg_init)) ); - __ swr(a5, MemOperand(a0, OFFSET_OF(T, swr_1) + 1) ); + __ lw(a5, MemOperand(a0, OFFSET_OF(T, mem_init)) ); + __ sw(a5, MemOperand(a0, OFFSET_OF(T, swr_1)) ); + __ lw(a5, MemOperand(a0, OFFSET_OF(T, reg_init)) ); + __ swr(a5, MemOperand(a0, OFFSET_OF(T, swr_1) + 1) ); - __ lw(a6, MemOperand(a0, OFFSET_OF(T, mem_init)) ); - __ sw(a6, MemOperand(a0, OFFSET_OF(T, swr_2)) ); - __ lw(a6, MemOperand(a0, OFFSET_OF(T, reg_init)) ); - __ swr(a6, MemOperand(a0, OFFSET_OF(T, swr_2) + 2) ); + __ lw(a6, MemOperand(a0, OFFSET_OF(T, mem_init)) ); + __ sw(a6, MemOperand(a0, OFFSET_OF(T, swr_2)) ); + __ lw(a6, MemOperand(a0, OFFSET_OF(T, reg_init)) ); + __ swr(a6, MemOperand(a0, OFFSET_OF(T, swr_2) + 2) ); - __ lw(a7, MemOperand(a0, OFFSET_OF(T, mem_init)) ); - __ sw(a7, MemOperand(a0, OFFSET_OF(T, swr_3)) ); - __ lw(a7, MemOperand(a0, OFFSET_OF(T, reg_init)) ); - __ swr(a7, MemOperand(a0, OFFSET_OF(T, swr_3) + 3) ); + __ lw(a7, MemOperand(a0, OFFSET_OF(T, mem_init)) ); + __ sw(a7, MemOperand(a0, OFFSET_OF(T, swr_3)) ); + __ lw(a7, MemOperand(a0, OFFSET_OF(T, reg_init)) ); + __ swr(a7, MemOperand(a0, OFFSET_OF(T, swr_3) + 3) ); - __ jr(ra); - __ nop(); + __ jr(ra); + __ nop(); - CodeDesc desc; - assm.GetCode(&desc); - Handle code = isolate->factory()->NewCode( - desc, Code::ComputeFlags(Code::STUB), Handle()); - F3 f = FUNCTION_CAST(code->entry()); - t.reg_init = 0xaabbccdd; - t.mem_init = 0x11223344; + CodeDesc desc; + assm.GetCode(&desc); + Handle code = isolate->factory()->NewCode( + desc, Code::ComputeFlags(Code::STUB), Handle()); + F3 f = FUNCTION_CAST(code->entry()); + t.reg_init = 0xaabbccdd; + t.mem_init = 0x11223344; - Object* dummy = CALL_GENERATED_CODE(f, &t, 0, 0, 0, 0); - USE(dummy); + Object* dummy = CALL_GENERATED_CODE(f, &t, 0, 0, 0, 0); + USE(dummy); - CHECK_EQ(0x44bbccdd, t.lwl_0); - CHECK_EQ(0x3344ccdd, t.lwl_1); - CHECK_EQ(0x223344dd, t.lwl_2); - CHECK_EQ(0x11223344, t.lwl_3); + CHECK_EQ(0x44bbccdd, t.lwl_0); + CHECK_EQ(0x3344ccdd, t.lwl_1); + CHECK_EQ(0x223344dd, t.lwl_2); + CHECK_EQ(0x11223344, t.lwl_3); - CHECK_EQ(0x11223344, t.lwr_0); - CHECK_EQ(0xaa112233, t.lwr_1); - CHECK_EQ(0xaabb1122, t.lwr_2); - CHECK_EQ(0xaabbcc11, t.lwr_3); + CHECK_EQ(0x11223344, t.lwr_0); + CHECK_EQ(0xaa112233, t.lwr_1); + CHECK_EQ(0xaabb1122, t.lwr_2); + CHECK_EQ(0xaabbcc11, t.lwr_3); - CHECK_EQ(0x112233aa, t.swl_0); - CHECK_EQ(0x1122aabb, t.swl_1); - CHECK_EQ(0x11aabbcc, t.swl_2); - CHECK_EQ(0xaabbccdd, t.swl_3); + CHECK_EQ(0x112233aa, t.swl_0); + CHECK_EQ(0x1122aabb, t.swl_1); + CHECK_EQ(0x11aabbcc, t.swl_2); + CHECK_EQ(0xaabbccdd, t.swl_3); - CHECK_EQ(0xaabbccdd, t.swr_0); - CHECK_EQ(0xbbccdd44, t.swr_1); - CHECK_EQ(0xccdd3344, t.swr_2); - CHECK_EQ(0xdd223344, t.swr_3); + CHECK_EQ(0xaabbccdd, t.swr_0); + CHECK_EQ(0xbbccdd44, t.swr_1); + CHECK_EQ(0xccdd3344, t.swr_2); + CHECK_EQ(0xdd223344, t.swr_3); + } } diff --git a/test/cctest/test-disasm-mips64.cc b/test/cctest/test-disasm-mips64.cc index c615afbf46..d682d33480 100644 --- a/test/cctest/test-disasm-mips64.cc +++ b/test/cctest/test-disasm-mips64.cc @@ -122,65 +122,196 @@ TEST(Type0) { COMPARE(dsubu(v0, v1, s0), "0070102f dsubu v0, v1, s0"); - COMPARE(mult(a0, a1), - "00850018 mult a0, 
a1"); - COMPARE(dmult(a0, a1), - "0085001c dmult a0, a1"); - COMPARE(mult(a6, a7), - "014b0018 mult a6, a7"); - COMPARE(dmult(a6, a7), - "014b001c dmult a6, a7"); - COMPARE(mult(v0, v1), - "00430018 mult v0, v1"); - COMPARE(dmult(v0, v1), - "0043001c dmult v0, v1"); + if (kArchVariant != kMips64r6) { + COMPARE(mult(a0, a1), + "00850018 mult a0, a1"); + COMPARE(dmult(a0, a1), + "0085001c dmult a0, a1"); + COMPARE(mult(a6, a7), + "014b0018 mult a6, a7"); + COMPARE(dmult(a6, a7), + "014b001c dmult a6, a7"); + COMPARE(mult(v0, v1), + "00430018 mult v0, v1"); + COMPARE(dmult(v0, v1), + "0043001c dmult v0, v1"); - COMPARE(multu(a0, a1), - "00850019 multu a0, a1"); - COMPARE(dmultu(a0, a1), - "0085001d dmultu a0, a1"); - COMPARE(multu(a6, a7), - "014b0019 multu a6, a7"); - COMPARE(dmultu(a6, a7), - "014b001d dmultu a6, a7"); - COMPARE(multu(v0, v1), - "00430019 multu v0, v1"); - COMPARE(dmultu(v0, v1), - "0043001d dmultu v0, v1"); + COMPARE(multu(a0, a1), + "00850019 multu a0, a1"); + COMPARE(dmultu(a0, a1), + "0085001d dmultu a0, a1"); + COMPARE(multu(a6, a7), + "014b0019 multu a6, a7"); + COMPARE(dmultu(a6, a7), + "014b001d dmultu a6, a7"); + COMPARE(multu(v0, v1), + "00430019 multu v0, v1"); + COMPARE(dmultu(v0, v1), + "0043001d dmultu v0, v1"); - COMPARE(div(a0, a1), - "0085001a div a0, a1"); - COMPARE(div(a6, a7), - "014b001a div a6, a7"); - COMPARE(div(v0, v1), - "0043001a div v0, v1"); - COMPARE(ddiv(a0, a1), - "0085001e ddiv a0, a1"); - COMPARE(ddiv(a6, a7), - "014b001e ddiv a6, a7"); - COMPARE(ddiv(v0, v1), - "0043001e ddiv v0, v1"); + COMPARE(div(a0, a1), + "0085001a div a0, a1"); + COMPARE(div(a6, a7), + "014b001a div a6, a7"); + COMPARE(div(v0, v1), + "0043001a div v0, v1"); + COMPARE(ddiv(a0, a1), + "0085001e ddiv a0, a1"); + COMPARE(ddiv(a6, a7), + "014b001e ddiv a6, a7"); + COMPARE(ddiv(v0, v1), + "0043001e ddiv v0, v1"); - COMPARE(divu(a0, a1), - "0085001b divu a0, a1"); - COMPARE(divu(a6, a7), - "014b001b divu a6, a7"); - COMPARE(divu(v0, v1), - "0043001b divu v0, v1"); - COMPARE(ddivu(a0, a1), - "0085001f ddivu a0, a1"); - COMPARE(ddivu(a6, a7), - "014b001f ddivu a6, a7"); - COMPARE(ddivu(v0, v1), - "0043001f ddivu v0, v1"); - - if (kArchVariant != kLoongson) { + COMPARE(divu(a0, a1), + "0085001b divu a0, a1"); + COMPARE(divu(a6, a7), + "014b001b divu a6, a7"); + COMPARE(divu(v0, v1), + "0043001b divu v0, v1"); + COMPARE(ddivu(a0, a1), + "0085001f ddivu a0, a1"); + COMPARE(ddivu(a6, a7), + "014b001f ddivu a6, a7"); + COMPARE(ddivu(v0, v1), + "0043001f ddivu v0, v1"); COMPARE(mul(a0, a1, a2), "70a62002 mul a0, a1, a2"); COMPARE(mul(a6, a7, t0), "716c5002 mul a6, a7, t0"); COMPARE(mul(v0, v1, s0), "70701002 mul v0, v1, s0"); + } else { // MIPS64r6. 
+ COMPARE(mul(a0, a1, a2), + "00a62098 mul a0, a1, a2"); + COMPARE(muh(a0, a1, a2), + "00a620d8 muh a0, a1, a2"); + COMPARE(dmul(a0, a1, a2), + "00a6209c dmul a0, a1, a2"); + COMPARE(dmuh(a0, a1, a2), + "00a620dc dmuh a0, a1, a2"); + COMPARE(mul(a5, a6, a7), + "014b4898 mul a5, a6, a7"); + COMPARE(muh(a5, a6, a7), + "014b48d8 muh a5, a6, a7"); + COMPARE(dmul(a5, a6, a7), + "014b489c dmul a5, a6, a7"); + COMPARE(dmuh(a5, a6, a7), + "014b48dc dmuh a5, a6, a7"); + COMPARE(mul(v0, v1, a0), + "00641098 mul v0, v1, a0"); + COMPARE(muh(v0, v1, a0), + "006410d8 muh v0, v1, a0"); + COMPARE(dmul(v0, v1, a0), + "0064109c dmul v0, v1, a0"); + COMPARE(dmuh(v0, v1, a0), + "006410dc dmuh v0, v1, a0"); + + COMPARE(mulu(a0, a1, a2), + "00a62099 mulu a0, a1, a2"); + COMPARE(muhu(a0, a1, a2), + "00a620d9 muhu a0, a1, a2"); + COMPARE(dmulu(a0, a1, a2), + "00a6209d dmulu a0, a1, a2"); + COMPARE(dmuhu(a0, a1, a2), + "00a620dd dmuhu a0, a1, a2"); + COMPARE(mulu(a5, a6, a7), + "014b4899 mulu a5, a6, a7"); + COMPARE(muhu(a5, a6, a7), + "014b48d9 muhu a5, a6, a7"); + COMPARE(dmulu(a5, a6, a7), + "014b489d dmulu a5, a6, a7"); + COMPARE(dmuhu(a5, a6, a7), + "014b48dd dmuhu a5, a6, a7"); + COMPARE(mulu(v0, v1, a0), + "00641099 mulu v0, v1, a0"); + COMPARE(muhu(v0, v1, a0), + "006410d9 muhu v0, v1, a0"); + COMPARE(dmulu(v0, v1, a0), + "0064109d dmulu v0, v1, a0"); + COMPARE(dmuhu(v0, v1, a0), + "006410dd dmuhu v0, v1, a0"); + + COMPARE(div(a0, a1, a2), + "00a6209a div a0, a1, a2"); + COMPARE(mod(a0, a1, a2), + "00a620da mod a0, a1, a2"); + COMPARE(ddiv(a0, a1, a2), + "00a6209e ddiv a0, a1, a2"); + COMPARE(dmod(a0, a1, a2), + "00a620de dmod a0, a1, a2"); + COMPARE(div(a5, a6, a7), + "014b489a div a5, a6, a7"); + COMPARE(mod(a5, a6, a7), + "014b48da mod a5, a6, a7"); + COMPARE(ddiv(a5, a6, a7), + "014b489e ddiv a5, a6, a7"); + COMPARE(dmod(a5, a6, a7), + "014b48de dmod a5, a6, a7"); + COMPARE(div(v0, v1, a0), + "0064109a div v0, v1, a0"); + COMPARE(mod(v0, v1, a0), + "006410da mod v0, v1, a0"); + COMPARE(ddiv(v0, v1, a0), + "0064109e ddiv v0, v1, a0"); + COMPARE(dmod(v0, v1, a0), + "006410de dmod v0, v1, a0"); + + COMPARE(divu(a0, a1, a2), + "00a6209b divu a0, a1, a2"); + COMPARE(modu(a0, a1, a2), + "00a620db modu a0, a1, a2"); + COMPARE(ddivu(a0, a1, a2), + "00a6209f ddivu a0, a1, a2"); + COMPARE(dmodu(a0, a1, a2), + "00a620df dmodu a0, a1, a2"); + COMPARE(divu(a5, a6, a7), + "014b489b divu a5, a6, a7"); + COMPARE(modu(a5, a6, a7), + "014b48db modu a5, a6, a7"); + COMPARE(ddivu(a5, a6, a7), + "014b489f ddivu a5, a6, a7"); + COMPARE(dmodu(a5, a6, a7), + "014b48df dmodu a5, a6, a7"); + COMPARE(divu(v0, v1, a0), + "0064109b divu v0, v1, a0"); + COMPARE(modu(v0, v1, a0), + "006410db modu v0, v1, a0"); + COMPARE(ddivu(v0, v1, a0), + "0064109f ddivu v0, v1, a0"); + COMPARE(dmodu(v0, v1, a0), + "006410df dmodu v0, v1, a0"); + + COMPARE(bovc(a0, a0, static_cast(0)), + "20840000 bovc a0, a0, 0"); + COMPARE(bovc(a1, a0, static_cast(0)), + "20a40000 bovc a1, a0, 0"); + COMPARE(bovc(a1, a0, 32767), + "20a47fff bovc a1, a0, 32767"); + COMPARE(bovc(a1, a0, -32768), + "20a48000 bovc a1, a0, -32768"); + + COMPARE(bnvc(a0, a0, static_cast(0)), + "60840000 bnvc a0, a0, 0"); + COMPARE(bnvc(a1, a0, static_cast(0)), + "60a40000 bnvc a1, a0, 0"); + COMPARE(bnvc(a1, a0, 32767), + "60a47fff bnvc a1, a0, 32767"); + COMPARE(bnvc(a1, a0, -32768), + "60a48000 bnvc a1, a0, -32768"); + + COMPARE(beqzc(a0, 0), + "d8800000 beqzc a0, 0x0"); + COMPARE(beqzc(a0, 0xfffff), // 0x0fffff == 1048575. 
+ "d88fffff beqzc a0, 0xfffff"); + COMPARE(beqzc(a0, 0x100000), // 0x100000 == -1048576. + "d8900000 beqzc a0, 0x100000"); + + COMPARE(bnezc(a0, 0), + "f8800000 bnezc a0, 0x0"); + COMPARE(bnezc(a0, 0xfffff), // 0x0fffff == 1048575. + "f88fffff bnezc a0, 0xfffff"); + COMPARE(bnezc(a0, 0x100000), // 0x100000 == -1048576. + "f8900000 bnezc a0, 0x100000"); } COMPARE(addiu(a0, a1, 0x0), @@ -476,42 +607,48 @@ TEST(Type0) { "2d6a8000 sltiu a6, a7, -32768"); COMPARE(sltiu(v0, v1, -1), "2c62ffff sltiu v0, v1, -1"); + COMPARE(movz(a0, a1, a2), + "00a6200a movz a0, a1, a2"); + COMPARE(movz(s0, s1, s2), + "0232800a movz s0, s1, s2"); + COMPARE(movz(a6, a7, t0), + "016c500a movz a6, a7, t0"); + COMPARE(movz(v0, v1, a2), + "0066100a movz v0, v1, a2"); + COMPARE(movn(a0, a1, a2), + "00a6200b movn a0, a1, a2"); + COMPARE(movn(s0, s1, s2), + "0232800b movn s0, s1, s2"); + COMPARE(movn(a6, a7, t0), + "016c500b movn a6, a7, t0"); + COMPARE(movn(v0, v1, a2), + "0066100b movn v0, v1, a2"); - if (kArchVariant != kLoongson) { - COMPARE(movz(a0, a1, a2), - "00a6200a movz a0, a1, a2"); - COMPARE(movz(s0, s1, s2), - "0232800a movz s0, s1, s2"); - COMPARE(movz(a6, a7, t0), - "016c500a movz a6, a7, t0"); - COMPARE(movz(v0, v1, a2), - "0066100a movz v0, v1, a2"); - COMPARE(movn(a0, a1, a2), - "00a6200b movn a0, a1, a2"); - COMPARE(movn(s0, s1, s2), - "0232800b movn s0, s1, s2"); - COMPARE(movn(a6, a7, t0), - "016c500b movn a6, a7, t0"); - COMPARE(movn(v0, v1, a2), - "0066100b movn v0, v1, a2"); - - COMPARE(movt(a0, a1, 1), - "00a52001 movt a0, a1, 1"); - COMPARE(movt(s0, s1, 2), - "02298001 movt s0, s1, 2"); - COMPARE(movt(a6, a7, 3), - "016d5001 movt a6, a7, 3"); - COMPARE(movt(v0, v1, 7), - "007d1001 movt v0, v1, 7"); - COMPARE(movf(a0, a1, 0), - "00a02001 movf a0, a1, 0"); - COMPARE(movf(s0, s1, 4), - "02308001 movf s0, s1, 4"); - COMPARE(movf(a6, a7, 5), - "01745001 movf a6, a7, 5"); - COMPARE(movf(v0, v1, 6), - "00781001 movf v0, v1, 6"); + COMPARE(movt(a0, a1, 1), + "00a52001 movt a0, a1, 1"); + COMPARE(movt(s0, s1, 2), + "02298001 movt s0, s1, 2"); + COMPARE(movt(a6, a7, 3), + "016d5001 movt a6, a7, 3"); + COMPARE(movt(v0, v1, 7), + "007d1001 movt v0, v1, 7"); + COMPARE(movf(a0, a1, 0), + "00a02001 movf a0, a1, 0"); + COMPARE(movf(s0, s1, 4), + "02308001 movf s0, s1, 4"); + COMPARE(movf(a6, a7, 5), + "01745001 movf a6, a7, 5"); + COMPARE(movf(v0, v1, 6), + "00781001 movf v0, v1, 6"); + if (kArchVariant == kMips64r6) { + COMPARE(clz(a0, a1), + "00a02050 clz a0, a1"); + COMPARE(clz(s6, s7), + "02e0b050 clz s6, s7"); + COMPARE(clz(v0, v1), + "00601050 clz v0, v1"); + } else { COMPARE(clz(a0, a1), "70a42020 clz a0, a1"); COMPARE(clz(s6, s7), @@ -520,20 +657,18 @@ TEST(Type0) { "70621020 clz v0, v1"); } - if (kArchVariant == kMips64r2) { - COMPARE(ins_(a0, a1, 31, 1), - "7ca4ffc4 ins a0, a1, 31, 1"); - COMPARE(ins_(s6, s7, 30, 2), - "7ef6ff84 ins s6, s7, 30, 2"); - COMPARE(ins_(v0, v1, 0, 32), - "7c62f804 ins v0, v1, 0, 32"); - COMPARE(ext_(a0, a1, 31, 1), - "7ca407c0 ext a0, a1, 31, 1"); - COMPARE(ext_(s6, s7, 30, 2), - "7ef60f80 ext s6, s7, 30, 2"); - COMPARE(ext_(v0, v1, 0, 32), - "7c62f800 ext v0, v1, 0, 32"); - } + COMPARE(ins_(a0, a1, 31, 1), + "7ca4ffc4 ins a0, a1, 31, 1"); + COMPARE(ins_(s6, s7, 30, 2), + "7ef6ff84 ins s6, s7, 30, 2"); + COMPARE(ins_(v0, v1, 0, 32), + "7c62f804 ins v0, v1, 0, 32"); + COMPARE(ext_(a0, a1, 31, 1), + "7ca407c0 ext a0, a1, 31, 1"); + COMPARE(ext_(s6, s7, 30, 2), + "7ef60f80 ext s6, s7, 30, 2"); + COMPARE(ext_(v0, v1, 0, 32), + "7c62f800 ext v0, v1, 0, 32"); VERIFY_RUN(); } diff 
--git a/test/mjsunit/mjsunit.status b/test/mjsunit/mjsunit.status index e3de741dea..f329deb360 100644 --- a/test/mjsunit/mjsunit.status +++ b/test/mjsunit/mjsunit.status @@ -414,6 +414,9 @@ # Currently always deopt on minus zero 'math-floor-of-div-minus-zero': [SKIP], + + # BUG(v8:3457). + 'deserialize-reference': [SKIP], }], # 'arch == mips64el' ['arch == mips64el and simulator_run == False', {