MIPS64: Add support for architecture revision 6.

TEST=
BUG=
R=jkummerow@chromium.org, paul.lind@imgtec.com

Review URL: https://codereview.chromium.org/426863006

git-svn-id: https://v8.googlecode.com/svn/branches/bleeding_edge@22681 ce2b1a6d-e550-0410-aec6-3dcde31c8c00
This commit is contained in:
dusan.milosavljevic@imgtec.com 2014-07-29 18:02:26 +00:00
parent b337649d14
commit e0401f3f71
18 changed files with 2033 additions and 510 deletions

View File

@ -376,6 +376,14 @@
'cflags': ['-msoft-float'], 'cflags': ['-msoft-float'],
'ldflags': ['-msoft-float'], 'ldflags': ['-msoft-float'],
}], }],
['mips_arch_variant=="r6"', {
'cflags': ['-mips64r6', '-mabi=64', '-Wa,-mips64r6'],
'ldflags': [
'-mips64r6', '-mabi=64',
'-Wl,--dynamic-linker=$(LDSO_PATH)',
'-Wl,--rpath=$(LD_R_PATH)',
],
}],
['mips_arch_variant=="r2"', { ['mips_arch_variant=="r2"', {
'cflags': ['-mips64r2', '-mabi=64', '-Wa,-mips64r2'], 'cflags': ['-mips64r2', '-mabi=64', '-Wa,-mips64r2'],
'ldflags': [ 'ldflags': [
@ -384,9 +392,6 @@
'-Wl,--rpath=$(LD_R_PATH)', '-Wl,--rpath=$(LD_R_PATH)',
], ],
}], }],
['mips_arch_variant=="loongson"', {
'cflags': ['-mips3', '-Wa,-mips3'],
}],
], ],
}], }],
], ],
@ -406,12 +411,12 @@
'__mips_soft_float=1' '__mips_soft_float=1'
], ],
}], }],
['mips_arch_variant=="r6"', {
'defines': ['_MIPS_ARCH_MIPS64R6',],
}],
['mips_arch_variant=="r2"', { ['mips_arch_variant=="r2"', {
'defines': ['_MIPS_ARCH_MIPS64R2',], 'defines': ['_MIPS_ARCH_MIPS64R2',],
}], }],
['mips_arch_variant=="loongson"', {
'defines': ['_MIPS_ARCH_LOONGSON',],
}],
], ],
}], # v8_target_arch=="mips64el" }], # v8_target_arch=="mips64el"
['v8_target_arch=="x64"', { ['v8_target_arch=="x64"', {

View File

@ -485,7 +485,9 @@ bool Assembler::IsBranch(Instr instr) {
opcode == BGTZL || opcode == BGTZL ||
(opcode == REGIMM && (rt_field == BLTZ || rt_field == BGEZ || (opcode == REGIMM && (rt_field == BLTZ || rt_field == BGEZ ||
rt_field == BLTZAL || rt_field == BGEZAL)) || rt_field == BLTZAL || rt_field == BGEZAL)) ||
(opcode == COP1 && rs_field == BC1); // Coprocessor branch. (opcode == COP1 && rs_field == BC1) || // Coprocessor branch.
(opcode == COP1 && rs_field == BC1EQZ) ||
(opcode == COP1 && rs_field == BC1NEZ);
} }
@ -969,7 +971,6 @@ void Assembler::GenInstrJump(Opcode opcode,
// Returns the next free trampoline entry. // Returns the next free trampoline entry.
int32_t Assembler::get_trampoline_entry(int32_t pos) { int32_t Assembler::get_trampoline_entry(int32_t pos) {
int32_t trampoline_entry = kInvalidSlotPos; int32_t trampoline_entry = kInvalidSlotPos;
if (!internal_trampoline_exception_) { if (!internal_trampoline_exception_) {
if (trampoline_.start() > pos) { if (trampoline_.start() > pos) {
trampoline_entry = trampoline_.take_slot(); trampoline_entry = trampoline_.take_slot();
@ -985,7 +986,6 @@ int32_t Assembler::get_trampoline_entry(int32_t pos) {
uint64_t Assembler::jump_address(Label* L) { uint64_t Assembler::jump_address(Label* L) {
int64_t target_pos; int64_t target_pos;
if (L->is_bound()) { if (L->is_bound()) {
target_pos = L->pos(); target_pos = L->pos();
} else { } else {
@ -1007,7 +1007,6 @@ uint64_t Assembler::jump_address(Label* L) {
int32_t Assembler::branch_offset(Label* L, bool jump_elimination_allowed) { int32_t Assembler::branch_offset(Label* L, bool jump_elimination_allowed) {
int32_t target_pos; int32_t target_pos;
if (L->is_bound()) { if (L->is_bound()) {
target_pos = L->pos(); target_pos = L->pos();
} else { } else {
@ -1032,6 +1031,86 @@ int32_t Assembler::branch_offset(Label* L, bool jump_elimination_allowed) {
} }
// Returns the pc-relative byte offset for an r6 compact branch to label L.
// Compact branches have no delay slot, so the offset is taken relative to
// the branch instruction itself (plain pc_offset(), no kBranchPCOffset).
// Unbound labels are threaded onto the fixup chain and kEndOfChain is
// returned for a first reference.
int32_t Assembler::branch_offset_compact(Label* L,
                                         bool jump_elimination_allowed) {
  int32_t target_pos;
  if (L->is_bound()) {
    target_pos = L->pos();
  } else if (L->is_linked()) {
    // Thread this branch onto the label's fixup chain; the previous chain
    // head becomes the position encoded for now.
    target_pos = L->pos();
    L->link_to(pc_offset());
  } else {
    // First reference to this label: start the chain and account for a
    // possible trampoline slot.
    L->link_to(pc_offset());
    if (!trampoline_emitted_) {
      unbound_labels_count_++;
      next_buffer_check_ -= kTrampolineSlotsSize;
    }
    return kEndOfChain;
  }

  const int32_t delta = target_pos - pc_offset();
  ASSERT((delta & 3) == 0);               // Branch targets are word-aligned.
  ASSERT(is_int16(delta >> 2));           // Word offset must fit 16 bits.
  return delta;
}
// Returns the pc-relative byte offset for a 21-bit branch (beqzc/bnezc) to
// label L.  As for classic branches, the offset is relative to
// pc + kBranchPCOffset.  Unbound labels are linked into the fixup chain and
// kEndOfChain is returned for a first reference.
int32_t Assembler::branch_offset21(Label* L, bool jump_elimination_allowed) {
  int32_t target_pos;
  if (L->is_bound()) {
    target_pos = L->pos();
  } else {
    if (L->is_linked()) {
      // Point to previous instruction that uses the link.
      target_pos = L->pos();
      L->link_to(pc_offset());
    } else {
      L->link_to(pc_offset());
      if (!trampoline_emitted_) {
        unbound_labels_count_++;
        next_buffer_check_ -= kTrampolineSlotsSize;
      }
      return kEndOfChain;
    }
  }

  int32_t offset = target_pos - (pc_offset() + kBranchPCOffset);
  ASSERT((offset & 3) == 0);
  // The word offset must fit in 21 *signed* bits.  The previous check,
  // ((offset >> 2) & 0xFFE00000) == 0, treated the offset as unsigned and
  // therefore failed for every backward branch (negative offsets have their
  // high bits set).
  ASSERT((offset >> 2) >= -(1 << 20) && (offset >> 2) < (1 << 20));
  return offset;
}
// Returns the pc-relative byte offset for a 21-bit *compact* branch to label
// L.  Compact branches have no delay slot, so the offset is relative to the
// branch instruction itself (no kBranchPCOffset adjustment).  Unbound labels
// are linked into the fixup chain and kEndOfChain is returned for a first
// reference.
int32_t Assembler::branch_offset21_compact(Label* L,
                                           bool jump_elimination_allowed) {
  int32_t target_pos;
  if (L->is_bound()) {
    target_pos = L->pos();
  } else {
    if (L->is_linked()) {
      // Point to previous instruction that uses the link.
      target_pos = L->pos();
      L->link_to(pc_offset());
    } else {
      L->link_to(pc_offset());
      if (!trampoline_emitted_) {
        unbound_labels_count_++;
        next_buffer_check_ -= kTrampolineSlotsSize;
      }
      return kEndOfChain;
    }
  }

  int32_t offset = target_pos - pc_offset();
  ASSERT((offset & 3) == 0);
  // The word offset must fit in 21 *signed* bits.  The previous check,
  // ((offset >> 2) & 0xFFE00000) == 0, treated the offset as unsigned and
  // therefore failed for every backward branch (negative offsets have their
  // high bits set).
  ASSERT((offset >> 2) >= -(1 << 20) && (offset >> 2) < (1 << 20));
  return offset;
}
void Assembler::label_at_put(Label* L, int at_offset) { void Assembler::label_at_put(Label* L, int at_offset) {
int target_pos; int target_pos;
if (L->is_bound()) { if (L->is_bound()) {
@ -1085,7 +1164,33 @@ void Assembler::bgez(Register rs, int16_t offset) {
} }
// r6 compact branches: these are encoded by re-using the pre-r6 BLEZ/BLEZL
// opcodes with register-field combinations that were previously invalid
// (rs == rt, zero vs. non-zero registers), which is why the asserts below
// forbid zero_reg and equal register codes.

// bgezc rt, offset — encoded as BLEZL with rs == rt; rt must not be zero_reg.
void Assembler::bgezc(Register rt, int16_t offset) {
  ASSERT(kArchVariant == kMips64r6);
  ASSERT(!(rt.is(zero_reg)));
  GenInstrImmediate(BLEZL, rt, rt, offset);
}

// bgeuc rs, rt, offset — encoded as BLEZ with two distinct non-zero
// registers.
void Assembler::bgeuc(Register rs, Register rt, int16_t offset) {
  ASSERT(kArchVariant == kMips64r6);
  ASSERT(!(rs.is(zero_reg)));
  ASSERT(!(rt.is(zero_reg)));
  ASSERT(rs.code() != rt.code());
  GenInstrImmediate(BLEZ, rs, rt, offset);
}

// bgec rs, rt, offset — encoded as BLEZL with two distinct non-zero
// registers.
void Assembler::bgec(Register rs, Register rt, int16_t offset) {
  ASSERT(kArchVariant == kMips64r6);
  ASSERT(!(rs.is(zero_reg)));
  ASSERT(!(rt.is(zero_reg)));
  ASSERT(rs.code() != rt.code());
  GenInstrImmediate(BLEZL, rs, rt, offset);
}
void Assembler::bgezal(Register rs, int16_t offset) { void Assembler::bgezal(Register rs, int16_t offset) {
ASSERT(kArchVariant != kMips64r6 || rs.is(zero_reg));
BlockTrampolinePoolScope block_trampoline_pool(this); BlockTrampolinePoolScope block_trampoline_pool(this);
positions_recorder()->WriteRecordedPositions(); positions_recorder()->WriteRecordedPositions();
GenInstrImmediate(REGIMM, rs, BGEZAL, offset); GenInstrImmediate(REGIMM, rs, BGEZAL, offset);
@ -1100,6 +1205,13 @@ void Assembler::bgtz(Register rs, int16_t offset) {
} }
// bgtzc rt, offset — r6 compact branch, encoded as BGTZL with rs == zero_reg
// and a non-zero rt.
void Assembler::bgtzc(Register rt, int16_t offset) {
  ASSERT(kArchVariant == kMips64r6);
  ASSERT(!(rt.is(zero_reg)));
  GenInstrImmediate(BGTZL, zero_reg, rt, offset);
}
void Assembler::blez(Register rs, int16_t offset) { void Assembler::blez(Register rs, int16_t offset) {
BlockTrampolinePoolScope block_trampoline_pool(this); BlockTrampolinePoolScope block_trampoline_pool(this);
GenInstrImmediate(BLEZ, rs, zero_reg, offset); GenInstrImmediate(BLEZ, rs, zero_reg, offset);
@ -1107,6 +1219,38 @@ void Assembler::blez(Register rs, int16_t offset) {
} }
// blezc rt, offset — r6 compact branch, encoded as BLEZL with rs == zero_reg
// and a non-zero rt.
void Assembler::blezc(Register rt, int16_t offset) {
  ASSERT(kArchVariant == kMips64r6);
  ASSERT(!(rt.is(zero_reg)));
  GenInstrImmediate(BLEZL, zero_reg, rt, offset);
}

// bltzc rt, offset — r6 compact branch, encoded as BGTZL with rs == rt.
void Assembler::bltzc(Register rt, int16_t offset) {
  ASSERT(kArchVariant == kMips64r6);
  ASSERT(!(rt.is(zero_reg)));
  GenInstrImmediate(BGTZL, rt, rt, offset);
}

// bltuc rs, rt, offset — unsigned compare form, encoded as BGTZ with two
// distinct non-zero registers.
void Assembler::bltuc(Register rs, Register rt, int16_t offset) {
  ASSERT(kArchVariant == kMips64r6);
  ASSERT(!(rs.is(zero_reg)));
  ASSERT(!(rt.is(zero_reg)));
  ASSERT(rs.code() != rt.code());
  GenInstrImmediate(BGTZ, rs, rt, offset);
}

// bltc rs, rt, offset — encoded as BGTZL with two distinct non-zero
// registers.
void Assembler::bltc(Register rs, Register rt, int16_t offset) {
  ASSERT(kArchVariant == kMips64r6);
  ASSERT(!(rs.is(zero_reg)));
  ASSERT(!(rt.is(zero_reg)));
  ASSERT(rs.code() != rt.code());
  GenInstrImmediate(BGTZL, rs, rt, offset);
}
void Assembler::bltz(Register rs, int16_t offset) { void Assembler::bltz(Register rs, int16_t offset) {
BlockTrampolinePoolScope block_trampoline_pool(this); BlockTrampolinePoolScope block_trampoline_pool(this);
GenInstrImmediate(REGIMM, rs, BLTZ, offset); GenInstrImmediate(REGIMM, rs, BLTZ, offset);
@ -1115,6 +1259,7 @@ void Assembler::bltz(Register rs, int16_t offset) {
void Assembler::bltzal(Register rs, int16_t offset) { void Assembler::bltzal(Register rs, int16_t offset) {
ASSERT(kArchVariant != kMips64r6 || rs.is(zero_reg));
BlockTrampolinePoolScope block_trampoline_pool(this); BlockTrampolinePoolScope block_trampoline_pool(this);
positions_recorder()->WriteRecordedPositions(); positions_recorder()->WriteRecordedPositions();
GenInstrImmediate(REGIMM, rs, BLTZAL, offset); GenInstrImmediate(REGIMM, rs, BLTZAL, offset);
@ -1129,6 +1274,101 @@ void Assembler::bne(Register rs, Register rt, int16_t offset) {
} }
// r6 compact branch-and-link / overflow branches.  These re-use the encoding
// space of the removed ADDI/DADDI immediate instructions and of BLEZ/BGTZ
// with register combinations that were invalid pre-r6; the register-code
// asserts below enforce the combination that selects each variant.

// bovc rs, rt, offset — encoded in the ADDI opcode space; requires
// rs.code() >= rt.code().
void Assembler::bovc(Register rs, Register rt, int16_t offset) {
  ASSERT(kArchVariant == kMips64r6);
  ASSERT(!(rs.is(zero_reg)));
  ASSERT(rs.code() >= rt.code());
  GenInstrImmediate(ADDI, rs, rt, offset);
}

// bnvc rs, rt, offset — encoded in the DADDI opcode space; requires
// rs.code() >= rt.code().
void Assembler::bnvc(Register rs, Register rt, int16_t offset) {
  ASSERT(kArchVariant == kMips64r6);
  ASSERT(!(rs.is(zero_reg)));
  ASSERT(rs.code() >= rt.code());
  GenInstrImmediate(DADDI, rs, rt, offset);
}

// blezalc rt, offset — BLEZ opcode, rs == zero_reg, non-zero rt.
void Assembler::blezalc(Register rt, int16_t offset) {
  ASSERT(kArchVariant == kMips64r6);
  ASSERT(!(rt.is(zero_reg)));
  GenInstrImmediate(BLEZ, zero_reg, rt, offset);
}

// bgezalc rt, offset — BLEZ opcode, rs == rt, non-zero rt.
void Assembler::bgezalc(Register rt, int16_t offset) {
  ASSERT(kArchVariant == kMips64r6);
  ASSERT(!(rt.is(zero_reg)));
  GenInstrImmediate(BLEZ, rt, rt, offset);
}

// bgezall rs, offset — classic REGIMM branch-likely-and-link encoding.
// NOTE(review): the r6-only assert here looks inverted — BGEZALL is a pre-r6
// branch-likely instruction; confirm the intended variant gating.
void Assembler::bgezall(Register rs, int16_t offset) {
  ASSERT(kArchVariant == kMips64r6);
  ASSERT(!(rs.is(zero_reg)));
  GenInstrImmediate(REGIMM, rs, BGEZALL, offset);
}

// bltzalc rt, offset — BGTZ opcode, rs == rt, non-zero rt.
void Assembler::bltzalc(Register rt, int16_t offset) {
  ASSERT(kArchVariant == kMips64r6);
  ASSERT(!(rt.is(zero_reg)));
  GenInstrImmediate(BGTZ, rt, rt, offset);
}

// bgtzalc rt, offset — BGTZ opcode, rs == zero_reg, non-zero rt.
void Assembler::bgtzalc(Register rt, int16_t offset) {
  ASSERT(kArchVariant == kMips64r6);
  ASSERT(!(rt.is(zero_reg)));
  GenInstrImmediate(BGTZ, zero_reg, rt, offset);
}

// beqzalc rt, offset — ADDI opcode space, rs == zero_reg, non-zero rt.
void Assembler::beqzalc(Register rt, int16_t offset) {
  ASSERT(kArchVariant == kMips64r6);
  ASSERT(!(rt.is(zero_reg)));
  GenInstrImmediate(ADDI, zero_reg, rt, offset);
}

// bnezalc rt, offset — DADDI opcode space, rs == zero_reg, non-zero rt.
void Assembler::bnezalc(Register rt, int16_t offset) {
  ASSERT(kArchVariant == kMips64r6);
  ASSERT(!(rt.is(zero_reg)));
  GenInstrImmediate(DADDI, zero_reg, rt, offset);
}

// beqc rs, rt, offset — ADDI opcode space; requires rs.code() < rt.code()
// (the opposite ordering selects bovc above).
void Assembler::beqc(Register rs, Register rt, int16_t offset) {
  ASSERT(kArchVariant == kMips64r6);
  ASSERT(rs.code() < rt.code());
  GenInstrImmediate(ADDI, rs, rt, offset);
}
// beqzc rs, offset — r6 compact branch-if-zero with a 21-bit word offset
// occupying instruction bits [20:0].
void Assembler::beqzc(Register rs, int32_t offset) {
  ASSERT(kArchVariant == kMips64r6);
  ASSERT(!(rs.is(zero_reg)));
  // Mask the offset to its 21-bit field before or'ing it in: a negative
  // int32_t offset has its high bits set and would otherwise clobber the
  // opcode and rs fields (cf. the kImm16Mask masking in bc1eqz/bc1nez).
  Instr instr = BEQZC | (rs.code() << kRsShift) | (offset & 0x1FFFFF);
  emit(instr);
}
// bnec rs, rt, offset — r6 compact branch, DADDI opcode space; requires
// rs.code() < rt.code() (the opposite ordering selects bnvc).
void Assembler::bnec(Register rs, Register rt, int16_t offset) {
  ASSERT(kArchVariant == kMips64r6);
  ASSERT(rs.code() < rt.code());
  GenInstrImmediate(DADDI, rs, rt, offset);
}
// bnezc rs, offset — r6 compact branch-if-nonzero with a 21-bit word offset
// occupying instruction bits [20:0].
void Assembler::bnezc(Register rs, int32_t offset) {
  ASSERT(kArchVariant == kMips64r6);
  ASSERT(!(rs.is(zero_reg)));
  // Mask the offset to its 21-bit field before or'ing it in: a negative
  // int32_t offset has its high bits set and would otherwise clobber the
  // opcode and rs fields (cf. the kImm16Mask masking in bc1eqz/bc1nez).
  Instr instr = BNEZC | (rs.code() << kRsShift) | (offset & 0x1FFFFF);
  emit(instr);
}
void Assembler::j(int64_t target) { void Assembler::j(int64_t target) {
#if DEBUG #if DEBUG
// Get pc of delay slot. // Get pc of delay slot.
@ -1142,12 +1382,16 @@ void Assembler::j(int64_t target) {
void Assembler::jr(Register rs) { void Assembler::jr(Register rs) {
BlockTrampolinePoolScope block_trampoline_pool(this); if (kArchVariant != kMips64r6) {
if (rs.is(ra)) { BlockTrampolinePoolScope block_trampoline_pool(this);
positions_recorder()->WriteRecordedPositions(); if (rs.is(ra)) {
positions_recorder()->WriteRecordedPositions();
}
GenInstrRegister(SPECIAL, rs, zero_reg, zero_reg, 0, JR);
BlockTrampolinePoolFor(1); // For associated delay slot.
} else {
jalr(rs, zero_reg);
} }
GenInstrRegister(SPECIAL, rs, zero_reg, zero_reg, 0, JR);
BlockTrampolinePoolFor(1); // For associated delay slot.
} }
@ -1218,16 +1462,64 @@ void Assembler::subu(Register rd, Register rs, Register rt) {
void Assembler::mul(Register rd, Register rs, Register rt) { void Assembler::mul(Register rd, Register rs, Register rt) {
GenInstrRegister(SPECIAL2, rs, rt, rd, 0, MUL); if (kArchVariant == kMips64r6) {
GenInstrRegister(SPECIAL, rs, rt, rd, MUL_OP, MUL_MUH);
} else {
GenInstrRegister(SPECIAL2, rs, rt, rd, 0, MUL);
}
}
// r6 three-operand multiply family.  Each pair of instructions shares a
// SPECIAL function field (MUL_MUH, MUL_MUH_U, D_MUL_MUH, D_MUL_MUH_U) and is
// distinguished by the sa-field value MUL_OP (low word of the product) vs.
// MUH_OP (high word).

// muh rd, rs, rt — signed 32-bit multiply, high word into rd.
void Assembler::muh(Register rd, Register rs, Register rt) {
  ASSERT(kArchVariant == kMips64r6);
  GenInstrRegister(SPECIAL, rs, rt, rd, MUH_OP, MUL_MUH);
}

// mulu rd, rs, rt — unsigned 32-bit multiply, low word into rd.
void Assembler::mulu(Register rd, Register rs, Register rt) {
  ASSERT(kArchVariant == kMips64r6);
  GenInstrRegister(SPECIAL, rs, rt, rd, MUL_OP, MUL_MUH_U);
}

// muhu rd, rs, rt — unsigned 32-bit multiply, high word into rd.
void Assembler::muhu(Register rd, Register rs, Register rt) {
  ASSERT(kArchVariant == kMips64r6);
  GenInstrRegister(SPECIAL, rs, rt, rd, MUH_OP, MUL_MUH_U);
}

// dmul rd, rs, rt — signed 64-bit multiply, low doubleword into rd.
void Assembler::dmul(Register rd, Register rs, Register rt) {
  ASSERT(kArchVariant == kMips64r6);
  GenInstrRegister(SPECIAL, rs, rt, rd, MUL_OP, D_MUL_MUH);
}

// dmuh rd, rs, rt — signed 64-bit multiply, high doubleword into rd.
void Assembler::dmuh(Register rd, Register rs, Register rt) {
  ASSERT(kArchVariant == kMips64r6);
  GenInstrRegister(SPECIAL, rs, rt, rd, MUH_OP, D_MUL_MUH);
}

// dmulu rd, rs, rt — unsigned 64-bit multiply, low doubleword into rd.
void Assembler::dmulu(Register rd, Register rs, Register rt) {
  ASSERT(kArchVariant == kMips64r6);
  GenInstrRegister(SPECIAL, rs, rt, rd, MUL_OP, D_MUL_MUH_U);
}
void Assembler::dmuhu(Register rd, Register rs, Register rt) {
ASSERT(kArchVariant == kMips64r6);
GenInstrRegister(SPECIAL, rs, rt, rd, MUH_OP, D_MUL_MUH_U);
} }
void Assembler::mult(Register rs, Register rt) { void Assembler::mult(Register rs, Register rt) {
ASSERT(kArchVariant != kMips64r6);
GenInstrRegister(SPECIAL, rs, rt, zero_reg, 0, MULT); GenInstrRegister(SPECIAL, rs, rt, zero_reg, 0, MULT);
} }
void Assembler::multu(Register rs, Register rt) { void Assembler::multu(Register rs, Register rt) {
ASSERT(kArchVariant != kMips64r6);
GenInstrRegister(SPECIAL, rs, rt, zero_reg, 0, MULTU); GenInstrRegister(SPECIAL, rs, rt, zero_reg, 0, MULTU);
} }
@ -1242,11 +1534,35 @@ void Assembler::div(Register rs, Register rt) {
} }
// r6 three-operand divide/modulo (signed, 32-bit).  Shares the DIV_MOD
// function field; DIV_OP selects the quotient, MOD_OP the remainder.
void Assembler::div(Register rd, Register rs, Register rt) {
  ASSERT(kArchVariant == kMips64r6);
  GenInstrRegister(SPECIAL, rs, rt, rd, DIV_OP, DIV_MOD);
}

// mod rd, rs, rt — signed 32-bit remainder into rd.
void Assembler::mod(Register rd, Register rs, Register rt) {
  ASSERT(kArchVariant == kMips64r6);
  GenInstrRegister(SPECIAL, rs, rt, rd, MOD_OP, DIV_MOD);
}
void Assembler::divu(Register rs, Register rt) { void Assembler::divu(Register rs, Register rt) {
GenInstrRegister(SPECIAL, rs, rt, zero_reg, 0, DIVU); GenInstrRegister(SPECIAL, rs, rt, zero_reg, 0, DIVU);
} }
// r6 three-operand divide/modulo (unsigned, 32-bit), DIV_MOD_U function
// field; DIV_OP selects the quotient, MOD_OP the remainder.
void Assembler::divu(Register rd, Register rs, Register rt) {
  ASSERT(kArchVariant == kMips64r6);
  GenInstrRegister(SPECIAL, rs, rt, rd, DIV_OP, DIV_MOD_U);
}

// modu rd, rs, rt — unsigned 32-bit remainder into rd.
void Assembler::modu(Register rd, Register rs, Register rt) {
  ASSERT(kArchVariant == kMips64r6);
  GenInstrRegister(SPECIAL, rs, rt, rd, MOD_OP, DIV_MOD_U);
}
void Assembler::daddu(Register rd, Register rs, Register rt) { void Assembler::daddu(Register rd, Register rs, Register rt) {
GenInstrRegister(SPECIAL, rs, rt, rd, 0, DADDU); GenInstrRegister(SPECIAL, rs, rt, rd, 0, DADDU);
} }
@ -1272,11 +1588,35 @@ void Assembler::ddiv(Register rs, Register rt) {
} }
// r6 three-operand divide/modulo (signed, 64-bit), D_DIV_MOD function field;
// DIV_OP selects the quotient, MOD_OP the remainder.
void Assembler::ddiv(Register rd, Register rs, Register rt) {
  ASSERT(kArchVariant == kMips64r6);
  GenInstrRegister(SPECIAL, rs, rt, rd, DIV_OP, D_DIV_MOD);
}

// dmod rd, rs, rt — signed 64-bit remainder into rd.
void Assembler::dmod(Register rd, Register rs, Register rt) {
  ASSERT(kArchVariant == kMips64r6);
  GenInstrRegister(SPECIAL, rs, rt, rd, MOD_OP, D_DIV_MOD);
}
void Assembler::ddivu(Register rs, Register rt) { void Assembler::ddivu(Register rs, Register rt) {
GenInstrRegister(SPECIAL, rs, rt, zero_reg, 0, DDIVU); GenInstrRegister(SPECIAL, rs, rt, zero_reg, 0, DDIVU);
} }
// r6 three-operand divide/modulo (unsigned, 64-bit), D_DIV_MOD_U function
// field; DIV_OP selects the quotient, MOD_OP the remainder.
void Assembler::ddivu(Register rd, Register rs, Register rt) {
  ASSERT(kArchVariant == kMips64r6);
  GenInstrRegister(SPECIAL, rs, rt, rd, DIV_OP, D_DIV_MOD_U);
}

// dmodu rd, rs, rt — unsigned 64-bit remainder into rd.
void Assembler::dmodu(Register rd, Register rs, Register rt) {
  ASSERT(kArchVariant == kMips64r6);
  GenInstrRegister(SPECIAL, rs, rt, rd, MOD_OP, D_DIV_MOD_U);
}
// Logical. // Logical.
void Assembler::and_(Register rd, Register rs, Register rt) { void Assembler::and_(Register rd, Register rs, Register rt) {
@ -1566,6 +1906,32 @@ void Assembler::lui(Register rd, int32_t j) {
} }
// aui rt, rs, imm — add upper immediate.
void Assembler::aui(Register rs, Register rt, int32_t j) {
  // This instruction uses same opcode as 'lui'. The difference in encoding is
  // 'lui' has zero reg. for rs field.
  ASSERT(is_uint16(j));
  GenInstrImmediate(LUI, rs, rt, j);
}

// daui rt, rs, imm — doubleword variant in its own DAUI opcode.
void Assembler::daui(Register rs, Register rt, int32_t j) {
  ASSERT(is_uint16(j));
  GenInstrImmediate(DAUI, rs, rt, j);
}

// dahi rs, imm — encoded via REGIMM with the DAHI rt-field code.
// NOTE(review): is_uint16 rejects negative immediates; confirm callers only
// pass zero-extended 16-bit values.
void Assembler::dahi(Register rs, int32_t j) {
  ASSERT(is_uint16(j));
  GenInstrImmediate(REGIMM, rs, DAHI, j);
}

// dati rs, imm — encoded via REGIMM with the DATI rt-field code.
void Assembler::dati(Register rs, int32_t j) {
  ASSERT(is_uint16(j));
  GenInstrImmediate(REGIMM, rs, DATI, j);
}
void Assembler::ldl(Register rd, const MemOperand& rs) { void Assembler::ldl(Register rd, const MemOperand& rs) {
GenInstrImmediate(LDL, rs.rm(), rd, rs.offset_); GenInstrImmediate(LDL, rs.rm(), rd, rs.offset_);
} }
@ -1747,17 +2113,73 @@ void Assembler::movf(Register rd, Register rs, uint16_t cc) {
} }
// sel.fmt — r6 FP select.  Only the single (S) and double (D) formats are
// valid.
void Assembler::sel(SecondaryField fmt, FPURegister fd,
                    FPURegister ft, FPURegister fs, uint8_t sel) {
  ASSERT(kArchVariant == kMips64r6);
  // The original pair of asserts (fmt == D; fmt == S) could never both hold,
  // so every debug-mode call failed.  The intent — as in min/mina/max/maxa —
  // is "fmt is D or S".
  ASSERT((fmt == D) || (fmt == S));
  Instr instr = COP1 | fmt << kRsShift | ft.code() << kFtShift |
      fs.code() << kFsShift | fd.code() << kFdShift | SEL;
  emit(instr);
}
// GPR.
// seleqz — r6 integer conditional select, SPECIAL opcode with the SELEQZ_S
// function field.
// NOTE(review): the (rs, rt, rd) parameter order differs from the other
// three-operand GPR emitters in this file (e.g. mul(rd, rs, rt)); confirm
// call sites pass operands in the intended order.
void Assembler::seleqz(Register rs, Register rt, Register rd) {
  ASSERT(kArchVariant == kMips64r6);
  GenInstrRegister(SPECIAL, rs, rt, rd, 0, SELEQZ_S);
}
// FPR.
// seleqz.fmt — r6 FP conditional select (SELEQZ_C function field).  Only the
// single (S) and double (D) formats are valid.
void Assembler::seleqz(SecondaryField fmt, FPURegister fd,
                       FPURegister ft, FPURegister fs) {
  ASSERT(kArchVariant == kMips64r6);
  // The original pair of asserts (fmt == D; fmt == S) could never both hold,
  // so every debug-mode call failed.  The intent — as in min/mina/max/maxa —
  // is "fmt is D or S".
  ASSERT((fmt == D) || (fmt == S));
  Instr instr = COP1 | fmt << kRsShift | ft.code() << kFtShift |
      fs.code() << kFsShift | fd.code() << kFdShift | SELEQZ_C;
  emit(instr);
}
// GPR.
// selnez — r6 integer conditional select, SPECIAL opcode with the SELNEZ_S
// function field.
// NOTE(review): same (rs, rt, rd) parameter-order concern as seleqz above;
// confirm call sites.
void Assembler::selnez(Register rs, Register rt, Register rd) {
  ASSERT(kArchVariant == kMips64r6);
  GenInstrRegister(SPECIAL, rs, rt, rd, 0, SELNEZ_S);
}
// FPR.
// selnez.fmt — r6 FP conditional select (SELNEZ_C function field).  Only the
// single (S) and double (D) formats are valid.
void Assembler::selnez(SecondaryField fmt, FPURegister fd,
                       FPURegister ft, FPURegister fs) {
  ASSERT(kArchVariant == kMips64r6);
  // The original pair of asserts (fmt == D; fmt == S) could never both hold,
  // so every debug-mode call failed.  The intent — as in min/mina/max/maxa —
  // is "fmt is D or S".
  ASSERT((fmt == D) || (fmt == S));
  Instr instr = COP1 | fmt << kRsShift | ft.code() << kFtShift |
      fs.code() << kFsShift | fd.code() << kFdShift | SELNEZ_C;
  emit(instr);
}
// Bit twiddling. // Bit twiddling.
void Assembler::clz(Register rd, Register rs) { void Assembler::clz(Register rd, Register rs) {
// Clz instr requires same GPR number in 'rd' and 'rt' fields. if (kArchVariant != kMips64r6) {
GenInstrRegister(SPECIAL2, rs, rd, rd, 0, CLZ); // Clz instr requires same GPR number in 'rd' and 'rt' fields.
GenInstrRegister(SPECIAL2, rs, rd, rd, 0, CLZ);
} else {
GenInstrRegister(SPECIAL, rs, zero_reg, rd, 1, CLZ_R6);
}
} }
void Assembler::ins_(Register rt, Register rs, uint16_t pos, uint16_t size) { void Assembler::ins_(Register rt, Register rs, uint16_t pos, uint16_t size) {
// Should be called via MacroAssembler::Ins. // Should be called via MacroAssembler::Ins.
// Ins instr has 'rt' field as dest, and two uint5: msb, lsb. // Ins instr has 'rt' field as dest, and two uint5: msb, lsb.
ASSERT(kArchVariant == kMips64r2); ASSERT((kArchVariant == kMips64r2) || (kArchVariant == kMips64r6));
GenInstrRegister(SPECIAL3, rs, rt, pos + size - 1, pos, INS); GenInstrRegister(SPECIAL3, rs, rt, pos + size - 1, pos, INS);
} }
@ -1765,13 +2187,12 @@ void Assembler::ins_(Register rt, Register rs, uint16_t pos, uint16_t size) {
void Assembler::ext_(Register rt, Register rs, uint16_t pos, uint16_t size) { void Assembler::ext_(Register rt, Register rs, uint16_t pos, uint16_t size) {
// Should be called via MacroAssembler::Ext. // Should be called via MacroAssembler::Ext.
// Ext instr has 'rt' field as dest, and two uint5: msb, lsb. // Ext instr has 'rt' field as dest, and two uint5: msb, lsb.
ASSERT(kArchVariant == kMips64r2); ASSERT(kArchVariant == kMips64r2 || kArchVariant == kMips64r6);
GenInstrRegister(SPECIAL3, rs, rt, size - 1, pos, EXT); GenInstrRegister(SPECIAL3, rs, rt, size - 1, pos, EXT);
} }
void Assembler::pref(int32_t hint, const MemOperand& rs) { void Assembler::pref(int32_t hint, const MemOperand& rs) {
ASSERT(kArchVariant != kLoongson);
ASSERT(is_uint5(hint) && is_uint16(rs.offset_)); ASSERT(is_uint5(hint) && is_uint16(rs.offset_));
Instr instr = PREF | (rs.rm().code() << kRsShift) | (hint << kRtShift) Instr instr = PREF | (rs.rm().code() << kRsShift) | (hint << kRtShift)
| (rs.offset_); | (rs.offset_);
@ -1870,7 +2291,6 @@ void Assembler::mul_d(FPURegister fd, FPURegister fs, FPURegister ft) {
void Assembler::madd_d(FPURegister fd, FPURegister fr, FPURegister fs, void Assembler::madd_d(FPURegister fd, FPURegister fr, FPURegister fs,
FPURegister ft) { FPURegister ft) {
ASSERT(kArchVariant != kLoongson);
GenInstrRegister(COP1X, fr, ft, fs, fd, MADD_D); GenInstrRegister(COP1X, fr, ft, fs, fd, MADD_D);
} }
@ -2006,6 +2426,38 @@ void Assembler::ceil_l_d(FPURegister fd, FPURegister fs) {
} }
// r6 FP min/max family.  All four take the format first and encode as
// COP1 with fmt in the rs field; only single (S) and double (D) formats are
// valid.  The MINA/MAXA function fields select the absolute-value variants.

// min.fmt fd, fs, ft.
void Assembler::min(SecondaryField fmt, FPURegister fd, FPURegister ft,
    FPURegister fs) {
  ASSERT(kArchVariant == kMips64r6);
  ASSERT((fmt == D) || (fmt == S));
  GenInstrRegister(COP1, fmt, ft, fs, fd, MIN);
}

// mina.fmt fd, fs, ft — absolute-value variant.
void Assembler::mina(SecondaryField fmt, FPURegister fd, FPURegister ft,
    FPURegister fs) {
  ASSERT(kArchVariant == kMips64r6);
  ASSERT((fmt == D) || (fmt == S));
  GenInstrRegister(COP1, fmt, ft, fs, fd, MINA);
}

// max.fmt fd, fs, ft.
void Assembler::max(SecondaryField fmt, FPURegister fd, FPURegister ft,
    FPURegister fs) {
  ASSERT(kArchVariant == kMips64r6);
  ASSERT((fmt == D) || (fmt == S));
  GenInstrRegister(COP1, fmt, ft, fs, fd, MAX);
}

// maxa.fmt fd, fs, ft — absolute-value variant.
void Assembler::maxa(SecondaryField fmt, FPURegister fd, FPURegister ft,
    FPURegister fs) {
  ASSERT(kArchVariant == kMips64r6);
  ASSERT((fmt == D) || (fmt == S));
  GenInstrRegister(COP1, fmt, ft, fs, fd, MAXA);
}
void Assembler::cvt_s_w(FPURegister fd, FPURegister fs) { void Assembler::cvt_s_w(FPURegister fd, FPURegister fs) {
GenInstrRegister(COP1, W, f0, fs, fd, CVT_S_W); GenInstrRegister(COP1, W, f0, fs, fd, CVT_S_W);
} }
@ -2038,12 +2490,38 @@ void Assembler::cvt_d_s(FPURegister fd, FPURegister fs) {
} }
// Conditions. // Conditions for >= MIPSr6.
// cmp.cond.fmt — r6 FP compare, writing an all-ones/all-zeros mask into fd.
// fmt is pre-shifted into the rs field (hence the mask assert).
// NOTE(review): the header declares this as cmp(cond, fmt, fd, ft, fs) while
// the definition names the last two parameters (fs, ft) — confirm the
// intended operand order at call sites.
void Assembler::cmp(FPUCondition cond, SecondaryField fmt,
    FPURegister fd, FPURegister fs, FPURegister ft) {
  ASSERT(kArchVariant == kMips64r6);
  ASSERT((fmt & ~(31 << kRsShift)) == 0);
  Instr instr = COP1 | fmt | ft.code() << kFtShift |
      fs.code() << kFsShift | fd.code() << kFdShift | (0 << 5) | cond;
  emit(instr);
}

// bc1eqz — r6 FP branch; the 16-bit offset is masked into the low bits.
void Assembler::bc1eqz(int16_t offset, FPURegister ft) {
  ASSERT(kArchVariant == kMips64r6);
  Instr instr = COP1 | BC1EQZ | ft.code() << kFtShift | (offset & kImm16Mask);
  emit(instr);
}

// bc1nez — r6 FP branch; the 16-bit offset is masked into the low bits.
void Assembler::bc1nez(int16_t offset, FPURegister ft) {
  ASSERT(kArchVariant == kMips64r6);
  Instr instr = COP1 | BC1NEZ | ft.code() << kFtShift | (offset & kImm16Mask);
  emit(instr);
}
// Conditions for < MIPSr6.
void Assembler::c(FPUCondition cond, SecondaryField fmt, void Assembler::c(FPUCondition cond, SecondaryField fmt,
FPURegister fs, FPURegister ft, uint16_t cc) { FPURegister fs, FPURegister ft, uint16_t cc) {
ASSERT(kArchVariant != kMips64r6);
ASSERT(is_uint3(cc)); ASSERT(is_uint3(cc));
ASSERT((fmt & ~(31 << kRsShift)) == 0); ASSERT((fmt & ~(31 << kRsShift)) == 0);
Instr instr = COP1 | fmt | ft.code() << 16 | fs.code() << kFsShift Instr instr = COP1 | fmt | ft.code() << kFtShift | fs.code() << kFsShift
| cc << 8 | 3 << 4 | cond; | cc << 8 | 3 << 4 | cond;
emit(instr); emit(instr);
} }

View File

@ -457,11 +457,20 @@ class Assembler : public AssemblerBase {
// position. Links the label to the current position if it is still unbound. // position. Links the label to the current position if it is still unbound.
// Manages the jump elimination optimization if the second parameter is true. // Manages the jump elimination optimization if the second parameter is true.
int32_t branch_offset(Label* L, bool jump_elimination_allowed); int32_t branch_offset(Label* L, bool jump_elimination_allowed);
int32_t branch_offset_compact(Label* L, bool jump_elimination_allowed);
int32_t branch_offset21(Label* L, bool jump_elimination_allowed);
int32_t branch_offset21_compact(Label* L, bool jump_elimination_allowed);
int32_t shifted_branch_offset(Label* L, bool jump_elimination_allowed) { int32_t shifted_branch_offset(Label* L, bool jump_elimination_allowed) {
int32_t o = branch_offset(L, jump_elimination_allowed); int32_t o = branch_offset(L, jump_elimination_allowed);
ASSERT((o & 3) == 0); // Assert the offset is aligned. ASSERT((o & 3) == 0); // Assert the offset is aligned.
return o >> 2; return o >> 2;
} }
int32_t shifted_branch_offset_compact(Label* L,
bool jump_elimination_allowed) {
int32_t o = branch_offset_compact(L, jump_elimination_allowed);
ASSERT((o & 3) == 0); // Assert the offset is aligned.
return o >> 2;
}
uint64_t jump_address(Label* L); uint64_t jump_address(Label* L);
// Puts a labels target address at the given position. // Puts a labels target address at the given position.
@ -617,15 +626,100 @@ class Assembler : public AssemblerBase {
beq(rs, rt, branch_offset(L, false) >> 2); beq(rs, rt, branch_offset(L, false) >> 2);
} }
void bgez(Register rs, int16_t offset); void bgez(Register rs, int16_t offset);
void bgezc(Register rt, int16_t offset);
void bgezc(Register rt, Label* L) {
bgezc(rt, branch_offset_compact(L, false)>>2);
}
void bgeuc(Register rs, Register rt, int16_t offset);
void bgeuc(Register rs, Register rt, Label* L) {
bgeuc(rs, rt, branch_offset_compact(L, false)>>2);
}
void bgec(Register rs, Register rt, int16_t offset);
void bgec(Register rs, Register rt, Label* L) {
bgec(rs, rt, branch_offset_compact(L, false)>>2);
}
void bgezal(Register rs, int16_t offset); void bgezal(Register rs, int16_t offset);
void bgezalc(Register rt, int16_t offset);
void bgezalc(Register rt, Label* L) {
bgezalc(rt, branch_offset_compact(L, false)>>2);
}
void bgezall(Register rs, int16_t offset);
void bgezall(Register rs, Label* L) {
bgezall(rs, branch_offset(L, false)>>2);
}
void bgtz(Register rs, int16_t offset); void bgtz(Register rs, int16_t offset);
void bgtzc(Register rt, int16_t offset);
void bgtzc(Register rt, Label* L) {
bgtzc(rt, branch_offset_compact(L, false)>>2);
}
void blez(Register rs, int16_t offset); void blez(Register rs, int16_t offset);
void blezc(Register rt, int16_t offset);
void blezc(Register rt, Label* L) {
blezc(rt, branch_offset_compact(L, false)>>2);
}
void bltz(Register rs, int16_t offset); void bltz(Register rs, int16_t offset);
void bltzc(Register rt, int16_t offset);
void bltzc(Register rt, Label* L) {
bltzc(rt, branch_offset_compact(L, false)>>2);
}
void bltuc(Register rs, Register rt, int16_t offset);
void bltuc(Register rs, Register rt, Label* L) {
bltuc(rs, rt, branch_offset_compact(L, false)>>2);
}
void bltc(Register rs, Register rt, int16_t offset);
void bltc(Register rs, Register rt, Label* L) {
bltc(rs, rt, branch_offset_compact(L, false)>>2);
}
void bltzal(Register rs, int16_t offset); void bltzal(Register rs, int16_t offset);
void blezalc(Register rt, int16_t offset);
void blezalc(Register rt, Label* L) {
blezalc(rt, branch_offset_compact(L, false)>>2);
}
void bltzalc(Register rt, int16_t offset);
void bltzalc(Register rt, Label* L) {
bltzalc(rt, branch_offset_compact(L, false)>>2);
}
void bgtzalc(Register rt, int16_t offset);
void bgtzalc(Register rt, Label* L) {
bgtzalc(rt, branch_offset_compact(L, false)>>2);
}
void beqzalc(Register rt, int16_t offset);
void beqzalc(Register rt, Label* L) {
beqzalc(rt, branch_offset_compact(L, false)>>2);
}
void beqc(Register rs, Register rt, int16_t offset);
void beqc(Register rs, Register rt, Label* L) {
beqc(rs, rt, branch_offset_compact(L, false)>>2);
}
void beqzc(Register rs, int32_t offset);
void beqzc(Register rs, Label* L) {
beqzc(rs, branch_offset21_compact(L, false)>>2);
}
void bnezalc(Register rt, int16_t offset);
void bnezalc(Register rt, Label* L) {
bnezalc(rt, branch_offset_compact(L, false)>>2);
}
void bnec(Register rs, Register rt, int16_t offset);
void bnec(Register rs, Register rt, Label* L) {
bnec(rs, rt, branch_offset_compact(L, false)>>2);
}
void bnezc(Register rt, int32_t offset);
void bnezc(Register rt, Label* L) {
bnezc(rt, branch_offset21_compact(L, false)>>2);
}
void bne(Register rs, Register rt, int16_t offset); void bne(Register rs, Register rt, int16_t offset);
void bne(Register rs, Register rt, Label* L) { void bne(Register rs, Register rt, Label* L) {
bne(rs, rt, branch_offset(L, false)>>2); bne(rs, rt, branch_offset(L, false)>>2);
} }
void bovc(Register rs, Register rt, int16_t offset);
void bovc(Register rs, Register rt, Label* L) {
bovc(rs, rt, branch_offset_compact(L, false)>>2);
}
void bnvc(Register rs, Register rt, int16_t offset);
void bnvc(Register rs, Register rt, Label* L) {
bnvc(rs, rt, branch_offset_compact(L, false)>>2);
}
// Never use the int16_t b(l)cond version with a branch offset // Never use the int16_t b(l)cond version with a branch offset
// instead of using the Label* version. // instead of using the Label* version.
@ -644,17 +738,34 @@ class Assembler : public AssemblerBase {
// Arithmetic. // Arithmetic.
void addu(Register rd, Register rs, Register rt); void addu(Register rd, Register rs, Register rt);
void subu(Register rd, Register rs, Register rt); void subu(Register rd, Register rs, Register rt);
void mult(Register rs, Register rt);
void multu(Register rs, Register rt);
void div(Register rs, Register rt); void div(Register rs, Register rt);
void divu(Register rs, Register rt); void divu(Register rs, Register rt);
void ddiv(Register rs, Register rt);
void ddivu(Register rs, Register rt);
void div(Register rd, Register rs, Register rt);
void divu(Register rd, Register rs, Register rt);
void ddiv(Register rd, Register rs, Register rt);
void ddivu(Register rd, Register rs, Register rt);
void mod(Register rd, Register rs, Register rt);
void modu(Register rd, Register rs, Register rt);
void dmod(Register rd, Register rs, Register rt);
void dmodu(Register rd, Register rs, Register rt);
void mul(Register rd, Register rs, Register rt); void mul(Register rd, Register rs, Register rt);
void muh(Register rd, Register rs, Register rt);
void mulu(Register rd, Register rs, Register rt);
void muhu(Register rd, Register rs, Register rt);
void mult(Register rs, Register rt);
void multu(Register rs, Register rt);
void dmul(Register rd, Register rs, Register rt);
void dmuh(Register rd, Register rs, Register rt);
void dmulu(Register rd, Register rs, Register rt);
void dmuhu(Register rd, Register rs, Register rt);
void daddu(Register rd, Register rs, Register rt); void daddu(Register rd, Register rs, Register rt);
void dsubu(Register rd, Register rs, Register rt); void dsubu(Register rd, Register rs, Register rt);
void dmult(Register rs, Register rt); void dmult(Register rs, Register rt);
void dmultu(Register rs, Register rt); void dmultu(Register rs, Register rt);
void ddiv(Register rs, Register rt);
void ddivu(Register rs, Register rt);
void addiu(Register rd, Register rs, int32_t j); void addiu(Register rd, Register rs, int32_t j);
void daddiu(Register rd, Register rs, int32_t j); void daddiu(Register rd, Register rs, int32_t j);
@ -669,6 +780,10 @@ class Assembler : public AssemblerBase {
void ori(Register rd, Register rs, int32_t j); void ori(Register rd, Register rs, int32_t j);
void xori(Register rd, Register rs, int32_t j); void xori(Register rd, Register rs, int32_t j);
void lui(Register rd, int32_t j); void lui(Register rd, int32_t j);
void aui(Register rs, Register rt, int32_t j);
void daui(Register rs, Register rt, int32_t j);
void dahi(Register rs, int32_t j);
void dati(Register rs, int32_t j);
// Shifts. // Shifts.
// Please note: sll(zero_reg, zero_reg, x) instructions are reserved as nop // Please note: sll(zero_reg, zero_reg, x) instructions are reserved as nop
@ -751,6 +866,15 @@ class Assembler : public AssemblerBase {
void movt(Register rd, Register rs, uint16_t cc = 0); void movt(Register rd, Register rs, uint16_t cc = 0);
void movf(Register rd, Register rs, uint16_t cc = 0); void movf(Register rd, Register rs, uint16_t cc = 0);
void sel(SecondaryField fmt, FPURegister fd, FPURegister ft,
FPURegister fs, uint8_t sel);
void seleqz(Register rs, Register rt, Register rd);
void seleqz(SecondaryField fmt, FPURegister fd, FPURegister ft,
FPURegister fs);
void selnez(Register rs, Register rt, Register rd);
void selnez(SecondaryField fmt, FPURegister fd, FPURegister ft,
FPURegister fs);
// Bit twiddling. // Bit twiddling.
void clz(Register rd, Register rs); void clz(Register rd, Register rs);
void ins_(Register rt, Register rs, uint16_t pos, uint16_t size); void ins_(Register rt, Register rs, uint16_t pos, uint16_t size);
@ -810,6 +934,11 @@ class Assembler : public AssemblerBase {
void ceil_l_s(FPURegister fd, FPURegister fs); void ceil_l_s(FPURegister fd, FPURegister fs);
void ceil_l_d(FPURegister fd, FPURegister fs); void ceil_l_d(FPURegister fd, FPURegister fs);
void min(SecondaryField fmt, FPURegister fd, FPURegister ft, FPURegister fs);
void mina(SecondaryField fmt, FPURegister fd, FPURegister ft, FPURegister fs);
void max(SecondaryField fmt, FPURegister fd, FPURegister ft, FPURegister fs);
void maxa(SecondaryField fmt, FPURegister fd, FPURegister ft, FPURegister fs);
void cvt_s_w(FPURegister fd, FPURegister fs); void cvt_s_w(FPURegister fd, FPURegister fs);
void cvt_s_l(FPURegister fd, FPURegister fs); void cvt_s_l(FPURegister fd, FPURegister fs);
void cvt_s_d(FPURegister fd, FPURegister fs); void cvt_s_d(FPURegister fd, FPURegister fs);
@ -818,14 +947,31 @@ class Assembler : public AssemblerBase {
void cvt_d_l(FPURegister fd, FPURegister fs); void cvt_d_l(FPURegister fd, FPURegister fs);
void cvt_d_s(FPURegister fd, FPURegister fs); void cvt_d_s(FPURegister fd, FPURegister fs);
// Conditions and branches. // Conditions and branches for MIPSr6.
void cmp(FPUCondition cond, SecondaryField fmt,
FPURegister fd, FPURegister ft, FPURegister fs);
void bc1eqz(int16_t offset, FPURegister ft);
void bc1eqz(Label* L, FPURegister ft) {
bc1eqz(branch_offset(L, false)>>2, ft);
}
void bc1nez(int16_t offset, FPURegister ft);
void bc1nez(Label* L, FPURegister ft) {
bc1nez(branch_offset(L, false)>>2, ft);
}
// Conditions and branches for non MIPSr6.
void c(FPUCondition cond, SecondaryField fmt, void c(FPUCondition cond, SecondaryField fmt,
FPURegister ft, FPURegister fs, uint16_t cc = 0); FPURegister ft, FPURegister fs, uint16_t cc = 0);
void bc1f(int16_t offset, uint16_t cc = 0); void bc1f(int16_t offset, uint16_t cc = 0);
void bc1f(Label* L, uint16_t cc = 0) { bc1f(branch_offset(L, false)>>2, cc); } void bc1f(Label* L, uint16_t cc = 0) {
bc1f(branch_offset(L, false)>>2, cc);
}
void bc1t(int16_t offset, uint16_t cc = 0); void bc1t(int16_t offset, uint16_t cc = 0);
void bc1t(Label* L, uint16_t cc = 0) { bc1t(branch_offset(L, false)>>2, cc); } void bc1t(Label* L, uint16_t cc = 0) {
bc1t(branch_offset(L, false)>>2, cc);
}
void fcmp(FPURegister src1, const double src2, FPUCondition cond); void fcmp(FPURegister src1, const double src2, FPUCondition cond);
// Check the code size generated from label to here. // Check the code size generated from label to here.

View File

@ -1016,17 +1016,28 @@ void ICCompareStub::GenerateGeneric(MacroAssembler* masm) {
// Check if LESS condition is satisfied. If true, move conditionally // Check if LESS condition is satisfied. If true, move conditionally
// result to v0. // result to v0.
__ c(OLT, D, f12, f14); if (kArchVariant != kMips64r6) {
__ Movt(v0, a4); __ c(OLT, D, f12, f14);
// Use previous check to store conditionally to v0 oposite condition __ Movt(v0, a4);
// (GREATER). If rhs is equal to lhs, this will be corrected in next // Use previous check to store conditionally to v0 oposite condition
// check. // (GREATER). If rhs is equal to lhs, this will be corrected in next
__ Movf(v0, a5); // check.
// Check if EQUAL condition is satisfied. If true, move conditionally __ Movf(v0, a5);
// result to v0. // Check if EQUAL condition is satisfied. If true, move conditionally
__ c(EQ, D, f12, f14); // result to v0.
__ Movt(v0, a6); __ c(EQ, D, f12, f14);
__ Movt(v0, a6);
} else {
Label skip;
__ BranchF(USE_DELAY_SLOT, &skip, NULL, lt, f12, f14);
__ mov(v0, a4); // Return LESS as result.
__ BranchF(USE_DELAY_SLOT, &skip, NULL, eq, f12, f14);
__ mov(v0, a6); // Return EQUAL as result.
__ mov(v0, a5); // Return GREATER as result.
__ bind(&skip);
}
__ Ret(); __ Ret();
__ bind(&nan); __ bind(&nan);

View File

@ -295,6 +295,8 @@ Instruction::Type Instruction::InstructionType() const {
case COP1: // Coprocessor instructions. case COP1: // Coprocessor instructions.
switch (RsFieldRawNoAssert()) { switch (RsFieldRawNoAssert()) {
case BC1: // Branch on coprocessor condition. case BC1: // Branch on coprocessor condition.
case BC1EQZ:
case BC1NEZ:
return kImmediateType; return kImmediateType;
default: default:
return kRegisterType; return kRegisterType;
@ -322,6 +324,8 @@ Instruction::Type Instruction::InstructionType() const {
case BNEL: case BNEL:
case BLEZL: case BLEZL:
case BGTZL: case BGTZL:
case BEQZC:
case BNEZC:
case LB: case LB:
case LH: case LH:
case LWL: case LWL:

View File

@ -17,21 +17,17 @@
#define UNSUPPORTED_MIPS() v8::internal::PrintF("Unsupported instruction.\n") #define UNSUPPORTED_MIPS() v8::internal::PrintF("Unsupported instruction.\n")
enum ArchVariants { enum ArchVariants {
kMips32r2, kMips64r2,
kMips32r1, kMips64r6
kLoongson,
kMips64r2
}; };
#ifdef _MIPS_ARCH_MIPS64R2 #ifdef _MIPS_ARCH_MIPS64R2
static const ArchVariants kArchVariant = kMips64r2; static const ArchVariants kArchVariant = kMips64r2;
#elif _MIPS_ARCH_LOONGSON #elif _MIPS_ARCH_MIPS64R6
// The loongson flag refers to the LOONGSON architectures based on MIPS-III, static const ArchVariants kArchVariant = kMips64r6;
// which predates (and is a subset of) the mips32r2 and r1 architectures.
static const ArchVariants kArchVariant = kLoongson;
#else #else
static const ArchVariants kArchVariant = kMips64r1; static const ArchVariants kArchVariant = kMips64r2;
#endif #endif
@ -228,6 +224,8 @@ const int kLuiShift = 16;
const int kImm16Shift = 0; const int kImm16Shift = 0;
const int kImm16Bits = 16; const int kImm16Bits = 16;
const int kImm21Shift = 0;
const int kImm21Bits = 21;
const int kImm26Shift = 0; const int kImm26Shift = 0;
const int kImm26Bits = 26; const int kImm26Bits = 26;
const int kImm28Shift = 0; const int kImm28Shift = 0;
@ -295,15 +293,17 @@ enum Opcode {
ANDI = ((1 << 3) + 4) << kOpcodeShift, ANDI = ((1 << 3) + 4) << kOpcodeShift,
ORI = ((1 << 3) + 5) << kOpcodeShift, ORI = ((1 << 3) + 5) << kOpcodeShift,
XORI = ((1 << 3) + 6) << kOpcodeShift, XORI = ((1 << 3) + 6) << kOpcodeShift,
LUI = ((1 << 3) + 7) << kOpcodeShift, LUI = ((1 << 3) + 7) << kOpcodeShift, // LUI/AUI family.
DAUI = ((3 << 3) + 5) << kOpcodeShift,
BEQC = ((2 << 3) + 0) << kOpcodeShift,
COP1 = ((2 << 3) + 1) << kOpcodeShift, // Coprocessor 1 class. COP1 = ((2 << 3) + 1) << kOpcodeShift, // Coprocessor 1 class.
BEQL = ((2 << 3) + 4) << kOpcodeShift, BEQL = ((2 << 3) + 4) << kOpcodeShift,
BNEL = ((2 << 3) + 5) << kOpcodeShift, BNEL = ((2 << 3) + 5) << kOpcodeShift,
BLEZL = ((2 << 3) + 6) << kOpcodeShift, BLEZL = ((2 << 3) + 6) << kOpcodeShift,
BGTZL = ((2 << 3) + 7) << kOpcodeShift, BGTZL = ((2 << 3) + 7) << kOpcodeShift,
DADDI = ((3 << 3) + 0) << kOpcodeShift, DADDI = ((3 << 3) + 0) << kOpcodeShift, // This is also BNEC.
DADDIU = ((3 << 3) + 1) << kOpcodeShift, DADDIU = ((3 << 3) + 1) << kOpcodeShift,
LDL = ((3 << 3) + 2) << kOpcodeShift, LDL = ((3 << 3) + 2) << kOpcodeShift,
LDR = ((3 << 3) + 3) << kOpcodeShift, LDR = ((3 << 3) + 3) << kOpcodeShift,
@ -330,6 +330,7 @@ enum Opcode {
LWC1 = ((6 << 3) + 1) << kOpcodeShift, LWC1 = ((6 << 3) + 1) << kOpcodeShift,
LLD = ((6 << 3) + 4) << kOpcodeShift, LLD = ((6 << 3) + 4) << kOpcodeShift,
LDC1 = ((6 << 3) + 5) << kOpcodeShift, LDC1 = ((6 << 3) + 5) << kOpcodeShift,
BEQZC = ((6 << 3) + 6) << kOpcodeShift,
LD = ((6 << 3) + 7) << kOpcodeShift, LD = ((6 << 3) + 7) << kOpcodeShift,
PREF = ((6 << 3) + 3) << kOpcodeShift, PREF = ((6 << 3) + 3) << kOpcodeShift,
@ -337,6 +338,7 @@ enum Opcode {
SWC1 = ((7 << 3) + 1) << kOpcodeShift, SWC1 = ((7 << 3) + 1) << kOpcodeShift,
SCD = ((7 << 3) + 4) << kOpcodeShift, SCD = ((7 << 3) + 4) << kOpcodeShift,
SDC1 = ((7 << 3) + 5) << kOpcodeShift, SDC1 = ((7 << 3) + 5) << kOpcodeShift,
BNEZC = ((7 << 3) + 6) << kOpcodeShift,
SD = ((7 << 3) + 7) << kOpcodeShift, SD = ((7 << 3) + 7) << kOpcodeShift,
COP1X = ((1 << 4) + 3) << kOpcodeShift COP1X = ((1 << 4) + 3) << kOpcodeShift
@ -359,6 +361,8 @@ enum SecondaryField {
BREAK = ((1 << 3) + 5), BREAK = ((1 << 3) + 5),
MFHI = ((2 << 3) + 0), MFHI = ((2 << 3) + 0),
CLZ_R6 = ((2 << 3) + 0),
CLO_R6 = ((2 << 3) + 1),
MFLO = ((2 << 3) + 2), MFLO = ((2 << 3) + 2),
DSLLV = ((2 << 3) + 4), DSLLV = ((2 << 3) + 4),
DSRLV = ((2 << 3) + 6), DSRLV = ((2 << 3) + 6),
@ -394,7 +398,9 @@ enum SecondaryField {
TLT = ((6 << 3) + 2), TLT = ((6 << 3) + 2),
TLTU = ((6 << 3) + 3), TLTU = ((6 << 3) + 3),
TEQ = ((6 << 3) + 4), TEQ = ((6 << 3) + 4),
SELEQZ_S = ((6 << 3) + 5),
TNE = ((6 << 3) + 6), TNE = ((6 << 3) + 6),
SELNEZ_S = ((6 << 3) + 7),
DSLL = ((7 << 3) + 0), DSLL = ((7 << 3) + 0),
DSRL = ((7 << 3) + 2), DSRL = ((7 << 3) + 2),
@ -402,6 +408,23 @@ enum SecondaryField {
DSLL32 = ((7 << 3) + 4), DSLL32 = ((7 << 3) + 4),
DSRL32 = ((7 << 3) + 6), DSRL32 = ((7 << 3) + 6),
DSRA32 = ((7 << 3) + 7), DSRA32 = ((7 << 3) + 7),
// Multiply integers in r6.
MUL_MUH = ((3 << 3) + 0), // MUL, MUH.
MUL_MUH_U = ((3 << 3) + 1), // MUL_U, MUH_U.
D_MUL_MUH = ((7 << 2) + 0), // DMUL, DMUH.
D_MUL_MUH_U = ((7 << 2) + 1), // DMUL_U, DMUH_U.
MUL_OP = ((0 << 3) + 2),
MUH_OP = ((0 << 3) + 3),
DIV_OP = ((0 << 3) + 2),
MOD_OP = ((0 << 3) + 3),
DIV_MOD = ((3 << 3) + 2),
DIV_MOD_U = ((3 << 3) + 3),
D_DIV_MOD = ((3 << 3) + 6),
D_DIV_MOD_U = ((3 << 3) + 7),
// drotr in special4? // drotr in special4?
// SPECIAL2 Encoding of Function Field. // SPECIAL2 Encoding of Function Field.
@ -426,6 +449,9 @@ enum SecondaryField {
BGEZ = ((0 << 3) + 1) << 16, BGEZ = ((0 << 3) + 1) << 16,
BLTZAL = ((2 << 3) + 0) << 16, BLTZAL = ((2 << 3) + 0) << 16,
BGEZAL = ((2 << 3) + 1) << 16, BGEZAL = ((2 << 3) + 1) << 16,
BGEZALL = ((2 << 3) + 3) << 16,
DAHI = ((0 << 3) + 6) << 16,
DATI = ((3 << 3) + 6) << 16,
// COP1 Encoding of rs Field. // COP1 Encoding of rs Field.
MFC1 = ((0 << 3) + 0) << 21, MFC1 = ((0 << 3) + 0) << 21,
@ -472,6 +498,10 @@ enum SecondaryField {
TRUNC_W_D = ((1 << 3) + 5), TRUNC_W_D = ((1 << 3) + 5),
CEIL_W_D = ((1 << 3) + 6), CEIL_W_D = ((1 << 3) + 6),
FLOOR_W_D = ((1 << 3) + 7), FLOOR_W_D = ((1 << 3) + 7),
MIN = ((3 << 3) + 4),
MINA = ((3 << 3) + 5),
MAX = ((3 << 3) + 6),
MAXA = ((3 << 3) + 7),
CVT_S_D = ((4 << 3) + 0), CVT_S_D = ((4 << 3) + 0),
CVT_W_D = ((4 << 3) + 4), CVT_W_D = ((4 << 3) + 4),
CVT_L_D = ((4 << 3) + 5), CVT_L_D = ((4 << 3) + 5),
@ -488,6 +518,47 @@ enum SecondaryField {
CVT_D_W = ((4 << 3) + 1), CVT_D_W = ((4 << 3) + 1),
CVT_S_L = ((4 << 3) + 0), CVT_S_L = ((4 << 3) + 0),
CVT_D_L = ((4 << 3) + 1), CVT_D_L = ((4 << 3) + 1),
BC1EQZ = ((2 << 2) + 1) << 21,
BC1NEZ = ((3 << 2) + 1) << 21,
// COP1 CMP positive predicates Bit 5..4 = 00.
CMP_AF = ((0 << 3) + 0),
CMP_UN = ((0 << 3) + 1),
CMP_EQ = ((0 << 3) + 2),
CMP_UEQ = ((0 << 3) + 3),
CMP_LT = ((0 << 3) + 4),
CMP_ULT = ((0 << 3) + 5),
CMP_LE = ((0 << 3) + 6),
CMP_ULE = ((0 << 3) + 7),
CMP_SAF = ((1 << 3) + 0),
CMP_SUN = ((1 << 3) + 1),
CMP_SEQ = ((1 << 3) + 2),
CMP_SUEQ = ((1 << 3) + 3),
CMP_SSLT = ((1 << 3) + 4),
CMP_SSULT = ((1 << 3) + 5),
CMP_SLE = ((1 << 3) + 6),
CMP_SULE = ((1 << 3) + 7),
// COP1 CMP negative predicates Bit 5..4 = 01.
CMP_AT = ((2 << 3) + 0), // Reserved, not implemented.
CMP_OR = ((2 << 3) + 1),
CMP_UNE = ((2 << 3) + 2),
CMP_NE = ((2 << 3) + 3),
CMP_UGE = ((2 << 3) + 4), // Reserved, not implemented.
CMP_OGE = ((2 << 3) + 5), // Reserved, not implemented.
CMP_UGT = ((2 << 3) + 6), // Reserved, not implemented.
CMP_OGT = ((2 << 3) + 7), // Reserved, not implemented.
CMP_SAT = ((3 << 3) + 0), // Reserved, not implemented.
CMP_SOR = ((3 << 3) + 1),
CMP_SUNE = ((3 << 3) + 2),
CMP_SNE = ((3 << 3) + 3),
CMP_SUGE = ((3 << 3) + 4), // Reserved, not implemented.
CMP_SOGE = ((3 << 3) + 5), // Reserved, not implemented.
CMP_SUGT = ((3 << 3) + 6), // Reserved, not implemented.
CMP_SOGT = ((3 << 3) + 7), // Reserved, not implemented.
SEL = ((2 << 3) + 0),
SELEQZ_C = ((2 << 3) + 4), // COP1 on FPR registers.
SELNEZ_C = ((2 << 3) + 7), // COP1 on FPR registers.
// COP1 Encoding of Function Field When rs=PS. // COP1 Encoding of Function Field When rs=PS.
// COP1X Encoding of Function Field. // COP1X Encoding of Function Field.
MADD_D = ((4 << 3) + 1), MADD_D = ((4 << 3) + 1),
@ -497,9 +568,9 @@ enum SecondaryField {
// ----- Emulated conditions. // ----- Emulated conditions.
// On MIPS we use this enum to abstract from conditionnal branch instructions. // On MIPS we use this enum to abstract from conditional branch instructions.
// The 'U' prefix is used to specify unsigned comparisons. // The 'U' prefix is used to specify unsigned comparisons.
// Oppposite conditions must be paired as odd/even numbers // Opposite conditions must be paired as odd/even numbers
// because 'NegateCondition' function flips LSB to negate condition. // because 'NegateCondition' function flips LSB to negate condition.
enum Condition { enum Condition {
// Any value < 0 is considered no_condition. // Any value < 0 is considered no_condition.
@ -833,6 +904,11 @@ class Instruction {
return Bits(kImm16Shift + kImm16Bits - 1, kImm16Shift); return Bits(kImm16Shift + kImm16Bits - 1, kImm16Shift);
} }
inline int32_t Imm21Value() const {
ASSERT(InstructionType() == kImmediateType);
return Bits(kImm21Shift + kImm21Bits - 1, kImm21Shift);
}
inline int32_t Imm26Value() const { inline int32_t Imm26Value() const {
ASSERT(InstructionType() == kJumpType); ASSERT(InstructionType() == kJumpType);
return Bits(kImm26Shift + kImm26Bits - 1, kImm26Shift); return Bits(kImm26Shift + kImm26Bits - 1, kImm26Shift);

View File

@ -86,6 +86,7 @@ class Decoder {
void PrintUImm16(Instruction* instr); void PrintUImm16(Instruction* instr);
void PrintSImm16(Instruction* instr); void PrintSImm16(Instruction* instr);
void PrintXImm16(Instruction* instr); void PrintXImm16(Instruction* instr);
void PrintXImm21(Instruction* instr);
void PrintXImm26(Instruction* instr); void PrintXImm26(Instruction* instr);
void PrintCode(Instruction* instr); // For break and trap instructions. void PrintCode(Instruction* instr); // For break and trap instructions.
// Printing of instruction name. // Printing of instruction name.
@ -247,6 +248,13 @@ void Decoder::PrintXImm16(Instruction* instr) {
} }
// Print 21-bit immediate value.
void Decoder::PrintXImm21(Instruction* instr) {
uint32_t imm = instr->Imm21Value();
out_buffer_pos_ += SNPrintF(out_buffer_ + out_buffer_pos_, "0x%x", imm);
}
// Print 26-bit immediate value. // Print 26-bit immediate value.
void Decoder::PrintXImm26(Instruction* instr) { void Decoder::PrintXImm26(Instruction* instr) {
uint32_t imm = instr->Imm26Value() << kImmFieldShift; uint32_t imm = instr->Imm26Value() << kImmFieldShift;
@ -361,7 +369,11 @@ int Decoder::FormatOption(Instruction* instr, const char* format) {
PrintXImm16(instr); PrintXImm16(instr);
} }
return 6; return 6;
} else { } else if (format[3] == '2' && format[4] == '1') {
ASSERT(STRING_STARTS_WITH(format, "imm21x"));
PrintXImm21(instr);
return 6;
} else if (format[3] == '2' && format[4] == '6') {
ASSERT(STRING_STARTS_WITH(format, "imm26x")); ASSERT(STRING_STARTS_WITH(format, "imm26x"));
PrintXImm26(instr); PrintXImm26(instr);
return 6; return 6;
@ -466,9 +478,6 @@ int Decoder::DecodeTypeRegister(Instruction* instr) {
switch (instr->OpcodeFieldRaw()) { switch (instr->OpcodeFieldRaw()) {
case COP1: // Coprocessor instructions. case COP1: // Coprocessor instructions.
switch (instr->RsFieldRaw()) { switch (instr->RsFieldRaw()) {
case BC1: // bc1 handled in DecodeTypeImmediate.
UNREACHABLE();
break;
case MFC1: case MFC1:
Format(instr, "mfc1 'rt, 'fs"); Format(instr, "mfc1 'rt, 'fs");
break; break;
@ -582,14 +591,8 @@ int Decoder::DecodeTypeRegister(Instruction* instr) {
break; break;
} }
break; break;
case S:
UNIMPLEMENTED_MIPS();
break;
case W: case W:
switch (instr->FunctionFieldRaw()) { switch (instr->FunctionFieldRaw()) {
case CVT_S_W: // Convert word to float (single).
Format(instr, "cvt.s.w 'fd, 'fs");
break;
case CVT_D_W: // Convert word to double. case CVT_D_W: // Convert word to double.
Format(instr, "cvt.d.w 'fd, 'fs"); Format(instr, "cvt.d.w 'fd, 'fs");
break; break;
@ -605,13 +608,40 @@ int Decoder::DecodeTypeRegister(Instruction* instr) {
case CVT_S_L: case CVT_S_L:
Format(instr, "cvt.s.l 'fd, 'fs"); Format(instr, "cvt.s.l 'fd, 'fs");
break; break;
case CMP_UN:
Format(instr, "cmp.un.d 'fd, 'fs, 'ft");
break;
case CMP_EQ:
Format(instr, "cmp.eq.d 'fd, 'fs, 'ft");
break;
case CMP_UEQ:
Format(instr, "cmp.ueq.d 'fd, 'fs, 'ft");
break;
case CMP_LT:
Format(instr, "cmp.lt.d 'fd, 'fs, 'ft");
break;
case CMP_ULT:
Format(instr, "cmp.ult.d 'fd, 'fs, 'ft");
break;
case CMP_LE:
Format(instr, "cmp.le.d 'fd, 'fs, 'ft");
break;
case CMP_ULE:
Format(instr, "cmp.ule.d 'fd, 'fs, 'ft");
break;
case CMP_OR:
Format(instr, "cmp.or.d 'fd, 'fs, 'ft");
break;
case CMP_UNE:
Format(instr, "cmp.une.d 'fd, 'fs, 'ft");
break;
case CMP_NE:
Format(instr, "cmp.ne.d 'fd, 'fs, 'ft");
break;
default: default:
UNREACHABLE(); UNREACHABLE();
} }
break; break;
case PS:
UNIMPLEMENTED_MIPS();
break;
default: default:
UNREACHABLE(); UNREACHABLE();
} }
@ -634,13 +664,24 @@ int Decoder::DecodeTypeRegister(Instruction* instr) {
Format(instr, "jalr 'rs"); Format(instr, "jalr 'rs");
break; break;
case SLL: case SLL:
if ( 0x0 == static_cast<int>(instr->InstructionBits())) if (0x0 == static_cast<int>(instr->InstructionBits()))
Format(instr, "nop"); Format(instr, "nop");
else else
Format(instr, "sll 'rd, 'rt, 'sa"); Format(instr, "sll 'rd, 'rt, 'sa");
break; break;
case DSLL: case DSLL:
Format(instr, "dsll 'rd, 'rt, 'sa"); Format(instr, "dsll 'rd, 'rt, 'sa");
break;
case D_MUL_MUH: // Equals to DMUL.
if (kArchVariant != kMips64r6) {
Format(instr, "dmult 'rs, 'rt");
} else {
if (instr->SaValue() == MUL_OP) {
Format(instr, "dmul 'rd, 'rs, 'rt");
} else {
Format(instr, "dmuh 'rd, 'rs, 'rt");
}
}
break; break;
case DSLL32: case DSLL32:
Format(instr, "dsll32 'rd, 'rt, 'sa"); Format(instr, "dsll32 'rd, 'rt, 'sa");
@ -714,34 +755,98 @@ int Decoder::DecodeTypeRegister(Instruction* instr) {
Format(instr, "dsrav 'rd, 'rt, 'rs"); Format(instr, "dsrav 'rd, 'rt, 'rs");
break; break;
case MFHI: case MFHI:
Format(instr, "mfhi 'rd"); if (instr->Bits(25, 16) == 0) {
Format(instr, "mfhi 'rd");
} else {
if ((instr->FunctionFieldRaw() == CLZ_R6)
&& (instr->FdValue() == 1)) {
Format(instr, "clz 'rd, 'rs");
} else if ((instr->FunctionFieldRaw() == CLO_R6)
&& (instr->FdValue() == 1)) {
Format(instr, "clo 'rd, 'rs");
}
}
break; break;
case MFLO: case MFLO:
Format(instr, "mflo 'rd"); Format(instr, "mflo 'rd");
break; break;
case MULT: case D_MUL_MUH_U: // Equals to DMULTU.
Format(instr, "mult 'rs, 'rt"); if (kArchVariant != kMips64r6) {
Format(instr, "dmultu 'rs, 'rt");
} else {
if (instr->SaValue() == MUL_OP) {
Format(instr, "dmulu 'rd, 'rs, 'rt");
} else {
Format(instr, "dmuhu 'rd, 'rs, 'rt");
}
}
break; break;
case DMULT: case MULT: // @Mips64r6 == MUL_MUH.
Format(instr, "dmult 'rs, 'rt"); if (kArchVariant != kMips64r6) {
Format(instr, "mult 'rs, 'rt");
} else {
if (instr->SaValue() == MUL_OP) {
Format(instr, "mul 'rd, 'rs, 'rt");
} else {
Format(instr, "muh 'rd, 'rs, 'rt");
}
}
break; break;
case MULTU: case MULTU: // @Mips64r6 == MUL_MUH_U.
Format(instr, "multu 'rs, 'rt"); if (kArchVariant != kMips64r6) {
Format(instr, "multu 'rs, 'rt");
} else {
if (instr->SaValue() == MUL_OP) {
Format(instr, "mulu 'rd, 'rs, 'rt");
} else {
Format(instr, "muhu 'rd, 'rs, 'rt");
}
}
break; break;
case DMULTU: case DIV: // @Mips64r6 == DIV_MOD.
Format(instr, "dmultu 'rs, 'rt"); if (kArchVariant != kMips64r6) {
Format(instr, "div 'rs, 'rt");
} else {
if (instr->SaValue() == DIV_OP) {
Format(instr, "div 'rd, 'rs, 'rt");
} else {
Format(instr, "mod 'rd, 'rs, 'rt");
}
}
break; break;
case DIV: case DDIV: // @Mips64r6 == D_DIV_MOD.
Format(instr, "div 'rs, 'rt"); if (kArchVariant != kMips64r6) {
Format(instr, "ddiv 'rs, 'rt");
} else {
if (instr->SaValue() == DIV_OP) {
Format(instr, "ddiv 'rd, 'rs, 'rt");
} else {
Format(instr, "dmod 'rd, 'rs, 'rt");
}
}
break; break;
case DDIV: case DIVU: // @Mips64r6 == DIV_MOD_U.
Format(instr, "ddiv 'rs, 'rt"); if (kArchVariant != kMips64r6) {
Format(instr, "divu 'rs, 'rt");
} else {
if (instr->SaValue() == DIV_OP) {
Format(instr, "divu 'rd, 'rs, 'rt");
} else {
Format(instr, "modu 'rd, 'rs, 'rt");
}
}
break; break;
case DIVU: case DDIVU: // @Mips64r6 == D_DIV_MOD_U.
Format(instr, "divu 'rs, 'rt"); if (kArchVariant != kMips64r6) {
break; Format(instr, "ddivu 'rs, 'rt");
case DDIVU: } else {
Format(instr, "ddivu 'rs, 'rt"); if (instr->SaValue() == DIV_OP) {
Format(instr, "ddivu 'rd, 'rs, 'rt");
} else {
Format(instr, "dmodu 'rd, 'rs, 'rt");
}
}
break; break;
case ADD: case ADD:
Format(instr, "add 'rd, 'rs, 'rt"); Format(instr, "add 'rd, 'rs, 'rt");
@ -824,6 +929,12 @@ int Decoder::DecodeTypeRegister(Instruction* instr) {
Format(instr, "movf 'rd, 'rs, 'bc"); Format(instr, "movf 'rd, 'rs, 'bc");
} }
break; break;
case SELEQZ_S:
Format(instr, "seleqz 'rd, 'rs, 'rt");
break;
case SELNEZ_S:
Format(instr, "selnez 'rd, 'rs, 'rt");
break;
default: default:
UNREACHABLE(); UNREACHABLE();
} }
@ -834,7 +945,9 @@ int Decoder::DecodeTypeRegister(Instruction* instr) {
Format(instr, "mul 'rd, 'rs, 'rt"); Format(instr, "mul 'rd, 'rs, 'rt");
break; break;
case CLZ: case CLZ:
Format(instr, "clz 'rd, 'rs"); if (kArchVariant != kMips64r6) {
Format(instr, "clz 'rd, 'rs");
}
break; break;
default: default:
UNREACHABLE(); UNREACHABLE();
@ -843,19 +956,11 @@ int Decoder::DecodeTypeRegister(Instruction* instr) {
case SPECIAL3: case SPECIAL3:
switch (instr->FunctionFieldRaw()) { switch (instr->FunctionFieldRaw()) {
case INS: { case INS: {
if (kArchVariant == kMips64r2) { Format(instr, "ins 'rt, 'rs, 'sa, 'ss2");
Format(instr, "ins 'rt, 'rs, 'sa, 'ss2");
} else {
Unknown(instr);
}
break; break;
} }
case EXT: { case EXT: {
if (kArchVariant == kMips64r2) { Format(instr, "ext 'rt, 'rs, 'sa, 'ss1");
Format(instr, "ext 'rt, 'rs, 'sa, 'ss1");
} else {
Unknown(instr);
}
break; break;
} }
default: default:
@ -871,7 +976,6 @@ int Decoder::DecodeTypeRegister(Instruction* instr) {
void Decoder::DecodeTypeImmediate(Instruction* instr) { void Decoder::DecodeTypeImmediate(Instruction* instr) {
switch (instr->OpcodeFieldRaw()) { switch (instr->OpcodeFieldRaw()) {
// ------------- REGIMM class.
case COP1: case COP1:
switch (instr->RsFieldRaw()) { switch (instr->RsFieldRaw()) {
case BC1: case BC1:
@ -881,10 +985,150 @@ void Decoder::DecodeTypeImmediate(Instruction* instr) {
Format(instr, "bc1f 'bc, 'imm16u"); Format(instr, "bc1f 'bc, 'imm16u");
} }
break; break;
case BC1EQZ:
Format(instr, "bc1eqz 'ft, 'imm16u");
break;
case BC1NEZ:
Format(instr, "bc1nez 'ft, 'imm16u");
break;
case W: // CMP.S instruction.
switch (instr->FunctionValue()) {
case CMP_AF:
Format(instr, "cmp.af.S 'ft, 'fs, 'fd");
break;
case CMP_UN:
Format(instr, "cmp.un.S 'ft, 'fs, 'fd");
break;
case CMP_EQ:
Format(instr, "cmp.eq.S 'ft, 'fs, 'fd");
break;
case CMP_UEQ:
Format(instr, "cmp.ueq.S 'ft, 'fs, 'fd");
break;
case CMP_LT:
Format(instr, "cmp.lt.S 'ft, 'fs, 'fd");
break;
case CMP_ULT:
Format(instr, "cmp.ult.S 'ft, 'fs, 'fd");
break;
case CMP_LE:
Format(instr, "cmp.le.S 'ft, 'fs, 'fd");
break;
case CMP_ULE:
Format(instr, "cmp.ule.S 'ft, 'fs, 'fd");
break;
case CMP_OR:
Format(instr, "cmp.or.S 'ft, 'fs, 'fd");
break;
case CMP_UNE:
Format(instr, "cmp.une.S 'ft, 'fs, 'fd");
break;
case CMP_NE:
Format(instr, "cmp.ne.S 'ft, 'fs, 'fd");
break;
default:
UNREACHABLE();
}
break;
case L: // CMP.D instruction.
switch (instr->FunctionValue()) {
case CMP_AF:
Format(instr, "cmp.af.D 'ft, 'fs, 'fd");
break;
case CMP_UN:
Format(instr, "cmp.un.D 'ft, 'fs, 'fd");
break;
case CMP_EQ:
Format(instr, "cmp.eq.D 'ft, 'fs, 'fd");
break;
case CMP_UEQ:
Format(instr, "cmp.ueq.D 'ft, 'fs, 'fd");
break;
case CMP_LT:
Format(instr, "cmp.lt.D 'ft, 'fs, 'fd");
break;
case CMP_ULT:
Format(instr, "cmp.ult.D 'ft, 'fs, 'fd");
break;
case CMP_LE:
Format(instr, "cmp.le.D 'ft, 'fs, 'fd");
break;
case CMP_ULE:
Format(instr, "cmp.ule.D 'ft, 'fs, 'fd");
break;
case CMP_OR:
Format(instr, "cmp.or.D 'ft, 'fs, 'fd");
break;
case CMP_UNE:
Format(instr, "cmp.une.D 'ft, 'fs, 'fd");
break;
case CMP_NE:
Format(instr, "cmp.ne.D 'ft, 'fs, 'fd");
break;
default:
UNREACHABLE();
}
break;
case S:
switch (instr->FunctionValue()) {
case SEL:
Format(instr, "sel.S 'ft, 'fs, 'fd");
break;
case SELEQZ_C:
Format(instr, "seleqz.S 'ft, 'fs, 'fd");
break;
case SELNEZ_C:
Format(instr, "selnez.S 'ft, 'fs, 'fd");
break;
case MIN:
Format(instr, "min.S 'ft, 'fs, 'fd");
break;
case MINA:
Format(instr, "mina.S 'ft, 'fs, 'fd");
break;
case MAX:
Format(instr, "max.S 'ft, 'fs, 'fd");
break;
case MAXA:
Format(instr, "maxa.S 'ft, 'fs, 'fd");
break;
default:
UNREACHABLE();
}
break;
case D:
switch (instr->FunctionValue()) {
case SEL:
Format(instr, "sel.D 'ft, 'fs, 'fd");
break;
case SELEQZ_C:
Format(instr, "seleqz.D 'ft, 'fs, 'fd");
break;
case SELNEZ_C:
Format(instr, "selnez.D 'ft, 'fs, 'fd");
break;
case MIN:
Format(instr, "min.D 'ft, 'fs, 'fd");
break;
case MINA:
Format(instr, "mina.D 'ft, 'fs, 'fd");
break;
case MAX:
Format(instr, "max.D 'ft, 'fs, 'fd");
break;
case MAXA:
Format(instr, "maxa.D 'ft, 'fs, 'fd");
break;
default:
UNREACHABLE();
}
break;
default: default:
UNREACHABLE(); UNREACHABLE();
} }
break; // Case COP1. break; // Case COP1.
// ------------- REGIMM class.
case REGIMM: case REGIMM:
switch (instr->RtFieldRaw()) { switch (instr->RtFieldRaw()) {
case BLTZ: case BLTZ:
@ -899,6 +1143,15 @@ void Decoder::DecodeTypeImmediate(Instruction* instr) {
case BGEZAL: case BGEZAL:
Format(instr, "bgezal 'rs, 'imm16u"); Format(instr, "bgezal 'rs, 'imm16u");
break; break;
case BGEZALL:
Format(instr, "bgezall 'rs, 'imm16u");
break;
case DAHI:
Format(instr, "dahi 'rs, 'imm16u");
break;
case DATI:
Format(instr, "dati 'rs, 'imm16u");
break;
default: default:
UNREACHABLE(); UNREACHABLE();
} }
@ -911,17 +1164,105 @@ void Decoder::DecodeTypeImmediate(Instruction* instr) {
Format(instr, "bne 'rs, 'rt, 'imm16u"); Format(instr, "bne 'rs, 'rt, 'imm16u");
break; break;
case BLEZ: case BLEZ:
Format(instr, "blez 'rs, 'imm16u"); if ((instr->RtFieldRaw() == 0)
&& (instr->RsFieldRaw() != 0)) {
Format(instr, "blez 'rs, 'imm16u");
} else if ((instr->RtFieldRaw() != instr->RsFieldRaw())
&& (instr->RsFieldRaw() != 0) && (instr->RtFieldRaw() != 0)) {
Format(instr, "bgeuc 'rs, 'rt, 'imm16u");
} else if ((instr->RtFieldRaw() == instr->RsFieldRaw())
&& (instr->RtFieldRaw() != 0)) {
Format(instr, "bgezalc 'rs, 'imm16u");
} else if ((instr->RsFieldRaw() == 0)
&& (instr->RtFieldRaw() != 0)) {
Format(instr, "blezalc 'rs, 'imm16u");
} else {
UNREACHABLE();
}
break; break;
case BGTZ: case BGTZ:
Format(instr, "bgtz 'rs, 'imm16u"); if ((instr->RtFieldRaw() == 0)
&& (instr->RsFieldRaw() != 0)) {
Format(instr, "bgtz 'rs, 'imm16u");
} else if ((instr->RtFieldRaw() != instr->RsFieldRaw())
&& (instr->RsFieldRaw() != 0) && (instr->RtFieldRaw() != 0)) {
Format(instr, "bltuc 'rs, 'rt, 'imm16u");
} else if ((instr->RtFieldRaw() == instr->RsFieldRaw())
&& (instr->RtFieldRaw() != 0)) {
Format(instr, "bltzalc 'rt, 'imm16u");
} else if ((instr->RsFieldRaw() == 0)
&& (instr->RtFieldRaw() != 0)) {
Format(instr, "bgtzalc 'rt, 'imm16u");
} else {
UNREACHABLE();
}
break;
case BLEZL:
if ((instr->RtFieldRaw() == instr->RsFieldRaw())
&& (instr->RtFieldRaw() != 0)) {
Format(instr, "bgezc 'rt, 'imm16u");
} else if ((instr->RtFieldRaw() != instr->RsFieldRaw())
&& (instr->RsFieldRaw() != 0) && (instr->RtFieldRaw() != 0)) {
Format(instr, "bgec 'rs, 'rt, 'imm16u");
} else if ((instr->RsFieldRaw() == 0)
&& (instr->RtFieldRaw() != 0)) {
Format(instr, "blezc 'rt, 'imm16u");
} else {
UNREACHABLE();
}
break;
case BGTZL:
if ((instr->RtFieldRaw() == instr->RsFieldRaw())
&& (instr->RtFieldRaw() != 0)) {
Format(instr, "bltzc 'rt, 'imm16u");
} else if ((instr->RtFieldRaw() != instr->RsFieldRaw())
&& (instr->RsFieldRaw() != 0) && (instr->RtFieldRaw() != 0)) {
Format(instr, "bltc 'rs, 'rt, 'imm16u");
} else if ((instr->RsFieldRaw() == 0)
&& (instr->RtFieldRaw() != 0)) {
Format(instr, "bgtzc 'rt, 'imm16u");
} else {
UNREACHABLE();
}
break;
case BEQZC:
if (instr->RsFieldRaw() != 0) {
Format(instr, "beqzc 'rs, 'imm21x");
}
break;
case BNEZC:
if (instr->RsFieldRaw() != 0) {
Format(instr, "bnezc 'rs, 'imm21x");
}
break; break;
// ------------- Arithmetic instructions. // ------------- Arithmetic instructions.
case ADDI: case ADDI:
Format(instr, "addi 'rt, 'rs, 'imm16s"); if (kArchVariant != kMips64r6) {
Format(instr, "addi 'rt, 'rs, 'imm16s");
} else {
// Check if BOVC or BEQC instruction.
if (instr->RsFieldRaw() >= instr->RtFieldRaw()) {
Format(instr, "bovc 'rs, 'rt, 'imm16s");
} else if (instr->RsFieldRaw() < instr->RtFieldRaw()) {
Format(instr, "beqc 'rs, 'rt, 'imm16s");
} else {
UNREACHABLE();
}
}
break; break;
case DADDI: case DADDI:
Format(instr, "daddi 'rt, 'rs, 'imm16s"); if (kArchVariant != kMips64r6) {
Format(instr, "daddi 'rt, 'rs, 'imm16s");
} else {
// Check if BNVC or BNEC instruction.
if (instr->RsFieldRaw() >= instr->RtFieldRaw()) {
Format(instr, "bnvc 'rs, 'rt, 'imm16s");
} else if (instr->RsFieldRaw() < instr->RtFieldRaw()) {
Format(instr, "bnec 'rs, 'rt, 'imm16s");
} else {
UNREACHABLE();
}
}
break; break;
case ADDIU: case ADDIU:
Format(instr, "addiu 'rt, 'rs, 'imm16s"); Format(instr, "addiu 'rt, 'rs, 'imm16s");
@ -945,7 +1286,18 @@ void Decoder::DecodeTypeImmediate(Instruction* instr) {
Format(instr, "xori 'rt, 'rs, 'imm16x"); Format(instr, "xori 'rt, 'rs, 'imm16x");
break; break;
case LUI: case LUI:
Format(instr, "lui 'rt, 'imm16x"); if (kArchVariant != kMips64r6) {
Format(instr, "lui 'rt, 'imm16x");
} else {
if (instr->RsValue() != 0) {
Format(instr, "aui 'rt, 'imm16x");
} else {
Format(instr, "lui 'rt, 'imm16x");
}
}
break;
case DAUI:
Format(instr, "daui 'rt, 'imm16x");
break; break;
// ------------- Memory instructions. // ------------- Memory instructions.
case LB: case LB:

View File

@ -2364,14 +2364,12 @@ void FullCodeGenerator::EmitInlineSmiBinaryOp(BinaryOperation* expr,
__ BranchOnOverflow(&stub_call, scratch1); __ BranchOnOverflow(&stub_call, scratch1);
break; break;
case Token::MUL: { case Token::MUL: {
__ SmiUntag(scratch1, right); __ Dmulh(v0, left, right);
__ Dmult(left, scratch1); __ dsra32(scratch2, v0, 0);
__ mflo(scratch1); __ sra(scratch1, v0, 31);
__ mfhi(scratch2); __ Branch(USE_DELAY_SLOT, &stub_call, ne, scratch2, Operand(scratch1));
__ dsra32(scratch1, scratch1, 31); __ SmiTag(v0);
__ Branch(&stub_call, ne, scratch1, Operand(scratch2)); __ Branch(USE_DELAY_SLOT, &done, ne, v0, Operand(zero_reg));
__ mflo(v0);
__ Branch(&done, ne, v0, Operand(zero_reg));
__ Daddu(scratch2, right, left); __ Daddu(scratch2, right, left);
__ Branch(&stub_call, lt, scratch2, Operand(zero_reg)); __ Branch(&stub_call, lt, scratch2, Operand(zero_reg));
ASSERT(Smi::FromInt(0) == 0); ASSERT(Smi::FromInt(0) == 0);
@ -3943,12 +3941,11 @@ void FullCodeGenerator::EmitFastAsciiArrayJoin(CallRuntime* expr) {
__ ld(scratch1, FieldMemOperand(separator, SeqOneByteString::kLengthOffset)); __ ld(scratch1, FieldMemOperand(separator, SeqOneByteString::kLengthOffset));
__ Dsubu(string_length, string_length, Operand(scratch1)); __ Dsubu(string_length, string_length, Operand(scratch1));
__ SmiUntag(scratch1); __ SmiUntag(scratch1);
__ Dmult(array_length, scratch1); __ Dmul(scratch2, array_length, scratch1);
// Check for smi overflow. No overflow if higher 33 bits of 64-bit result are // Check for smi overflow. No overflow if higher 33 bits of 64-bit result are
// zero. // zero.
__ mfhi(scratch2); __ dsra32(scratch1, scratch2, 0);
__ Branch(&bailout, ne, scratch2, Operand(zero_reg)); __ Branch(&bailout, ne, scratch2, Operand(zero_reg));
__ mflo(scratch2);
__ SmiUntag(string_length); __ SmiUntag(string_length);
__ AdduAndCheckForOverflow(string_length, string_length, scratch2, scratch3); __ AdduAndCheckForOverflow(string_length, string_length, scratch2, scratch3);
__ BranchOnOverflow(&bailout, scratch3); __ BranchOnOverflow(&bailout, scratch3);

View File

@ -1119,7 +1119,7 @@ void LCodeGen::DoModI(LModI* instr) {
const Register result_reg = ToRegister(instr->result()); const Register result_reg = ToRegister(instr->result());
// div runs in the background while we check for special cases. // div runs in the background while we check for special cases.
__ ddiv(left_reg, right_reg); __ Dmod(result_reg, left_reg, right_reg);
Label done; Label done;
// Check for x % 0, we have to deopt in this case because we can't return a // Check for x % 0, we have to deopt in this case because we can't return a
@ -1144,8 +1144,7 @@ void LCodeGen::DoModI(LModI* instr) {
} }
// If we care about -0, test if the dividend is <0 and the result is 0. // If we care about -0, test if the dividend is <0 and the result is 0.
__ Branch(USE_DELAY_SLOT, &done, ge, left_reg, Operand(zero_reg)); __ Branch(&done, ge, left_reg, Operand(zero_reg));
__ mfhi(result_reg);
if (hmod->CheckFlag(HValue::kBailoutOnMinusZero)) { if (hmod->CheckFlag(HValue::kBailoutOnMinusZero)) {
DeoptimizeIf(eq, instr->environment(), result_reg, Operand(zero_reg)); DeoptimizeIf(eq, instr->environment(), result_reg, Operand(zero_reg));
@ -1235,7 +1234,7 @@ void LCodeGen::DoDivI(LDivI* instr) {
// On MIPS div is asynchronous - it will run in the background while we // On MIPS div is asynchronous - it will run in the background while we
// check for special cases. // check for special cases.
__ ddiv(dividend, divisor); __ Ddiv(result, dividend, divisor);
// Check for x / 0. // Check for x / 0.
if (hdiv->CheckFlag(HValue::kCanBeDivByZero)) { if (hdiv->CheckFlag(HValue::kCanBeDivByZero)) {
@ -1260,11 +1259,14 @@ void LCodeGen::DoDivI(LDivI* instr) {
} }
if (!hdiv->CheckFlag(HValue::kAllUsesTruncatingToInt32)) { if (!hdiv->CheckFlag(HValue::kAllUsesTruncatingToInt32)) {
__ mfhi(result); // Calculate remainder.
DeoptimizeIf(ne, instr->environment(), result, Operand(zero_reg)); Register remainder = ToRegister(instr->temp());
__ mflo(result); if (kArchVariant != kMips64r6) {
} else { __ mfhi(remainder);
__ mflo(result); } else {
__ dmod(remainder, dividend, divisor);
}
DeoptimizeIf(ne, instr->environment(), remainder, Operand(zero_reg));
} }
} }
@ -1391,7 +1393,7 @@ void LCodeGen::DoFlooringDivI(LFlooringDivI* instr) {
// On MIPS div is asynchronous - it will run in the background while we // On MIPS div is asynchronous - it will run in the background while we
// check for special cases. // check for special cases.
__ ddiv(dividend, divisor); __ Ddiv(result, dividend, divisor);
// Check for x / 0. // Check for x / 0.
if (hdiv->CheckFlag(HValue::kCanBeDivByZero)) { if (hdiv->CheckFlag(HValue::kCanBeDivByZero)) {
@ -1418,8 +1420,11 @@ void LCodeGen::DoFlooringDivI(LFlooringDivI* instr) {
// We performed a truncating division. Correct the result if necessary. // We performed a truncating division. Correct the result if necessary.
Label done; Label done;
Register remainder = scratch0(); Register remainder = scratch0();
__ mfhi(remainder); if (kArchVariant != kMips64r6) {
__ mflo(result); __ mfhi(remainder);
} else {
__ dmod(remainder, dividend, divisor);
}
__ Branch(&done, eq, remainder, Operand(zero_reg), USE_DELAY_SLOT); __ Branch(&done, eq, remainder, Operand(zero_reg), USE_DELAY_SLOT);
__ Xor(remainder, remainder, Operand(divisor)); __ Xor(remainder, remainder, Operand(divisor));
__ Branch(&done, ge, remainder, Operand(zero_reg)); __ Branch(&done, ge, remainder, Operand(zero_reg));
@ -1507,21 +1512,16 @@ void LCodeGen::DoMulI(LMulI* instr) {
if (overflow) { if (overflow) {
// hi:lo = left * right. // hi:lo = left * right.
if (instr->hydrogen()->representation().IsSmi()) { if (instr->hydrogen()->representation().IsSmi()) {
__ SmiUntag(result, left); __ Dmulh(result, left, right);
__ dmult(result, right);
__ mfhi(scratch);
__ mflo(result);
} else { } else {
__ dmult(left, right); __ Dmul(result, left, right);
__ mfhi(scratch); }
__ mflo(result); __ dsra32(scratch, result, 0);
__ sra(at, result, 31);
if (instr->hydrogen()->representation().IsSmi()) {
__ SmiTag(result);
} }
__ dsra32(at, result, 31);
DeoptimizeIf(ne, instr->environment(), scratch, Operand(at)); DeoptimizeIf(ne, instr->environment(), scratch, Operand(at));
if (!instr->hydrogen()->representation().IsSmi()) {
DeoptimizeIf(gt, instr->environment(), result, Operand(kMaxInt));
DeoptimizeIf(lt, instr->environment(), result, Operand(kMinInt));
}
} else { } else {
if (instr->hydrogen()->representation().IsSmi()) { if (instr->hydrogen()->representation().IsSmi()) {
__ SmiUntag(result, left); __ SmiUntag(result, left);

View File

@ -1307,8 +1307,10 @@ LInstruction* LChunkBuilder::DoDivI(HDiv* instr) {
ASSERT(instr->right()->representation().Equals(instr->representation())); ASSERT(instr->right()->representation().Equals(instr->representation()));
LOperand* dividend = UseRegister(instr->left()); LOperand* dividend = UseRegister(instr->left());
LOperand* divisor = UseRegister(instr->right()); LOperand* divisor = UseRegister(instr->right());
LOperand* temp = instr->CheckFlag(HInstruction::kAllUsesTruncatingToInt32)
? NULL : TempRegister();
LInstruction* result = LInstruction* result =
DefineAsRegister(new(zone()) LDivI(dividend, divisor)); DefineAsRegister(new(zone()) LDivI(dividend, divisor, temp));
if (instr->CheckFlag(HValue::kCanBeDivByZero) || if (instr->CheckFlag(HValue::kCanBeDivByZero) ||
instr->CheckFlag(HValue::kBailoutOnMinusZero) || instr->CheckFlag(HValue::kBailoutOnMinusZero) ||
(instr->CheckFlag(HValue::kCanOverflow) && (instr->CheckFlag(HValue::kCanOverflow) &&

View File

@ -689,15 +689,17 @@ class LDivByConstI V8_FINAL : public LTemplateInstruction<1, 1, 0> {
}; };
class LDivI V8_FINAL : public LTemplateInstruction<1, 2, 0> { class LDivI V8_FINAL : public LTemplateInstruction<1, 2, 1> {
public: public:
LDivI(LOperand* dividend, LOperand* divisor) { LDivI(LOperand* dividend, LOperand* divisor, LOperand* temp) {
inputs_[0] = dividend; inputs_[0] = dividend;
inputs_[1] = divisor; inputs_[1] = divisor;
temps_[0] = temp;
} }
LOperand* dividend() { return inputs_[0]; } LOperand* dividend() { return inputs_[0]; }
LOperand* divisor() { return inputs_[1]; } LOperand* divisor() { return inputs_[1]; }
LOperand* temp() { return temps_[0]; }
DECLARE_CONCRETE_INSTRUCTION(DivI, "div-i") DECLARE_CONCRETE_INSTRUCTION(DivI, "div-i")
DECLARE_HYDROGEN_ACCESSOR(BinaryOperation) DECLARE_HYDROGEN_ACCESSOR(BinaryOperation)

View File

@ -676,21 +676,33 @@ void MacroAssembler::Dsubu(Register rd, Register rs, const Operand& rt) {
void MacroAssembler::Mul(Register rd, Register rs, const Operand& rt) { void MacroAssembler::Mul(Register rd, Register rs, const Operand& rt) {
if (rt.is_reg()) { if (rt.is_reg()) {
if (kArchVariant == kLoongson) { mul(rd, rs, rt.rm());
} else {
// li handles the relocation.
ASSERT(!rs.is(at));
li(at, rt);
mul(rd, rs, at);
}
}
void MacroAssembler::Mulh(Register rd, Register rs, const Operand& rt) {
if (rt.is_reg()) {
if (kArchVariant != kMips64r6) {
mult(rs, rt.rm()); mult(rs, rt.rm());
mflo(rd); mfhi(rd);
} else { } else {
mul(rd, rs, rt.rm()); muh(rd, rs, rt.rm());
} }
} else { } else {
// li handles the relocation. // li handles the relocation.
ASSERT(!rs.is(at)); ASSERT(!rs.is(at));
li(at, rt); li(at, rt);
if (kArchVariant == kLoongson) { if (kArchVariant != kMips64r6) {
mult(rs, at); mult(rs, at);
mflo(rd); mfhi(rd);
} else { } else {
mul(rd, rs, at); muh(rd, rs, at);
} }
} }
} }
@ -698,12 +710,9 @@ void MacroAssembler::Mul(Register rd, Register rs, const Operand& rt) {
void MacroAssembler::Dmul(Register rd, Register rs, const Operand& rt) { void MacroAssembler::Dmul(Register rd, Register rs, const Operand& rt) {
if (rt.is_reg()) { if (rt.is_reg()) {
if (kArchVariant == kLoongson) { if (kArchVariant == kMips64r6) {
dmult(rs, rt.rm()); dmul(rd, rs, rt.rm());
mflo(rd);
} else { } else {
// TODO(yuyin):
// dmul(rd, rs, rt.rm());
dmult(rs, rt.rm()); dmult(rs, rt.rm());
mflo(rd); mflo(rd);
} }
@ -711,12 +720,9 @@ void MacroAssembler::Dmul(Register rd, Register rs, const Operand& rt) {
// li handles the relocation. // li handles the relocation.
ASSERT(!rs.is(at)); ASSERT(!rs.is(at));
li(at, rt); li(at, rt);
if (kArchVariant == kLoongson) { if (kArchVariant == kMips64r6) {
dmult(rs, at); dmul(rd, rs, at);
mflo(rd);
} else { } else {
// TODO(yuyin):
// dmul(rd, rs, at);
dmult(rs, at); dmult(rs, at);
mflo(rd); mflo(rd);
} }
@ -724,6 +730,28 @@ void MacroAssembler::Dmul(Register rd, Register rs, const Operand& rt) {
} }
void MacroAssembler::Dmulh(Register rd, Register rs, const Operand& rt) {
if (rt.is_reg()) {
if (kArchVariant == kMips64r6) {
dmuh(rd, rs, rt.rm());
} else {
dmult(rs, rt.rm());
mfhi(rd);
}
} else {
// li handles the relocation.
ASSERT(!rs.is(at));
li(at, rt);
if (kArchVariant == kMips64r6) {
dmuh(rd, rs, at);
} else {
dmult(rs, at);
mfhi(rd);
}
}
}
void MacroAssembler::Mult(Register rs, const Operand& rt) { void MacroAssembler::Mult(Register rs, const Operand& rt) {
if (rt.is_reg()) { if (rt.is_reg()) {
mult(rs, rt.rm()); mult(rs, rt.rm());
@ -796,6 +824,31 @@ void MacroAssembler::Ddiv(Register rs, const Operand& rt) {
} }
void MacroAssembler::Ddiv(Register rd, Register rs, const Operand& rt) {
if (kArchVariant != kMips64r6) {
if (rt.is_reg()) {
ddiv(rs, rt.rm());
mflo(rd);
} else {
// li handles the relocation.
ASSERT(!rs.is(at));
li(at, rt);
ddiv(rs, at);
mflo(rd);
}
} else {
if (rt.is_reg()) {
ddiv(rd, rs, rt.rm());
} else {
// li handles the relocation.
ASSERT(!rs.is(at));
li(at, rt);
ddiv(rd, rs, at);
}
}
}
void MacroAssembler::Divu(Register rs, const Operand& rt) { void MacroAssembler::Divu(Register rs, const Operand& rt) {
if (rt.is_reg()) { if (rt.is_reg()) {
divu(rs, rt.rm()); divu(rs, rt.rm());
@ -820,6 +873,31 @@ void MacroAssembler::Ddivu(Register rs, const Operand& rt) {
} }
void MacroAssembler::Dmod(Register rd, Register rs, const Operand& rt) {
if (kArchVariant != kMips64r6) {
if (rt.is_reg()) {
ddiv(rs, rt.rm());
mfhi(rd);
} else {
// li handles the relocation.
ASSERT(!rs.is(at));
li(at, rt);
ddiv(rs, at);
mfhi(rd);
}
} else {
if (rt.is_reg()) {
dmod(rd, rs, rt.rm());
} else {
// li handles the relocation.
ASSERT(!rs.is(at));
li(at, rt);
dmod(rd, rs, at);
}
}
}
void MacroAssembler::And(Register rd, Register rs, const Operand& rt) { void MacroAssembler::And(Register rd, Register rs, const Operand& rt) {
if (rt.is_reg()) { if (rt.is_reg()) {
and_(rd, rs, rt.rm()); and_(rd, rs, rt.rm());
@ -957,11 +1035,7 @@ void MacroAssembler::Dror(Register rd, Register rs, const Operand& rt) {
void MacroAssembler::Pref(int32_t hint, const MemOperand& rs) { void MacroAssembler::Pref(int32_t hint, const MemOperand& rs) {
if (kArchVariant == kLoongson) {
lw(zero_reg, rs);
} else {
pref(hint, rs); pref(hint, rs);
}
} }
@ -1385,49 +1459,102 @@ void MacroAssembler::BranchF(Label* target,
ASSERT(nan || target); ASSERT(nan || target);
// Check for unordered (NaN) cases. // Check for unordered (NaN) cases.
if (nan) { if (nan) {
c(UN, D, cmp1, cmp2); if (kArchVariant != kMips64r6) {
bc1t(nan); c(UN, D, cmp1, cmp2);
bc1t(nan);
} else {
// Use f31 for comparison result. It has to be unavailable to lithium
// register allocator.
ASSERT(!cmp1.is(f31) && !cmp2.is(f31));
cmp(UN, L, f31, cmp1, cmp2);
bc1nez(nan, f31);
}
} }
if (target) { if (kArchVariant != kMips64r6) {
// Here NaN cases were either handled by this function or are assumed to if (target) {
// have been handled by the caller. // Here NaN cases were either handled by this function or are assumed to
// Unsigned conditions are treated as their signed counterpart. // have been handled by the caller.
switch (cc) { switch (cc) {
case lt: case lt:
c(OLT, D, cmp1, cmp2); c(OLT, D, cmp1, cmp2);
bc1t(target); bc1t(target);
break; break;
case gt: case gt:
c(ULE, D, cmp1, cmp2); c(ULE, D, cmp1, cmp2);
bc1f(target); bc1f(target);
break; break;
case ge: case ge:
c(ULT, D, cmp1, cmp2); c(ULT, D, cmp1, cmp2);
bc1f(target); bc1f(target);
break; break;
case le: case le:
c(OLE, D, cmp1, cmp2); c(OLE, D, cmp1, cmp2);
bc1t(target); bc1t(target);
break; break;
case eq: case eq:
c(EQ, D, cmp1, cmp2); c(EQ, D, cmp1, cmp2);
bc1t(target); bc1t(target);
break; break;
case ueq: case ueq:
c(UEQ, D, cmp1, cmp2); c(UEQ, D, cmp1, cmp2);
bc1t(target); bc1t(target);
break; break;
case ne: case ne:
c(EQ, D, cmp1, cmp2); c(EQ, D, cmp1, cmp2);
bc1f(target); bc1f(target);
break; break;
case nue: case nue:
c(UEQ, D, cmp1, cmp2); c(UEQ, D, cmp1, cmp2);
bc1f(target); bc1f(target);
break; break;
default: default:
CHECK(0); CHECK(0);
}
}
} else {
if (target) {
// Here NaN cases were either handled by this function or are assumed to
// have been handled by the caller.
// Unsigned conditions are treated as their signed counterpart.
// Use f31 for comparison result, it is valid in fp64 (FR = 1) mode.
ASSERT(!cmp1.is(f31) && !cmp2.is(f31));
switch (cc) {
case lt:
cmp(OLT, L, f31, cmp1, cmp2);
bc1nez(target, f31);
break;
case gt:
cmp(ULE, L, f31, cmp1, cmp2);
bc1eqz(target, f31);
break;
case ge:
cmp(ULT, L, f31, cmp1, cmp2);
bc1eqz(target, f31);
break;
case le:
cmp(OLE, L, f31, cmp1, cmp2);
bc1nez(target, f31);
break;
case eq:
cmp(EQ, L, f31, cmp1, cmp2);
bc1nez(target, f31);
break;
case ueq:
cmp(UEQ, L, f31, cmp1, cmp2);
bc1nez(target, f31);
break;
case ne:
cmp(EQ, L, f31, cmp1, cmp2);
bc1eqz(target, f31);
break;
case nue:
cmp(UEQ, L, f31, cmp1, cmp2);
bc1eqz(target, f31);
break;
default:
CHECK(0);
}
} }
} }
@ -1471,7 +1598,7 @@ void MacroAssembler::Move(FPURegister dst, double imm) {
void MacroAssembler::Movz(Register rd, Register rs, Register rt) { void MacroAssembler::Movz(Register rd, Register rs, Register rt) {
if (kArchVariant == kLoongson) { if (kArchVariant == kMips64r6) {
Label done; Label done;
Branch(&done, ne, rt, Operand(zero_reg)); Branch(&done, ne, rt, Operand(zero_reg));
mov(rd, rs); mov(rd, rs);
@ -1483,7 +1610,7 @@ void MacroAssembler::Movz(Register rd, Register rs, Register rt) {
void MacroAssembler::Movn(Register rd, Register rs, Register rt) { void MacroAssembler::Movn(Register rd, Register rs, Register rt) {
if (kArchVariant == kLoongson) { if (kArchVariant == kMips64r6) {
Label done; Label done;
Branch(&done, eq, rt, Operand(zero_reg)); Branch(&done, eq, rt, Operand(zero_reg));
mov(rd, rs); mov(rd, rs);
@ -2372,48 +2499,64 @@ void MacroAssembler::BranchAndLinkShort(int16_t offset, Condition cond,
// Signed comparison. // Signed comparison.
case greater: case greater:
// rs > rt
slt(scratch, r2, rs); slt(scratch, r2, rs);
daddiu(scratch, scratch, -1); beq(scratch, zero_reg, 2);
bgezal(scratch, offset); nop();
bal(offset);
break; break;
case greater_equal: case greater_equal:
// rs >= rt
slt(scratch, rs, r2); slt(scratch, rs, r2);
daddiu(scratch, scratch, -1); bne(scratch, zero_reg, 2);
bltzal(scratch, offset); nop();
bal(offset);
break; break;
case less: case less:
// rs < r2
slt(scratch, rs, r2); slt(scratch, rs, r2);
daddiu(scratch, scratch, -1); bne(scratch, zero_reg, 2);
bgezal(scratch, offset); nop();
bal(offset);
break; break;
case less_equal: case less_equal:
// rs <= r2
slt(scratch, r2, rs); slt(scratch, r2, rs);
daddiu(scratch, scratch, -1); bne(scratch, zero_reg, 2);
bltzal(scratch, offset); nop();
bal(offset);
break; break;
// Unsigned comparison. // Unsigned comparison.
case Ugreater: case Ugreater:
// rs > rt
sltu(scratch, r2, rs); sltu(scratch, r2, rs);
daddiu(scratch, scratch, -1); beq(scratch, zero_reg, 2);
bgezal(scratch, offset); nop();
bal(offset);
break; break;
case Ugreater_equal: case Ugreater_equal:
// rs >= rt
sltu(scratch, rs, r2); sltu(scratch, rs, r2);
daddiu(scratch, scratch, -1); bne(scratch, zero_reg, 2);
bltzal(scratch, offset); nop();
bal(offset);
break; break;
case Uless: case Uless:
// rs < r2
sltu(scratch, rs, r2); sltu(scratch, rs, r2);
daddiu(scratch, scratch, -1); bne(scratch, zero_reg, 2);
bgezal(scratch, offset); nop();
bal(offset);
break; break;
case Uless_equal: case Uless_equal:
// rs <= r2
sltu(scratch, r2, rs); sltu(scratch, r2, rs);
daddiu(scratch, scratch, -1); bne(scratch, zero_reg, 2);
bltzal(scratch, offset); nop();
bal(offset);
break; break;
default: default:
UNREACHABLE(); UNREACHABLE();
} }
@ -2470,54 +2613,71 @@ void MacroAssembler::BranchAndLinkShort(Label* L, Condition cond, Register rs,
// Signed comparison. // Signed comparison.
case greater: case greater:
// rs > rt
slt(scratch, r2, rs); slt(scratch, r2, rs);
daddiu(scratch, scratch, -1); beq(scratch, zero_reg, 2);
nop();
offset = shifted_branch_offset(L, false); offset = shifted_branch_offset(L, false);
bgezal(scratch, offset); bal(offset);
break; break;
case greater_equal: case greater_equal:
// rs >= rt
slt(scratch, rs, r2); slt(scratch, rs, r2);
daddiu(scratch, scratch, -1); bne(scratch, zero_reg, 2);
nop();
offset = shifted_branch_offset(L, false); offset = shifted_branch_offset(L, false);
bltzal(scratch, offset); bal(offset);
break; break;
case less: case less:
// rs < r2
slt(scratch, rs, r2); slt(scratch, rs, r2);
daddiu(scratch, scratch, -1); bne(scratch, zero_reg, 2);
nop();
offset = shifted_branch_offset(L, false); offset = shifted_branch_offset(L, false);
bgezal(scratch, offset); bal(offset);
break; break;
case less_equal: case less_equal:
// rs <= r2
slt(scratch, r2, rs); slt(scratch, r2, rs);
daddiu(scratch, scratch, -1); bne(scratch, zero_reg, 2);
nop();
offset = shifted_branch_offset(L, false); offset = shifted_branch_offset(L, false);
bltzal(scratch, offset); bal(offset);
break; break;
// Unsigned comparison. // Unsigned comparison.
case Ugreater: case Ugreater:
// rs > rt
sltu(scratch, r2, rs); sltu(scratch, r2, rs);
daddiu(scratch, scratch, -1); beq(scratch, zero_reg, 2);
nop();
offset = shifted_branch_offset(L, false); offset = shifted_branch_offset(L, false);
bgezal(scratch, offset); bal(offset);
break; break;
case Ugreater_equal: case Ugreater_equal:
// rs >= rt
sltu(scratch, rs, r2); sltu(scratch, rs, r2);
daddiu(scratch, scratch, -1); bne(scratch, zero_reg, 2);
nop();
offset = shifted_branch_offset(L, false); offset = shifted_branch_offset(L, false);
bltzal(scratch, offset); bal(offset);
break; break;
case Uless: case Uless:
// rs < r2
sltu(scratch, rs, r2); sltu(scratch, rs, r2);
daddiu(scratch, scratch, -1); bne(scratch, zero_reg, 2);
nop();
offset = shifted_branch_offset(L, false); offset = shifted_branch_offset(L, false);
bgezal(scratch, offset); bal(offset);
break; break;
case Uless_equal: case Uless_equal:
// rs <= r2
sltu(scratch, r2, rs); sltu(scratch, r2, rs);
daddiu(scratch, scratch, -1); bne(scratch, zero_reg, 2);
nop();
offset = shifted_branch_offset(L, false); offset = shifted_branch_offset(L, false);
bltzal(scratch, offset); bal(offset);
break; break;
default: default:
@ -5456,10 +5616,7 @@ void MacroAssembler::CheckPageFlag(
int mask, int mask,
Condition cc, Condition cc,
Label* condition_met) { Label* condition_met) {
// TODO(plind): Fix li() so we can use constant embedded inside And(). And(scratch, object, Operand(~Page::kPageAlignmentMask));
// And(scratch, object, Operand(~Page::kPageAlignmentMask));
li(at, Operand(~Page::kPageAlignmentMask), CONSTANT_SIZE); // plind HACK
And(scratch, object, at);
ld(scratch, MemOperand(scratch, MemoryChunk::kFlagsOffset)); ld(scratch, MemOperand(scratch, MemoryChunk::kFlagsOffset));
And(scratch, scratch, Operand(mask)); And(scratch, scratch, Operand(mask));
Branch(condition_met, cc, scratch, Operand(zero_reg)); Branch(condition_met, cc, scratch, Operand(zero_reg));
@ -5933,8 +6090,7 @@ void MacroAssembler::TruncatingDiv(Register result,
ASSERT(!result.is(at)); ASSERT(!result.is(at));
MultiplierAndShift ms(divisor); MultiplierAndShift ms(divisor);
li(at, Operand(ms.multiplier())); li(at, Operand(ms.multiplier()));
Mult(dividend, Operand(at)); Mulh(result, dividend, Operand(at));
mfhi(result);
if (divisor > 0 && ms.multiplier() < 0) { if (divisor > 0 && ms.multiplier() < 0) {
Addu(result, result, Operand(dividend)); Addu(result, result, Operand(dividend));
} }

View File

@ -606,10 +606,14 @@ class MacroAssembler: public Assembler {
DEFINE_INSTRUCTION(Addu); DEFINE_INSTRUCTION(Addu);
DEFINE_INSTRUCTION(Daddu); DEFINE_INSTRUCTION(Daddu);
DEFINE_INSTRUCTION(Ddiv);
DEFINE_INSTRUCTION(Subu); DEFINE_INSTRUCTION(Subu);
DEFINE_INSTRUCTION(Dsubu); DEFINE_INSTRUCTION(Dsubu);
DEFINE_INSTRUCTION(Dmod);
DEFINE_INSTRUCTION(Mul); DEFINE_INSTRUCTION(Mul);
DEFINE_INSTRUCTION(Mulh);
DEFINE_INSTRUCTION(Dmul); DEFINE_INSTRUCTION(Dmul);
DEFINE_INSTRUCTION(Dmulh);
DEFINE_INSTRUCTION2(Mult); DEFINE_INSTRUCTION2(Mult);
DEFINE_INSTRUCTION2(Dmult); DEFINE_INSTRUCTION2(Dmult);
DEFINE_INSTRUCTION2(Multu); DEFINE_INSTRUCTION2(Multu);

View File

@ -1955,9 +1955,6 @@ void Simulator::ConfigureTypeRegister(Instruction* instr,
switch (op) { switch (op) {
case COP1: // Coprocessor instructions. case COP1: // Coprocessor instructions.
switch (instr->RsFieldRaw()) { switch (instr->RsFieldRaw()) {
case BC1: // Handled in DecodeTypeImmed, should never come here.
UNREACHABLE();
break;
case CFC1: case CFC1:
// At the moment only FCSR is supported. // At the moment only FCSR is supported.
ASSERT(fs_reg == kFCSRRegister); ASSERT(fs_reg == kFCSRRegister);
@ -1976,8 +1973,6 @@ void Simulator::ConfigureTypeRegister(Instruction* instr,
case MTC1: case MTC1:
case DMTC1: case DMTC1:
case MTHC1: case MTHC1:
// Do the store in the execution step.
break;
case S: case S:
case D: case D:
case W: case W:
@ -1986,7 +1981,8 @@ void Simulator::ConfigureTypeRegister(Instruction* instr,
// Do everything in the execution step. // Do everything in the execution step.
break; break;
default: default:
UNIMPLEMENTED_MIPS(); // BC1 BC1EQZ BC1NEZ handled in DecodeTypeImmed, should never come here.
UNREACHABLE();
} }
break; break;
case COP1X: case COP1X:
@ -2071,13 +2067,23 @@ void Simulator::ConfigureTypeRegister(Instruction* instr,
case DSRAV: case DSRAV:
*alu_out = rt >> rs; *alu_out = rt >> rs;
break; break;
case MFHI: case MFHI: // MFHI == CLZ on R6.
*alu_out = get_register(HI); if (kArchVariant != kMips64r6) {
ASSERT(instr->SaValue() == 0);
*alu_out = get_register(HI);
} else {
// MIPS spec: If no bits were set in GPR rs, the result written to
// GPR rd is 32.
// GCC __builtin_clz: If input is 0, the result is undefined.
ASSERT(instr->SaValue() == 1);
*alu_out =
rs_u == 0 ? 32 : CompilerIntrinsics::CountLeadingZeros(rs_u);
}
break; break;
case MFLO: case MFLO:
*alu_out = get_register(LO); *alu_out = get_register(LO);
break; break;
case MULT: case MULT: // MULT == D_MUL_MUH.
// TODO(plind) - Unify MULT/DMULT with single set of 64-bit HI/Lo // TODO(plind) - Unify MULT/DMULT with single set of 64-bit HI/Lo
// regs. // regs.
// TODO(plind) - make the 32-bit MULT ops conform to spec regarding // TODO(plind) - make the 32-bit MULT ops conform to spec regarding
@ -2088,9 +2094,23 @@ void Simulator::ConfigureTypeRegister(Instruction* instr,
case MULTU: case MULTU:
*u64hilo = static_cast<uint64_t>(rs_u) * static_cast<uint64_t>(rt_u); *u64hilo = static_cast<uint64_t>(rs_u) * static_cast<uint64_t>(rt_u);
break; break;
case DMULT: case DMULT: // DMULT == D_MUL_MUH.
*i128resultH = MultiplyHighSigned(rs, rt); if (kArchVariant != kMips64r6) {
*i128resultL = rs * rt; *i128resultH = MultiplyHighSigned(rs, rt);
*i128resultL = rs * rt;
} else {
switch (instr->SaValue()) {
case MUL_OP:
*i128resultL = rs * rt;
break;
case MUH_OP:
*i128resultH = MultiplyHighSigned(rs, rt);
break;
default:
UNIMPLEMENTED_MIPS();
break;
}
}
break; break;
case DMULTU: case DMULTU:
UNIMPLEMENTED_MIPS(); UNIMPLEMENTED_MIPS();
@ -2295,6 +2315,8 @@ void Simulator::DecodeTypeRegister(Instruction* instr) {
case COP1: case COP1:
switch (instr->RsFieldRaw()) { switch (instr->RsFieldRaw()) {
case BC1: // Branch on coprocessor condition. case BC1: // Branch on coprocessor condition.
case BC1EQZ:
case BC1NEZ:
UNREACHABLE(); UNREACHABLE();
break; break;
case CFC1: case CFC1:
@ -2328,20 +2350,9 @@ void Simulator::DecodeTypeRegister(Instruction* instr) {
f = get_fpu_register_float(fs_reg); f = get_fpu_register_float(fs_reg);
set_fpu_register_double(fd_reg, static_cast<double>(f)); set_fpu_register_double(fd_reg, static_cast<double>(f));
break; break;
case CVT_W_S:
case CVT_L_S:
case TRUNC_W_S:
case TRUNC_L_S:
case ROUND_W_S:
case ROUND_L_S:
case FLOOR_W_S:
case FLOOR_L_S:
case CEIL_W_S:
case CEIL_L_S:
case CVT_PS_S:
UNIMPLEMENTED_MIPS();
break;
default: default:
// CVT_W_S CVT_L_S TRUNC_W_S ROUND_W_S ROUND_L_S FLOOR_W_S FLOOR_L_S
// CEIL_W_S CEIL_L_S CVT_PS_S are unimplemented.
UNREACHABLE(); UNREACHABLE();
} }
break; break;
@ -2514,25 +2525,77 @@ void Simulator::DecodeTypeRegister(Instruction* instr) {
alu_out = get_fpu_register_signed_word(fs_reg); alu_out = get_fpu_register_signed_word(fs_reg);
set_fpu_register_double(fd_reg, static_cast<double>(alu_out)); set_fpu_register_double(fd_reg, static_cast<double>(alu_out));
break; break;
default: default: // Mips64r6 CMP.S instructions unimplemented.
UNREACHABLE(); UNREACHABLE();
} }
break; break;
case L: case L:
fs = get_fpu_register_double(fs_reg);
ft = get_fpu_register_double(ft_reg);
switch (instr->FunctionFieldRaw()) { switch (instr->FunctionFieldRaw()) {
case CVT_D_L: // Mips32r2 instruction. case CVT_D_L: // Mips32r2 instruction.
i64 = get_fpu_register(fs_reg); i64 = get_fpu_register(fs_reg);
set_fpu_register_double(fd_reg, static_cast<double>(i64)); set_fpu_register_double(fd_reg, static_cast<double>(i64));
break; break;
case CVT_S_L: case CVT_S_L:
UNIMPLEMENTED_MIPS(); UNIMPLEMENTED_MIPS();
break; break;
default: case CMP_AF: // Mips64r6 CMP.D instructions.
UNIMPLEMENTED_MIPS();
break;
case CMP_UN:
if (std::isnan(fs) || std::isnan(ft)) {
set_fpu_register(fd_reg, -1);
} else {
set_fpu_register(fd_reg, 0);
}
break;
case CMP_EQ:
if (fs == ft) {
set_fpu_register(fd_reg, -1);
} else {
set_fpu_register(fd_reg, 0);
}
break;
case CMP_UEQ:
if ((fs == ft) || (std::isnan(fs) || std::isnan(ft))) {
set_fpu_register(fd_reg, -1);
} else {
set_fpu_register(fd_reg, 0);
}
break;
case CMP_LT:
if (fs < ft) {
set_fpu_register(fd_reg, -1);
} else {
set_fpu_register(fd_reg, 0);
}
break;
case CMP_ULT:
if ((fs < ft) || (std::isnan(fs) || std::isnan(ft))) {
set_fpu_register(fd_reg, -1);
} else {
set_fpu_register(fd_reg, 0);
}
break;
case CMP_LE:
if (fs <= ft) {
set_fpu_register(fd_reg, -1);
} else {
set_fpu_register(fd_reg, 0);
}
break;
case CMP_ULE:
if ((fs <= ft) || (std::isnan(fs) || std::isnan(ft))) {
set_fpu_register(fd_reg, -1);
} else {
set_fpu_register(fd_reg, 0);
}
break;
default: // CMP_OR CMP_UNE CMP_NE UNIMPLEMENTED
UNREACHABLE(); UNREACHABLE();
} }
break; break;
case PS:
break;
default: default:
UNREACHABLE(); UNREACHABLE();
} }
@ -2572,32 +2635,91 @@ void Simulator::DecodeTypeRegister(Instruction* instr) {
} }
// Instructions using HI and LO registers. // Instructions using HI and LO registers.
case MULT: case MULT:
set_register(LO, static_cast<int32_t>(i64hilo & 0xffffffff)); if (kArchVariant != kMips64r6) {
set_register(HI, static_cast<int32_t>(i64hilo >> 32)); set_register(LO, static_cast<int32_t>(i64hilo & 0xffffffff));
set_register(HI, static_cast<int32_t>(i64hilo >> 32));
} else {
switch (instr->SaValue()) {
case MUL_OP:
set_register(rd_reg,
static_cast<int32_t>(i64hilo & 0xffffffff));
break;
case MUH_OP:
set_register(rd_reg, static_cast<int32_t>(i64hilo >> 32));
break;
default:
UNIMPLEMENTED_MIPS();
break;
}
}
break; break;
case MULTU: case MULTU:
set_register(LO, static_cast<int32_t>(u64hilo & 0xffffffff)); set_register(LO, static_cast<int32_t>(u64hilo & 0xffffffff));
set_register(HI, static_cast<int32_t>(u64hilo >> 32)); set_register(HI, static_cast<int32_t>(u64hilo >> 32));
break; break;
case DMULT: case DMULT: // DMULT == D_MUL_MUH.
set_register(LO, static_cast<int64_t>(i128resultL)); if (kArchVariant != kMips64r6) {
set_register(HI, static_cast<int64_t>(i128resultH)); set_register(LO, static_cast<int64_t>(i128resultL));
set_register(HI, static_cast<int64_t>(i128resultH));
} else {
switch (instr->SaValue()) {
case MUL_OP:
set_register(rd_reg, static_cast<int64_t>(i128resultL));
break;
case MUH_OP:
set_register(rd_reg, static_cast<int64_t>(i128resultH));
break;
default:
UNIMPLEMENTED_MIPS();
break;
}
}
break; break;
case DMULTU: case DMULTU:
UNIMPLEMENTED_MIPS(); UNIMPLEMENTED_MIPS();
break; break;
case DSLL:
set_register(rd_reg, alu_out);
break;
case DIV: case DIV:
case DDIV: case DDIV:
// Divide by zero and overflow was not checked in the configuration switch (kArchVariant) {
// step - div and divu do not raise exceptions. On division by 0 case kMips64r2:
// the result will be UNPREDICTABLE. On overflow (INT_MIN/-1), // Divide by zero and overflow was not checked in the
// return INT_MIN which is what the hardware does. // configuration step - div and divu do not raise exceptions. On
if (rs == INT_MIN && rt == -1) { // division by 0 the result will be UNPREDICTABLE. On overflow
set_register(LO, INT_MIN); // (INT_MIN/-1), return INT_MIN which is what the hardware does.
set_register(HI, 0); if (rs == INT_MIN && rt == -1) {
} else if (rt != 0) { set_register(LO, INT_MIN);
set_register(LO, rs / rt); set_register(HI, 0);
set_register(HI, rs % rt); } else if (rt != 0) {
set_register(LO, rs / rt);
set_register(HI, rs % rt);
}
break;
case kMips64r6:
switch (instr->SaValue()) {
case DIV_OP:
if (rs == INT_MIN && rt == -1) {
set_register(rd_reg, INT_MIN);
} else if (rt != 0) {
set_register(rd_reg, rs / rt);
}
break;
case MOD_OP:
if (rs == INT_MIN && rt == -1) {
set_register(rd_reg, 0);
} else if (rt != 0) {
set_register(rd_reg, rs % rt);
}
break;
default:
UNIMPLEMENTED_MIPS();
break;
}
break;
default:
break;
} }
break; break;
case DIVU: case DIVU:
@ -2696,6 +2818,7 @@ void Simulator::DecodeTypeImmediate(Instruction* instr) {
int16_t imm16 = instr->Imm16Value(); int16_t imm16 = instr->Imm16Value();
int32_t ft_reg = instr->FtValue(); // Destination register. int32_t ft_reg = instr->FtValue(); // Destination register.
int64_t ft = get_fpu_register(ft_reg);
// Zero extended immediate. // Zero extended immediate.
uint32_t oe_imm16 = 0xffff & imm16; uint32_t oe_imm16 = 0xffff & imm16;
@ -2742,6 +2865,26 @@ void Simulator::DecodeTypeImmediate(Instruction* instr) {
next_pc = current_pc + kBranchReturnOffset; next_pc = current_pc + kBranchReturnOffset;
} }
break; break;
case BC1EQZ:
do_branch = (ft & 0x1) ? false : true;
execute_branch_delay_instruction = true;
// Set next_pc.
if (do_branch) {
next_pc = current_pc + (imm16 << 2) + Instruction::kInstrSize;
} else {
next_pc = current_pc + kBranchReturnOffset;
}
break;
case BC1NEZ:
do_branch = (ft & 0x1) ? true : false;
execute_branch_delay_instruction = true;
// Set next_pc.
if (do_branch) {
next_pc = current_pc + (imm16 << 2) + Instruction::kInstrSize;
} else {
next_pc = current_pc + kBranchReturnOffset;
}
break;
default: default:
UNREACHABLE(); UNREACHABLE();
} }

View File

@ -18064,7 +18064,7 @@ THREADED_TEST(QuietSignalingNaNs) {
uint64_t stored_bits = DoubleToBits(stored_number); uint64_t stored_bits = DoubleToBits(stored_number);
// Check if quiet nan (bits 51..62 all set). // Check if quiet nan (bits 51..62 all set).
#if (defined(V8_TARGET_ARCH_MIPS) || defined(V8_TARGET_ARCH_MIPS64)) && \ #if (defined(V8_TARGET_ARCH_MIPS) || defined(V8_TARGET_ARCH_MIPS64)) && \
!defined(USE_SIMULATOR) !defined(_MIPS_ARCH_MIPS64R6) && !defined(USE_SIMULATOR)
// Most significant fraction bit for quiet nan is set to 0 // Most significant fraction bit for quiet nan is set to 0
// on MIPS architecture. Allowed by IEEE-754. // on MIPS architecture. Allowed by IEEE-754.
CHECK_EQ(0xffe, static_cast<int>((stored_bits >> 51) & 0xfff)); CHECK_EQ(0xffe, static_cast<int>((stored_bits >> 51) & 0xfff));
@ -18085,7 +18085,7 @@ THREADED_TEST(QuietSignalingNaNs) {
uint64_t stored_bits = DoubleToBits(stored_date); uint64_t stored_bits = DoubleToBits(stored_date);
// Check if quiet nan (bits 51..62 all set). // Check if quiet nan (bits 51..62 all set).
#if (defined(V8_TARGET_ARCH_MIPS) || defined(V8_TARGET_ARCH_MIPS64)) && \ #if (defined(V8_TARGET_ARCH_MIPS) || defined(V8_TARGET_ARCH_MIPS64)) && \
!defined(USE_SIMULATOR) !defined(_MIPS_ARCH_MIPS64R6) && !defined(USE_SIMULATOR)
// Most significant fraction bit for quiet nan is set to 0 // Most significant fraction bit for quiet nan is set to 0
// on MIPS architecture. Allowed by IEEE-754. // on MIPS architecture. Allowed by IEEE-754.
CHECK_EQ(0xffe, static_cast<int>((stored_bits >> 51) & 0xfff)); CHECK_EQ(0xffe, static_cast<int>((stored_bits >> 51) & 0xfff));

View File

@ -557,21 +557,27 @@ TEST(MIPS7) {
__ ldc1(f4, MemOperand(a0, OFFSET_OF(T, a)) ); __ ldc1(f4, MemOperand(a0, OFFSET_OF(T, a)) );
__ ldc1(f6, MemOperand(a0, OFFSET_OF(T, b)) ); __ ldc1(f6, MemOperand(a0, OFFSET_OF(T, b)) );
__ c(UN, D, f4, f6); if (kArchVariant != kMips64r6) {
__ bc1f(&neither_is_nan); __ c(UN, D, f4, f6);
__ bc1f(&neither_is_nan);
} else {
__ cmp(UN, L, f2, f4, f6);
__ bc1eqz(&neither_is_nan, f2);
}
__ nop(); __ nop();
__ sw(zero_reg, MemOperand(a0, OFFSET_OF(T, result)) ); __ sw(zero_reg, MemOperand(a0, OFFSET_OF(T, result)) );
__ Branch(&outa_here); __ Branch(&outa_here);
__ bind(&neither_is_nan); __ bind(&neither_is_nan);
if (kArchVariant == kLoongson) { if (kArchVariant == kMips64r6) {
__ c(OLT, D, f6, f4); __ cmp(OLT, L, f2, f6, f4);
__ bc1t(&less_than); __ bc1nez(&less_than, f2);
} else { } else {
__ c(OLT, D, f6, f4, 2); __ c(OLT, D, f6, f4, 2);
__ bc1t(&less_than, 2); __ bc1t(&less_than, 2);
} }
__ nop(); __ nop();
__ sw(zero_reg, MemOperand(a0, OFFSET_OF(T, result)) ); __ sw(zero_reg, MemOperand(a0, OFFSET_OF(T, result)) );
__ Branch(&outa_here); __ Branch(&outa_here);
@ -832,144 +838,147 @@ TEST(MIPS10) {
TEST(MIPS11) { TEST(MIPS11) {
// Test LWL, LWR, SWL and SWR instructions. // Do not run test on MIPS64r6, as these instructions are removed.
CcTest::InitializeVM(); if (kArchVariant != kMips64r6) {
Isolate* isolate = CcTest::i_isolate(); // Test LWL, LWR, SWL and SWR instructions.
HandleScope scope(isolate); CcTest::InitializeVM();
Isolate* isolate = CcTest::i_isolate();
HandleScope scope(isolate);
typedef struct { typedef struct {
int32_t reg_init; int32_t reg_init;
int32_t mem_init; int32_t mem_init;
int32_t lwl_0; int32_t lwl_0;
int32_t lwl_1; int32_t lwl_1;
int32_t lwl_2; int32_t lwl_2;
int32_t lwl_3; int32_t lwl_3;
int32_t lwr_0; int32_t lwr_0;
int32_t lwr_1; int32_t lwr_1;
int32_t lwr_2; int32_t lwr_2;
int32_t lwr_3; int32_t lwr_3;
int32_t swl_0; int32_t swl_0;
int32_t swl_1; int32_t swl_1;
int32_t swl_2; int32_t swl_2;
int32_t swl_3; int32_t swl_3;
int32_t swr_0; int32_t swr_0;
int32_t swr_1; int32_t swr_1;
int32_t swr_2; int32_t swr_2;
int32_t swr_3; int32_t swr_3;
} T; } T;
T t; T t;
Assembler assm(isolate, NULL, 0); Assembler assm(isolate, NULL, 0);
// Test all combinations of LWL and vAddr. // Test all combinations of LWL and vAddr.
__ lw(a4, MemOperand(a0, OFFSET_OF(T, reg_init)) ); __ lw(a4, MemOperand(a0, OFFSET_OF(T, reg_init)) );
__ lwl(a4, MemOperand(a0, OFFSET_OF(T, mem_init)) ); __ lwl(a4, MemOperand(a0, OFFSET_OF(T, mem_init)) );
__ sw(a4, MemOperand(a0, OFFSET_OF(T, lwl_0)) ); __ sw(a4, MemOperand(a0, OFFSET_OF(T, lwl_0)) );
__ lw(a5, MemOperand(a0, OFFSET_OF(T, reg_init)) ); __ lw(a5, MemOperand(a0, OFFSET_OF(T, reg_init)) );
__ lwl(a5, MemOperand(a0, OFFSET_OF(T, mem_init) + 1) ); __ lwl(a5, MemOperand(a0, OFFSET_OF(T, mem_init) + 1) );
__ sw(a5, MemOperand(a0, OFFSET_OF(T, lwl_1)) ); __ sw(a5, MemOperand(a0, OFFSET_OF(T, lwl_1)) );
__ lw(a6, MemOperand(a0, OFFSET_OF(T, reg_init)) ); __ lw(a6, MemOperand(a0, OFFSET_OF(T, reg_init)) );
__ lwl(a6, MemOperand(a0, OFFSET_OF(T, mem_init) + 2) ); __ lwl(a6, MemOperand(a0, OFFSET_OF(T, mem_init) + 2) );
__ sw(a6, MemOperand(a0, OFFSET_OF(T, lwl_2)) ); __ sw(a6, MemOperand(a0, OFFSET_OF(T, lwl_2)) );
__ lw(a7, MemOperand(a0, OFFSET_OF(T, reg_init)) ); __ lw(a7, MemOperand(a0, OFFSET_OF(T, reg_init)) );
__ lwl(a7, MemOperand(a0, OFFSET_OF(T, mem_init) + 3) ); __ lwl(a7, MemOperand(a0, OFFSET_OF(T, mem_init) + 3) );
__ sw(a7, MemOperand(a0, OFFSET_OF(T, lwl_3)) ); __ sw(a7, MemOperand(a0, OFFSET_OF(T, lwl_3)) );
// Test all combinations of LWR and vAddr. // Test all combinations of LWR and vAddr.
__ lw(a4, MemOperand(a0, OFFSET_OF(T, reg_init)) ); __ lw(a4, MemOperand(a0, OFFSET_OF(T, reg_init)) );
__ lwr(a4, MemOperand(a0, OFFSET_OF(T, mem_init)) ); __ lwr(a4, MemOperand(a0, OFFSET_OF(T, mem_init)) );
__ sw(a4, MemOperand(a0, OFFSET_OF(T, lwr_0)) ); __ sw(a4, MemOperand(a0, OFFSET_OF(T, lwr_0)) );
__ lw(a5, MemOperand(a0, OFFSET_OF(T, reg_init)) ); __ lw(a5, MemOperand(a0, OFFSET_OF(T, reg_init)) );
__ lwr(a5, MemOperand(a0, OFFSET_OF(T, mem_init) + 1) ); __ lwr(a5, MemOperand(a0, OFFSET_OF(T, mem_init) + 1) );
__ sw(a5, MemOperand(a0, OFFSET_OF(T, lwr_1)) ); __ sw(a5, MemOperand(a0, OFFSET_OF(T, lwr_1)) );
__ lw(a6, MemOperand(a0, OFFSET_OF(T, reg_init)) ); __ lw(a6, MemOperand(a0, OFFSET_OF(T, reg_init)) );
__ lwr(a6, MemOperand(a0, OFFSET_OF(T, mem_init) + 2) ); __ lwr(a6, MemOperand(a0, OFFSET_OF(T, mem_init) + 2) );
__ sw(a6, MemOperand(a0, OFFSET_OF(T, lwr_2)) ); __ sw(a6, MemOperand(a0, OFFSET_OF(T, lwr_2)) );
__ lw(a7, MemOperand(a0, OFFSET_OF(T, reg_init)) ); __ lw(a7, MemOperand(a0, OFFSET_OF(T, reg_init)) );
__ lwr(a7, MemOperand(a0, OFFSET_OF(T, mem_init) + 3) ); __ lwr(a7, MemOperand(a0, OFFSET_OF(T, mem_init) + 3) );
__ sw(a7, MemOperand(a0, OFFSET_OF(T, lwr_3)) ); __ sw(a7, MemOperand(a0, OFFSET_OF(T, lwr_3)) );
// Test all combinations of SWL and vAddr. // Test all combinations of SWL and vAddr.
__ lw(a4, MemOperand(a0, OFFSET_OF(T, mem_init)) ); __ lw(a4, MemOperand(a0, OFFSET_OF(T, mem_init)) );
__ sw(a4, MemOperand(a0, OFFSET_OF(T, swl_0)) ); __ sw(a4, MemOperand(a0, OFFSET_OF(T, swl_0)) );
__ lw(a4, MemOperand(a0, OFFSET_OF(T, reg_init)) ); __ lw(a4, MemOperand(a0, OFFSET_OF(T, reg_init)) );
__ swl(a4, MemOperand(a0, OFFSET_OF(T, swl_0)) ); __ swl(a4, MemOperand(a0, OFFSET_OF(T, swl_0)) );
__ lw(a5, MemOperand(a0, OFFSET_OF(T, mem_init)) ); __ lw(a5, MemOperand(a0, OFFSET_OF(T, mem_init)) );
__ sw(a5, MemOperand(a0, OFFSET_OF(T, swl_1)) ); __ sw(a5, MemOperand(a0, OFFSET_OF(T, swl_1)) );
__ lw(a5, MemOperand(a0, OFFSET_OF(T, reg_init)) ); __ lw(a5, MemOperand(a0, OFFSET_OF(T, reg_init)) );
__ swl(a5, MemOperand(a0, OFFSET_OF(T, swl_1) + 1) ); __ swl(a5, MemOperand(a0, OFFSET_OF(T, swl_1) + 1) );
__ lw(a6, MemOperand(a0, OFFSET_OF(T, mem_init)) ); __ lw(a6, MemOperand(a0, OFFSET_OF(T, mem_init)) );
__ sw(a6, MemOperand(a0, OFFSET_OF(T, swl_2)) ); __ sw(a6, MemOperand(a0, OFFSET_OF(T, swl_2)) );
__ lw(a6, MemOperand(a0, OFFSET_OF(T, reg_init)) ); __ lw(a6, MemOperand(a0, OFFSET_OF(T, reg_init)) );
__ swl(a6, MemOperand(a0, OFFSET_OF(T, swl_2) + 2) ); __ swl(a6, MemOperand(a0, OFFSET_OF(T, swl_2) + 2) );
__ lw(a7, MemOperand(a0, OFFSET_OF(T, mem_init)) ); __ lw(a7, MemOperand(a0, OFFSET_OF(T, mem_init)) );
__ sw(a7, MemOperand(a0, OFFSET_OF(T, swl_3)) ); __ sw(a7, MemOperand(a0, OFFSET_OF(T, swl_3)) );
__ lw(a7, MemOperand(a0, OFFSET_OF(T, reg_init)) ); __ lw(a7, MemOperand(a0, OFFSET_OF(T, reg_init)) );
__ swl(a7, MemOperand(a0, OFFSET_OF(T, swl_3) + 3) ); __ swl(a7, MemOperand(a0, OFFSET_OF(T, swl_3) + 3) );
// Test all combinations of SWR and vAddr. // Test all combinations of SWR and vAddr.
__ lw(a4, MemOperand(a0, OFFSET_OF(T, mem_init)) ); __ lw(a4, MemOperand(a0, OFFSET_OF(T, mem_init)) );
__ sw(a4, MemOperand(a0, OFFSET_OF(T, swr_0)) ); __ sw(a4, MemOperand(a0, OFFSET_OF(T, swr_0)) );
__ lw(a4, MemOperand(a0, OFFSET_OF(T, reg_init)) ); __ lw(a4, MemOperand(a0, OFFSET_OF(T, reg_init)) );
__ swr(a4, MemOperand(a0, OFFSET_OF(T, swr_0)) ); __ swr(a4, MemOperand(a0, OFFSET_OF(T, swr_0)) );
__ lw(a5, MemOperand(a0, OFFSET_OF(T, mem_init)) ); __ lw(a5, MemOperand(a0, OFFSET_OF(T, mem_init)) );
__ sw(a5, MemOperand(a0, OFFSET_OF(T, swr_1)) ); __ sw(a5, MemOperand(a0, OFFSET_OF(T, swr_1)) );
__ lw(a5, MemOperand(a0, OFFSET_OF(T, reg_init)) ); __ lw(a5, MemOperand(a0, OFFSET_OF(T, reg_init)) );
__ swr(a5, MemOperand(a0, OFFSET_OF(T, swr_1) + 1) ); __ swr(a5, MemOperand(a0, OFFSET_OF(T, swr_1) + 1) );
__ lw(a6, MemOperand(a0, OFFSET_OF(T, mem_init)) ); __ lw(a6, MemOperand(a0, OFFSET_OF(T, mem_init)) );
__ sw(a6, MemOperand(a0, OFFSET_OF(T, swr_2)) ); __ sw(a6, MemOperand(a0, OFFSET_OF(T, swr_2)) );
__ lw(a6, MemOperand(a0, OFFSET_OF(T, reg_init)) ); __ lw(a6, MemOperand(a0, OFFSET_OF(T, reg_init)) );
__ swr(a6, MemOperand(a0, OFFSET_OF(T, swr_2) + 2) ); __ swr(a6, MemOperand(a0, OFFSET_OF(T, swr_2) + 2) );
__ lw(a7, MemOperand(a0, OFFSET_OF(T, mem_init)) ); __ lw(a7, MemOperand(a0, OFFSET_OF(T, mem_init)) );
__ sw(a7, MemOperand(a0, OFFSET_OF(T, swr_3)) ); __ sw(a7, MemOperand(a0, OFFSET_OF(T, swr_3)) );
__ lw(a7, MemOperand(a0, OFFSET_OF(T, reg_init)) ); __ lw(a7, MemOperand(a0, OFFSET_OF(T, reg_init)) );
__ swr(a7, MemOperand(a0, OFFSET_OF(T, swr_3) + 3) ); __ swr(a7, MemOperand(a0, OFFSET_OF(T, swr_3) + 3) );
__ jr(ra); __ jr(ra);
__ nop(); __ nop();
CodeDesc desc; CodeDesc desc;
assm.GetCode(&desc); assm.GetCode(&desc);
Handle<Code> code = isolate->factory()->NewCode( Handle<Code> code = isolate->factory()->NewCode(
desc, Code::ComputeFlags(Code::STUB), Handle<Code>()); desc, Code::ComputeFlags(Code::STUB), Handle<Code>());
F3 f = FUNCTION_CAST<F3>(code->entry()); F3 f = FUNCTION_CAST<F3>(code->entry());
t.reg_init = 0xaabbccdd; t.reg_init = 0xaabbccdd;
t.mem_init = 0x11223344; t.mem_init = 0x11223344;
Object* dummy = CALL_GENERATED_CODE(f, &t, 0, 0, 0, 0); Object* dummy = CALL_GENERATED_CODE(f, &t, 0, 0, 0, 0);
USE(dummy); USE(dummy);
CHECK_EQ(0x44bbccdd, t.lwl_0); CHECK_EQ(0x44bbccdd, t.lwl_0);
CHECK_EQ(0x3344ccdd, t.lwl_1); CHECK_EQ(0x3344ccdd, t.lwl_1);
CHECK_EQ(0x223344dd, t.lwl_2); CHECK_EQ(0x223344dd, t.lwl_2);
CHECK_EQ(0x11223344, t.lwl_3); CHECK_EQ(0x11223344, t.lwl_3);
CHECK_EQ(0x11223344, t.lwr_0); CHECK_EQ(0x11223344, t.lwr_0);
CHECK_EQ(0xaa112233, t.lwr_1); CHECK_EQ(0xaa112233, t.lwr_1);
CHECK_EQ(0xaabb1122, t.lwr_2); CHECK_EQ(0xaabb1122, t.lwr_2);
CHECK_EQ(0xaabbcc11, t.lwr_3); CHECK_EQ(0xaabbcc11, t.lwr_3);
CHECK_EQ(0x112233aa, t.swl_0); CHECK_EQ(0x112233aa, t.swl_0);
CHECK_EQ(0x1122aabb, t.swl_1); CHECK_EQ(0x1122aabb, t.swl_1);
CHECK_EQ(0x11aabbcc, t.swl_2); CHECK_EQ(0x11aabbcc, t.swl_2);
CHECK_EQ(0xaabbccdd, t.swl_3); CHECK_EQ(0xaabbccdd, t.swl_3);
CHECK_EQ(0xaabbccdd, t.swr_0); CHECK_EQ(0xaabbccdd, t.swr_0);
CHECK_EQ(0xbbccdd44, t.swr_1); CHECK_EQ(0xbbccdd44, t.swr_1);
CHECK_EQ(0xccdd3344, t.swr_2); CHECK_EQ(0xccdd3344, t.swr_2);
CHECK_EQ(0xdd223344, t.swr_3); CHECK_EQ(0xdd223344, t.swr_3);
}
} }

View File

@ -122,65 +122,196 @@ TEST(Type0) {
COMPARE(dsubu(v0, v1, s0), COMPARE(dsubu(v0, v1, s0),
"0070102f dsubu v0, v1, s0"); "0070102f dsubu v0, v1, s0");
COMPARE(mult(a0, a1), if (kArchVariant != kMips64r6) {
"00850018 mult a0, a1"); COMPARE(mult(a0, a1),
COMPARE(dmult(a0, a1), "00850018 mult a0, a1");
"0085001c dmult a0, a1"); COMPARE(dmult(a0, a1),
COMPARE(mult(a6, a7), "0085001c dmult a0, a1");
"014b0018 mult a6, a7"); COMPARE(mult(a6, a7),
COMPARE(dmult(a6, a7), "014b0018 mult a6, a7");
"014b001c dmult a6, a7"); COMPARE(dmult(a6, a7),
COMPARE(mult(v0, v1), "014b001c dmult a6, a7");
"00430018 mult v0, v1"); COMPARE(mult(v0, v1),
COMPARE(dmult(v0, v1), "00430018 mult v0, v1");
"0043001c dmult v0, v1"); COMPARE(dmult(v0, v1),
"0043001c dmult v0, v1");
COMPARE(multu(a0, a1), COMPARE(multu(a0, a1),
"00850019 multu a0, a1"); "00850019 multu a0, a1");
COMPARE(dmultu(a0, a1), COMPARE(dmultu(a0, a1),
"0085001d dmultu a0, a1"); "0085001d dmultu a0, a1");
COMPARE(multu(a6, a7), COMPARE(multu(a6, a7),
"014b0019 multu a6, a7"); "014b0019 multu a6, a7");
COMPARE(dmultu(a6, a7), COMPARE(dmultu(a6, a7),
"014b001d dmultu a6, a7"); "014b001d dmultu a6, a7");
COMPARE(multu(v0, v1), COMPARE(multu(v0, v1),
"00430019 multu v0, v1"); "00430019 multu v0, v1");
COMPARE(dmultu(v0, v1), COMPARE(dmultu(v0, v1),
"0043001d dmultu v0, v1"); "0043001d dmultu v0, v1");
COMPARE(div(a0, a1), COMPARE(div(a0, a1),
"0085001a div a0, a1"); "0085001a div a0, a1");
COMPARE(div(a6, a7), COMPARE(div(a6, a7),
"014b001a div a6, a7"); "014b001a div a6, a7");
COMPARE(div(v0, v1), COMPARE(div(v0, v1),
"0043001a div v0, v1"); "0043001a div v0, v1");
COMPARE(ddiv(a0, a1), COMPARE(ddiv(a0, a1),
"0085001e ddiv a0, a1"); "0085001e ddiv a0, a1");
COMPARE(ddiv(a6, a7), COMPARE(ddiv(a6, a7),
"014b001e ddiv a6, a7"); "014b001e ddiv a6, a7");
COMPARE(ddiv(v0, v1), COMPARE(ddiv(v0, v1),
"0043001e ddiv v0, v1"); "0043001e ddiv v0, v1");
COMPARE(divu(a0, a1), COMPARE(divu(a0, a1),
"0085001b divu a0, a1"); "0085001b divu a0, a1");
COMPARE(divu(a6, a7), COMPARE(divu(a6, a7),
"014b001b divu a6, a7"); "014b001b divu a6, a7");
COMPARE(divu(v0, v1), COMPARE(divu(v0, v1),
"0043001b divu v0, v1"); "0043001b divu v0, v1");
COMPARE(ddivu(a0, a1), COMPARE(ddivu(a0, a1),
"0085001f ddivu a0, a1"); "0085001f ddivu a0, a1");
COMPARE(ddivu(a6, a7), COMPARE(ddivu(a6, a7),
"014b001f ddivu a6, a7"); "014b001f ddivu a6, a7");
COMPARE(ddivu(v0, v1), COMPARE(ddivu(v0, v1),
"0043001f ddivu v0, v1"); "0043001f ddivu v0, v1");
if (kArchVariant != kLoongson) {
COMPARE(mul(a0, a1, a2), COMPARE(mul(a0, a1, a2),
"70a62002 mul a0, a1, a2"); "70a62002 mul a0, a1, a2");
COMPARE(mul(a6, a7, t0), COMPARE(mul(a6, a7, t0),
"716c5002 mul a6, a7, t0"); "716c5002 mul a6, a7, t0");
COMPARE(mul(v0, v1, s0), COMPARE(mul(v0, v1, s0),
"70701002 mul v0, v1, s0"); "70701002 mul v0, v1, s0");
} else { // MIPS64r6.
COMPARE(mul(a0, a1, a2),
"00a62098 mul a0, a1, a2");
COMPARE(muh(a0, a1, a2),
"00a620d8 muh a0, a1, a2");
COMPARE(dmul(a0, a1, a2),
"00a6209c dmul a0, a1, a2");
COMPARE(dmuh(a0, a1, a2),
"00a620dc dmuh a0, a1, a2");
COMPARE(mul(a5, a6, a7),
"014b4898 mul a5, a6, a7");
COMPARE(muh(a5, a6, a7),
"014b48d8 muh a5, a6, a7");
COMPARE(dmul(a5, a6, a7),
"014b489c dmul a5, a6, a7");
COMPARE(dmuh(a5, a6, a7),
"014b48dc dmuh a5, a6, a7");
COMPARE(mul(v0, v1, a0),
"00641098 mul v0, v1, a0");
COMPARE(muh(v0, v1, a0),
"006410d8 muh v0, v1, a0");
COMPARE(dmul(v0, v1, a0),
"0064109c dmul v0, v1, a0");
COMPARE(dmuh(v0, v1, a0),
"006410dc dmuh v0, v1, a0");
COMPARE(mulu(a0, a1, a2),
"00a62099 mulu a0, a1, a2");
COMPARE(muhu(a0, a1, a2),
"00a620d9 muhu a0, a1, a2");
COMPARE(dmulu(a0, a1, a2),
"00a6209d dmulu a0, a1, a2");
COMPARE(dmuhu(a0, a1, a2),
"00a620dd dmuhu a0, a1, a2");
COMPARE(mulu(a5, a6, a7),
"014b4899 mulu a5, a6, a7");
COMPARE(muhu(a5, a6, a7),
"014b48d9 muhu a5, a6, a7");
COMPARE(dmulu(a5, a6, a7),
"014b489d dmulu a5, a6, a7");
COMPARE(dmuhu(a5, a6, a7),
"014b48dd dmuhu a5, a6, a7");
COMPARE(mulu(v0, v1, a0),
"00641099 mulu v0, v1, a0");
COMPARE(muhu(v0, v1, a0),
"006410d9 muhu v0, v1, a0");
COMPARE(dmulu(v0, v1, a0),
"0064109d dmulu v0, v1, a0");
COMPARE(dmuhu(v0, v1, a0),
"006410dd dmuhu v0, v1, a0");
COMPARE(div(a0, a1, a2),
"00a6209a div a0, a1, a2");
COMPARE(mod(a0, a1, a2),
"00a620da mod a0, a1, a2");
COMPARE(ddiv(a0, a1, a2),
"00a6209e ddiv a0, a1, a2");
COMPARE(dmod(a0, a1, a2),
"00a620de dmod a0, a1, a2");
COMPARE(div(a5, a6, a7),
"014b489a div a5, a6, a7");
COMPARE(mod(a5, a6, a7),
"014b48da mod a5, a6, a7");
COMPARE(ddiv(a5, a6, a7),
"014b489e ddiv a5, a6, a7");
COMPARE(dmod(a5, a6, a7),
"014b48de dmod a5, a6, a7");
COMPARE(div(v0, v1, a0),
"0064109a div v0, v1, a0");
COMPARE(mod(v0, v1, a0),
"006410da mod v0, v1, a0");
COMPARE(ddiv(v0, v1, a0),
"0064109e ddiv v0, v1, a0");
COMPARE(dmod(v0, v1, a0),
"006410de dmod v0, v1, a0");
COMPARE(divu(a0, a1, a2),
"00a6209b divu a0, a1, a2");
COMPARE(modu(a0, a1, a2),
"00a620db modu a0, a1, a2");
COMPARE(ddivu(a0, a1, a2),
"00a6209f ddivu a0, a1, a2");
COMPARE(dmodu(a0, a1, a2),
"00a620df dmodu a0, a1, a2");
COMPARE(divu(a5, a6, a7),
"014b489b divu a5, a6, a7");
COMPARE(modu(a5, a6, a7),
"014b48db modu a5, a6, a7");
COMPARE(ddivu(a5, a6, a7),
"014b489f ddivu a5, a6, a7");
COMPARE(dmodu(a5, a6, a7),
"014b48df dmodu a5, a6, a7");
COMPARE(divu(v0, v1, a0),
"0064109b divu v0, v1, a0");
COMPARE(modu(v0, v1, a0),
"006410db modu v0, v1, a0");
COMPARE(ddivu(v0, v1, a0),
"0064109f ddivu v0, v1, a0");
COMPARE(dmodu(v0, v1, a0),
"006410df dmodu v0, v1, a0");
COMPARE(bovc(a0, a0, static_cast<int16_t>(0)),
"20840000 bovc a0, a0, 0");
COMPARE(bovc(a1, a0, static_cast<int16_t>(0)),
"20a40000 bovc a1, a0, 0");
COMPARE(bovc(a1, a0, 32767),
"20a47fff bovc a1, a0, 32767");
COMPARE(bovc(a1, a0, -32768),
"20a48000 bovc a1, a0, -32768");
COMPARE(bnvc(a0, a0, static_cast<int16_t>(0)),
"60840000 bnvc a0, a0, 0");
COMPARE(bnvc(a1, a0, static_cast<int16_t>(0)),
"60a40000 bnvc a1, a0, 0");
COMPARE(bnvc(a1, a0, 32767),
"60a47fff bnvc a1, a0, 32767");
COMPARE(bnvc(a1, a0, -32768),
"60a48000 bnvc a1, a0, -32768");
COMPARE(beqzc(a0, 0),
"d8800000 beqzc a0, 0x0");
COMPARE(beqzc(a0, 0xfffff), // 0x0fffff == 1048575.
"d88fffff beqzc a0, 0xfffff");
COMPARE(beqzc(a0, 0x100000), // 0x100000 == -1048576.
"d8900000 beqzc a0, 0x100000");
COMPARE(bnezc(a0, 0),
"f8800000 bnezc a0, 0x0");
COMPARE(bnezc(a0, 0xfffff), // 0x0fffff == 1048575.
"f88fffff bnezc a0, 0xfffff");
COMPARE(bnezc(a0, 0x100000), // 0x100000 == -1048576.
"f8900000 bnezc a0, 0x100000");
} }
COMPARE(addiu(a0, a1, 0x0), COMPARE(addiu(a0, a1, 0x0),
@ -476,42 +607,48 @@ TEST(Type0) {
"2d6a8000 sltiu a6, a7, -32768"); "2d6a8000 sltiu a6, a7, -32768");
COMPARE(sltiu(v0, v1, -1), COMPARE(sltiu(v0, v1, -1),
"2c62ffff sltiu v0, v1, -1"); "2c62ffff sltiu v0, v1, -1");
COMPARE(movz(a0, a1, a2),
"00a6200a movz a0, a1, a2");
COMPARE(movz(s0, s1, s2),
"0232800a movz s0, s1, s2");
COMPARE(movz(a6, a7, t0),
"016c500a movz a6, a7, t0");
COMPARE(movz(v0, v1, a2),
"0066100a movz v0, v1, a2");
COMPARE(movn(a0, a1, a2),
"00a6200b movn a0, a1, a2");
COMPARE(movn(s0, s1, s2),
"0232800b movn s0, s1, s2");
COMPARE(movn(a6, a7, t0),
"016c500b movn a6, a7, t0");
COMPARE(movn(v0, v1, a2),
"0066100b movn v0, v1, a2");
if (kArchVariant != kLoongson) { COMPARE(movt(a0, a1, 1),
COMPARE(movz(a0, a1, a2), "00a52001 movt a0, a1, 1");
"00a6200a movz a0, a1, a2"); COMPARE(movt(s0, s1, 2),
COMPARE(movz(s0, s1, s2), "02298001 movt s0, s1, 2");
"0232800a movz s0, s1, s2"); COMPARE(movt(a6, a7, 3),
COMPARE(movz(a6, a7, t0), "016d5001 movt a6, a7, 3");
"016c500a movz a6, a7, t0"); COMPARE(movt(v0, v1, 7),
COMPARE(movz(v0, v1, a2), "007d1001 movt v0, v1, 7");
"0066100a movz v0, v1, a2"); COMPARE(movf(a0, a1, 0),
COMPARE(movn(a0, a1, a2), "00a02001 movf a0, a1, 0");
"00a6200b movn a0, a1, a2"); COMPARE(movf(s0, s1, 4),
COMPARE(movn(s0, s1, s2), "02308001 movf s0, s1, 4");
"0232800b movn s0, s1, s2"); COMPARE(movf(a6, a7, 5),
COMPARE(movn(a6, a7, t0), "01745001 movf a6, a7, 5");
"016c500b movn a6, a7, t0"); COMPARE(movf(v0, v1, 6),
COMPARE(movn(v0, v1, a2), "00781001 movf v0, v1, 6");
"0066100b movn v0, v1, a2");
COMPARE(movt(a0, a1, 1),
"00a52001 movt a0, a1, 1");
COMPARE(movt(s0, s1, 2),
"02298001 movt s0, s1, 2");
COMPARE(movt(a6, a7, 3),
"016d5001 movt a6, a7, 3");
COMPARE(movt(v0, v1, 7),
"007d1001 movt v0, v1, 7");
COMPARE(movf(a0, a1, 0),
"00a02001 movf a0, a1, 0");
COMPARE(movf(s0, s1, 4),
"02308001 movf s0, s1, 4");
COMPARE(movf(a6, a7, 5),
"01745001 movf a6, a7, 5");
COMPARE(movf(v0, v1, 6),
"00781001 movf v0, v1, 6");
if (kArchVariant == kMips64r6) {
COMPARE(clz(a0, a1),
"00a02050 clz a0, a1");
COMPARE(clz(s6, s7),
"02e0b050 clz s6, s7");
COMPARE(clz(v0, v1),
"00601050 clz v0, v1");
} else {
COMPARE(clz(a0, a1), COMPARE(clz(a0, a1),
"70a42020 clz a0, a1"); "70a42020 clz a0, a1");
COMPARE(clz(s6, s7), COMPARE(clz(s6, s7),
@ -520,20 +657,18 @@ TEST(Type0) {
"70621020 clz v0, v1"); "70621020 clz v0, v1");
} }
if (kArchVariant == kMips64r2) { COMPARE(ins_(a0, a1, 31, 1),
COMPARE(ins_(a0, a1, 31, 1), "7ca4ffc4 ins a0, a1, 31, 1");
"7ca4ffc4 ins a0, a1, 31, 1"); COMPARE(ins_(s6, s7, 30, 2),
COMPARE(ins_(s6, s7, 30, 2), "7ef6ff84 ins s6, s7, 30, 2");
"7ef6ff84 ins s6, s7, 30, 2"); COMPARE(ins_(v0, v1, 0, 32),
COMPARE(ins_(v0, v1, 0, 32), "7c62f804 ins v0, v1, 0, 32");
"7c62f804 ins v0, v1, 0, 32"); COMPARE(ext_(a0, a1, 31, 1),
COMPARE(ext_(a0, a1, 31, 1), "7ca407c0 ext a0, a1, 31, 1");
"7ca407c0 ext a0, a1, 31, 1"); COMPARE(ext_(s6, s7, 30, 2),
COMPARE(ext_(s6, s7, 30, 2), "7ef60f80 ext s6, s7, 30, 2");
"7ef60f80 ext s6, s7, 30, 2"); COMPARE(ext_(v0, v1, 0, 32),
COMPARE(ext_(v0, v1, 0, 32), "7c62f800 ext v0, v1, 0, 32");
"7c62f800 ext v0, v1, 0, 32");
}
VERIFY_RUN(); VERIFY_RUN();
} }

View File

@ -414,6 +414,9 @@
# Currently always deopt on minus zero # Currently always deopt on minus zero
'math-floor-of-div-minus-zero': [SKIP], 'math-floor-of-div-minus-zero': [SKIP],
# BUG(v8:3457).
'deserialize-reference': [SKIP],
}], # 'arch == mips64el' }], # 'arch == mips64el'
['arch == mips64el and simulator_run == False', { ['arch == mips64el and simulator_run == False', {