[riscv64] Add RISC-V C-Extension Logic to macro-assembler

Lets the macro-assembler emit RISC-V C-extension (compressed) instructions
when the corresponding flag is set at runtime.

Change-Id: I443d026653b9945ac7ccff41b0ca3f7db9b65775
Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/3039384
Reviewed-by: Brice Dobry <brice.dobry@futurewei.com>
Reviewed-by: Ji Qiu <qiuji@iscas.ac.cn>
Commit-Queue: Ji Qiu <qiuji@iscas.ac.cn>
Cr-Commit-Position: refs/heads/master@{#76128}
Authored by Derek Tu on 2021-07-20 03:26:37 +00:00, committed by V8 LUCI CQ
parent 892af6645f
commit 8c5b3bed17
7 changed files with 311 additions and 103 deletions
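
Every macro-assembler change below follows one pattern: emit the 16-bit compressed (RVC) encoding only when the runtime flag is on and the operands satisfy that encoding's constraints, and fall back to the full 32-bit encoding otherwise. As a hedged illustration (a standalone helper, not V8 source; relocation via MustUseReg is ignored here), these are the conditions under which the Add64 hunk below may pick C.ADDI over ADDI:

#include <cstdint>

// Illustrative sketch only: mirrors the c_addi arm of Add64 further down.
// 'rd'/'rs' are integer register numbers; register 0 is zero_reg.
bool FitsCAddi(bool c_extension_enabled, int rd, int rs, int64_t imm) {
  bool is_int6 = (imm >= -32 && imm <= 31);  // signed 6-bit immediate
  return c_extension_enabled && rd == rs &&  // C.ADDI is destructive: rd == rs
         rd != 0 &&                          // zero_reg is not a valid target
         imm != 0 && is_int6;                // nonzero 6-bit imm, as in the diff
}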

View File

@@ -275,7 +275,7 @@ void Assembler::GetCode(Isolate* isolate, CodeDesc* desc,
void Assembler::Align(int m) {
DCHECK(m >= 4 && base::bits::IsPowerOfTwo(m));
while ((pc_offset() & (m - 1)) != 0) {
nop();
NOP();
}
}
@@ -1129,9 +1129,9 @@ void Assembler::GenInstrCB(uint8_t funct3, Opcode opcode, Register rs1,
}
void Assembler::GenInstrCBA(uint8_t funct3, uint8_t funct2, Opcode opcode,
Register rs1, uint8_t uimm6) {
DCHECK(is_uint3(funct3) && is_uint2(funct2) && is_uint6(uimm6));
ShortInstr instr = opcode | ((uimm6 & 0x1f) << 2) | ((uimm6 & 0x20) << 7) |
Register rs1, int8_t imm6) {
DCHECK(is_uint3(funct3) && is_uint2(funct2) && is_int6(imm6));
ShortInstr instr = opcode | ((imm6 & 0x1f) << 2) | ((imm6 & 0x20) << 7) |
((rs1.code() & 0x7) << kRvcRs1sShift) |
(funct3 << kRvcFunct3Shift) | (funct2 << 10);
emit(instr);
@@ -1267,7 +1267,10 @@ uint64_t Assembler::jump_address(Label* L) {
}
}
uint64_t imm = reinterpret_cast<uint64_t>(buffer_start_) + target_pos;
DCHECK_EQ(imm & 3, 0);
if (FLAG_riscv_c_extension)
DCHECK_EQ(imm & 1, 0);
else
DCHECK_EQ(imm & 3, 0);
return imm;
}
@@ -1295,7 +1298,10 @@ uint64_t Assembler::branch_long_offset(Label* L) {
}
}
int64_t offset = target_pos - pc_offset();
DCHECK_EQ(offset & 3, 0);
if (FLAG_riscv_c_extension)
DCHECK_EQ(offset & 1, 0);
else
DCHECK_EQ(offset & 3, 0);
return static_cast<uint64_t>(offset);
}
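
Both relaxed DCHECKs above encode the same fact: base RV64 instructions are 4 bytes, so jump and branch targets must be 4-byte aligned, but once 16-bit compressed instructions may appear in the stream a target need only be 2-byte aligned. As a standalone predicate (illustrative helper, not V8 source):

#include <cstdint>

// Alignment required of a branch/jump target: half-word once the
// C extension allows 16-bit instructions, full word otherwise.
bool IsValidTargetAlignment(uint64_t addr, bool c_extension_enabled) {
  uint64_t mask = c_extension_enabled ? 1 : 3;
  return (addr & mask) == 0;
}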
@@ -2213,7 +2219,8 @@ void Assembler::c_fsdsp(FPURegister rs2, uint16_t uimm9) {
void Assembler::c_lw(Register rd, Register rs1, uint16_t uimm7) {
DCHECK(((rd.code() & 0b11000) == 0b01000) &&
((rs1.code() & 0b11000) == 0b01000) && is_uint7(uimm7));
((rs1.code() & 0b11000) == 0b01000) && is_uint7(uimm7) &&
((uimm7 & 0x3) == 0));
uint8_t uimm5 =
((uimm7 & 0x4) >> 1) | ((uimm7 & 0x40) >> 6) | ((uimm7 & 0x38) >> 1);
GenInstrCL(0b010, C0, rd, rs1, uimm5);
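
The shift-and-mask expression above is packing, not arithmetic: the RVC CL/CS formats store the load/store offset's bits out of order. Restated as a standalone helper (hypothetical name, mirroring the uimm5 computation in c_lw/c_sw):

#include <cstdint>

// C.LW/C.SW take a word-aligned 7-bit offset (bits [6:2] significant) and
// store it as a 5-bit field ordered imm[5:3] | imm[2] | imm[6].
uint8_t PackClWordOffset(uint16_t uimm7) {
  return ((uimm7 & 0x38) >> 1) |  // imm[5:3] -> field bits 4:2
         ((uimm7 & 0x4) >> 1) |   // imm[2]   -> field bit 1
         ((uimm7 & 0x40) >> 6);   // imm[6]   -> field bit 0
}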
@@ -2221,14 +2228,16 @@ void Assembler::c_lw(Register rd, Register rs1, uint16_t uimm7) {
void Assembler::c_ld(Register rd, Register rs1, uint16_t uimm8) {
DCHECK(((rd.code() & 0b11000) == 0b01000) &&
((rs1.code() & 0b11000) == 0b01000) && is_uint8(uimm8));
((rs1.code() & 0b11000) == 0b01000) && is_uint8(uimm8) &&
((uimm8 & 0x7) == 0));
uint8_t uimm5 = ((uimm8 & 0x38) >> 1) | ((uimm8 & 0xc0) >> 6);
GenInstrCL(0b011, C0, rd, rs1, uimm5);
}
void Assembler::c_fld(FPURegister rd, Register rs1, uint16_t uimm8) {
DCHECK(((rd.code() & 0b11000) == 0b01000) &&
((rs1.code() & 0b11000) == 0b01000) && is_uint8(uimm8));
((rs1.code() & 0b11000) == 0b01000) && is_uint8(uimm8) &&
((uimm8 & 0x7) == 0));
uint8_t uimm5 = ((uimm8 & 0x38) >> 1) | ((uimm8 & 0xc0) >> 6);
GenInstrCL(0b001, C0, rd, rs1, uimm5);
}
@@ -2237,7 +2246,8 @@ void Assembler::c_fld(FPURegister rd, Register rs1, uint16_t uimm8) {
void Assembler::c_sw(Register rs2, Register rs1, uint16_t uimm7) {
DCHECK(((rs2.code() & 0b11000) == 0b01000) &&
((rs1.code() & 0b11000) == 0b01000) && is_uint7(uimm7));
((rs1.code() & 0b11000) == 0b01000) && is_uint7(uimm7) &&
((uimm7 & 0x3) == 0));
uint8_t uimm5 =
((uimm7 & 0x4) >> 1) | ((uimm7 & 0x40) >> 6) | ((uimm7 & 0x38) >> 1);
GenInstrCS(0b110, C0, rs2, rs1, uimm5);
@@ -2245,14 +2255,16 @@ void Assembler::c_sw(Register rs2, Register rs1, uint16_t uimm7) {
void Assembler::c_sd(Register rs2, Register rs1, uint16_t uimm8) {
DCHECK(((rs2.code() & 0b11000) == 0b01000) &&
((rs1.code() & 0b11000) == 0b01000) && is_uint8(uimm8));
((rs1.code() & 0b11000) == 0b01000) && is_uint8(uimm8) &&
((uimm8 & 0x7) == 0));
uint8_t uimm5 = ((uimm8 & 0x38) >> 1) | ((uimm8 & 0xc0) >> 6);
GenInstrCS(0b111, C0, rs2, rs1, uimm5);
}
void Assembler::c_fsd(FPURegister rs2, Register rs1, uint16_t uimm8) {
DCHECK(((rs2.code() & 0b11000) == 0b01000) &&
((rs1.code() & 0b11000) == 0b01000) && is_uint8(uimm8));
((rs1.code() & 0b11000) == 0b01000) && is_uint8(uimm8) &&
((uimm8 & 0x7) == 0));
uint8_t uimm5 = ((uimm8 & 0x38) >> 1) | ((uimm8 & 0xc0) >> 6);
GenInstrCS(0b101, C0, rs2, rs1, uimm5);
}
@@ -2285,19 +2297,35 @@ void Assembler::c_beqz(Register rs1, int16_t imm9) {
GenInstrCB(0b110, C1, rs1, uimm8);
}
void Assembler::c_srli(Register rs1, uint8_t uimm6) {
DCHECK(((rs1.code() & 0b11000) == 0b01000) && is_uint6(uimm6));
GenInstrCBA(0b100, 0b00, C1, rs1, uimm6);
void Assembler::c_srli(Register rs1, int8_t imm6) {
DCHECK(((rs1.code() & 0b11000) == 0b01000) && is_int6(imm6));
GenInstrCBA(0b100, 0b00, C1, rs1, imm6);
}
void Assembler::c_srai(Register rs1, uint8_t uimm6) {
DCHECK(((rs1.code() & 0b11000) == 0b01000) && is_uint6(uimm6));
GenInstrCBA(0b100, 0b01, C1, rs1, uimm6);
void Assembler::c_srai(Register rs1, int8_t imm6) {
DCHECK(((rs1.code() & 0b11000) == 0b01000) && is_int6(imm6));
GenInstrCBA(0b100, 0b01, C1, rs1, imm6);
}
void Assembler::c_andi(Register rs1, uint8_t uimm6) {
DCHECK(((rs1.code() & 0b11000) == 0b01000) && is_uint6(uimm6));
GenInstrCBA(0b100, 0b10, C1, rs1, uimm6);
void Assembler::c_andi(Register rs1, int8_t imm6) {
DCHECK(((rs1.code() & 0b11000) == 0b01000) && is_int6(imm6));
GenInstrCBA(0b100, 0b10, C1, rs1, imm6);
}
// Definitions for using compressed vs. non-compressed instructions
void Assembler::NOP() {
if (FLAG_riscv_c_extension)
c_nop();
else
nop();
}
void Assembler::EBREAK() {
if (FLAG_riscv_c_extension)
c_ebreak();
else
ebreak();
}
// Privileged

View File

@@ -660,9 +660,11 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase {
inline void c_bnez(Register rs1, Label* L) { c_bnez(rs1, branch_offset(L)); }
void c_beqz(Register rs1, int16_t imm9);
inline void c_beqz(Register rs1, Label* L) { c_beqz(rs1, branch_offset(L)); }
void c_srli(Register rs1, uint8_t uimm6);
void c_srai(Register rs1, uint8_t uimm6);
void c_andi(Register rs1, uint8_t uimm6);
void c_srli(Register rs1, int8_t imm6);
void c_srai(Register rs1, int8_t imm6);
void c_andi(Register rs1, int8_t imm6);
void NOP();
void EBREAK();
// Privileged
void uret();
@@ -1144,7 +1146,7 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase {
void GenInstrCJ(uint8_t funct3, Opcode opcode, uint16_t uint11);
void GenInstrCB(uint8_t funct3, Opcode opcode, Register rs1, uint8_t uimm8);
void GenInstrCBA(uint8_t funct3, uint8_t funct2, Opcode opcode, Register rs1,
uint8_t uimm6);
int8_t imm6);
// ----- Instruction class templates match those in LLVM's RISCVInstrInfo.td
void GenInstrBranchCC_rri(uint8_t funct3, Register rs1, Register rs2,

View File

@@ -1033,8 +1033,6 @@ class InstructionGetters : public T {
DCHECK(this->IsShortInstruction());
// | funct3 | imm[5] | rs1/rd | imm[4:0] | opcode |
// 15 12 6 2
// | funct3 | nzimm[17] | rs1/rd | nzimm[16:12] | opcode |
// 15 12 6 2
uint32_t Bits = this->InstructionBits();
int32_t imm6 = ((Bits & 0x1000) >> 7) | ((Bits & 0x7c) >> 2);
return imm6 << 26 >> 26;
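
The final << 26 >> 26 sign-extends the assembled 6-bit immediate inside a 32-bit int: the left shift parks bit 5 in the sign position and the arithmetic right shift replicates it downward. A quick standalone check (relying on arithmetic right shift of signed ints, as the V8 code does):

#include <cassert>
#include <cstdint>

int32_t SignExtend6(int32_t imm6) { return imm6 << 26 >> 26; }

int main() {
  assert(SignExtend6(0b011111) == 31);   // bit 5 clear: value unchanged
  assert(SignExtend6(0b111111) == -1);   // bit 5 set: extended to -1
  assert(SignExtend6(0b100000) == -32);  // most negative 6-bit value
  return 0;
}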

View File

@@ -142,7 +142,7 @@ void TurboAssembler::PushCommonFrame(Register marker_reg) {
Add64(fp, sp, Operand(kSystemPointerSize));
} else {
Push(ra, fp);
mv(fp, sp);
Mv(fp, sp);
}
}
@@ -378,9 +378,19 @@ void MacroAssembler::RecordWrite(Register object, Operand offset,
void TurboAssembler::Add32(Register rd, Register rs, const Operand& rt) {
if (rt.is_reg()) {
addw(rd, rs, rt.rm());
if (FLAG_riscv_c_extension && (rd.code() == rs.code()) &&
((rd.code() & 0b11000) == 0b01000) &&
((rt.rm().code() & 0b11000) == 0b01000)) {
c_addw(rd, rt.rm());
} else {
addw(rd, rs, rt.rm());
}
} else {
if (is_int12(rt.immediate()) && !MustUseReg(rt.rmode())) {
if (FLAG_riscv_c_extension && is_int6(rt.immediate()) &&
(rd.code() == rs.code()) && (rd != zero_reg) &&
!MustUseReg(rt.rmode())) {
c_addiw(rd, static_cast<int8_t>(rt.immediate()));
} else if (is_int12(rt.immediate()) && !MustUseReg(rt.rmode())) {
addiw(rd, rs, static_cast<int32_t>(rt.immediate()));
} else if ((-4096 <= rt.immediate() && rt.immediate() <= -2049) ||
(2048 <= rt.immediate() && rt.immediate() <= 4094)) {
@@ -390,7 +400,7 @@ void TurboAssembler::Add32(Register rd, Register rs, const Operand& rt) {
// li handles the relocation.
UseScratchRegisterScope temps(this);
Register scratch = temps.Acquire();
RV_li(scratch, rt.immediate());
Li(scratch, rt.immediate());
addw(rd, rs, scratch);
}
}
@@ -398,9 +408,27 @@ void TurboAssembler::Add32(Register rd, Register rs, const Operand& rt) {
void TurboAssembler::Add64(Register rd, Register rs, const Operand& rt) {
if (rt.is_reg()) {
add(rd, rs, rt.rm());
if (FLAG_riscv_c_extension && (rd.code() == rs.code()) &&
(rt.rm() != zero_reg) && (rs != zero_reg)) {
c_add(rd, rt.rm());
} else {
add(rd, rs, rt.rm());
}
} else {
if (is_int12(rt.immediate()) && !MustUseReg(rt.rmode())) {
if (FLAG_riscv_c_extension && is_int6(rt.immediate()) &&
(rd.code() == rs.code()) && (rd != zero_reg) && (rt.immediate() != 0) &&
!MustUseReg(rt.rmode())) {
c_addi(rd, static_cast<int8_t>(rt.immediate()));
} else if (FLAG_riscv_c_extension && is_int10(rt.immediate()) &&
(rt.immediate() != 0) && ((rt.immediate() & 0xf) == 0) &&
(rd.code() == rs.code()) && (rd == sp) &&
!MustUseReg(rt.rmode())) {
c_addi16sp(static_cast<int16_t>(rt.immediate()));
} else if (FLAG_riscv_c_extension && ((rd.code() & 0b11000) == 0b01000) &&
(rs == sp) && is_uint10(rt.immediate()) &&
(rt.immediate() != 0) && !MustUseReg(rt.rmode())) {
c_addi4spn(rd, static_cast<uint16_t>(rt.immediate()));
} else if (is_int12(rt.immediate()) && !MustUseReg(rt.rmode())) {
addi(rd, rs, static_cast<int32_t>(rt.immediate()));
} else if ((-4096 <= rt.immediate() && rt.immediate() <= -2049) ||
(2048 <= rt.immediate() && rt.immediate() <= 4094)) {
@@ -411,7 +439,7 @@ void TurboAssembler::Add64(Register rd, Register rs, const Operand& rt) {
UseScratchRegisterScope temps(this);
Register scratch = temps.Acquire();
BlockTrampolinePoolScope block_trampoline_pool(this);
RV_li(scratch, rt.immediate());
Li(scratch, rt.immediate());
add(rd, rs, scratch);
}
}
@@ -419,10 +447,23 @@ void TurboAssembler::Add64(Register rd, Register rs, const Operand& rt) {
void TurboAssembler::Sub32(Register rd, Register rs, const Operand& rt) {
if (rt.is_reg()) {
subw(rd, rs, rt.rm());
if (FLAG_riscv_c_extension && (rd.code() == rs.code()) &&
((rd.code() & 0b11000) == 0b01000) &&
((rt.rm().code() & 0b11000) == 0b01000)) {
c_subw(rd, rt.rm());
} else {
subw(rd, rs, rt.rm());
}
} else {
DCHECK(is_int32(rt.immediate()));
if (is_int12(-rt.immediate()) && !MustUseReg(rt.rmode())) {
if (FLAG_riscv_c_extension && (rd.code() == rs.code()) &&
(rd != zero_reg) && is_int6(-rt.immediate()) &&
!MustUseReg(rt.rmode())) {
c_addiw(
rd,
static_cast<int8_t>(
-rt.immediate())); // No c_subiw instr, use c_addiw(x, -imm).
} else if (is_int12(-rt.immediate()) && !MustUseReg(rt.rmode())) {
addiw(rd, rs,
static_cast<int32_t>(
-rt.immediate())); // No subiw instr, use addiw(x, y, -imm).
@@ -435,11 +476,11 @@ void TurboAssembler::Sub32(Register rd, Register rs, const Operand& rt) {
Register scratch = temps.Acquire();
if (-rt.immediate() >> 12 == 0 && !MustUseReg(rt.rmode())) {
// Use load -imm and addu when loading -imm generates one instruction.
RV_li(scratch, -rt.immediate());
Li(scratch, -rt.immediate());
addw(rd, rs, scratch);
} else {
// li handles the relocation.
RV_li(scratch, rt.immediate());
Li(scratch, rt.immediate());
subw(rd, rs, scratch);
}
}
@@ -448,7 +489,25 @@ void TurboAssembler::Sub64(Register rd, Register rs, const Operand& rt) {
void TurboAssembler::Sub64(Register rd, Register rs, const Operand& rt) {
if (rt.is_reg()) {
sub(rd, rs, rt.rm());
if (FLAG_riscv_c_extension && (rd.code() == rs.code()) &&
((rd.code() & 0b11000) == 0b01000) &&
((rt.rm().code() & 0b11000) == 0b01000)) {
c_sub(rd, rt.rm());
} else {
sub(rd, rs, rt.rm());
}
} else if (FLAG_riscv_c_extension && (rd.code() == rs.code()) &&
(rd != zero_reg) && is_int6(-rt.immediate()) &&
(rt.immediate() != 0) && !MustUseReg(rt.rmode())) {
c_addi(rd,
static_cast<int8_t>(
-rt.immediate())); // No c_subi instr, use c_addi(x, -imm).
} else if (FLAG_riscv_c_extension && is_int10(-rt.immediate()) &&
(rt.immediate() != 0) && ((rt.immediate() & 0xf) == 0) &&
(rd.code() == rs.code()) && (rd == sp) &&
!MustUseReg(rt.rmode())) {
c_addi16sp(static_cast<int16_t>(-rt.immediate()));
} else if (is_int12(-rt.immediate()) && !MustUseReg(rt.rmode())) {
addi(rd, rs,
static_cast<int32_t>(
@@ -465,13 +524,13 @@ void TurboAssembler::Sub64(Register rd, Register rs, const Operand& rt) {
DCHECK(rt.immediate() != std::numeric_limits<int32_t>::min());
UseScratchRegisterScope temps(this);
Register scratch = temps.Acquire();
RV_li(scratch, -rt.immediate());
Li(scratch, -rt.immediate());
add(rd, rs, scratch);
} else {
// li handles the relocation.
UseScratchRegisterScope temps(this);
Register scratch = temps.Acquire();
RV_li(scratch, rt.immediate());
Li(scratch, rt.immediate());
sub(rd, rs, scratch);
}
}
@@ -484,7 +543,7 @@ void TurboAssembler::Mul32(Register rd, Register rs, const Operand& rt) {
// li handles the relocation.
UseScratchRegisterScope temps(this);
Register scratch = temps.Acquire();
RV_li(scratch, rt.immediate());
Li(scratch, rt.immediate());
mulw(rd, rs, scratch);
}
}
@@ -496,7 +555,7 @@ void TurboAssembler::Mulh32(Register rd, Register rs, const Operand& rt) {
// li handles the relocation.
UseScratchRegisterScope temps(this);
Register scratch = temps.Acquire();
RV_li(scratch, rt.immediate());
Li(scratch, rt.immediate());
mul(rd, rs, scratch);
}
srai(rd, rd, 32);
@@ -505,10 +564,11 @@ void TurboAssembler::Mulh32(Register rd, Register rs, const Operand& rt) {
void TurboAssembler::Mulhu32(Register rd, Register rs, const Operand& rt,
Register rsz, Register rtz) {
slli(rsz, rs, 32);
if (rt.is_reg())
if (rt.is_reg()) {
slli(rtz, rt.rm(), 32);
else
RV_li(rtz, rt.immediate() << 32);
} else {
Li(rtz, rt.immediate() << 32);
}
mulhu(rd, rsz, rtz);
srai(rd, rd, 32);
}
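
For reference, Mulhu32 works because shifting both 32-bit operands into the upper halves makes mulhu (the upper 64 bits of the 128-bit product) return exactly a*b: ((a*2^32)*(b*2^32))/2^64 = a*b, whose bits 63:32 are the desired high word. A self-contained check (uses the GCC/Clang __int128 extension):

#include <cassert>
#include <cstdint>

int main() {
  uint64_t a = 0xFFFFFFFFull, b = 0xFFFFFFFFull;  // worst-case 32-bit inputs
  unsigned __int128 p =
      (unsigned __int128)(a << 32) * (unsigned __int128)(b << 32);
  uint64_t mulhu = (uint64_t)(p >> 64);   // what the mulhu instruction yields
  assert(mulhu == a * b);                 // full 64-bit product of the inputs
  assert((mulhu >> 32) == 0xFFFFFFFEull); // bits 63:32 = Mulhu32's result
  return 0;
}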
@@ -520,7 +580,7 @@ void TurboAssembler::Mul64(Register rd, Register rs, const Operand& rt) {
// li handles the relocation.
UseScratchRegisterScope temps(this);
Register scratch = temps.Acquire();
RV_li(scratch, rt.immediate());
Li(scratch, rt.immediate());
mul(rd, rs, scratch);
}
}
@@ -532,7 +592,7 @@ void TurboAssembler::Mulh64(Register rd, Register rs, const Operand& rt) {
// li handles the relocation.
UseScratchRegisterScope temps(this);
Register scratch = temps.Acquire();
RV_li(scratch, rt.immediate());
Li(scratch, rt.immediate());
mulh(rd, rs, scratch);
}
}
@@ -544,7 +604,7 @@ void TurboAssembler::Div32(Register res, Register rs, const Operand& rt) {
// li handles the relocation.
UseScratchRegisterScope temps(this);
Register scratch = temps.Acquire();
RV_li(scratch, rt.immediate());
Li(scratch, rt.immediate());
divw(res, rs, scratch);
}
}
@@ -556,7 +616,7 @@ void TurboAssembler::Mod32(Register rd, Register rs, const Operand& rt) {
// li handles the relocation.
UseScratchRegisterScope temps(this);
Register scratch = temps.Acquire();
RV_li(scratch, rt.immediate());
Li(scratch, rt.immediate());
remw(rd, rs, scratch);
}
}
@@ -568,7 +628,7 @@ void TurboAssembler::Modu32(Register rd, Register rs, const Operand& rt) {
// li handles the relocation.
UseScratchRegisterScope temps(this);
Register scratch = temps.Acquire();
RV_li(scratch, rt.immediate());
Li(scratch, rt.immediate());
remuw(rd, rs, scratch);
}
}
@@ -580,7 +640,7 @@ void TurboAssembler::Div64(Register rd, Register rs, const Operand& rt) {
// li handles the relocation.
UseScratchRegisterScope temps(this);
Register scratch = temps.Acquire();
RV_li(scratch, rt.immediate());
Li(scratch, rt.immediate());
div(rd, rs, scratch);
}
}
@@ -592,7 +652,7 @@ void TurboAssembler::Divu32(Register res, Register rs, const Operand& rt) {
// li handles the relocation.
UseScratchRegisterScope temps(this);
Register scratch = temps.Acquire();
RV_li(scratch, rt.immediate());
Li(scratch, rt.immediate());
divuw(res, rs, scratch);
}
}
@@ -604,7 +664,7 @@ void TurboAssembler::Divu64(Register res, Register rs, const Operand& rt) {
// li handles the relocation.
UseScratchRegisterScope temps(this);
Register scratch = temps.Acquire();
RV_li(scratch, rt.immediate());
Li(scratch, rt.immediate());
divu(res, rs, scratch);
}
}
@@ -616,7 +676,7 @@ void TurboAssembler::Mod64(Register rd, Register rs, const Operand& rt) {
// li handles the relocation.
UseScratchRegisterScope temps(this);
Register scratch = temps.Acquire();
RV_li(scratch, rt.immediate());
Li(scratch, rt.immediate());
rem(rd, rs, scratch);
}
}
@@ -628,22 +688,32 @@ void TurboAssembler::Modu64(Register rd, Register rs, const Operand& rt) {
// li handles the relocation.
UseScratchRegisterScope temps(this);
Register scratch = temps.Acquire();
RV_li(scratch, rt.immediate());
Li(scratch, rt.immediate());
remu(rd, rs, scratch);
}
}
void TurboAssembler::And(Register rd, Register rs, const Operand& rt) {
if (rt.is_reg()) {
and_(rd, rs, rt.rm());
if (FLAG_riscv_c_extension && (rd.code() == rs.code()) &&
((rd.code() & 0b11000) == 0b01000) &&
((rt.rm().code() & 0b11000) == 0b01000)) {
c_and(rd, rt.rm());
} else {
and_(rd, rs, rt.rm());
}
} else {
if (is_int12(rt.immediate()) && !MustUseReg(rt.rmode())) {
if (FLAG_riscv_c_extension && is_int6(rt.immediate()) &&
!MustUseReg(rt.rmode()) && (rd.code() == rs.code()) &&
((rd.code() & 0b11000) == 0b01000)) {
c_andi(rd, static_cast<int8_t>(rt.immediate()));
} else if (is_int12(rt.immediate()) && !MustUseReg(rt.rmode())) {
andi(rd, rs, static_cast<int32_t>(rt.immediate()));
} else {
// li handles the relocation.
UseScratchRegisterScope temps(this);
Register scratch = temps.Acquire();
RV_li(scratch, rt.immediate());
Li(scratch, rt.immediate());
and_(rd, rs, scratch);
}
}
@@ -651,7 +721,13 @@ void TurboAssembler::And(Register rd, Register rs, const Operand& rt) {
void TurboAssembler::Or(Register rd, Register rs, const Operand& rt) {
if (rt.is_reg()) {
or_(rd, rs, rt.rm());
if (FLAG_riscv_c_extension && (rd.code() == rs.code()) &&
((rd.code() & 0b11000) == 0b01000) &&
((rt.rm().code() & 0b11000) == 0b01000)) {
c_or(rd, rt.rm());
} else {
or_(rd, rs, rt.rm());
}
} else {
if (is_int12(rt.immediate()) && !MustUseReg(rt.rmode())) {
ori(rd, rs, static_cast<int32_t>(rt.immediate()));
@@ -659,7 +735,7 @@ void TurboAssembler::Or(Register rd, Register rs, const Operand& rt) {
// li handles the relocation.
UseScratchRegisterScope temps(this);
Register scratch = temps.Acquire();
RV_li(scratch, rt.immediate());
Li(scratch, rt.immediate());
or_(rd, rs, scratch);
}
}
@@ -667,7 +743,13 @@ void TurboAssembler::Xor(Register rd, Register rs, const Operand& rt) {
void TurboAssembler::Xor(Register rd, Register rs, const Operand& rt) {
if (rt.is_reg()) {
xor_(rd, rs, rt.rm());
if (FLAG_riscv_c_extension && (rd.code() == rs.code()) &&
((rd.code() & 0b11000) == 0b01000) &&
((rt.rm().code() & 0b11000) == 0b01000)) {
c_xor(rd, rt.rm());
} else {
xor_(rd, rs, rt.rm());
}
} else {
if (is_int12(rt.immediate()) && !MustUseReg(rt.rmode())) {
xori(rd, rs, static_cast<int32_t>(rt.immediate()));
@@ -675,7 +757,7 @@ void TurboAssembler::Xor(Register rd, Register rs, const Operand& rt) {
// li handles the relocation.
UseScratchRegisterScope temps(this);
Register scratch = temps.Acquire();
RV_li(scratch, rt.immediate());
Li(scratch, rt.immediate());
xor_(rd, rs, scratch);
}
}
@@ -745,7 +827,7 @@ void TurboAssembler::Slt(Register rd, Register rs, const Operand& rt) {
UseScratchRegisterScope temps(this);
Register scratch = temps.Acquire();
BlockTrampolinePoolScope block_trampoline_pool(this);
RV_li(scratch, rt.immediate());
Li(scratch, rt.immediate());
slt(rd, rs, scratch);
}
}
@@ -762,7 +844,7 @@ void TurboAssembler::Sltu(Register rd, Register rs, const Operand& rt) {
UseScratchRegisterScope temps(this);
Register scratch = temps.Acquire();
BlockTrampolinePoolScope block_trampoline_pool(this);
RV_li(scratch, rt.immediate());
Li(scratch, rt.immediate());
sltu(rd, rs, scratch);
}
}
@@ -776,7 +858,7 @@ void TurboAssembler::Sle(Register rd, Register rs, const Operand& rt) {
UseScratchRegisterScope temps(this);
Register scratch = temps.Acquire();
BlockTrampolinePoolScope block_trampoline_pool(this);
RV_li(scratch, rt.immediate());
Li(scratch, rt.immediate());
slt(rd, scratch, rs);
}
xori(rd, rd, 1);
@@ -790,7 +872,7 @@ void TurboAssembler::Sleu(Register rd, Register rs, const Operand& rt) {
UseScratchRegisterScope temps(this);
Register scratch = temps.Acquire();
BlockTrampolinePoolScope block_trampoline_pool(this);
RV_li(scratch, rt.immediate());
Li(scratch, rt.immediate());
sltu(rd, scratch, rs);
}
xori(rd, rd, 1);
@@ -814,7 +896,7 @@ void TurboAssembler::Sgt(Register rd, Register rs, const Operand& rt) {
UseScratchRegisterScope temps(this);
Register scratch = temps.Acquire();
BlockTrampolinePoolScope block_trampoline_pool(this);
RV_li(scratch, rt.immediate());
Li(scratch, rt.immediate());
slt(rd, scratch, rs);
}
}
@@ -827,7 +909,7 @@ void TurboAssembler::Sgtu(Register rd, Register rs, const Operand& rt) {
UseScratchRegisterScope temps(this);
Register scratch = temps.Acquire();
BlockTrampolinePoolScope block_trampoline_pool(this);
RV_li(scratch, rt.immediate());
Li(scratch, rt.immediate());
sltu(rd, scratch, rs);
}
}
@@ -862,6 +944,10 @@ void TurboAssembler::Srl32(Register rd, Register rs, const Operand& rt) {
void TurboAssembler::Sra64(Register rd, Register rs, const Operand& rt) {
if (rt.is_reg()) {
sra(rd, rs, rt.rm());
} else if (FLAG_riscv_c_extension && (rd.code() == rs.code()) &&
((rd.code() & 0b11000) == 0b01000) && is_int6(rt.immediate())) {
uint8_t shamt = static_cast<uint8_t>(rt.immediate());
c_srai(rd, shamt);
} else {
uint8_t shamt = static_cast<uint8_t>(rt.immediate());
srai(rd, rs, shamt);
@@ -871,6 +957,10 @@ void TurboAssembler::Sra64(Register rd, Register rs, const Operand& rt) {
void TurboAssembler::Srl64(Register rd, Register rs, const Operand& rt) {
if (rt.is_reg()) {
srl(rd, rs, rt.rm());
} else if (FLAG_riscv_c_extension && (rd.code() == rs.code()) &&
((rd.code() & 0b11000) == 0b01000) && is_int6(rt.immediate())) {
uint8_t shamt = static_cast<uint8_t>(rt.immediate());
c_srli(rd, shamt);
} else {
uint8_t shamt = static_cast<uint8_t>(rt.immediate());
srli(rd, rs, shamt);
@@ -882,7 +972,28 @@ void TurboAssembler::Sll64(Register rd, Register rs, const Operand& rt) {
sll(rd, rs, rt.rm());
} else {
uint8_t shamt = static_cast<uint8_t>(rt.immediate());
slli(rd, rs, shamt);
if (FLAG_riscv_c_extension && (rd.code() == rs.code()) &&
(rd != zero_reg) && (shamt != 0) && is_uint6(shamt)) {
c_slli(rd, shamt);
} else {
slli(rd, rs, shamt);
}
}
}
void TurboAssembler::Li(Register rd, int64_t imm) {
if (FLAG_riscv_c_extension && (rd != zero_reg) && is_int6(imm)) {
c_li(rd, imm);
} else {
RV_li(rd, imm);
}
}
void TurboAssembler::Mv(Register rd, const Operand& rt) {
if (FLAG_riscv_c_extension && (rd != zero_reg) && (rt.rm() != zero_reg)) {
c_mv(rd, rt.rm());
} else {
mv(rd, rt.rm());
}
}
@@ -899,7 +1010,7 @@ void TurboAssembler::Ror(Register rd, Register rs, const Operand& rt) {
} else {
int64_t ror_value = rt.immediate() % 32;
if (ror_value == 0) {
mv(rd, rs);
Mv(rd, rs);
return;
} else if (ror_value < 0) {
ror_value += 32;
@@ -923,7 +1034,7 @@ void TurboAssembler::Dror(Register rd, Register rs, const Operand& rt) {
} else {
int64_t dror_value = rt.immediate() % 64;
if (dror_value == 0) {
mv(rd, rs);
Mv(rd, rs);
return;
} else if (dror_value < 0) {
dror_value += 64;
@@ -1303,7 +1414,17 @@ void TurboAssembler::Sh(Register rd, const MemOperand& rs) {
void TurboAssembler::Lw(Register rd, const MemOperand& rs) {
auto fn = [this](Register target, const MemOperand& source) {
this->lw(target, source.rm(), source.offset());
if (FLAG_riscv_c_extension && ((target.code() & 0b11000) == 0b01000) &&
((source.rm().code() & 0b11000) == 0b01000) &&
is_uint7(source.offset()) && ((source.offset() & 0x3) == 0)) {
this->c_lw(target, source.rm(), source.offset());
} else if (FLAG_riscv_c_extension && (target != zero_reg) &&
is_uint8(source.offset()) && (source.rm() == sp) &&
((source.offset() & 0x3) == 0)) {
this->c_lwsp(target, source.offset());
} else {
this->lw(target, source.rm(), source.offset());
}
};
AlignedLoadHelper(rd, rs, fn);
}
@@ -1317,21 +1438,49 @@ void TurboAssembler::Lwu(Register rd, const MemOperand& rs) {
void TurboAssembler::Sw(Register rd, const MemOperand& rs) {
auto fn = [this](Register value, const MemOperand& source) {
this->sw(value, source.rm(), source.offset());
if (FLAG_riscv_c_extension && ((value.code() & 0b11000) == 0b01000) &&
((source.rm().code() & 0b11000) == 0b01000) &&
is_uint7(source.offset()) && ((source.offset() & 0x3) == 0)) {
this->c_sw(value, source.rm(), source.offset());
} else if (FLAG_riscv_c_extension && (source.rm() == sp) &&
is_uint8(source.offset()) && (((source.offset() & 0x3) == 0))) {
this->c_swsp(value, source.offset());
} else {
this->sw(value, source.rm(), source.offset());
}
};
AlignedStoreHelper(rd, rs, fn);
}
void TurboAssembler::Ld(Register rd, const MemOperand& rs) {
auto fn = [this](Register target, const MemOperand& source) {
this->ld(target, source.rm(), source.offset());
if (FLAG_riscv_c_extension && ((target.code() & 0b11000) == 0b01000) &&
((source.rm().code() & 0b11000) == 0b01000) &&
is_uint8(source.offset()) && ((source.offset() & 0x7) == 0)) {
this->c_ld(target, source.rm(), source.offset());
} else if (FLAG_riscv_c_extension && (target != zero_reg) &&
is_uint9(source.offset()) && (source.rm() == sp) &&
((source.offset() & 0x7) == 0)) {
this->c_ldsp(target, source.offset());
} else {
this->ld(target, source.rm(), source.offset());
}
};
AlignedLoadHelper(rd, rs, fn);
}
void TurboAssembler::Sd(Register rd, const MemOperand& rs) {
auto fn = [this](Register value, const MemOperand& source) {
this->sd(value, source.rm(), source.offset());
if (FLAG_riscv_c_extension && ((value.code() & 0b11000) == 0b01000) &&
((source.rm().code() & 0b11000) == 0b01000) &&
is_uint8(source.offset()) && ((source.offset() & 0x7) == 0)) {
this->c_sd(value, source.rm(), source.offset());
} else if (FLAG_riscv_c_extension && (source.rm() == sp) &&
is_uint9(source.offset()) && ((source.offset() & 0x7) == 0)) {
this->c_sdsp(value, source.offset());
} else {
this->sd(value, source.rm(), source.offset());
}
};
AlignedStoreHelper(rd, rs, fn);
}
@@ -1352,14 +1501,32 @@ void TurboAssembler::StoreFloat(FPURegister fs, const MemOperand& src) {
void TurboAssembler::LoadDouble(FPURegister fd, const MemOperand& src) {
auto fn = [this](FPURegister target, const MemOperand& source) {
this->fld(target, source.rm(), source.offset());
if (FLAG_riscv_c_extension && ((target.code() & 0b11000) == 0b01000) &&
((source.rm().code() & 0b11000) == 0b01000) &&
is_uint8(source.offset()) && ((source.offset() & 0x7) == 0)) {
this->c_fld(target, source.rm(), source.offset());
} else if (FLAG_riscv_c_extension && (source.rm() == sp) &&
is_uint9(source.offset()) && ((source.offset() & 0x7) == 0)) {
this->c_fldsp(target, source.offset());
} else {
this->fld(target, source.rm(), source.offset());
}
};
AlignedLoadHelper(fd, src, fn);
}
void TurboAssembler::StoreDouble(FPURegister fs, const MemOperand& src) {
auto fn = [this](FPURegister value, const MemOperand& source) {
this->fsd(value, source.rm(), source.offset());
if (FLAG_riscv_c_extension && ((value.code() & 0b11000) == 0b01000) &&
((source.rm().code() & 0b11000) == 0b01000) &&
is_uint8(source.offset()) && ((source.offset() & 0x7) == 0)) {
this->c_fsd(value, source.rm(), source.offset());
} else if (FLAG_riscv_c_extension && (source.rm() == sp) &&
is_uint9(source.offset()) && ((source.offset() & 0x7) == 0)) {
this->c_fsdsp(value, source.offset());
} else {
this->fsd(value, source.rm(), source.offset());
}
};
AlignedStoreHelper(fs, src, fn);
}
@@ -1469,7 +1636,7 @@ void TurboAssembler::li_optimized(Register rd, Operand j, LiFlags mode) {
DCHECK(!j.is_reg());
DCHECK(!MustUseReg(j.rmode()));
DCHECK(mode == OPTIMIZE_SIZE);
RV_li(rd, j.immediate());
Li(rd, j.immediate());
}
void TurboAssembler::li(Register rd, Operand j, LiFlags mode) {
@@ -1487,10 +1654,10 @@ void TurboAssembler::li(Register rd, Operand j, LiFlags mode) {
ld(rd, rd, 0);
} else {
if ((count - reverse_count) > 1) {
RV_li(rd, ~j.immediate());
Li(rd, ~j.immediate());
not_(rd, rd);
} else {
RV_li(rd, j.immediate());
Li(rd, j.immediate());
}
}
} else if (MustUseReg(j.rmode())) {
@@ -3191,13 +3358,14 @@ void TurboAssembler::StoreReturnAddressAndCall(Register target) {
// Note that this assumes the caller code (i.e. the Code object currently
// being generated) is immovable or that the callee function cannot trigger
// GC, since the callee function will return to it.
//
// Compute the return address in ra to return to after the jump below. The
// pc is already at '+ 8' from the current instruction; but return is after
// three instructions, so add another 4 to pc to get the return address.
//
Assembler::BlockTrampolinePoolScope block_trampoline_pool(this);
static constexpr int kNumInstructionsToJump = 5;
int kNumInstructionsToJump = 5;
if (FLAG_riscv_c_extension) kNumInstructionsToJump = 4;
Label find_ra;
// Adjust the value in ra to point to the correct return location, one
// instruction past the real call into C code (the jalr(t6)), and push it.
@@ -3214,7 +3382,7 @@ void TurboAssembler::StoreReturnAddressAndCall(Register target) {
// Stack is still aligned.
// Call the C routine.
mv(t6,
Mv(t6,
target); // Function pointer to t6 to conform to ABI for PIC.
jalr(t6);
// Make sure the stored 'ra' points to this position.
@@ -3307,9 +3475,9 @@ void MacroAssembler::Swap(Register reg1, Register reg2, Register scratch) {
Xor(reg2, reg2, Operand(reg1));
Xor(reg1, reg1, Operand(reg2));
} else {
mv(scratch, reg1);
mv(reg1, reg2);
mv(reg2, scratch);
Mv(scratch, reg1);
Mv(reg1, reg2);
Mv(reg2, scratch);
}
}
@@ -3343,7 +3511,7 @@ void TurboAssembler::PushArray(Register array, Register size,
Register scratch2 = temps.Acquire();
Label loop, entry;
if (order == PushArrayOrder::kReverse) {
mv(scratch, zero_reg);
Mv(scratch, zero_reg);
jmp(&entry);
bind(&loop);
CalcScaledAddress(scratch2, array, scratch, kSystemPointerSizeLog2);
@@ -3353,7 +3521,7 @@ void TurboAssembler::PushArray(Register array, Register size,
bind(&entry);
Branch(&loop, less, scratch, Operand(size));
} else {
mv(scratch, size);
Mv(scratch, size);
jmp(&entry);
bind(&loop);
CalcScaledAddress(scratch2, array, scratch, kSystemPointerSizeLog2);
@@ -3487,7 +3655,7 @@ void TurboAssembler::PrepareForTailCall(Register callee_args_count,
Branch(&loop, ne, sp, Operand(src_reg));
// Leave current frame.
mv(sp, dst_reg);
Mv(sp, dst_reg);
}
void MacroAssembler::LoadStackLimit(Register destination, StackLimitKind kind) {
@@ -3768,7 +3936,7 @@ void TurboAssembler::AddOverflow64(Register dst, Register left,
xor_(overflow, scratch2, left);
xor_(scratch, scratch2, right_reg);
and_(overflow, overflow, scratch);
mv(dst, scratch2);
Mv(dst, scratch2);
} else {
add(dst, left, right_reg);
xor_(overflow, dst, left);
@@ -3800,7 +3968,7 @@ void TurboAssembler::SubOverflow64(Register dst, Register left,
xor_(overflow, left, scratch2);
xor_(scratch, left, right_reg);
and_(overflow, overflow, scratch);
mv(dst, scratch2);
Mv(dst, scratch2);
} else {
sub(dst, left, right_reg);
xor_(overflow, left, dst);
@@ -4150,7 +4318,7 @@ void MacroAssembler::LeaveExitFrame(bool save_doubles, Register argument_count,
}
// Pop the arguments, restore registers, and return.
mv(sp, fp); // Respect ABI stack constraint.
Mv(sp, fp); // Respect ABI stack constraint.
Ld(fp, MemOperand(sp, ExitFrameConstants::kCallerFPOffset));
Ld(ra, MemOperand(sp, ExitFrameConstants::kCallerPCOffset));
@@ -4452,7 +4620,7 @@ void TurboAssembler::PrepareCallCFunction(int num_reg_arguments,
if (frame_alignment > kSystemPointerSize) {
// Make stack end at alignment and make room for stack arguments and the
// original value of sp.
mv(scratch, sp);
Mv(scratch, sp);
Sub64(sp, sp, Operand((stack_passed_arguments + 1) * kSystemPointerSize));
DCHECK(base::bits::IsPowerOfTwo(frame_alignment));
And(sp, sp, Operand(-frame_alignment));
@@ -4526,7 +4694,7 @@ void TurboAssembler::CallCFunctionHelper(Register function,
// stays correct.
{
if (function != t6) {
mv(t6, function);
Mv(t6, function);
function = t6;
}

View File

@@ -407,6 +407,8 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
void instr(Register rs, Register rt) { instr(rs, Operand(rt)); } \
void instr(Register rs, int32_t j) { instr(rs, Operand(j)); }
#define DEFINE_INSTRUCTION3(instr) void instr(Register rd, int64_t imm);
DEFINE_INSTRUCTION(Add32)
DEFINE_INSTRUCTION(Add64)
DEFINE_INSTRUCTION(Div32)
@@ -457,6 +459,10 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
DEFINE_INSTRUCTION(Ror)
DEFINE_INSTRUCTION(Dror)
DEFINE_INSTRUCTION3(Li)
DEFINE_INSTRUCTION2(Mv)
#undef DEFINE_INSTRUCTION
#undef DEFINE_INSTRUCTION2
#undef DEFINE_INSTRUCTION3

View File

@@ -1271,7 +1271,7 @@ T Simulator::ReadMem(int64_t addr, Instruction* instr) {
}
#ifndef V8_COMPRESS_POINTERS // TODO(RISCV): v8:11812
// check for natural alignment
if ((addr & (sizeof(T) - 1)) != 0) {
if (!FLAG_riscv_c_extension && ((addr & (sizeof(T) - 1)) != 0)) {
PrintF("Unaligned read at 0x%08" PRIx64 " , pc=0x%08" V8PRIxPTR "\n", addr,
reinterpret_cast<intptr_t>(instr));
DieOrDebug();
@@ -1293,7 +1293,7 @@ void Simulator::WriteMem(int64_t addr, T value, Instruction* instr) {
}
#ifndef V8_COMPRESS_POINTERS // TODO(RISCV): v8:11812
// check for natural alignment
if ((addr & (sizeof(T) - 1)) != 0) {
if (!FLAG_riscv_c_extension && ((addr & (sizeof(T) - 1)) != 0)) {
PrintF("Unaligned write at 0x%08" PRIx64 " , pc=0x%08" V8PRIxPTR "\n", addr,
reinterpret_cast<intptr_t>(instr));
DieOrDebug();
@@ -3316,20 +3316,22 @@ void Simulator::DecodeCLType() {
switch (instr_.RvcOpcode()) {
case RO_C_LW: {
int64_t addr = rvc_rs1s() + rvc_imm5_w();
auto val = ReadMem<int32_t>(addr, instr_.instr());
int64_t val = ReadMem<int32_t>(addr, instr_.instr());
set_rvc_rs2s(sext_xlen(val), false);
TraceMemRd(addr, val, get_register(rvc_rs2s_reg()));
break;
}
case RO_C_LD: {
int64_t addr = rvc_rs1s() + rvc_imm5_d();
auto val = ReadMem<int64_t>(addr, instr_.instr());
int64_t val = ReadMem<int64_t>(addr, instr_.instr());
set_rvc_rs2s(sext_xlen(val), false);
TraceMemRd(addr, val, get_register(rvc_rs2s_reg()));
break;
}
case RO_C_FLD: {
int64_t addr = rvc_rs1s() + rvc_imm5_d();
auto val = ReadMem<double>(addr, instr_.instr());
set_rvc_drs2s(sext_xlen(val), false);
double val = ReadMem<double>(addr, instr_.instr());
set_rvc_drs2s(val, false);
break;
}
default:

View File

@@ -522,11 +522,15 @@ class Simulator : public SimulatorBase {
set_register(rvc_rs1s_reg(), value);
if (trace) TraceRegWr(get_register(rvc_rs1s_reg()), DWORD);
}
inline void set_rvc_rs2(int64_t value, bool trace = true) {
set_register(rvc_rs2_reg(), value);
if (trace) TraceRegWr(get_register(rvc_rs2_reg()), DWORD);
}
inline void set_rvc_drd(double value, bool trace = true) {
set_fpu_register_double(rvc_rd_reg(), value);
if (trace) TraceRegWr(get_fpu_register(rvc_rd_reg()), DOUBLE);
}
inline void set_rvc_rs2s(double value, bool trace = true) {
inline void set_rvc_rs2s(int64_t value, bool trace = true) {
set_register(rvc_rs2s_reg(), value);
if (trace) TraceRegWr(get_register(rvc_rs2s_reg()), DWORD);
}
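
The header fix above (set_rvc_rs2s taking int64_t rather than double) closes a real precision trap: a 64-bit integer passed through a double parameter is implicitly converted, and values needing more than 53 significand bits come back altered. A self-contained illustration of that failure mode (not V8 code):

#include <cassert>
#include <cstdint>

// A double carries 53 significand bits, so wide integers do not survive
// a round trip through a double parameter.
int64_t ThroughDouble(double value) { return static_cast<int64_t>(value); }

int main() {
  int64_t big = (INT64_C(1) << 62) + 1;          // needs 63 bits of precision
  assert(ThroughDouble(big) != big);             // low bit silently lost
  assert(ThroughDouble(big) == (INT64_C(1) << 62));
  return 0;
}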