[x64] Emit vmovss when AVX is enabled.

BUG=v8:4406
LOG=N

Review URL: https://codereview.chromium.org/1413183002

Cr-Commit-Position: refs/heads/master@{#31385}
This commit is contained in:
alph authored on 2015-10-19 13:35:17 -07:00, committed by Commit bot
parent 8fafb2916c
commit 19aa500567
9 changed files with 57 additions and 5 deletions

View File

@@ -1322,7 +1322,7 @@ void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
ASSEMBLE_CHECKED_LOAD_INTEGER(movq);
break;
case kCheckedLoadFloat32:
ASSEMBLE_CHECKED_LOAD_FLOAT(movss);
ASSEMBLE_CHECKED_LOAD_FLOAT(Movss);
break;
case kCheckedLoadFloat64:
ASSEMBLE_CHECKED_LOAD_FLOAT(Movsd);
@@ -1340,7 +1340,7 @@ void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
ASSEMBLE_CHECKED_STORE_INTEGER(movq);
break;
case kCheckedStoreFloat32:
ASSEMBLE_CHECKED_STORE_FLOAT(movss);
ASSEMBLE_CHECKED_STORE_FLOAT(Movss);
break;
case kCheckedStoreFloat64:
ASSEMBLE_CHECKED_STORE_FLOAT(Movsd);

View File

@@ -2951,6 +2951,7 @@ void Assembler::ucomiss(XMMRegister dst, const Operand& src) {
void Assembler::movss(XMMRegister dst, const Operand& src) {
DCHECK(!IsEnabled(AVX));
EnsureSpace ensure_space(this);
emit(0xF3); // single
emit_optional_rex_32(dst, src);
@@ -2961,6 +2962,7 @@ void Assembler::movss(XMMRegister dst, const Operand& src) {
void Assembler::movss(const Operand& src, XMMRegister dst) {
DCHECK(!IsEnabled(AVX));
EnsureSpace ensure_space(this);
emit(0xF3); // single
emit_optional_rex_32(dst, src);

View File

@@ -1413,6 +1413,12 @@ class Assembler : public AssemblerBase {
// Emits VMINSS dst, src1, src2 (scalar single-precision minimum) using the
// shared scalar-single VEX emitter; 0x5d is the MINSS opcode byte.
void vminss(XMMRegister dst, XMMRegister src1, const Operand& src2) {
vss(0x5d, dst, src1, src2);
}
// Emits VMOVSS dst, src (scalar single-precision load from memory); 0x10 is
// the MOVSS load opcode byte, matching the 0x10 case in the disassembler.
// xmm0 is a placeholder for the VEX vvvv slot: unlike the arithmetic forms
// above, the memory-load form has no second register source.
void vmovss(XMMRegister dst, const Operand& src) {
vss(0x10, dst, xmm0, src);
}
// Emits VMOVSS dst, src (scalar single-precision store to memory); 0x11 is
// the MOVSS store opcode byte, matching the 0x11 case in the disassembler.
// Note the operand order passed to vss(): the register goes in the reg-field
// position and the memory operand last; xmm0 fills the unused vvvv slot.
void vmovss(const Operand& dst, XMMRegister src) {
vss(0x11, src, xmm0, dst);
}
void vucomiss(XMMRegister dst, XMMRegister src);
void vucomiss(XMMRegister dst, const Operand& src);
void vss(byte op, XMMRegister dst, XMMRegister src1, XMMRegister src2);

View File

@@ -956,6 +956,15 @@ int DisassemblerX64::AVXInstruction(byte* data) {
int mod, regop, rm, vvvv = vex_vreg();
get_modrm(*current, &mod, &regop, &rm);
switch (opcode) {
case 0x10:
AppendToBuffer("vmovss %s,", NameOfXMMRegister(regop));
current += PrintRightXMMOperand(current);
break;
case 0x11:
AppendToBuffer("vmovss ");
current += PrintRightXMMOperand(current);
AppendToBuffer(",%s", NameOfXMMRegister(regop));
break;
case 0x58:
AppendToBuffer("vaddss %s,%s,", NameOfXMMRegister(regop),
NameOfXMMRegister(vvvv));

View File

@@ -4243,7 +4243,7 @@ void LCodeGen::DoStoreKeyedExternalArray(LStoreKeyed* instr) {
if (elements_kind == FLOAT32_ELEMENTS) {
XMMRegister value(ToDoubleRegister(instr->value()));
__ Cvtsd2ss(value, value);
__ movss(operand, value);
__ Movss(operand, value);
} else if (elements_kind == FLOAT64_ELEMENTS) {
__ Movsd(operand, ToDoubleRegister(instr->value()));
} else {

View File

@@ -2569,6 +2569,26 @@ void MacroAssembler::Movsd(const Operand& dst, XMMRegister src) {
}
// Loads a scalar single-precision float from memory into dst, preferring the
// AVX encoding (vmovss) when the CPU supports it and falling back to the
// legacy SSE movss otherwise. The CpuFeatureScope asserts/records that the
// AVX-only instruction is emitted under the feature check.
void MacroAssembler::Movss(XMMRegister dst, const Operand& src) {
  if (!CpuFeatures::IsSupported(AVX)) {
    movss(dst, src);
    return;
  }
  CpuFeatureScope avx_scope(this, AVX);
  vmovss(dst, src);
}
// Stores the scalar single-precision float in src to memory, preferring the
// AVX encoding (vmovss) when the CPU supports it and falling back to the
// legacy SSE movss otherwise. The CpuFeatureScope asserts/records that the
// AVX-only instruction is emitted under the feature check.
void MacroAssembler::Movss(const Operand& dst, XMMRegister src) {
  if (!CpuFeatures::IsSupported(AVX)) {
    movss(dst, src);
    return;
  }
  CpuFeatureScope avx_scope(this, AVX);
  vmovss(dst, src);
}
void MacroAssembler::Movd(XMMRegister dst, Register src) {
if (CpuFeatures::IsSupported(AVX)) {
CpuFeatureScope scope(this, AVX);

View File

@@ -908,6 +908,8 @@ class MacroAssembler: public Assembler {
void Movsd(XMMRegister dst, XMMRegister src);
void Movsd(XMMRegister dst, const Operand& src);
void Movsd(const Operand& dst, XMMRegister src);
void Movss(XMMRegister dst, const Operand& src);
void Movss(const Operand& dst, XMMRegister src);
void Movd(XMMRegister dst, Register src);
void Movd(XMMRegister dst, const Operand& src);

View File

@@ -1278,8 +1278,18 @@ TEST(AssemblerX64AVX_ss) {
CpuFeatureScope avx_scope(&assm, AVX);
Label exit;
// arguments in xmm0, xmm1 and xmm2
__ movl(rax, Immediate(0));
__ subq(rsp, Immediate(kDoubleSize * 2)); // For memory operand
__ movl(rdx, Immediate(0xc2f64000)); // -123.125
__ vmovd(xmm4, rdx);
__ vmovss(Operand(rsp, 0), xmm4);
__ vmovss(xmm5, Operand(rsp, 0));
__ vmovd(rcx, xmm5);
__ cmpl(rcx, rdx);
__ movl(rax, Immediate(9));
__ j(not_equal, &exit);
__ movl(rax, Immediate(0));
__ vmaxss(xmm3, xmm0, xmm1);
__ vucomiss(xmm3, xmm1);
__ j(parity_even, &exit);
@@ -1320,6 +1330,7 @@ TEST(AssemblerX64AVX_ss) {
// result in eax
__ bind(&exit);
__ addq(rsp, Immediate(kDoubleSize * 2));
__ ret(0);
}
@@ -1367,7 +1378,7 @@ TEST(AssemblerX64AVX_sd) {
__ vcvtsd2ss(xmm6, xmm6, Operand(rsp, 0));
__ vcvtss2sd(xmm7, xmm6, xmm6);
__ vcvtsd2ss(xmm8, xmm7, xmm7);
__ movss(Operand(rsp, 0), xmm8);
__ vmovss(Operand(rsp, 0), xmm8);
__ vcvtss2sd(xmm9, xmm8, Operand(rsp, 0));
__ vmovq(rcx, xmm9);
__ cmpq(rcx, rdx);

View File

@@ -508,6 +508,8 @@ TEST(DisasmX64) {
__ vminss(xmm9, xmm1, Operand(rbx, rcx, times_8, 10000));
__ vmaxss(xmm8, xmm1, xmm2);
__ vmaxss(xmm9, xmm1, Operand(rbx, rcx, times_1, 10000));
__ vmovss(xmm9, Operand(r11, rcx, times_8, -10000));
__ vmovss(Operand(rbx, r9, times_4, 10000), xmm1);
__ vucomiss(xmm9, xmm1);
__ vucomiss(xmm8, Operand(rbx, rdx, times_2, 10981));