glibc/sysdeps/x86_64/fpu/svml_d_wrapper_impl.h
Andrew Senkevich c10b9b13f7 Vector pow for x86_64 and tests.
This adds the implementation of vectorized pow, containing SSE, AVX,
AVX2 and AVX512 versions, according to the Vector ABI
<https://groups.google.com/forum/#!topic/x86-64-abi/LmppCfN1rZ4>.

    * bits/libm-simd-decl-stubs.h: Added stubs for pow.
    * math/bits/mathcalls.h: Added pow declaration with __MATHCALL_VEC.
    * sysdeps/unix/sysv/linux/x86_64/libmvec.abilist: New versions added.
    * sysdeps/x86/fpu/bits/math-vector.h: Added SIMD declaration and asm
    redirections for pow.
    * sysdeps/x86_64/fpu/Makefile (libmvec-support): Added new files.
    * sysdeps/x86_64/fpu/Versions: New versions added.
    * sysdeps/x86_64/fpu/libm-test-ulps: Regenerated.
    * sysdeps/x86_64/fpu/multiarch/Makefile (libmvec-sysdep_routines): Added
    build of SSE, AVX2 and AVX512 IFUNC versions.
    * sysdeps/x86_64/fpu/svml_d_wrapper_impl.h: Added 2 argument wrappers.
    * sysdeps/x86_64/fpu/multiarch/svml_d_pow2_core.S: New file.
    * sysdeps/x86_64/fpu/multiarch/svml_d_pow2_core_sse4.S: New file.
    * sysdeps/x86_64/fpu/multiarch/svml_d_pow4_core.S: New file.
    * sysdeps/x86_64/fpu/multiarch/svml_d_pow4_core_avx2.S: New file.
    * sysdeps/x86_64/fpu/multiarch/svml_d_pow8_core.S: New file.
    * sysdeps/x86_64/fpu/multiarch/svml_d_pow8_core_avx512.S: New file.
    * sysdeps/x86_64/fpu/svml_d_pow2_core.S: New file.
    * sysdeps/x86_64/fpu/svml_d_pow4_core.S: New file.
    * sysdeps/x86_64/fpu/svml_d_pow4_core_avx.S: New file.
    * sysdeps/x86_64/fpu/svml_d_pow8_core.S: New file.
    * sysdeps/x86_64/fpu/svml_d_pow_data.S: New file.
    * sysdeps/x86_64/fpu/svml_d_pow_data.h: New file.
    * sysdeps/x86_64/fpu/test-double-vlen2-wrappers.c: Added vector pow test.
    * sysdeps/x86_64/fpu/test-double-vlen2.c: Likewise.
    * sysdeps/x86_64/fpu/test-double-vlen4-avx2-wrappers.c: Likewise.
    * sysdeps/x86_64/fpu/test-double-vlen4-avx2.c: Likewise.
    * sysdeps/x86_64/fpu/test-double-vlen4-wrappers.c: Likewise.
    * sysdeps/x86_64/fpu/test-double-vlen4.c: Likewise.
    * sysdeps/x86_64/fpu/test-double-vlen8-wrappers.c: Likewise.
    * sysdeps/x86_64/fpu/test-double-vlen8.c: Likewise.
    * NEWS: Mention addition of x86_64 vector pow.
2015-06-17 16:22:26 +03:00

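The SIMD variants installed by this change are reached through the Vector ABI
mangled names (e.g. _ZGVbN2vv_pow, _ZGVcN4vv_pow, _ZGVdN4vv_pow and
_ZGVeN8vv_pow in the libmvec.abilist update above). As a rough usage sketch,
assuming GCC with -O2 -ffast-math -fopenmp-simd -mavx2 (exact flags and the
emitted call depend on the toolchain), a loop such as the following may be
vectorized into calls to _ZGVdN4vv_pow:

    #include <math.h>

    double a[4], b[4], c[4];

    void
    foo (void)
    {
    #pragma omp simd
      for (int i = 0; i < 4; i++)
        c[i] = pow (a[i], b[i]);
    }

Linking may require libmvec explicitly (e.g. -lmvec) depending on the glibc
and compiler versions.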

/* Wrapper implementations of vector math functions.
   Copyright (C) 2014-2015 Free Software Foundation, Inc.
   This file is part of the GNU C Library.

   The GNU C Library is free software; you can redistribute it and/or
   modify it under the terms of the GNU Lesser General Public
   License as published by the Free Software Foundation; either
   version 2.1 of the License, or (at your option) any later version.

   The GNU C Library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   Lesser General Public License for more details.

   You should have received a copy of the GNU Lesser General Public
   License along with the GNU C Library; if not, see
   <http://www.gnu.org/licenses/>.  */

/* SSE2 ISA version as wrapper to scalar.  */
.macro WRAPPER_IMPL_SSE2 callee
        subq      $40, %rsp
        cfi_adjust_cfa_offset(40)
        movaps    %xmm0, (%rsp)
        call      \callee@PLT
        movsd     %xmm0, 16(%rsp)
        movsd     8(%rsp), %xmm0
        call      \callee@PLT
        movsd     16(%rsp), %xmm1
        movsd     %xmm0, 24(%rsp)
        unpcklpd  %xmm0, %xmm1
        movaps    %xmm1, %xmm0
        addq      $40, %rsp
        cfi_adjust_cfa_offset(-40)
        ret
.endm
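
/* Illustrative only: in C terms the one-argument SSE2 wrapper above
   behaves roughly like the sketch below; the names vec2_func and
   scalar_func are hypothetical, not glibc symbols.

     #include <emmintrin.h>

     static __m128d
     vec2_func (__m128d x, double (*scalar_func) (double))
     {
       double lo = scalar_func (((double *) &x)[0]);
       double hi = scalar_func (((double *) &x)[1]);
       return _mm_set_pd (hi, lo);
     }
*/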

/* 2 argument SSE2 ISA version as wrapper to scalar.  */
.macro WRAPPER_IMPL_SSE2_ff callee
        subq      $56, %rsp
        cfi_adjust_cfa_offset(56)
        movaps    %xmm0, (%rsp)
        movaps    %xmm1, 16(%rsp)
        call      \callee@PLT
        movsd     %xmm0, 32(%rsp)
        movsd     8(%rsp), %xmm0
        movsd     24(%rsp), %xmm1
        call      \callee@PLT
        movsd     32(%rsp), %xmm1
        movsd     %xmm0, 40(%rsp)
        unpcklpd  %xmm0, %xmm1
        movaps    %xmm1, %xmm0
        addq      $56, %rsp
        cfi_adjust_cfa_offset(-56)
        ret
.endm
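
/* Illustrative only: the two-argument variant above splits both xmm
   inputs the same way, e.g. for pow; hypothetical C sketch, not a
   glibc interface.

     static __m128d
     vec2_func_ff (__m128d x, __m128d y, double (*f) (double, double))
     {
       double lo = f (((double *) &x)[0], ((double *) &y)[0]);
       double hi = f (((double *) &x)[1], ((double *) &y)[1]);
       return _mm_set_pd (hi, lo);
     }
*/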

/* AVX/AVX2 ISA version as wrapper to SSE ISA version.  */
.macro WRAPPER_IMPL_AVX callee
        pushq     %rbp
        cfi_adjust_cfa_offset (8)
        cfi_rel_offset (%rbp, 0)
        movq      %rsp, %rbp
        cfi_def_cfa_register (%rbp)
        andq      $-32, %rsp
        subq      $32, %rsp
        vextractf128 $1, %ymm0, (%rsp)
        vzeroupper
        call      HIDDEN_JUMPTARGET(\callee)
        vmovapd   %xmm0, 16(%rsp)
        vmovaps   (%rsp), %xmm0
        call      HIDDEN_JUMPTARGET(\callee)
        vmovapd   %xmm0, %xmm1
        vmovapd   16(%rsp), %xmm0
        vinsertf128 $1, %xmm1, %ymm0, %ymm0
        movq      %rbp, %rsp
        cfi_def_cfa_register (%rsp)
        popq      %rbp
        cfi_adjust_cfa_offset (-8)
        cfi_restore (%rbp)
        ret
.endm
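
/* Illustrative only: the AVX wrapper above halves the 256-bit input,
   runs the 128-bit (SSE) vector implementation on each half and
   reassembles the result; hypothetical C sketch using intrinsics, not
   a glibc interface.

     #include <immintrin.h>

     static __m256d
     vec4_func (__m256d x, __m128d (*vec2) (__m128d))
     {
       __m128d hi = vec2 (_mm256_extractf128_pd (x, 1));
       __m128d lo = vec2 (_mm256_castpd256_pd128 (x));
       return _mm256_insertf128_pd (_mm256_castpd128_pd256 (lo), hi, 1);
     }
*/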

/* 2 argument AVX/AVX2 ISA version as wrapper to SSE ISA version.  */
.macro WRAPPER_IMPL_AVX_ff callee
        pushq     %rbp
        cfi_adjust_cfa_offset (8)
        cfi_rel_offset (%rbp, 0)
        movq      %rsp, %rbp
        cfi_def_cfa_register (%rbp)
        andq      $-32, %rsp
        subq      $64, %rsp
        vextractf128 $1, %ymm0, 16(%rsp)
        vextractf128 $1, %ymm1, (%rsp)
        vzeroupper
        call      HIDDEN_JUMPTARGET(\callee)
        vmovaps   %xmm0, 32(%rsp)
        vmovaps   16(%rsp), %xmm0
        vmovaps   (%rsp), %xmm1
        call      HIDDEN_JUMPTARGET(\callee)
        vmovaps   %xmm0, %xmm1
        vmovaps   32(%rsp), %xmm0
        vinsertf128 $1, %xmm1, %ymm0, %ymm0
        movq      %rbp, %rsp
        cfi_def_cfa_register (%rsp)
        popq      %rbp
        cfi_adjust_cfa_offset (-8)
        cfi_restore (%rbp)
        ret
.endm

/* AVX512 ISA version as wrapper to AVX2 ISA version.  */
.macro WRAPPER_IMPL_AVX512 callee
        pushq     %rbp
        cfi_adjust_cfa_offset (8)
        cfi_rel_offset (%rbp, 0)
        movq      %rsp, %rbp
        cfi_def_cfa_register (%rbp)
        andq      $-64, %rsp
        subq      $64, %rsp
/* Below is encoding for vmovaps %zmm0, (%rsp).  */
        .byte     0x62
        .byte     0xf1
        .byte     0x7c
        .byte     0x48
        .byte     0x29
        .byte     0x04
        .byte     0x24
/* Below is encoding for vmovapd (%rsp), %ymm0.  */
        .byte     0xc5
        .byte     0xfd
        .byte     0x28
        .byte     0x04
        .byte     0x24
        call      HIDDEN_JUMPTARGET(\callee)
/* Below is encoding for vmovapd 32(%rsp), %ymm0.  */
        .byte     0xc5
        .byte     0xfd
        .byte     0x28
        .byte     0x44
        .byte     0x24
        .byte     0x20
        call      HIDDEN_JUMPTARGET(\callee)
        movq      %rbp, %rsp
        cfi_def_cfa_register (%rsp)
        popq      %rbp
        cfi_adjust_cfa_offset (-8)
        cfi_restore (%rbp)
        ret
.endm
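
/* Illustrative only: conceptually the AVX512 wrappers hand each
   256-bit half of the 512-bit input to the AVX2 implementation;
   hypothetical C sketch, not a glibc interface.

     static __m512d
     vec8_func (__m512d x, __m256d (*vec4) (__m256d))
     {
       __m256d lo = vec4 (_mm512_castpd512_pd256 (x));
       __m256d hi = vec4 (_mm512_extractf64x4_pd (x, 1));
       return _mm512_insertf64x4 (_mm512_castpd256_pd512 (lo), hi, 1);
     }

   The .byte sequences above spell out the machine encodings of the
   AVX-512 moves by hand, so the file still assembles with assemblers
   that do not yet accept AVX-512 mnemonics.  */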

/* 2 argument AVX512 ISA version as wrapper to AVX2 ISA version.  */
.macro WRAPPER_IMPL_AVX512_ff callee
        pushq     %rbp
        cfi_adjust_cfa_offset (8)
        cfi_rel_offset (%rbp, 0)
        movq      %rsp, %rbp
        cfi_def_cfa_register (%rbp)
        andq      $-64, %rsp
        subq      $128, %rsp
/* Below is encoding for vmovaps %zmm0, (%rsp).  */
        .byte     0x62
        .byte     0xf1
        .byte     0x7c
        .byte     0x48
        .byte     0x29
        .byte     0x04
        .byte     0x24
/* Below is encoding for vmovaps %zmm1, 64(%rsp).  */
        .byte     0x62
        .byte     0xf1
        .byte     0x7c
        .byte     0x48
        .byte     0x29
        .byte     0x4c
        .byte     0x24
        .byte     0x01
/* Below is encoding for vmovapd (%rsp), %ymm0.  */
        .byte     0xc5
        .byte     0xfd
        .byte     0x28
        .byte     0x04
        .byte     0x24
/* Below is encoding for vmovapd 64(%rsp), %ymm1.  */
        .byte     0xc5
        .byte     0xfd
        .byte     0x28
        .byte     0x4c
        .byte     0x24
        .byte     0x40
        call      HIDDEN_JUMPTARGET(\callee)
/* Below is encoding for vmovapd 32(%rsp), %ymm0.  */
        .byte     0xc5
        .byte     0xfd
        .byte     0x28
        .byte     0x44
        .byte     0x24
        .byte     0x20
/* Below is encoding for vmovapd 96(%rsp), %ymm1.  */
        .byte     0xc5
        .byte     0xfd
        .byte     0x28
        .byte     0x4c
        .byte     0x24
        .byte     0x60
        call      HIDDEN_JUMPTARGET(\callee)
        movq      %rbp, %rsp
        cfi_def_cfa_register (%rsp)
        popq      %rbp
        cfi_adjust_cfa_offset (-8)
        cfi_restore (%rbp)
        ret
.endm