x86-64: Add vector asin/asinf implementation to libmvec

Implement vectorized asin/asinf containing SSE, AVX, AVX2 and
AVX512 versions for libmvec as per vector ABI.  It also contains
accuracy and ABI tests for vector asin/asinf with regenerated ulps.

Reviewed-by: H.J. Lu <hjl.tools@gmail.com>
This commit is contained in:
Sunil K Pandey 2021-12-29 08:29:26 -08:00
parent 146310177a
commit 11c01de14c
50 changed files with 2189 additions and 1 deletion

View File

@ -120,4 +120,15 @@
#define __DECL_SIMD_atanf32x #define __DECL_SIMD_atanf32x
#define __DECL_SIMD_atanf64x #define __DECL_SIMD_atanf64x
#define __DECL_SIMD_atanf128x #define __DECL_SIMD_atanf128x
#define __DECL_SIMD_asin
#define __DECL_SIMD_asinf
#define __DECL_SIMD_asinl
#define __DECL_SIMD_asinf16
#define __DECL_SIMD_asinf32
#define __DECL_SIMD_asinf64
#define __DECL_SIMD_asinf128
#define __DECL_SIMD_asinf32x
#define __DECL_SIMD_asinf64x
#define __DECL_SIMD_asinf128x
#endif #endif

View File

@ -52,7 +52,7 @@
/* Arc cosine of X. */ /* Arc cosine of X. */
__MATHCALL_VEC (acos,, (_Mdouble_ __x)); __MATHCALL_VEC (acos,, (_Mdouble_ __x));
/* Arc sine of X. */ /* Arc sine of X. */
__MATHCALL (asin,, (_Mdouble_ __x)); __MATHCALL_VEC (asin,, (_Mdouble_ __x));
/* Arc tangent of X. */ /* Arc tangent of X. */
__MATHCALL_VEC (atan,, (_Mdouble_ __x)); __MATHCALL_VEC (atan,, (_Mdouble_ __x));
/* Arc tangent of Y/X. */ /* Arc tangent of Y/X. */

View File

@ -47,18 +47,26 @@ GLIBC_2.22 _ZGVeN8v_sin F
GLIBC_2.22 _ZGVeN8vv_pow F GLIBC_2.22 _ZGVeN8vv_pow F
GLIBC_2.22 _ZGVeN8vvv_sincos F GLIBC_2.22 _ZGVeN8vvv_sincos F
GLIBC_2.35 _ZGVbN2v_acos F GLIBC_2.35 _ZGVbN2v_acos F
GLIBC_2.35 _ZGVbN2v_asin F
GLIBC_2.35 _ZGVbN2v_atan F GLIBC_2.35 _ZGVbN2v_atan F
GLIBC_2.35 _ZGVbN4v_acosf F GLIBC_2.35 _ZGVbN4v_acosf F
GLIBC_2.35 _ZGVbN4v_asinf F
GLIBC_2.35 _ZGVbN4v_atanf F GLIBC_2.35 _ZGVbN4v_atanf F
GLIBC_2.35 _ZGVcN4v_acos F GLIBC_2.35 _ZGVcN4v_acos F
GLIBC_2.35 _ZGVcN4v_asin F
GLIBC_2.35 _ZGVcN4v_atan F GLIBC_2.35 _ZGVcN4v_atan F
GLIBC_2.35 _ZGVcN8v_acosf F GLIBC_2.35 _ZGVcN8v_acosf F
GLIBC_2.35 _ZGVcN8v_asinf F
GLIBC_2.35 _ZGVcN8v_atanf F GLIBC_2.35 _ZGVcN8v_atanf F
GLIBC_2.35 _ZGVdN4v_acos F GLIBC_2.35 _ZGVdN4v_acos F
GLIBC_2.35 _ZGVdN4v_asin F
GLIBC_2.35 _ZGVdN4v_atan F GLIBC_2.35 _ZGVdN4v_atan F
GLIBC_2.35 _ZGVdN8v_acosf F GLIBC_2.35 _ZGVdN8v_acosf F
GLIBC_2.35 _ZGVdN8v_asinf F
GLIBC_2.35 _ZGVdN8v_atanf F GLIBC_2.35 _ZGVdN8v_atanf F
GLIBC_2.35 _ZGVeN16v_acosf F GLIBC_2.35 _ZGVeN16v_acosf F
GLIBC_2.35 _ZGVeN16v_asinf F
GLIBC_2.35 _ZGVeN16v_atanf F GLIBC_2.35 _ZGVeN16v_atanf F
GLIBC_2.35 _ZGVeN8v_acos F GLIBC_2.35 _ZGVeN8v_acos F
GLIBC_2.35 _ZGVeN8v_asin F
GLIBC_2.35 _ZGVeN8v_atan F GLIBC_2.35 _ZGVeN8v_atan F

View File

@ -66,6 +66,10 @@
# define __DECL_SIMD_atan __DECL_SIMD_x86_64 # define __DECL_SIMD_atan __DECL_SIMD_x86_64
# undef __DECL_SIMD_atanf # undef __DECL_SIMD_atanf
# define __DECL_SIMD_atanf __DECL_SIMD_x86_64 # define __DECL_SIMD_atanf __DECL_SIMD_x86_64
# undef __DECL_SIMD_asin
# define __DECL_SIMD_asin __DECL_SIMD_x86_64
# undef __DECL_SIMD_asinf
# define __DECL_SIMD_asinf __DECL_SIMD_x86_64
# endif # endif
#endif #endif

View File

@ -32,6 +32,8 @@
!GCC$ builtin (acosf) attributes simd (notinbranch) if('x86_64') !GCC$ builtin (acosf) attributes simd (notinbranch) if('x86_64')
!GCC$ builtin (atan) attributes simd (notinbranch) if('x86_64') !GCC$ builtin (atan) attributes simd (notinbranch) if('x86_64')
!GCC$ builtin (atanf) attributes simd (notinbranch) if('x86_64') !GCC$ builtin (atanf) attributes simd (notinbranch) if('x86_64')
!GCC$ builtin (asin) attributes simd (notinbranch) if('x86_64')
!GCC$ builtin (asinf) attributes simd (notinbranch) if('x86_64')
!GCC$ builtin (cos) attributes simd (notinbranch) if('x32') !GCC$ builtin (cos) attributes simd (notinbranch) if('x32')
!GCC$ builtin (cosf) attributes simd (notinbranch) if('x32') !GCC$ builtin (cosf) attributes simd (notinbranch) if('x32')
@ -49,3 +51,5 @@
!GCC$ builtin (acosf) attributes simd (notinbranch) if('x32') !GCC$ builtin (acosf) attributes simd (notinbranch) if('x32')
!GCC$ builtin (atan) attributes simd (notinbranch) if('x32') !GCC$ builtin (atan) attributes simd (notinbranch) if('x32')
!GCC$ builtin (atanf) attributes simd (notinbranch) if('x32') !GCC$ builtin (atanf) attributes simd (notinbranch) if('x32')
!GCC$ builtin (asin) attributes simd (notinbranch) if('x32')
!GCC$ builtin (asinf) attributes simd (notinbranch) if('x32')

View File

@ -23,6 +23,7 @@ postclean-generated += libmvec.mk
# Define for both math and mathvec directories. # Define for both math and mathvec directories.
libmvec-funcs = \ libmvec-funcs = \
acos \ acos \
asin \
atan \ atan \
cos \ cos \
exp \ exp \

View File

@ -15,8 +15,10 @@ libmvec {
} }
GLIBC_2.35 { GLIBC_2.35 {
_ZGVbN2v_acos; _ZGVcN4v_acos; _ZGVdN4v_acos; _ZGVeN8v_acos; _ZGVbN2v_acos; _ZGVcN4v_acos; _ZGVdN4v_acos; _ZGVeN8v_acos;
_ZGVbN2v_asin; _ZGVcN4v_asin; _ZGVdN4v_asin; _ZGVeN8v_asin;
_ZGVbN2v_atan; _ZGVcN4v_atan; _ZGVdN4v_atan; _ZGVeN8v_atan; _ZGVbN2v_atan; _ZGVcN4v_atan; _ZGVdN4v_atan; _ZGVeN8v_atan;
_ZGVbN4v_acosf; _ZGVcN8v_acosf; _ZGVdN8v_acosf; _ZGVeN16v_acosf; _ZGVbN4v_acosf; _ZGVcN8v_acosf; _ZGVdN8v_acosf; _ZGVeN16v_acosf;
_ZGVbN4v_asinf; _ZGVcN8v_asinf; _ZGVdN8v_asinf; _ZGVeN16v_asinf;
_ZGVbN4v_atanf; _ZGVcN8v_atanf; _ZGVdN8v_atanf; _ZGVeN16v_atanf; _ZGVbN4v_atanf; _ZGVcN8v_atanf; _ZGVdN8v_atanf; _ZGVeN16v_atanf;
} }
} }

View File

@ -93,6 +93,26 @@ float: 1
float128: 2 float128: 2
ldouble: 1 ldouble: 1
Function: "asin_vlen16":
float: 1
Function: "asin_vlen2":
double: 1
Function: "asin_vlen4":
double: 1
float: 1
Function: "asin_vlen4_avx2":
double: 1
Function: "asin_vlen8":
double: 1
float: 1
Function: "asin_vlen8_avx2":
float: 1
Function: "asinh": Function: "asinh":
double: 2 double: 2
float: 2 float: 2

View File

@ -0,0 +1,20 @@
/* SSE2 version of vectorized asin, vector length is 2.
Copyright (C) 2021 Free Software Foundation, Inc.
This file is part of the GNU C Library.
The GNU C Library is free software; you can redistribute it and/or
modify it under the terms of the GNU Lesser General Public
License as published by the Free Software Foundation; either
version 2.1 of the License, or (at your option) any later version.
The GNU C Library is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
Lesser General Public License for more details.
You should have received a copy of the GNU Lesser General Public
License along with the GNU C Library; if not, see
<https://www.gnu.org/licenses/>. */
#define _ZGVbN2v_asin _ZGVbN2v_asin_sse2
#include "../svml_d_asin2_core.S"

View File

@ -0,0 +1,27 @@
/* Multiple versions of vectorized asin, vector length is 2.
Copyright (C) 2021 Free Software Foundation, Inc.
This file is part of the GNU C Library.
The GNU C Library is free software; you can redistribute it and/or
modify it under the terms of the GNU Lesser General Public
License as published by the Free Software Foundation; either
version 2.1 of the License, or (at your option) any later version.
The GNU C Library is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
Lesser General Public License for more details.
You should have received a copy of the GNU Lesser General Public
License along with the GNU C Library; if not, see
<https://www.gnu.org/licenses/>. */
#define SYMBOL_NAME _ZGVbN2v_asin
#include "ifunc-mathvec-sse4_1.h"
libc_ifunc_redirected (REDIRECT_NAME, SYMBOL_NAME, IFUNC_SELECTOR ());
#ifdef SHARED
__hidden_ver1 (_ZGVbN2v_asin, __GI__ZGVbN2v_asin, __redirect__ZGVbN2v_asin)
__attribute__ ((visibility ("hidden")));
#endif

View File

@ -0,0 +1,288 @@
/* Function asin vectorized with SSE4.
Copyright (C) 2021 Free Software Foundation, Inc.
This file is part of the GNU C Library.
The GNU C Library is free software; you can redistribute it and/or
modify it under the terms of the GNU Lesser General Public
License as published by the Free Software Foundation; either
version 2.1 of the License, or (at your option) any later version.
The GNU C Library is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
Lesser General Public License for more details.
You should have received a copy of the GNU Lesser General Public
License along with the GNU C Library; if not, see
<https://www.gnu.org/licenses/>. */
/*
* ALGORITHM DESCRIPTION:
*
* SelMask = (|x| >= 0.5) ? 1 : 0;
* R = SelMask ? sqrt(0.5 - 0.5*|x|) : |x|
* asin(x) = (SelMask ? (Pi/2 - 2*Poly(R)) : Poly(R))*(-1)^sign(x)
*
*/
/* Offsets for data table __svml_dasin_data_internal
*/
#define AbsMask 0
#define OneHalf 16
#define SmallNorm 32
#define One 48
#define Two 64
#define sqrt_coeff 80
#define poly_coeff 144
#define Pi2H 336
#include <sysdep.h>
.text
.section .text.sse4,"ax",@progbits
ENTRY(_ZGVbN2v_asin_sse4)
subq $72, %rsp
cfi_def_cfa_offset(80)
movaps %xmm0, %xmm5
movups __svml_dasin_data_internal(%rip), %xmm3
movups OneHalf+__svml_dasin_data_internal(%rip), %xmm8
/* x = |arg| */
movaps %xmm3, %xmm4
andps %xmm5, %xmm4
/* Y = 0.5 - 0.5*x */
movaps %xmm8, %xmm6
mulpd %xmm4, %xmm6
movaps %xmm8, %xmm14
/* x^2 */
movaps %xmm4, %xmm2
subpd %xmm6, %xmm14
mulpd %xmm4, %xmm2
/* S ~ -2*sqrt(Y) */
cvtpd2ps %xmm14, %xmm9
minpd %xmm14, %xmm2
movlhps %xmm9, %xmm9
movaps %xmm14, %xmm15
rsqrtps %xmm9, %xmm10
cmpltpd SmallNorm+__svml_dasin_data_internal(%rip), %xmm15
addpd %xmm14, %xmm14
cvtps2pd %xmm10, %xmm11
andnps %xmm11, %xmm15
movaps %xmm4, %xmm1
movaps %xmm15, %xmm12
andnps %xmm5, %xmm3
mulpd %xmm15, %xmm12
mulpd %xmm14, %xmm15
mulpd %xmm12, %xmm14
cmpnltpd %xmm8, %xmm1
subpd Two+__svml_dasin_data_internal(%rip), %xmm14
/* polynomial */
movups poly_coeff+__svml_dasin_data_internal(%rip), %xmm6
movaps %xmm2, %xmm12
mulpd %xmm2, %xmm6
mulpd %xmm2, %xmm12
addpd poly_coeff+16+__svml_dasin_data_internal(%rip), %xmm6
movups One+__svml_dasin_data_internal(%rip), %xmm7
movaps %xmm12, %xmm8
cmpltpd %xmm4, %xmm7
mulpd %xmm12, %xmm6
movmskpd %xmm7, %edx
movups poly_coeff+32+__svml_dasin_data_internal(%rip), %xmm9
movaps %xmm14, %xmm0
movups poly_coeff+64+__svml_dasin_data_internal(%rip), %xmm7
mulpd %xmm2, %xmm9
mulpd %xmm2, %xmm7
addpd poly_coeff+48+__svml_dasin_data_internal(%rip), %xmm9
addpd poly_coeff+80+__svml_dasin_data_internal(%rip), %xmm7
mulpd %xmm12, %xmm8
mulpd %xmm12, %xmm7
addpd %xmm6, %xmm9
mulpd %xmm15, %xmm0
mulpd %xmm8, %xmm9
movups poly_coeff+96+__svml_dasin_data_internal(%rip), %xmm10
mulpd %xmm2, %xmm10
movups sqrt_coeff+__svml_dasin_data_internal(%rip), %xmm13
mulpd %xmm14, %xmm13
addpd poly_coeff+112+__svml_dasin_data_internal(%rip), %xmm10
addpd sqrt_coeff+16+__svml_dasin_data_internal(%rip), %xmm13
addpd %xmm7, %xmm10
mulpd %xmm14, %xmm13
addpd %xmm9, %xmm10
addpd sqrt_coeff+32+__svml_dasin_data_internal(%rip), %xmm13
mulpd %xmm12, %xmm10
mulpd %xmm13, %xmm14
movups poly_coeff+128+__svml_dasin_data_internal(%rip), %xmm11
mulpd %xmm2, %xmm11
addpd sqrt_coeff+48+__svml_dasin_data_internal(%rip), %xmm14
addpd poly_coeff+144+__svml_dasin_data_internal(%rip), %xmm11
mulpd %xmm14, %xmm0
addpd %xmm10, %xmm11
subpd %xmm15, %xmm0
mulpd %xmm11, %xmm12
movups poly_coeff+160+__svml_dasin_data_internal(%rip), %xmm13
movaps %xmm1, %xmm14
mulpd %xmm2, %xmm13
addpd poly_coeff+176+__svml_dasin_data_internal(%rip), %xmm13
addpd %xmm12, %xmm13
mulpd %xmm13, %xmm2
andnps %xmm4, %xmm14
andps %xmm1, %xmm0
orps %xmm0, %xmm14
mulpd %xmm14, %xmm2
addpd %xmm2, %xmm14
movups Pi2H+__svml_dasin_data_internal(%rip), %xmm0
andps %xmm1, %xmm0
addpd %xmm14, %xmm0
pxor %xmm3, %xmm0
testl %edx, %edx
/* Go to special inputs processing branch */
jne L(SPECIAL_VALUES_BRANCH)
# LOE rbx rbp r12 r13 r14 r15 edx xmm0 xmm5
/* Restore registers
* and exit the function
*/
L(EXIT):
addq $72, %rsp
cfi_def_cfa_offset(8)
ret
cfi_def_cfa_offset(80)
/* Branch to process
* special inputs
*/
L(SPECIAL_VALUES_BRANCH):
movups %xmm5, 32(%rsp)
movups %xmm0, 48(%rsp)
# LOE rbx rbp r12 r13 r14 r15 edx
xorl %eax, %eax
movq %r12, 16(%rsp)
cfi_offset(12, -64)
movl %eax, %r12d
movq %r13, 8(%rsp)
cfi_offset(13, -72)
movl %edx, %r13d
movq %r14, (%rsp)
cfi_offset(14, -80)
# LOE rbx rbp r15 r12d r13d
/* Range mask
* bits check
*/
L(RANGEMASK_CHECK):
btl %r12d, %r13d
/* Call scalar math function */
jc L(SCALAR_MATH_CALL)
# LOE rbx rbp r15 r12d r13d
/* Special inputs
* processing loop
*/
L(SPECIAL_VALUES_LOOP):
incl %r12d
cmpl $2, %r12d
/* Check bits in range mask */
jl L(RANGEMASK_CHECK)
# LOE rbx rbp r15 r12d r13d
movq 16(%rsp), %r12
cfi_restore(12)
movq 8(%rsp), %r13
cfi_restore(13)
movq (%rsp), %r14
cfi_restore(14)
movups 48(%rsp), %xmm0
/* Go to exit */
jmp L(EXIT)
cfi_offset(12, -64)
cfi_offset(13, -72)
cfi_offset(14, -80)
# LOE rbx rbp r12 r13 r14 r15 xmm0
/* Scalar math fucntion call
* to process special input
*/
L(SCALAR_MATH_CALL):
movl %r12d, %r14d
movsd 32(%rsp,%r14,8), %xmm0
call asin@PLT
# LOE rbx rbp r14 r15 r12d r13d xmm0
movsd %xmm0, 48(%rsp,%r14,8)
/* Process special inputs in loop */
jmp L(SPECIAL_VALUES_LOOP)
# LOE rbx rbp r15 r12d r13d
END(_ZGVbN2v_asin_sse4)
.section .rodata, "a"
.align 16
#ifdef __svml_dasin_data_internal_typedef
typedef unsigned int VUINT32;
typedef struct {
__declspec(align(16)) VUINT32 AbsMask[2][2];
__declspec(align(16)) VUINT32 OneHalf[2][2];
__declspec(align(16)) VUINT32 SmallNorm[2][2];
__declspec(align(16)) VUINT32 One[2][2];
__declspec(align(16)) VUINT32 Two[2][2];
__declspec(align(16)) VUINT32 sqrt_coeff[4][2][2];
__declspec(align(16)) VUINT32 poly_coeff[12][2][2];
__declspec(align(16)) VUINT32 Pi2H[2][2];
} __svml_dasin_data_internal;
#endif
__svml_dasin_data_internal:
/*== AbsMask ==*/
.quad 0x7fffffffffffffff, 0x7fffffffffffffff
/*== OneHalf ==*/
.align 16
.quad 0x3fe0000000000000, 0x3fe0000000000000
/*== SmallNorm ==*/
.align 16
.quad 0x3000000000000000, 0x3000000000000000
/*== One ==*/
.align 16
.quad 0x3ff0000000000000, 0x3ff0000000000000
/*== Two ==*/
.align 16
.quad 0x4000000000000000, 0x4000000000000000
/*== sqrt_coeff[4] ==*/
.align 16
.quad 0xbf918000993B24C3, 0xbf918000993B24C3 /* sqrt_coeff4 */
.quad 0x3fa400006F70D42D, 0x3fa400006F70D42D /* sqrt_coeff3 */
.quad 0xbfb7FFFFFFFFFE97, 0xbfb7FFFFFFFFFE97 /* sqrt_coeff2 */
.quad 0x3fcFFFFFFFFFFF9D, 0x3fcFFFFFFFFFFF9D /* sqrt_coeff1 */
/*== poly_coeff[12] ==*/
.align 16
.quad 0x3fa07520C70EB909, 0x3fa07520C70EB909 /* poly_coeff12 */
.quad 0xbf90FB17F7DBB0ED, 0xbf90FB17F7DBB0ED /* poly_coeff11 */
.quad 0x3f943F44BFBC3BAE, 0x3f943F44BFBC3BAE /* poly_coeff10 */
.quad 0x3f7A583395D45ED5, 0x3f7A583395D45ED5 /* poly_coeff9 */
.quad 0x3f88F8DC2AFCCAD6, 0x3f88F8DC2AFCCAD6 /* poly_coeff8 */
.quad 0x3f8C6DBBCB88BD57, 0x3f8C6DBBCB88BD57 /* poly_coeff7 */
.quad 0x3f91C6DCF538AD2E, 0x3f91C6DCF538AD2E /* poly_coeff6 */
.quad 0x3f96E89CEBDEFadd, 0x3f96E89CEBDEFadd /* poly_coeff5 */
.quad 0x3f9F1C72E13AD8BE, 0x3f9F1C72E13AD8BE /* poly_coeff4 */
.quad 0x3fa6DB6DB3B445F8, 0x3fa6DB6DB3B445F8 /* poly_coeff3 */
.quad 0x3fb333333337E0DE, 0x3fb333333337E0DE /* poly_coeff2 */
.quad 0x3fc555555555529C, 0x3fc555555555529C /* poly_coeff1 */
/*== Pi2H ==*/
.align 16
.quad 0x3ff921fb54442d18, 0x3ff921fb54442d18
.align 16
.type __svml_dasin_data_internal,@object
.size __svml_dasin_data_internal,.-__svml_dasin_data_internal

View File

@ -0,0 +1,20 @@
/* SSE version of vectorized asin, vector length is 4.
Copyright (C) 2021 Free Software Foundation, Inc.
This file is part of the GNU C Library.
The GNU C Library is free software; you can redistribute it and/or
modify it under the terms of the GNU Lesser General Public
License as published by the Free Software Foundation; either
version 2.1 of the License, or (at your option) any later version.
The GNU C Library is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
Lesser General Public License for more details.
You should have received a copy of the GNU Lesser General Public
License along with the GNU C Library; if not, see
<https://www.gnu.org/licenses/>. */
#define _ZGVdN4v_asin _ZGVdN4v_asin_sse_wrapper
#include "../svml_d_asin4_core.S"

View File

@ -0,0 +1,27 @@
/* Multiple versions of vectorized asin, vector length is 4.
Copyright (C) 2021 Free Software Foundation, Inc.
This file is part of the GNU C Library.
The GNU C Library is free software; you can redistribute it and/or
modify it under the terms of the GNU Lesser General Public
License as published by the Free Software Foundation; either
version 2.1 of the License, or (at your option) any later version.
The GNU C Library is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
Lesser General Public License for more details.
You should have received a copy of the GNU Lesser General Public
License along with the GNU C Library; if not, see
<https://www.gnu.org/licenses/>. */
#define SYMBOL_NAME _ZGVdN4v_asin
#include "ifunc-mathvec-avx2.h"
libc_ifunc_redirected (REDIRECT_NAME, SYMBOL_NAME, IFUNC_SELECTOR ());
#ifdef SHARED
__hidden_ver1 (_ZGVdN4v_asin, __GI__ZGVdN4v_asin, __redirect__ZGVdN4v_asin)
__attribute__ ((visibility ("hidden")));
#endif

View File

@ -0,0 +1,273 @@
/* Function asin vectorized with AVX2.
Copyright (C) 2021 Free Software Foundation, Inc.
This file is part of the GNU C Library.
The GNU C Library is free software; you can redistribute it and/or
modify it under the terms of the GNU Lesser General Public
License as published by the Free Software Foundation; either
version 2.1 of the License, or (at your option) any later version.
The GNU C Library is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
Lesser General Public License for more details.
You should have received a copy of the GNU Lesser General Public
License along with the GNU C Library; if not, see
<https://www.gnu.org/licenses/>. */
/*
* ALGORITHM DESCRIPTION:
*
* SelMask = (|x| >= 0.5) ? 1 : 0;
* R = SelMask ? sqrt(0.5 - 0.5*|x|) : |x|
* asin(x) = (SelMask ? (Pi/2 - 2*Poly(R)) : Poly(R))*(-1)^sign(x)
*
*/
/* Offsets for data table __svml_dasin_data_internal
*/
#define AbsMask 0
#define OneHalf 32
#define SmallNorm 64
#define One 96
#define Two 128
#define sqrt_coeff 160
#define poly_coeff 288
#define Pi2H 672
#include <sysdep.h>
.text
.section .text.avx2,"ax",@progbits
ENTRY(_ZGVdN4v_asin_avx2)
pushq %rbp
cfi_def_cfa_offset(16)
movq %rsp, %rbp
cfi_def_cfa(6, 16)
cfi_offset(6, -16)
andq $-32, %rsp
subq $96, %rsp
vmovupd __svml_dasin_data_internal(%rip), %ymm6
vmovupd OneHalf+__svml_dasin_data_internal(%rip), %ymm10
vmovupd One+__svml_dasin_data_internal(%rip), %ymm8
vmovapd %ymm0, %ymm5
/* x = |arg| */
vandpd %ymm5, %ymm6, %ymm4
/* Y = 0.5 - 0.5*x */
vmovapd %ymm10, %ymm15
vfnmadd231pd %ymm4, %ymm10, %ymm15
/* x^2 */
vmulpd %ymm4, %ymm4, %ymm7
vcmplt_oqpd %ymm4, %ymm8, %ymm9
/* S ~ -2*sqrt(Y) */
vcmplt_oqpd SmallNorm+__svml_dasin_data_internal(%rip), %ymm15, %ymm13
vminpd %ymm15, %ymm7, %ymm2
vaddpd %ymm15, %ymm15, %ymm7
vcmpnlt_uqpd %ymm10, %ymm4, %ymm1
vcvtpd2ps %ymm15, %xmm11
vmovupd poly_coeff+64+__svml_dasin_data_internal(%rip), %ymm10
vmulpd %ymm2, %ymm2, %ymm15
vrsqrtps %xmm11, %xmm12
vmovupd poly_coeff+192+__svml_dasin_data_internal(%rip), %ymm11
vfmadd213pd poly_coeff+96+__svml_dasin_data_internal(%rip), %ymm2, %ymm10
vcvtps2pd %xmm12, %ymm14
vmulpd %ymm15, %ymm15, %ymm12
vfmadd213pd poly_coeff+224+__svml_dasin_data_internal(%rip), %ymm2, %ymm11
vandnpd %ymm14, %ymm13, %ymm0
vandnpd %ymm5, %ymm6, %ymm3
vmulpd %ymm0, %ymm0, %ymm6
vmovupd poly_coeff+128+__svml_dasin_data_internal(%rip), %ymm13
vmovupd poly_coeff+256+__svml_dasin_data_internal(%rip), %ymm14
vfmadd213pd poly_coeff+160+__svml_dasin_data_internal(%rip), %ymm2, %ymm13
vfmadd213pd poly_coeff+288+__svml_dasin_data_internal(%rip), %ymm2, %ymm14
vfmadd213pd %ymm11, %ymm15, %ymm13
vmovmskpd %ymm9, %edx
vmulpd %ymm7, %ymm0, %ymm9
vfmsub213pd Two+__svml_dasin_data_internal(%rip), %ymm6, %ymm7
/* polynomial */
vmovupd poly_coeff+__svml_dasin_data_internal(%rip), %ymm6
vmovupd sqrt_coeff+__svml_dasin_data_internal(%rip), %ymm0
vmulpd %ymm7, %ymm9, %ymm8
vfmadd213pd poly_coeff+32+__svml_dasin_data_internal(%rip), %ymm2, %ymm6
vfmadd213pd sqrt_coeff+32+__svml_dasin_data_internal(%rip), %ymm7, %ymm0
vfmadd213pd %ymm10, %ymm15, %ymm6
vmovupd poly_coeff+320+__svml_dasin_data_internal(%rip), %ymm10
vfmadd213pd sqrt_coeff+64+__svml_dasin_data_internal(%rip), %ymm7, %ymm0
vfmadd213pd %ymm13, %ymm12, %ymm6
vfmadd213pd poly_coeff+352+__svml_dasin_data_internal(%rip), %ymm2, %ymm10
vfmadd213pd sqrt_coeff+96+__svml_dasin_data_internal(%rip), %ymm7, %ymm0
vfmadd213pd %ymm14, %ymm15, %ymm6
vfmsub213pd %ymm9, %ymm8, %ymm0
vfmadd213pd %ymm10, %ymm15, %ymm6
vblendvpd %ymm1, %ymm0, %ymm4, %ymm4
vmulpd %ymm6, %ymm2, %ymm2
vfmadd213pd %ymm4, %ymm4, %ymm2
vandpd Pi2H+__svml_dasin_data_internal(%rip), %ymm1, %ymm1
vaddpd %ymm2, %ymm1, %ymm0
vxorpd %ymm3, %ymm0, %ymm0
testl %edx, %edx
/* Go to special inputs processing branch */
jne L(SPECIAL_VALUES_BRANCH)
# LOE rbx r12 r13 r14 r15 edx ymm0 ymm5
/* Restore registers
* and exit the function
*/
L(EXIT):
movq %rbp, %rsp
popq %rbp
cfi_def_cfa(7, 8)
cfi_restore(6)
ret
cfi_def_cfa(6, 16)
cfi_offset(6, -16)
/* Branch to process
* special inputs
*/
L(SPECIAL_VALUES_BRANCH):
vmovupd %ymm5, 32(%rsp)
vmovupd %ymm0, 64(%rsp)
# LOE rbx r12 r13 r14 r15 edx ymm0
xorl %eax, %eax
# LOE rbx r12 r13 r14 r15 eax edx
vzeroupper
movq %r12, 16(%rsp)
/* DW_CFA_expression: r12 (r12) (DW_OP_lit8; DW_OP_minus; DW_OP_const4s: -32; DW_OP_and; DW_OP_const4s: -80; DW_OP_plus) */
.cfi_escape 0x10, 0x0c, 0x0e, 0x38, 0x1c, 0x0d, 0xe0, 0xff, 0xff, 0xff, 0x1a, 0x0d, 0xb0, 0xff, 0xff, 0xff, 0x22
movl %eax, %r12d
movq %r13, 8(%rsp)
/* DW_CFA_expression: r13 (r13) (DW_OP_lit8; DW_OP_minus; DW_OP_const4s: -32; DW_OP_and; DW_OP_const4s: -88; DW_OP_plus) */
.cfi_escape 0x10, 0x0d, 0x0e, 0x38, 0x1c, 0x0d, 0xe0, 0xff, 0xff, 0xff, 0x1a, 0x0d, 0xa8, 0xff, 0xff, 0xff, 0x22
movl %edx, %r13d
movq %r14, (%rsp)
/* DW_CFA_expression: r14 (r14) (DW_OP_lit8; DW_OP_minus; DW_OP_const4s: -32; DW_OP_and; DW_OP_const4s: -96; DW_OP_plus) */
.cfi_escape 0x10, 0x0e, 0x0e, 0x38, 0x1c, 0x0d, 0xe0, 0xff, 0xff, 0xff, 0x1a, 0x0d, 0xa0, 0xff, 0xff, 0xff, 0x22
# LOE rbx r15 r12d r13d
/* Range mask
* bits check
*/
L(RANGEMASK_CHECK):
btl %r12d, %r13d
/* Call scalar math function */
jc L(SCALAR_MATH_CALL)
# LOE rbx r15 r12d r13d
/* Special inputs
* processing loop
*/
L(SPECIAL_VALUES_LOOP):
incl %r12d
cmpl $4, %r12d
/* Check bits in range mask */
jl L(RANGEMASK_CHECK)
# LOE rbx r15 r12d r13d
movq 16(%rsp), %r12
cfi_restore(12)
movq 8(%rsp), %r13
cfi_restore(13)
movq (%rsp), %r14
cfi_restore(14)
vmovupd 64(%rsp), %ymm0
/* Go to exit */
jmp L(EXIT)
/* DW_CFA_expression: r12 (r12) (DW_OP_lit8; DW_OP_minus; DW_OP_const4s: -32; DW_OP_and; DW_OP_const4s: -80; DW_OP_plus) */
.cfi_escape 0x10, 0x0c, 0x0e, 0x38, 0x1c, 0x0d, 0xe0, 0xff, 0xff, 0xff, 0x1a, 0x0d, 0xb0, 0xff, 0xff, 0xff, 0x22
/* DW_CFA_expression: r13 (r13) (DW_OP_lit8; DW_OP_minus; DW_OP_const4s: -32; DW_OP_and; DW_OP_const4s: -88; DW_OP_plus) */
.cfi_escape 0x10, 0x0d, 0x0e, 0x38, 0x1c, 0x0d, 0xe0, 0xff, 0xff, 0xff, 0x1a, 0x0d, 0xa8, 0xff, 0xff, 0xff, 0x22
/* DW_CFA_expression: r14 (r14) (DW_OP_lit8; DW_OP_minus; DW_OP_const4s: -32; DW_OP_and; DW_OP_const4s: -96; DW_OP_plus) */
.cfi_escape 0x10, 0x0e, 0x0e, 0x38, 0x1c, 0x0d, 0xe0, 0xff, 0xff, 0xff, 0x1a, 0x0d, 0xa0, 0xff, 0xff, 0xff, 0x22
# LOE rbx r12 r13 r14 r15 ymm0
/* Scalar math fucntion call
* to process special input
*/
L(SCALAR_MATH_CALL):
movl %r12d, %r14d
movsd 32(%rsp,%r14,8), %xmm0
call asin@PLT
# LOE rbx r14 r15 r12d r13d xmm0
movsd %xmm0, 64(%rsp,%r14,8)
/* Process special inputs in loop */
jmp L(SPECIAL_VALUES_LOOP)
# LOE rbx r15 r12d r13d
END(_ZGVdN4v_asin_avx2)
.section .rodata, "a"
.align 32
#ifdef __svml_dasin_data_internal_typedef
typedef unsigned int VUINT32;
typedef struct {
__declspec(align(32)) VUINT32 AbsMask[4][2];
__declspec(align(32)) VUINT32 OneHalf[4][2];
__declspec(align(32)) VUINT32 SmallNorm[4][2];
__declspec(align(32)) VUINT32 One[4][2];
__declspec(align(32)) VUINT32 Two[4][2];
__declspec(align(32)) VUINT32 sqrt_coeff[4][4][2];
__declspec(align(32)) VUINT32 poly_coeff[12][4][2];
__declspec(align(32)) VUINT32 Pi2H[4][2];
} __svml_dasin_data_internal;
#endif
__svml_dasin_data_internal:
/*== AbsMask ==*/
.quad 0x7fffffffffffffff, 0x7fffffffffffffff, 0x7fffffffffffffff, 0x7fffffffffffffff
/*== OneHalf ==*/
.align 32
.quad 0x3fe0000000000000, 0x3fe0000000000000, 0x3fe0000000000000, 0x3fe0000000000000
/*== SmallNorm ==*/
.align 32
.quad 0x3000000000000000, 0x3000000000000000, 0x3000000000000000, 0x3000000000000000
/*== One ==*/
.align 32
.quad 0x3ff0000000000000, 0x3ff0000000000000, 0x3ff0000000000000, 0x3ff0000000000000
/*== Two ==*/
.align 32
.quad 0x4000000000000000, 0x4000000000000000, 0x4000000000000000, 0x4000000000000000
/*== sqrt_coeff[4] ==*/
.align 32
.quad 0xbf918000993B24C3, 0xbf918000993B24C3, 0xbf918000993B24C3, 0xbf918000993B24C3 /* sqrt_coeff4 */
.quad 0x3fa400006F70D42D, 0x3fa400006F70D42D, 0x3fa400006F70D42D, 0x3fa400006F70D42D /* sqrt_coeff3 */
.quad 0xbfb7FFFFFFFFFE97, 0xbfb7FFFFFFFFFE97, 0xbfb7FFFFFFFFFE97, 0xbfb7FFFFFFFFFE97 /* sqrt_coeff2 */
.quad 0x3fcFFFFFFFFFFF9D, 0x3fcFFFFFFFFFFF9D, 0x3fcFFFFFFFFFFF9D, 0x3fcFFFFFFFFFFF9D /* sqrt_coeff1 */
/*== poly_coeff[12] ==*/
.align 32
.quad 0x3fa07520C70EB909, 0x3fa07520C70EB909, 0x3fa07520C70EB909, 0x3fa07520C70EB909 /* poly_coeff12 */
.quad 0xbf90FB17F7DBB0ED, 0xbf90FB17F7DBB0ED, 0xbf90FB17F7DBB0ED, 0xbf90FB17F7DBB0ED /* poly_coeff11 */
.quad 0x3f943F44BFBC3BAE, 0x3f943F44BFBC3BAE, 0x3f943F44BFBC3BAE, 0x3f943F44BFBC3BAE /* poly_coeff10 */
.quad 0x3f7A583395D45ED5, 0x3f7A583395D45ED5, 0x3f7A583395D45ED5, 0x3f7A583395D45ED5 /* poly_coeff9 */
.quad 0x3f88F8DC2AFCCAD6, 0x3f88F8DC2AFCCAD6, 0x3f88F8DC2AFCCAD6, 0x3f88F8DC2AFCCAD6 /* poly_coeff8 */
.quad 0x3f8C6DBBCB88BD57, 0x3f8C6DBBCB88BD57, 0x3f8C6DBBCB88BD57, 0x3f8C6DBBCB88BD57 /* poly_coeff7 */
.quad 0x3f91C6DCF538AD2E, 0x3f91C6DCF538AD2E, 0x3f91C6DCF538AD2E, 0x3f91C6DCF538AD2E /* poly_coeff6 */
.quad 0x3f96E89CEBDEFadd, 0x3f96E89CEBDEFadd, 0x3f96E89CEBDEFadd, 0x3f96E89CEBDEFadd /* poly_coeff5 */
.quad 0x3f9F1C72E13AD8BE, 0x3f9F1C72E13AD8BE, 0x3f9F1C72E13AD8BE, 0x3f9F1C72E13AD8BE /* poly_coeff4 */
.quad 0x3fa6DB6DB3B445F8, 0x3fa6DB6DB3B445F8, 0x3fa6DB6DB3B445F8, 0x3fa6DB6DB3B445F8 /* poly_coeff3 */
.quad 0x3fb333333337E0DE, 0x3fb333333337E0DE, 0x3fb333333337E0DE, 0x3fb333333337E0DE /* poly_coeff2 */
.quad 0x3fc555555555529C, 0x3fc555555555529C, 0x3fc555555555529C, 0x3fc555555555529C /* poly_coeff1 */
/*== Pi2H ==*/
.align 32
.quad 0x3ff921fb54442d18, 0x3ff921fb54442d18, 0x3ff921fb54442d18, 0x3ff921fb54442d18
.align 32
.type __svml_dasin_data_internal,@object
.size __svml_dasin_data_internal,.-__svml_dasin_data_internal

View File

@ -0,0 +1,20 @@
/* AVX2 version of vectorized asin, vector length is 8.
Copyright (C) 2021 Free Software Foundation, Inc.
This file is part of the GNU C Library.
The GNU C Library is free software; you can redistribute it and/or
modify it under the terms of the GNU Lesser General Public
License as published by the Free Software Foundation; either
version 2.1 of the License, or (at your option) any later version.
The GNU C Library is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
Lesser General Public License for more details.
You should have received a copy of the GNU Lesser General Public
License along with the GNU C Library; if not, see
<https://www.gnu.org/licenses/>. */
#define _ZGVeN8v_asin _ZGVeN8v_asin_avx2_wrapper
#include "../svml_d_asin8_core.S"

View File

@ -0,0 +1,27 @@
/* Multiple versions of vectorized asin, vector length is 8.
Copyright (C) 2021 Free Software Foundation, Inc.
This file is part of the GNU C Library.
The GNU C Library is free software; you can redistribute it and/or
modify it under the terms of the GNU Lesser General Public
License as published by the Free Software Foundation; either
version 2.1 of the License, or (at your option) any later version.
The GNU C Library is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
Lesser General Public License for more details.
You should have received a copy of the GNU Lesser General Public
License along with the GNU C Library; if not, see
<https://www.gnu.org/licenses/>. */
#define SYMBOL_NAME _ZGVeN8v_asin
#include "ifunc-mathvec-avx512-skx.h"
libc_ifunc_redirected (REDIRECT_NAME, SYMBOL_NAME, IFUNC_SELECTOR ());
#ifdef SHARED
__hidden_ver1 (_ZGVeN8v_asin, __GI__ZGVeN8v_asin, __redirect__ZGVeN8v_asin)
__attribute__ ((visibility ("hidden")));
#endif

View File

@ -0,0 +1,295 @@
/* Function asin vectorized with AVX-512.
Copyright (C) 2021 Free Software Foundation, Inc.
This file is part of the GNU C Library.
The GNU C Library is free software; you can redistribute it and/or
modify it under the terms of the GNU Lesser General Public
License as published by the Free Software Foundation; either
version 2.1 of the License, or (at your option) any later version.
The GNU C Library is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
Lesser General Public License for more details.
You should have received a copy of the GNU Lesser General Public
License along with the GNU C Library; if not, see
<https://www.gnu.org/licenses/>. */
/*
* ALGORITHM DESCRIPTION:
*
* SelMask = (|x| >= 0.5) ? 1 : 0;
* R = SelMask ? sqrt(0.5 - 0.5*|x|) : |x|
* asin(x) = (SelMask ? (Pi/2 - 2*Poly(R)) : Poly(R))*(-1)^sign(x)
*
*/
/* Offsets for data table __svml_dasin_data_internal
*/
#define AbsMask 0
#define OneHalf 64
#define SmallNorm 128
#define One 192
#define Two 256
#define sqrt_coeff_1 320
#define sqrt_coeff_2 384
#define sqrt_coeff_3 448
#define sqrt_coeff_4 512
#define poly_coeff_1 576
#define poly_coeff_2 640
#define poly_coeff_3 704
#define poly_coeff_4 768
#define poly_coeff_5 832
#define poly_coeff_6 896
#define poly_coeff_7 960
#define poly_coeff_8 1024
#define poly_coeff_9 1088
#define poly_coeff_10 1152
#define poly_coeff_11 1216
#define poly_coeff_12 1280
#define Pi2H 1344
#include <sysdep.h>

        .text
        .section .text.evex512,"ax",@progbits

/* _ZGVeN8v_asin_skx -- AVX-512 (SKX) vector asin.
   In:   %zmm0 = x[0..7] (doubles)
   Out:  %zmm0 = asin(x[0..7])
   Lanes with 1.0 < |x| are recorded in a bitmask and recomputed one
   at a time through the scalar asin (see SPECIAL_VALUES_BRANCH).  */
ENTRY(_ZGVeN8v_asin_skx)
        /* Keep a frame pointer and align %rsp to 64 bytes: the
           special-value path spills two ZMM vectors at 64(%rsp) and
           128(%rsp).  */
        pushq   %rbp
        cfi_def_cfa_offset(16)
        movq    %rsp, %rbp
        cfi_def_cfa(6, 16)
        cfi_offset(6, -16)
        andq    $-64, %rsp
        subq    $192, %rsp
        vmovups OneHalf+__svml_dasin_data_internal(%rip), %zmm8

        /* S ~ -2*sqrt(Y) */
        vmovups SmallNorm+__svml_dasin_data_internal(%rip), %zmm10
        vmovups Two+__svml_dasin_data_internal(%rip), %zmm14
        vmovups sqrt_coeff_1+__svml_dasin_data_internal(%rip), %zmm15
        vmovups sqrt_coeff_2+__svml_dasin_data_internal(%rip), %zmm2
        vmovups sqrt_coeff_3+__svml_dasin_data_internal(%rip), %zmm1
        vmovups One+__svml_dasin_data_internal(%rip), %zmm9
        vmovaps %zmm0, %zmm6

        /* x = |arg| */
        vandpd  __svml_dasin_data_internal(%rip), %zmm6, %zmm4

        /* Y = 0.5 - 0.5*x */
        vmovaps %zmm8, %zmm11
        vfnmadd231pd {rn-sae}, %zmm4, %zmm8, %zmm11

        /* x^2 */
        vmulpd  {rn-sae}, %zmm4, %zmm4, %zmm7
        vrsqrt14pd %zmm11, %zmm12
        vcmppd  $17, {sae}, %zmm10, %zmm11, %k1 /* k1: Y < SmallNorm.  */
        vcmppd  $21, {sae}, %zmm8, %zmm4, %k2   /* k2 (SelMask): |x| >= 0.5.  */
        vcmppd  $17, {sae}, %zmm4, %zmm9, %k0   /* k0: 1.0 < |x| -> scalar path.  */
        vmovups poly_coeff_5+__svml_dasin_data_internal(%rip), %zmm10

        /* polynomial */
        vmovups poly_coeff_1+__svml_dasin_data_internal(%rip), %zmm8
        vmovups poly_coeff_3+__svml_dasin_data_internal(%rip), %zmm9
        vminpd  {sae}, %zmm11, %zmm7, %zmm3     /* zmm3 = min(x^2, Y): poly argument.  */
        vxorpd  %zmm12, %zmm12, %zmm12{%k1}     /* Zero rsqrt result where Y is tiny.  */
        vaddpd  {rn-sae}, %zmm11, %zmm11, %zmm0
        vxorpd  %zmm6, %zmm4, %zmm5             /* zmm5 = sign bit of the argument.  */
        vmulpd  {rn-sae}, %zmm12, %zmm12, %zmm13
        vmulpd  {rn-sae}, %zmm12, %zmm0, %zmm7
        vmovups poly_coeff_7+__svml_dasin_data_internal(%rip), %zmm11
        vmovups poly_coeff_4+__svml_dasin_data_internal(%rip), %zmm12
        vfmsub213pd {rn-sae}, %zmm14, %zmm13, %zmm0
        vmovups sqrt_coeff_4+__svml_dasin_data_internal(%rip), %zmm13
        vfmadd231pd {rn-sae}, %zmm3, %zmm9, %zmm12
        vmovups poly_coeff_11+__svml_dasin_data_internal(%rip), %zmm9
        vfmadd231pd {rn-sae}, %zmm0, %zmm15, %zmm2
        vmovups poly_coeff_9+__svml_dasin_data_internal(%rip), %zmm15
        vmulpd  {rn-sae}, %zmm0, %zmm7, %zmm14
        vfmadd213pd {rn-sae}, %zmm1, %zmm0, %zmm2
        vmovups poly_coeff_2+__svml_dasin_data_internal(%rip), %zmm1
        kmovw   %k0, %edx                       /* Save special-lane bitmask.  */
        vfmadd213pd {rn-sae}, %zmm13, %zmm0, %zmm2
        vfmadd231pd {rn-sae}, %zmm3, %zmm8, %zmm1
        vmovups poly_coeff_10+__svml_dasin_data_internal(%rip), %zmm8
        vmulpd  {rn-sae}, %zmm3, %zmm3, %zmm0
        vfmsub213pd {rn-sae}, %zmm7, %zmm14, %zmm2
        vmovups poly_coeff_6+__svml_dasin_data_internal(%rip), %zmm7
        vfmadd231pd {rn-sae}, %zmm3, %zmm15, %zmm8
        vfmadd213pd {rn-sae}, %zmm12, %zmm0, %zmm1
        vblendmpd %zmm2, %zmm4, %zmm2{%k2}      /* R = SelMask ? sqrt path : |x|.  */
        vfmadd231pd {rn-sae}, %zmm3, %zmm10, %zmm7
        vmovups poly_coeff_8+__svml_dasin_data_internal(%rip), %zmm10
        vmovups Pi2H+__svml_dasin_data_internal(%rip), %zmm4
        vfmadd231pd {rn-sae}, %zmm3, %zmm11, %zmm10
        vmovups poly_coeff_12+__svml_dasin_data_internal(%rip), %zmm11
        vfmadd213pd {rn-sae}, %zmm10, %zmm0, %zmm7
        vfmadd231pd {rn-sae}, %zmm3, %zmm9, %zmm11
        vmulpd  {rn-sae}, %zmm0, %zmm0, %zmm10
        vfmadd213pd {rn-sae}, %zmm7, %zmm10, %zmm1
        vfmadd213pd {rn-sae}, %zmm8, %zmm0, %zmm1
        vfmadd213pd {rn-sae}, %zmm11, %zmm0, %zmm1
        vmulpd  {rn-sae}, %zmm3, %zmm1, %zmm3
        vfmadd213pd {rn-sae}, %zmm2, %zmm2, %zmm3 /* R + R*Poly(R^2)*R^2.  */
        vaddpd  {rn-sae}, %zmm4, %zmm3, %zmm3{%k2} /* + Pi/2 on the sqrt path only.  */
        vxorpd  %zmm5, %zmm3, %zmm0             /* Reapply the argument's sign.  */
        testl   %edx, %edx

        /* Go to special inputs processing branch */
        jne     L(SPECIAL_VALUES_BRANCH)
        # LOE rbx r12 r13 r14 r15 edx zmm0 zmm6

        /* Restore registers
         * and exit the function
         */
L(EXIT):
        movq    %rbp, %rsp
        popq    %rbp
        cfi_def_cfa(7, 8)
        cfi_restore(6)
        ret
        cfi_def_cfa(6, 16)
        cfi_offset(6, -16)

        /* Branch to process
         * special inputs
         */
L(SPECIAL_VALUES_BRANCH):
        vmovups %zmm6, 64(%rsp)                 /* Spill original arguments.  */
        vmovups %zmm0, 128(%rsp)                /* Spill vector results.  */
        # LOE rbx r12 r13 r14 r15 edx zmm0
        xorl    %eax, %eax
        # LOE rbx r12 r13 r14 r15 eax edx
        vzeroupper
        movq    %r12, 16(%rsp)
        /* DW_CFA_expression: r12 (r12) (DW_OP_lit8; DW_OP_minus; DW_OP_const4s: -64; DW_OP_and; DW_OP_const4s: -176; DW_OP_plus) */
        .cfi_escape 0x10, 0x0c, 0x0e, 0x38, 0x1c, 0x0d, 0xc0, 0xff, 0xff, 0xff, 0x1a, 0x0d, 0x50, 0xff, 0xff, 0xff, 0x22
        movl    %eax, %r12d                     /* r12d: current lane index.  */
        movq    %r13, 8(%rsp)
        /* DW_CFA_expression: r13 (r13) (DW_OP_lit8; DW_OP_minus; DW_OP_const4s: -64; DW_OP_and; DW_OP_const4s: -184; DW_OP_plus) */
        .cfi_escape 0x10, 0x0d, 0x0e, 0x38, 0x1c, 0x0d, 0xc0, 0xff, 0xff, 0xff, 0x1a, 0x0d, 0x48, 0xff, 0xff, 0xff, 0x22
        movl    %edx, %r13d                     /* r13d: special-lane bitmask.  */
        movq    %r14, (%rsp)
        /* DW_CFA_expression: r14 (r14) (DW_OP_lit8; DW_OP_minus; DW_OP_const4s: -64; DW_OP_and; DW_OP_const4s: -192; DW_OP_plus) */
        .cfi_escape 0x10, 0x0e, 0x0e, 0x38, 0x1c, 0x0d, 0xc0, 0xff, 0xff, 0xff, 0x1a, 0x0d, 0x40, 0xff, 0xff, 0xff, 0x22
        # LOE rbx r15 r12d r13d

        /* Range mask
         * bits check
         */
L(RANGEMASK_CHECK):
        btl     %r12d, %r13d

        /* Call scalar math function */
        jc      L(SCALAR_MATH_CALL)
        # LOE rbx r15 r12d r13d

        /* Special inputs
         * processing loop
         */
L(SPECIAL_VALUES_LOOP):
        incl    %r12d
        cmpl    $8, %r12d                       /* 8 double lanes per ZMM vector.  */

        /* Check bits in range mask */
        jl      L(RANGEMASK_CHECK)
        # LOE rbx r15 r12d r13d
        movq    16(%rsp), %r12
        cfi_restore(12)
        movq    8(%rsp), %r13
        cfi_restore(13)
        movq    (%rsp), %r14
        cfi_restore(14)
        vmovups 128(%rsp), %zmm0                /* Reload merged results.  */

        /* Go to exit */
        jmp     L(EXIT)
        /* DW_CFA_expression: r12 (r12) (DW_OP_lit8; DW_OP_minus; DW_OP_const4s: -64; DW_OP_and; DW_OP_const4s: -176; DW_OP_plus) */
        .cfi_escape 0x10, 0x0c, 0x0e, 0x38, 0x1c, 0x0d, 0xc0, 0xff, 0xff, 0xff, 0x1a, 0x0d, 0x50, 0xff, 0xff, 0xff, 0x22
        /* DW_CFA_expression: r13 (r13) (DW_OP_lit8; DW_OP_minus; DW_OP_const4s: -64; DW_OP_and; DW_OP_const4s: -184; DW_OP_plus) */
        .cfi_escape 0x10, 0x0d, 0x0e, 0x38, 0x1c, 0x0d, 0xc0, 0xff, 0xff, 0xff, 0x1a, 0x0d, 0x48, 0xff, 0xff, 0xff, 0x22
        /* DW_CFA_expression: r14 (r14) (DW_OP_lit8; DW_OP_minus; DW_OP_const4s: -64; DW_OP_and; DW_OP_const4s: -192; DW_OP_plus) */
        .cfi_escape 0x10, 0x0e, 0x0e, 0x38, 0x1c, 0x0d, 0xc0, 0xff, 0xff, 0xff, 0x1a, 0x0d, 0x40, 0xff, 0xff, 0xff, 0x22
        # LOE rbx r12 r13 r14 r15 zmm0

        /* Scalar math function call
         * to process special input
         */
L(SCALAR_MATH_CALL):
        movl    %r12d, %r14d
        movsd   64(%rsp,%r14,8), %xmm0          /* Original argument of this lane.  */
        call    asin@PLT
        # LOE rbx r14 r15 r12d r13d xmm0
        movsd   %xmm0, 128(%rsp,%r14,8)         /* Overwrite this lane's result.  */

        /* Process special inputs in loop */
        jmp     L(SPECIAL_VALUES_LOOP)
        # LOE rbx r15 r12d r13d
END(_ZGVeN8v_asin_skx)
        .section .rodata, "a"
        .align 64

#ifdef __svml_dasin_data_internal_typedef
/* Reference-only C description of the table layout; the guard macro
   is never defined, so this block is documentation, not code.  */
typedef unsigned int VUINT32;
typedef struct {
__declspec(align(64)) VUINT32 AbsMask[8][2];
__declspec(align(64)) VUINT32 OneHalf[8][2];
__declspec(align(64)) VUINT32 SmallNorm[8][2];
__declspec(align(64)) VUINT32 One[8][2];
__declspec(align(64)) VUINT32 Two[8][2];
__declspec(align(64)) VUINT32 sqrt_coeff[4][8][2];
__declspec(align(64)) VUINT32 poly_coeff[12][8][2];
__declspec(align(64)) VUINT32 Pi2H[8][2];
} __svml_dasin_data_internal;
#endif
/* Broadcast constant table; each row is one value replicated across
   the 8 double lanes of a ZMM register.  */
__svml_dasin_data_internal:
        /*== AbsMask ==*/
        .quad 0x7fffffffffffffff, 0x7fffffffffffffff, 0x7fffffffffffffff, 0x7fffffffffffffff, 0x7fffffffffffffff, 0x7fffffffffffffff, 0x7fffffffffffffff, 0x7fffffffffffffff
        /*== OneHalf ==*/
        .align 64
        .quad 0x3fe0000000000000, 0x3fe0000000000000, 0x3fe0000000000000, 0x3fe0000000000000, 0x3fe0000000000000, 0x3fe0000000000000, 0x3fe0000000000000, 0x3fe0000000000000
        /*== SmallNorm ==*/
        .align 64
        .quad 0x3000000000000000, 0x3000000000000000, 0x3000000000000000, 0x3000000000000000, 0x3000000000000000, 0x3000000000000000, 0x3000000000000000, 0x3000000000000000
        /*== One ==*/
        .align 64
        .quad 0x3ff0000000000000, 0x3ff0000000000000, 0x3ff0000000000000, 0x3ff0000000000000, 0x3ff0000000000000, 0x3ff0000000000000, 0x3ff0000000000000, 0x3ff0000000000000
        /*== Two ==*/
        .align 64
        .quad 0x4000000000000000, 0x4000000000000000, 0x4000000000000000, 0x4000000000000000, 0x4000000000000000, 0x4000000000000000, 0x4000000000000000, 0x4000000000000000
        /*== sqrt_coeff[4] ==*/
        .align 64
        .quad 0xbf918000993B24C3, 0xbf918000993B24C3, 0xbf918000993B24C3, 0xbf918000993B24C3, 0xbf918000993B24C3, 0xbf918000993B24C3, 0xbf918000993B24C3, 0xbf918000993B24C3 /* sqrt_coeff4 */
        .quad 0x3fa400006F70D42D, 0x3fa400006F70D42D, 0x3fa400006F70D42D, 0x3fa400006F70D42D, 0x3fa400006F70D42D, 0x3fa400006F70D42D, 0x3fa400006F70D42D, 0x3fa400006F70D42D /* sqrt_coeff3 */
        .quad 0xbfb7FFFFFFFFFE97, 0xbfb7FFFFFFFFFE97, 0xbfb7FFFFFFFFFE97, 0xbfb7FFFFFFFFFE97, 0xbfb7FFFFFFFFFE97, 0xbfb7FFFFFFFFFE97, 0xbfb7FFFFFFFFFE97, 0xbfb7FFFFFFFFFE97 /* sqrt_coeff2 */
        .quad 0x3fcFFFFFFFFFFF9D, 0x3fcFFFFFFFFFFF9D, 0x3fcFFFFFFFFFFF9D, 0x3fcFFFFFFFFFFF9D, 0x3fcFFFFFFFFFFF9D, 0x3fcFFFFFFFFFFF9D, 0x3fcFFFFFFFFFFF9D, 0x3fcFFFFFFFFFFF9D /* sqrt_coeff1 */
        /*== poly_coeff[12] ==*/
        .align 64
        .quad 0x3fa07520C70EB909, 0x3fa07520C70EB909, 0x3fa07520C70EB909, 0x3fa07520C70EB909, 0x3fa07520C70EB909, 0x3fa07520C70EB909, 0x3fa07520C70EB909, 0x3fa07520C70EB909 /* poly_coeff12 */
        .quad 0xbf90FB17F7DBB0ED, 0xbf90FB17F7DBB0ED, 0xbf90FB17F7DBB0ED, 0xbf90FB17F7DBB0ED, 0xbf90FB17F7DBB0ED, 0xbf90FB17F7DBB0ED, 0xbf90FB17F7DBB0ED, 0xbf90FB17F7DBB0ED /* poly_coeff11 */
        .quad 0x3f943F44BFBC3BAE, 0x3f943F44BFBC3BAE, 0x3f943F44BFBC3BAE, 0x3f943F44BFBC3BAE, 0x3f943F44BFBC3BAE, 0x3f943F44BFBC3BAE, 0x3f943F44BFBC3BAE, 0x3f943F44BFBC3BAE /* poly_coeff10 */
        .quad 0x3f7A583395D45ED5, 0x3f7A583395D45ED5, 0x3f7A583395D45ED5, 0x3f7A583395D45ED5, 0x3f7A583395D45ED5, 0x3f7A583395D45ED5, 0x3f7A583395D45ED5, 0x3f7A583395D45ED5 /* poly_coeff9 */
        .quad 0x3f88F8DC2AFCCAD6, 0x3f88F8DC2AFCCAD6, 0x3f88F8DC2AFCCAD6, 0x3f88F8DC2AFCCAD6, 0x3f88F8DC2AFCCAD6, 0x3f88F8DC2AFCCAD6, 0x3f88F8DC2AFCCAD6, 0x3f88F8DC2AFCCAD6 /* poly_coeff8 */
        .quad 0x3f8C6DBBCB88BD57, 0x3f8C6DBBCB88BD57, 0x3f8C6DBBCB88BD57, 0x3f8C6DBBCB88BD57, 0x3f8C6DBBCB88BD57, 0x3f8C6DBBCB88BD57, 0x3f8C6DBBCB88BD57, 0x3f8C6DBBCB88BD57 /* poly_coeff7 */
        .quad 0x3f91C6DCF538AD2E, 0x3f91C6DCF538AD2E, 0x3f91C6DCF538AD2E, 0x3f91C6DCF538AD2E, 0x3f91C6DCF538AD2E, 0x3f91C6DCF538AD2E, 0x3f91C6DCF538AD2E, 0x3f91C6DCF538AD2E /* poly_coeff6 */
        .quad 0x3f96E89CEBDEFadd, 0x3f96E89CEBDEFadd, 0x3f96E89CEBDEFadd, 0x3f96E89CEBDEFadd, 0x3f96E89CEBDEFadd, 0x3f96E89CEBDEFadd, 0x3f96E89CEBDEFadd, 0x3f96E89CEBDEFadd /* poly_coeff5 */
        .quad 0x3f9F1C72E13AD8BE, 0x3f9F1C72E13AD8BE, 0x3f9F1C72E13AD8BE, 0x3f9F1C72E13AD8BE, 0x3f9F1C72E13AD8BE, 0x3f9F1C72E13AD8BE, 0x3f9F1C72E13AD8BE, 0x3f9F1C72E13AD8BE /* poly_coeff4 */
        .quad 0x3fa6DB6DB3B445F8, 0x3fa6DB6DB3B445F8, 0x3fa6DB6DB3B445F8, 0x3fa6DB6DB3B445F8, 0x3fa6DB6DB3B445F8, 0x3fa6DB6DB3B445F8, 0x3fa6DB6DB3B445F8, 0x3fa6DB6DB3B445F8 /* poly_coeff3 */
        .quad 0x3fb333333337E0DE, 0x3fb333333337E0DE, 0x3fb333333337E0DE, 0x3fb333333337E0DE, 0x3fb333333337E0DE, 0x3fb333333337E0DE, 0x3fb333333337E0DE, 0x3fb333333337E0DE /* poly_coeff2 */
        .quad 0x3fc555555555529C, 0x3fc555555555529C, 0x3fc555555555529C, 0x3fc555555555529C, 0x3fc555555555529C, 0x3fc555555555529C, 0x3fc555555555529C, 0x3fc555555555529C /* poly_coeff1 */
        /*== Pi2H ==*/
        .align 64
        .quad 0x3ff921fb54442d18, 0x3ff921fb54442d18, 0x3ff921fb54442d18, 0x3ff921fb54442d18, 0x3ff921fb54442d18, 0x3ff921fb54442d18, 0x3ff921fb54442d18, 0x3ff921fb54442d18
        .align 64
        .type __svml_dasin_data_internal,@object
        .size __svml_dasin_data_internal,.-__svml_dasin_data_internal

View File

@ -0,0 +1,20 @@
/* AVX2 version of vectorized asinf.
Copyright (C) 2021 Free Software Foundation, Inc.
This file is part of the GNU C Library.
The GNU C Library is free software; you can redistribute it and/or
modify it under the terms of the GNU Lesser General Public
License as published by the Free Software Foundation; either
version 2.1 of the License, or (at your option) any later version.
The GNU C Library is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
Lesser General Public License for more details.
You should have received a copy of the GNU Lesser General Public
License along with the GNU C Library; if not, see
<https://www.gnu.org/licenses/>. */
/* Build the generic 16-lane core with its symbol renamed to the
   AVX2-wrapper variant, used when the AVX-512 kernel is unavailable.  */
#define _ZGVeN16v_asinf _ZGVeN16v_asinf_avx2_wrapper
#include "../svml_s_asinf16_core.S"

View File

@ -0,0 +1,28 @@
/* Multiple versions of vectorized asinf, vector length is 16.
Copyright (C) 2021 Free Software Foundation, Inc.
This file is part of the GNU C Library.
The GNU C Library is free software; you can redistribute it and/or
modify it under the terms of the GNU Lesser General Public
License as published by the Free Software Foundation; either
version 2.1 of the License, or (at your option) any later version.
The GNU C Library is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
Lesser General Public License for more details.
You should have received a copy of the GNU Lesser General Public
License along with the GNU C Library; if not, see
<https://www.gnu.org/licenses/>. */
/* Select the best _ZGVeN16v_asinf (AVX-512, 16 floats/call)
   implementation at load time via an ifunc.  */
#define SYMBOL_NAME _ZGVeN16v_asinf
#include "ifunc-mathvec-avx512-skx.h"
libc_ifunc_redirected (REDIRECT_NAME, SYMBOL_NAME, IFUNC_SELECTOR ());
#ifdef SHARED
/* Bind internal calls directly to the resolved symbol.  */
__hidden_ver1 (_ZGVeN16v_asinf, __GI__ZGVeN16v_asinf,
               __redirect__ZGVeN16v_asinf)
  __attribute__ ((visibility ("hidden")));
#endif

View File

@ -0,0 +1,260 @@
/* Function asinf vectorized with AVX-512.
Copyright (C) 2021 Free Software Foundation, Inc.
This file is part of the GNU C Library.
The GNU C Library is free software; you can redistribute it and/or
modify it under the terms of the GNU Lesser General Public
License as published by the Free Software Foundation; either
version 2.1 of the License, or (at your option) any later version.
The GNU C Library is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
Lesser General Public License for more details.
You should have received a copy of the GNU Lesser General Public
License along with the GNU C Library; if not, see
<https://www.gnu.org/licenses/>. */
/*
* ALGORITHM DESCRIPTION:
*
* SelMask = (|x| >= 0.5) ? 1 : 0;
* R = SelMask ? sqrt(0.5 - 0.5*|x|) : |x|
* asin(x) = (SelMask ? (Pi/2 - 2*Poly(R)) : Poly(R))*(-1)^sign(x)
*
*
*/
/* Offsets for data table __svml_sasin_data_internal.
   Each field is one 64-byte (ZMM-wide) vector of broadcast constants;
   the values are byte distances from the start of the table defined
   in .rodata below.  */
#define AbsMask 0
#define OneHalf 64
#define SmallNorm 128
#define One 192
#define Two 256
#define sqrt_coeff_1 320
#define sqrt_coeff_2 384
#define poly_coeff_1 448
#define poly_coeff_2 512
#define poly_coeff_3 576
#define poly_coeff_4 640
#define poly_coeff_5 704
#define Pi2H 768
#include <sysdep.h>

        .text
        /* Fixed section name: was ".text.exex512", a typo that silently
           created a new section; EVEX/ZMM kernels belong in
           .text.evex512, matching the double-precision kernel.  */
        .section .text.evex512,"ax",@progbits

/* _ZGVeN16v_asinf_skx -- AVX-512 (SKX) vector asinf.
   In:   %zmm0 = x[0..15] (floats)
   Out:  %zmm0 = asinf(x[0..15])
   Lanes with 1.0f < |x| are recorded in a bitmask and recomputed one
   at a time through the scalar asinf (see SPECIAL_VALUES_BRANCH).  */
ENTRY(_ZGVeN16v_asinf_skx)
        /* Keep a frame pointer and align %rsp to 64 bytes: the
           special-value path spills two ZMM vectors at 64(%rsp) and
           128(%rsp).  */
        pushq   %rbp
        cfi_def_cfa_offset(16)
        movq    %rsp, %rbp
        cfi_def_cfa(6, 16)
        cfi_offset(6, -16)
        andq    $-64, %rsp
        subq    $192, %rsp
        vmovups __svml_sasin_data_internal(%rip), %zmm4
        vmovups OneHalf+__svml_sasin_data_internal(%rip), %zmm6

        /* SQ ~ -2*sqrt(Y) */
        vmovups SmallNorm+__svml_sasin_data_internal(%rip), %zmm8
        vmovups Two+__svml_sasin_data_internal(%rip), %zmm12
        vmovups sqrt_coeff_1+__svml_sasin_data_internal(%rip), %zmm13
        vmovups One+__svml_sasin_data_internal(%rip), %zmm7
        vmovaps %zmm0, %zmm3

        /* x = |arg| */
        vandps  %zmm3, %zmm4, %zmm2
        vandnps %zmm3, %zmm4, %zmm1             /* zmm1 = sign bit of the argument.  */

        /* x^2 */
        vmulps  {rn-sae}, %zmm2, %zmm2, %zmm5
        vcmpps  $17, {sae}, %zmm2, %zmm7, %k0   /* k0: 1.0 < |x| -> scalar path.  */
        vcmpps  $21, {sae}, %zmm6, %zmm2, %k2   /* k2 (SelMask): |x| >= 0.5.  */
        vmovups poly_coeff_2+__svml_sasin_data_internal(%rip), %zmm7
        kmovw   %k0, %edx                       /* Save special-lane bitmask.  */

        /* Y = 0.5 - 0.5*x */
        vmovaps %zmm6, %zmm9
        vfnmadd231ps {rn-sae}, %zmm2, %zmm6, %zmm9
        vmovups poly_coeff_5+__svml_sasin_data_internal(%rip), %zmm6
        vrsqrt14ps %zmm9, %zmm10
        vcmpps  $17, {sae}, %zmm8, %zmm9, %k1   /* k1: Y < SmallNorm.  */
        vminps  {sae}, %zmm9, %zmm5, %zmm0      /* zmm0 = min(x^2, Y): poly argument.  */
        vmovups sqrt_coeff_2+__svml_sasin_data_internal(%rip), %zmm8
        vmovups poly_coeff_4+__svml_sasin_data_internal(%rip), %zmm5
        vxorps  %zmm10, %zmm10, %zmm10{%k1}     /* Zero rsqrt result where Y is tiny.  */
        vaddps  {rn-sae}, %zmm9, %zmm9, %zmm14
        vmulps  {rn-sae}, %zmm10, %zmm10, %zmm11
        vmulps  {rn-sae}, %zmm10, %zmm14, %zmm4
        vfmsub213ps {rn-sae}, %zmm12, %zmm11, %zmm14
        vmulps  {rn-sae}, %zmm14, %zmm4, %zmm15
        vfmadd231ps {rn-sae}, %zmm14, %zmm13, %zmm8
        vmovups poly_coeff_3+__svml_sasin_data_internal(%rip), %zmm14

        /* polynomial */
        vmovups poly_coeff_1+__svml_sasin_data_internal(%rip), %zmm13
        vfmsub213ps {rn-sae}, %zmm4, %zmm15, %zmm8
        vfmadd231ps {rn-sae}, %zmm0, %zmm14, %zmm5
        vfmadd231ps {rn-sae}, %zmm0, %zmm13, %zmm7
        vmulps  {rn-sae}, %zmm0, %zmm0, %zmm15
        vblendmps %zmm8, %zmm2, %zmm2{%k2}      /* R = SelMask ? sqrt path : |x|.  */
        vfmadd213ps {rn-sae}, %zmm5, %zmm15, %zmm7
        vfmadd213ps {rn-sae}, %zmm6, %zmm0, %zmm7
        vmulps  {rn-sae}, %zmm0, %zmm7, %zmm9
        vmovups Pi2H+__svml_sasin_data_internal(%rip), %zmm0
        vfmadd213ps {rn-sae}, %zmm2, %zmm2, %zmm9 /* R + R*Poly(R^2)*R^2.  */
        vaddps  {rn-sae}, %zmm0, %zmm9, %zmm9{%k2} /* + Pi/2 on the sqrt path only.  */
        vxorps  %zmm1, %zmm9, %zmm0             /* Reapply the argument's sign.  */
        testl   %edx, %edx

        /* Go to special inputs processing branch */
        jne     L(SPECIAL_VALUES_BRANCH)
        # LOE rbx r12 r13 r14 r15 edx zmm0 zmm3

        /* Restore registers
         * and exit the function
         */
L(EXIT):
        movq    %rbp, %rsp
        popq    %rbp
        cfi_def_cfa(7, 8)
        cfi_restore(6)
        ret
        cfi_def_cfa(6, 16)
        cfi_offset(6, -16)

        /* Branch to process
         * special inputs
         */
L(SPECIAL_VALUES_BRANCH):
        vmovups %zmm3, 64(%rsp)                 /* Spill original arguments.  */
        vmovups %zmm0, 128(%rsp)                /* Spill vector results.  */
        # LOE rbx r12 r13 r14 r15 edx zmm0
        xorl    %eax, %eax
        # LOE rbx r12 r13 r14 r15 eax edx
        vzeroupper
        movq    %r12, 16(%rsp)
        /* DW_CFA_expression: r12 (r12) (DW_OP_lit8; DW_OP_minus; DW_OP_const4s: -64; DW_OP_and; DW_OP_const4s: -176; DW_OP_plus) */
        .cfi_escape 0x10, 0x0c, 0x0e, 0x38, 0x1c, 0x0d, 0xc0, 0xff, 0xff, 0xff, 0x1a, 0x0d, 0x50, 0xff, 0xff, 0xff, 0x22
        movl    %eax, %r12d                     /* r12d: current lane index.  */
        movq    %r13, 8(%rsp)
        /* DW_CFA_expression: r13 (r13) (DW_OP_lit8; DW_OP_minus; DW_OP_const4s: -64; DW_OP_and; DW_OP_const4s: -184; DW_OP_plus) */
        .cfi_escape 0x10, 0x0d, 0x0e, 0x38, 0x1c, 0x0d, 0xc0, 0xff, 0xff, 0xff, 0x1a, 0x0d, 0x48, 0xff, 0xff, 0xff, 0x22
        movl    %edx, %r13d                     /* r13d: special-lane bitmask.  */
        movq    %r14, (%rsp)
        /* DW_CFA_expression: r14 (r14) (DW_OP_lit8; DW_OP_minus; DW_OP_const4s: -64; DW_OP_and; DW_OP_const4s: -192; DW_OP_plus) */
        .cfi_escape 0x10, 0x0e, 0x0e, 0x38, 0x1c, 0x0d, 0xc0, 0xff, 0xff, 0xff, 0x1a, 0x0d, 0x40, 0xff, 0xff, 0xff, 0x22
        # LOE rbx r15 r12d r13d

        /* Range mask
         * bits check
         */
L(RANGEMASK_CHECK):
        btl     %r12d, %r13d

        /* Call scalar math function */
        jc      L(SCALAR_MATH_CALL)
        # LOE rbx r15 r12d r13d

        /* Special inputs
         * processing loop
         */
L(SPECIAL_VALUES_LOOP):
        incl    %r12d
        cmpl    $16, %r12d                      /* 16 float lanes per ZMM vector.  */

        /* Check bits in range mask */
        jl      L(RANGEMASK_CHECK)
        # LOE rbx r15 r12d r13d
        movq    16(%rsp), %r12
        cfi_restore(12)
        movq    8(%rsp), %r13
        cfi_restore(13)
        movq    (%rsp), %r14
        cfi_restore(14)
        vmovups 128(%rsp), %zmm0                /* Reload merged results.  */

        /* Go to exit */
        jmp     L(EXIT)
        /* DW_CFA_expression: r12 (r12) (DW_OP_lit8; DW_OP_minus; DW_OP_const4s: -64; DW_OP_and; DW_OP_const4s: -176; DW_OP_plus) */
        .cfi_escape 0x10, 0x0c, 0x0e, 0x38, 0x1c, 0x0d, 0xc0, 0xff, 0xff, 0xff, 0x1a, 0x0d, 0x50, 0xff, 0xff, 0xff, 0x22
        /* DW_CFA_expression: r13 (r13) (DW_OP_lit8; DW_OP_minus; DW_OP_const4s: -64; DW_OP_and; DW_OP_const4s: -184; DW_OP_plus) */
        .cfi_escape 0x10, 0x0d, 0x0e, 0x38, 0x1c, 0x0d, 0xc0, 0xff, 0xff, 0xff, 0x1a, 0x0d, 0x48, 0xff, 0xff, 0xff, 0x22
        /* DW_CFA_expression: r14 (r14) (DW_OP_lit8; DW_OP_minus; DW_OP_const4s: -64; DW_OP_and; DW_OP_const4s: -192; DW_OP_plus) */
        .cfi_escape 0x10, 0x0e, 0x0e, 0x38, 0x1c, 0x0d, 0xc0, 0xff, 0xff, 0xff, 0x1a, 0x0d, 0x40, 0xff, 0xff, 0xff, 0x22
        # LOE rbx r12 r13 r14 r15 zmm0

        /* Scalar math function call
         * to process special input
         */
L(SCALAR_MATH_CALL):
        movl    %r12d, %r14d
        movss   64(%rsp,%r14,4), %xmm0          /* Original argument of this lane.  */
        call    asinf@PLT
        # LOE rbx r14 r15 r12d r13d xmm0
        movss   %xmm0, 128(%rsp,%r14,4)         /* Overwrite this lane's result.  */

        /* Process special inputs in loop */
        jmp     L(SPECIAL_VALUES_LOOP)
        # LOE rbx r15 r12d r13d
END(_ZGVeN16v_asinf_skx)
        .section .rodata, "a"
        .align 64

#ifdef __svml_sasin_data_internal_typedef
/* Reference-only C description of the table layout; the guard macro
   is never defined, so this block is documentation, not code.  */
typedef unsigned int VUINT32;
typedef struct {
__declspec(align(64)) VUINT32 AbsMask[16][1];
__declspec(align(64)) VUINT32 OneHalf[16][1];
__declspec(align(64)) VUINT32 SmallNorm[16][1];
__declspec(align(64)) VUINT32 One[16][1];
__declspec(align(64)) VUINT32 Two[16][1];
__declspec(align(64)) VUINT32 sqrt_coeff[2][16][1];
__declspec(align(64)) VUINT32 poly_coeff[5][16][1];
__declspec(align(64)) VUINT32 Pi2H[16][1];
} __svml_sasin_data_internal;
#endif
/* Broadcast constant table; each row is one value replicated across
   the 16 float lanes of a ZMM register.  */
__svml_sasin_data_internal:
        /*== AbsMask ==*/
        .long 0x7fffffff, 0x7fffffff, 0x7fffffff, 0x7fffffff, 0x7fffffff, 0x7fffffff, 0x7fffffff, 0x7fffffff, 0x7fffffff, 0x7fffffff, 0x7fffffff, 0x7fffffff, 0x7fffffff, 0x7fffffff, 0x7fffffff, 0x7fffffff
        /*== OneHalf ==*/
        .align 64
        .long 0x3f000000, 0x3f000000, 0x3f000000, 0x3f000000, 0x3f000000, 0x3f000000, 0x3f000000, 0x3f000000, 0x3f000000, 0x3f000000, 0x3f000000, 0x3f000000, 0x3f000000, 0x3f000000, 0x3f000000, 0x3f000000
        /*== SmallNorm ==*/
        .align 64
        .long 0x2f800000, 0x2f800000, 0x2f800000, 0x2f800000, 0x2f800000, 0x2f800000, 0x2f800000, 0x2f800000, 0x2f800000, 0x2f800000, 0x2f800000, 0x2f800000, 0x2f800000, 0x2f800000, 0x2f800000, 0x2f800000
        /*== One ==*/
        .align 64
        .long 0x3f800000, 0x3f800000, 0x3f800000, 0x3f800000, 0x3f800000, 0x3f800000, 0x3f800000, 0x3f800000, 0x3f800000, 0x3f800000, 0x3f800000, 0x3f800000, 0x3f800000, 0x3f800000, 0x3f800000, 0x3f800000
        /*== Two ==*/
        .align 64
        .long 0x40000000, 0x40000000, 0x40000000, 0x40000000, 0x40000000, 0x40000000, 0x40000000, 0x40000000, 0x40000000, 0x40000000, 0x40000000, 0x40000000, 0x40000000, 0x40000000, 0x40000000, 0x40000000
        /*== sqrt_coeff[2] ==*/
        .align 64
        .long 0xbdC00004, 0xbdC00004, 0xbdC00004, 0xbdC00004, 0xbdC00004, 0xbdC00004, 0xbdC00004, 0xbdC00004, 0xbdC00004, 0xbdC00004, 0xbdC00004, 0xbdC00004, 0xbdC00004, 0xbdC00004, 0xbdC00004, 0xbdC00004 /* sqrt_coeff2 */
        .long 0x3e800001, 0x3e800001, 0x3e800001, 0x3e800001, 0x3e800001, 0x3e800001, 0x3e800001, 0x3e800001, 0x3e800001, 0x3e800001, 0x3e800001, 0x3e800001, 0x3e800001, 0x3e800001, 0x3e800001, 0x3e800001 /* sqrt_coeff1 */
        /*== poly_coeff[5] ==*/
        .align 64
        .long 0x3d2EDC07, 0x3d2EDC07, 0x3d2EDC07, 0x3d2EDC07, 0x3d2EDC07, 0x3d2EDC07, 0x3d2EDC07, 0x3d2EDC07, 0x3d2EDC07, 0x3d2EDC07, 0x3d2EDC07, 0x3d2EDC07, 0x3d2EDC07, 0x3d2EDC07, 0x3d2EDC07, 0x3d2EDC07 /* poly_coeff5 */
        .long 0x3CC32A6B, 0x3CC32A6B, 0x3CC32A6B, 0x3CC32A6B, 0x3CC32A6B, 0x3CC32A6B, 0x3CC32A6B, 0x3CC32A6B, 0x3CC32A6B, 0x3CC32A6B, 0x3CC32A6B, 0x3CC32A6B, 0x3CC32A6B, 0x3CC32A6B, 0x3CC32A6B, 0x3CC32A6B /* poly_coeff4 */
        .long 0x3d3A9AB4, 0x3d3A9AB4, 0x3d3A9AB4, 0x3d3A9AB4, 0x3d3A9AB4, 0x3d3A9AB4, 0x3d3A9AB4, 0x3d3A9AB4, 0x3d3A9AB4, 0x3d3A9AB4, 0x3d3A9AB4, 0x3d3A9AB4, 0x3d3A9AB4, 0x3d3A9AB4, 0x3d3A9AB4, 0x3d3A9AB4 /* poly_coeff3 */
        .long 0x3d997C12, 0x3d997C12, 0x3d997C12, 0x3d997C12, 0x3d997C12, 0x3d997C12, 0x3d997C12, 0x3d997C12, 0x3d997C12, 0x3d997C12, 0x3d997C12, 0x3d997C12, 0x3d997C12, 0x3d997C12, 0x3d997C12, 0x3d997C12 /* poly_coeff2 */
        .long 0x3e2AAAFF, 0x3e2AAAFF, 0x3e2AAAFF, 0x3e2AAAFF, 0x3e2AAAFF, 0x3e2AAAFF, 0x3e2AAAFF, 0x3e2AAAFF, 0x3e2AAAFF, 0x3e2AAAFF, 0x3e2AAAFF, 0x3e2AAAFF, 0x3e2AAAFF, 0x3e2AAAFF, 0x3e2AAAFF, 0x3e2AAAFF /* poly_coeff1 */
        /*== Pi2H ==*/
        .align 64
        .long 0x3fc90FDB, 0x3fc90FDB, 0x3fc90FDB, 0x3fc90FDB, 0x3fc90FDB, 0x3fc90FDB, 0x3fc90FDB, 0x3fc90FDB, 0x3fc90FDB, 0x3fc90FDB, 0x3fc90FDB, 0x3fc90FDB, 0x3fc90FDB, 0x3fc90FDB, 0x3fc90FDB, 0x3fc90FDB
        .align 64
        .type __svml_sasin_data_internal,@object
        .size __svml_sasin_data_internal,.-__svml_sasin_data_internal

View File

@ -0,0 +1,20 @@
/* SSE2 version of vectorized asinf, vector length is 4.
Copyright (C) 2021 Free Software Foundation, Inc.
This file is part of the GNU C Library.
The GNU C Library is free software; you can redistribute it and/or
modify it under the terms of the GNU Lesser General Public
License as published by the Free Software Foundation; either
version 2.1 of the License, or (at your option) any later version.
The GNU C Library is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
Lesser General Public License for more details.
You should have received a copy of the GNU Lesser General Public
License along with the GNU C Library; if not, see
<https://www.gnu.org/licenses/>. */
/* Build the generic 4-lane core with its symbol renamed to the SSE2
   variant, used when SSE4.1 is unavailable.  */
#define _ZGVbN4v_asinf _ZGVbN4v_asinf_sse2
#include "../svml_s_asinf4_core.S"

View File

@ -0,0 +1,28 @@
/* Multiple versions of vectorized asinf, vector length is 4.
Copyright (C) 2021 Free Software Foundation, Inc.
This file is part of the GNU C Library.
The GNU C Library is free software; you can redistribute it and/or
modify it under the terms of the GNU Lesser General Public
License as published by the Free Software Foundation; either
version 2.1 of the License, or (at your option) any later version.
The GNU C Library is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
Lesser General Public License for more details.
You should have received a copy of the GNU Lesser General Public
License along with the GNU C Library; if not, see
<https://www.gnu.org/licenses/>. */
/* Select the best _ZGVbN4v_asinf (SSE, 4 floats/call) implementation
   at load time via an ifunc.  */
#define SYMBOL_NAME _ZGVbN4v_asinf
#include "ifunc-mathvec-sse4_1.h"
libc_ifunc_redirected (REDIRECT_NAME, SYMBOL_NAME, IFUNC_SELECTOR ());
#ifdef SHARED
/* Bind internal calls directly to the resolved symbol.  */
__hidden_ver1 (_ZGVbN4v_asinf, __GI__ZGVbN4v_asinf,
               __redirect__ZGVbN4v_asinf)
  __attribute__ ((visibility ("hidden")));
#endif

View File

@ -0,0 +1,252 @@
/* Function asinf vectorized with SSE4.
Copyright (C) 2021 Free Software Foundation, Inc.
This file is part of the GNU C Library.
The GNU C Library is free software; you can redistribute it and/or
modify it under the terms of the GNU Lesser General Public
License as published by the Free Software Foundation; either
version 2.1 of the License, or (at your option) any later version.
The GNU C Library is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
Lesser General Public License for more details.
You should have received a copy of the GNU Lesser General Public
License along with the GNU C Library; if not, see
<https://www.gnu.org/licenses/>. */
/*
* ALGORITHM DESCRIPTION:
*
* SelMask = (|x| >= 0.5) ? 1 : 0;
* R = SelMask ? sqrt(0.5 - 0.5*|x|) : |x|
* asin(x) = (SelMask ? (Pi/2 - 2*Poly(R)) : Poly(R))*(-1)^sign(x)
*
*
*/
/* Offsets for data table __svml_sasin_data_internal.
   Each field is one 16-byte (XMM-wide) vector of broadcast constants;
   the values are byte distances from the start of the table defined
   in .rodata below.  sqrt_coeff and poly_coeff are arrays of such
   vectors, indexed with explicit +16/+32/... displacements in code.  */
#define AbsMask 0
#define OneHalf 16
#define SmallNorm 32
#define One 48
#define Two 64
#define sqrt_coeff 80
#define poly_coeff 112
#define Pi2H 192
#include <sysdep.h>

        .text
        .section .text.sse4,"ax",@progbits

/* _ZGVbN4v_asinf_sse4 -- SSE4 vector asinf.
   In:   %xmm0 = x[0..3] (floats)
   Out:  %xmm0 = asinf(x[0..3])
   Lanes with 1.0f < |x| are recorded in a bitmask and recomputed one
   at a time through the scalar asinf (see SPECIAL_VALUES_BRANCH).  */
ENTRY(_ZGVbN4v_asinf_sse4)
        /* 72 bytes of locals: argument/result spill slots at 32(%rsp)
           and 48(%rsp) plus saved r12-r14 for the scalar path.  */
        subq    $72, %rsp
        cfi_def_cfa_offset(80)
        movaps  %xmm0, %xmm2
        movups  __svml_sasin_data_internal(%rip), %xmm1   /* AbsMask.  */
        movups  OneHalf+__svml_sasin_data_internal(%rip), %xmm5

        /* x = |arg| */
        movaps  %xmm1, %xmm0
        andps   %xmm2, %xmm0

        /* Y = 0.5 - 0.5*x */
        movaps  %xmm5, %xmm3
        mulps   %xmm0, %xmm3
        movaps  %xmm5, %xmm8

        /* x^2 */
        movaps  %xmm0, %xmm14
        movaps  %xmm0, %xmm15
        mulps   %xmm0, %xmm14
        subps   %xmm3, %xmm8
        cmpnltps %xmm5, %xmm15                  /* xmm15 (SelMask): |x| >= 0.5.  */

        /* SQ ~ -2*sqrt(Y) */
        rsqrtps %xmm8, %xmm6
        minps   %xmm8, %xmm14                   /* xmm14 = min(x^2, Y): poly argument.  */
        movaps  %xmm8, %xmm9
        movaps  %xmm14, %xmm10
        cmpltps SmallNorm+__svml_sasin_data_internal(%rip), %xmm9
        mulps   %xmm14, %xmm10
        addps   %xmm8, %xmm8
        andnps  %xmm6, %xmm9                    /* Zero rsqrt result where Y is tiny.  */
        movaps  %xmm15, %xmm3
        movaps  %xmm9, %xmm7
        andnps  %xmm0, %xmm3                    /* |x| kept on the small-|x| path.  */
        mulps   %xmm9, %xmm7
        andnps  %xmm2, %xmm1                    /* xmm1 = sign bit of the argument.  */
        mulps   %xmm8, %xmm9
        mulps   %xmm7, %xmm8

        /* polynomial */
        movups  poly_coeff+__svml_sasin_data_internal(%rip), %xmm11
        mulps   %xmm14, %xmm11
        subps   Two+__svml_sasin_data_internal(%rip), %xmm8
        movups  poly_coeff+32+__svml_sasin_data_internal(%rip), %xmm12
        mulps   %xmm14, %xmm12
        addps   poly_coeff+16+__svml_sasin_data_internal(%rip), %xmm11
        mulps   %xmm10, %xmm11
        addps   poly_coeff+48+__svml_sasin_data_internal(%rip), %xmm12
        movups  sqrt_coeff+__svml_sasin_data_internal(%rip), %xmm13
        addps   %xmm11, %xmm12
        mulps   %xmm8, %xmm13
        mulps   %xmm9, %xmm8
        mulps   %xmm14, %xmm12
        addps   sqrt_coeff+16+__svml_sasin_data_internal(%rip), %xmm13
        addps   poly_coeff+64+__svml_sasin_data_internal(%rip), %xmm12
        mulps   %xmm8, %xmm13
        mulps   %xmm12, %xmm14
        subps   %xmm9, %xmm13
        andps   %xmm15, %xmm13                  /* Keep sqrt-path R only where SelMask.  */
        orps    %xmm13, %xmm3                   /* R = SelMask ? sqrt path : |x|.  */
        mulps   %xmm3, %xmm14
        movups  One+__svml_sasin_data_internal(%rip), %xmm4
        addps   %xmm14, %xmm3                   /* R + R*Poly.  */
        cmpltps %xmm0, %xmm4                    /* xmm4: 1.0 < |x| -> scalar path.  */
        movups  Pi2H+__svml_sasin_data_internal(%rip), %xmm0
        andps   %xmm15, %xmm0                   /* Pi/2 only on the sqrt path.  */
        movmskps %xmm4, %edx                    /* Special-lane bitmask.  */
        addps   %xmm3, %xmm0
        pxor    %xmm1, %xmm0                    /* Reapply the argument's sign.  */
        testl   %edx, %edx

        /* Go to special inputs processing branch */
        jne     L(SPECIAL_VALUES_BRANCH)
        # LOE rbx rbp r12 r13 r14 r15 edx xmm0 xmm2

        /* Restore registers
         * and exit the function
         */
L(EXIT):
        addq    $72, %rsp
        cfi_def_cfa_offset(8)
        ret
        cfi_def_cfa_offset(80)

        /* Branch to process
         * special inputs
         */
L(SPECIAL_VALUES_BRANCH):
        movups  %xmm2, 32(%rsp)                 /* Spill original arguments.  */
        movups  %xmm0, 48(%rsp)                 /* Spill vector results.  */
        # LOE rbx rbp r12 r13 r14 r15 edx
        xorl    %eax, %eax
        movq    %r12, 16(%rsp)
        cfi_offset(12, -64)
        movl    %eax, %r12d                     /* r12d: current lane index.  */
        movq    %r13, 8(%rsp)
        cfi_offset(13, -72)
        movl    %edx, %r13d                     /* r13d: special-lane bitmask.  */
        movq    %r14, (%rsp)
        cfi_offset(14, -80)
        # LOE rbx rbp r15 r12d r13d

        /* Range mask
         * bits check
         */
L(RANGEMASK_CHECK):
        btl     %r12d, %r13d

        /* Call scalar math function */
        jc      L(SCALAR_MATH_CALL)
        # LOE rbx rbp r15 r12d r13d

        /* Special inputs
         * processing loop
         */
L(SPECIAL_VALUES_LOOP):
        incl    %r12d
        cmpl    $4, %r12d                       /* 4 float lanes per XMM vector.  */

        /* Check bits in range mask */
        jl      L(RANGEMASK_CHECK)
        # LOE rbx rbp r15 r12d r13d
        movq    16(%rsp), %r12
        cfi_restore(12)
        movq    8(%rsp), %r13
        cfi_restore(13)
        movq    (%rsp), %r14
        cfi_restore(14)
        movups  48(%rsp), %xmm0                 /* Reload merged results.  */

        /* Go to exit */
        jmp     L(EXIT)
        cfi_offset(12, -64)
        cfi_offset(13, -72)
        cfi_offset(14, -80)
        # LOE rbx rbp r12 r13 r14 r15 xmm0

        /* Scalar math function call
         * to process special input
         */
L(SCALAR_MATH_CALL):
        movl    %r12d, %r14d
        movss   32(%rsp,%r14,4), %xmm0          /* Original argument of this lane.  */
        call    asinf@PLT
        # LOE rbx rbp r14 r15 r12d r13d xmm0
        movss   %xmm0, 48(%rsp,%r14,4)          /* Overwrite this lane's result.  */

        /* Process special inputs in loop */
        jmp     L(SPECIAL_VALUES_LOOP)
        # LOE rbx rbp r15 r12d r13d
END(_ZGVbN4v_asinf_sse4)
        .section .rodata, "a"
        .align 16

#ifdef __svml_sasin_data_internal_typedef
/* Reference-only C description of the table layout; the guard macro
   is never defined, so this block is documentation, not code.  */
typedef unsigned int VUINT32;
typedef struct {
__declspec(align(16)) VUINT32 AbsMask[4][1];
__declspec(align(16)) VUINT32 OneHalf[4][1];
__declspec(align(16)) VUINT32 SmallNorm[4][1];
__declspec(align(16)) VUINT32 One[4][1];
__declspec(align(16)) VUINT32 Two[4][1];
__declspec(align(16)) VUINT32 sqrt_coeff[2][4][1];
__declspec(align(16)) VUINT32 poly_coeff[5][4][1];
__declspec(align(16)) VUINT32 Pi2H[4][1];
} __svml_sasin_data_internal;
#endif
/* Broadcast constant table; each row is one value replicated across
   the 4 float lanes of an XMM register.  */
__svml_sasin_data_internal:
        /*== AbsMask ==*/
        .long 0x7fffffff, 0x7fffffff, 0x7fffffff, 0x7fffffff
        /*== OneHalf ==*/
        .align 16
        .long 0x3f000000, 0x3f000000, 0x3f000000, 0x3f000000
        /*== SmallNorm ==*/
        .align 16
        .long 0x2f800000, 0x2f800000, 0x2f800000, 0x2f800000
        /*== One ==*/
        .align 16
        .long 0x3f800000, 0x3f800000, 0x3f800000, 0x3f800000
        /*== Two ==*/
        .align 16
        .long 0x40000000, 0x40000000, 0x40000000, 0x40000000
        /*== sqrt_coeff[2] ==*/
        .align 16
        .long 0xbdC00004, 0xbdC00004, 0xbdC00004, 0xbdC00004 /* sqrt_coeff2 */
        .long 0x3e800001, 0x3e800001, 0x3e800001, 0x3e800001 /* sqrt_coeff1 */
        /*== poly_coeff[5] ==*/
        .align 16
        .long 0x3d2EDC07, 0x3d2EDC07, 0x3d2EDC07, 0x3d2EDC07 /* poly_coeff5 */
        .long 0x3CC32A6B, 0x3CC32A6B, 0x3CC32A6B, 0x3CC32A6B /* poly_coeff4 */
        .long 0x3d3A9AB4, 0x3d3A9AB4, 0x3d3A9AB4, 0x3d3A9AB4 /* poly_coeff3 */
        .long 0x3d997C12, 0x3d997C12, 0x3d997C12, 0x3d997C12 /* poly_coeff2 */
        .long 0x3e2AAAFF, 0x3e2AAAFF, 0x3e2AAAFF, 0x3e2AAAFF /* poly_coeff1 */
        /*== Pi2H ==*/
        .align 16
        .long 0x3fc90FDB, 0x3fc90FDB, 0x3fc90FDB, 0x3fc90FDB
        .align 16
        .type __svml_sasin_data_internal,@object
        .size __svml_sasin_data_internal,.-__svml_sasin_data_internal

View File

@ -0,0 +1,20 @@
/* SSE version of vectorized asinf, vector length is 8.
Copyright (C) 2021 Free Software Foundation, Inc.
This file is part of the GNU C Library.
The GNU C Library is free software; you can redistribute it and/or
modify it under the terms of the GNU Lesser General Public
License as published by the Free Software Foundation; either
version 2.1 of the License, or (at your option) any later version.
The GNU C Library is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
Lesser General Public License for more details.
You should have received a copy of the GNU Lesser General Public
License along with the GNU C Library; if not, see
<https://www.gnu.org/licenses/>. */
/* Build the generic 8-lane core with its symbol renamed to the
   SSE-wrapper variant, used when the AVX2 kernel is unavailable.  */
#define _ZGVdN8v_asinf _ZGVdN8v_asinf_sse_wrapper
#include "../svml_s_asinf8_core.S"

View File

@ -0,0 +1,28 @@
/* Multiple versions of vectorized asinf, vector length is 8.
Copyright (C) 2021 Free Software Foundation, Inc.
This file is part of the GNU C Library.
The GNU C Library is free software; you can redistribute it and/or
modify it under the terms of the GNU Lesser General Public
License as published by the Free Software Foundation; either
version 2.1 of the License, or (at your option) any later version.
The GNU C Library is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
Lesser General Public License for more details.
You should have received a copy of the GNU Lesser General Public
License along with the GNU C Library; if not, see
<https://www.gnu.org/licenses/>. */
/* IFUNC dispatcher for the 8-lane float asinf: at load time select
   the AVX2 implementation when available, otherwise fall back to the
   SSE wrapper (renamed in the *-sse.S companion file).  */
#define SYMBOL_NAME _ZGVdN8v_asinf
#include "ifunc-mathvec-avx2.h"
libc_ifunc_redirected (REDIRECT_NAME, SYMBOL_NAME, IFUNC_SELECTOR ());
/* For shared builds, give intra-libmvec callers a hidden alias so
   internal calls bypass the PLT.  */
#ifdef SHARED
__hidden_ver1 (_ZGVdN8v_asinf, __GI__ZGVdN8v_asinf,
__redirect__ZGVdN8v_asinf)
__attribute__ ((visibility ("hidden")));
#endif

View File

@ -0,0 +1,249 @@
/* Function asinf vectorized with AVX2.
Copyright (C) 2021 Free Software Foundation, Inc.
This file is part of the GNU C Library.
The GNU C Library is free software; you can redistribute it and/or
modify it under the terms of the GNU Lesser General Public
License as published by the Free Software Foundation; either
version 2.1 of the License, or (at your option) any later version.
The GNU C Library is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
Lesser General Public License for more details.
You should have received a copy of the GNU Lesser General Public
License along with the GNU C Library; if not, see
<https://www.gnu.org/licenses/>. */
/*
* ALGORITHM DESCRIPTION:
*
* SelMask = (|x| >= 0.5) ? 1 : 0;
* R = SelMask ? sqrt(0.5 - 0.5*|x|) : |x|
* asin(x) = (SelMask ? (Pi/2 - 2*Poly(R)) : Poly(R))*(-1)^sign(x)
*
*
*/
/* Byte offsets into the constant table __svml_sasin_data_internal
   defined at the end of this file.  Each scalar constant is
   replicated across a full 256-bit vector, hence the 32-byte
   stride; sqrt_coeff holds 2 vectors (64 bytes) and poly_coeff
   holds 5 vectors (160 bytes).  */
#define AbsMask 0
#define OneHalf 32
#define SmallNorm 64
#define One 96
#define Two 128
#define sqrt_coeff 160
#define poly_coeff 224
#define Pi2H 384
#include <sysdep.h>
.text
.section .text.avx2,"ax",@progbits
/*
 * _ZGVdN8v_asinf_avx2: 8-lane single-precision asin, AVX2.
 * In:  ymm0 = x (8 packed floats).
 * Out: ymm0 = asinf(x) per lane.
 * Lanes with |x| > 1 are detected via a mask in %edx and reprocessed
 * one at a time through the scalar asinf in the special-values path.
 * The "# LOE" lines are tool-generated live-on-exit register
 * annotations; they are comments, not code.
 */
ENTRY(_ZGVdN8v_asinf_avx2)
/* Frame setup: align %rsp to 32 bytes for ymm spills and reserve
   96 bytes of scratch (saved GPRs at 0/8/16(%rsp), input copy at
   32(%rsp), result copy at 64(%rsp)).  */
pushq %rbp
cfi_def_cfa_offset(16)
movq %rsp, %rbp
cfi_def_cfa(6, 16)
cfi_offset(6, -16)
andq $-32, %rsp
subq $96, %rsp
/* ymm5 = AbsMask, ymm9 = 0.5, ymm6 = 1.0 */
vmovups __svml_sasin_data_internal(%rip), %ymm5
vmovups OneHalf+__svml_sasin_data_internal(%rip), %ymm9
vmovups One+__svml_sasin_data_internal(%rip), %ymm6
/* keep the original argument in ymm4 for the special-values path */
vmovaps %ymm0, %ymm4
/* x = |arg| */
vandps %ymm4, %ymm5, %ymm3
/* Y = 0.5 - 0.5*x */
vmovaps %ymm9, %ymm12
vfnmadd231ps %ymm3, %ymm9, %ymm12
/* x^2 */
vmulps %ymm3, %ymm3, %ymm7
/* ymm8 = special-lane mask: 1.0 < x, i.e. |arg| > 1 (or NaN order
   fails; NOTE(review): NaN lanes are presumed routed to the scalar
   path via the unordered SelMask below -- confirm).  */
vcmplt_oqps %ymm3, %ymm6, %ymm8
/* SQ ~ -2*sqrt(Y) */
/* ymm10 flags Y < SmallNorm; used below to zero the rsqrt
   estimate for such lanes */
vcmplt_oqps SmallNorm+__svml_sasin_data_internal(%rip), %ymm12, %ymm10
/* ymm1 = min(x^2, Y): equals x^2 when |x| < 0.5 (x^2 <= 0.25 <= Y)
   and Y otherwise -- the polynomial argument R^2 of the algorithm */
vminps %ymm12, %ymm7, %ymm1
/* ymm15 = 2*Y */
vaddps %ymm12, %ymm12, %ymm15
/* SelMask (ymm0): x not-less-than 0.5, unordered-true */
vcmpnlt_uqps %ymm9, %ymm3, %ymm0
/* initial rsqrt(Y) estimate */
vrsqrtps %ymm12, %ymm11
vmovups poly_coeff+64+__svml_sasin_data_internal(%rip), %ymm7
/* ymm6 = (R^2)^2 for Estrin-style evaluation */
vmulps %ymm1, %ymm1, %ymm6
vmovups sqrt_coeff+__svml_sasin_data_internal(%rip), %ymm9
vfmadd213ps poly_coeff+96+__svml_sasin_data_internal(%rip), %ymm1, %ymm7
/* edx = bitmask of special lanes */
vmovmskps %ymm8, %edx
/* polynomial */
vmovups poly_coeff+__svml_sasin_data_internal(%rip), %ymm8
/* zero the rsqrt estimate where Y < SmallNorm */
vandnps %ymm11, %ymm10, %ymm13
vmulps %ymm13, %ymm13, %ymm14
vfmadd213ps poly_coeff+32+__svml_sasin_data_internal(%rip), %ymm1, %ymm8
/* ymm2 = sign bit of the original argument */
vandnps %ymm4, %ymm5, %ymm2
vmulps %ymm15, %ymm13, %ymm5
/* Newton-Raphson-style refinement of the sqrt path using Two and
   sqrt_coeff */
vfmsub213ps Two+__svml_sasin_data_internal(%rip), %ymm14, %ymm15
vfmadd213ps %ymm7, %ymm6, %ymm8
vfmadd213ps sqrt_coeff+32+__svml_sasin_data_internal(%rip), %ymm15, %ymm9
vmulps %ymm15, %ymm5, %ymm15
vfmadd213ps poly_coeff+128+__svml_sasin_data_internal(%rip), %ymm1, %ymm8
vfmsub213ps %ymm5, %ymm15, %ymm9
/* ymm1 = poly(R^2) * R^2 */
vmulps %ymm8, %ymm1, %ymm1
/* R (ymm3) = SelMask ? refined sqrt-path value (ymm9) : x */
vblendvps %ymm0, %ymm9, %ymm3, %ymm3
/* result core = R + R*poly */
vfmadd213ps %ymm3, %ymm3, %ymm1
/* add Pi/2 only on the |x| >= 0.5 path */
vandps Pi2H+__svml_sasin_data_internal(%rip), %ymm0, %ymm0
vaddps %ymm1, %ymm0, %ymm10
/* reapply the argument's sign */
vxorps %ymm2, %ymm10, %ymm0
testl %edx, %edx
/* Go to special inputs processing branch */
jne L(SPECIAL_VALUES_BRANCH)
# LOE rbx r12 r13 r14 r15 edx ymm0 ymm4
/* Restore registers
 * and exit the function
 */
L(EXIT):
movq %rbp, %rsp
popq %rbp
cfi_def_cfa(7, 8)
cfi_restore(6)
ret
cfi_def_cfa(6, 16)
cfi_offset(6, -16)
/* Branch to process
 * special inputs
 */
L(SPECIAL_VALUES_BRANCH):
/* spill vector input and the partial vector result */
vmovups %ymm4, 32(%rsp)
vmovups %ymm0, 64(%rsp)
# LOE rbx r12 r13 r14 r15 edx ymm0
/* r12d = lane index, r13d = special-lane bitmask */
xorl %eax, %eax
# LOE rbx r12 r13 r14 r15 eax edx
vzeroupper
movq %r12, 16(%rsp)
/* DW_CFA_expression: r12 (r12) (DW_OP_lit8; DW_OP_minus; DW_OP_const4s: -32; DW_OP_and; DW_OP_const4s: -80; DW_OP_plus) */
.cfi_escape 0x10, 0x0c, 0x0e, 0x38, 0x1c, 0x0d, 0xe0, 0xff, 0xff, 0xff, 0x1a, 0x0d, 0xb0, 0xff, 0xff, 0xff, 0x22
movl %eax, %r12d
movq %r13, 8(%rsp)
/* DW_CFA_expression: r13 (r13) (DW_OP_lit8; DW_OP_minus; DW_OP_const4s: -32; DW_OP_and; DW_OP_const4s: -88; DW_OP_plus) */
.cfi_escape 0x10, 0x0d, 0x0e, 0x38, 0x1c, 0x0d, 0xe0, 0xff, 0xff, 0xff, 0x1a, 0x0d, 0xa8, 0xff, 0xff, 0xff, 0x22
movl %edx, %r13d
movq %r14, (%rsp)
/* DW_CFA_expression: r14 (r14) (DW_OP_lit8; DW_OP_minus; DW_OP_const4s: -32; DW_OP_and; DW_OP_const4s: -96; DW_OP_plus) */
.cfi_escape 0x10, 0x0e, 0x0e, 0x38, 0x1c, 0x0d, 0xe0, 0xff, 0xff, 0xff, 0x1a, 0x0d, 0xa0, 0xff, 0xff, 0xff, 0x22
# LOE rbx r15 r12d r13d
/* Range mask
 * bits check
 */
L(RANGEMASK_CHECK):
/* test bit r12d of the special-lane mask */
btl %r12d, %r13d
/* Call scalar math function */
jc L(SCALAR_MATH_CALL)
# LOE rbx r15 r12d r13d
/* Special inputs
 * processing loop
 */
L(SPECIAL_VALUES_LOOP):
incl %r12d
/* 8 lanes in a 256-bit float vector */
cmpl $8, %r12d
/* Check bits in range mask */
jl L(RANGEMASK_CHECK)
# LOE rbx r15 r12d r13d
movq 16(%rsp), %r12
cfi_restore(12)
movq 8(%rsp), %r13
cfi_restore(13)
movq (%rsp), %r14
cfi_restore(14)
/* reload the now fully patched result vector */
vmovups 64(%rsp), %ymm0
/* Go to exit */
jmp L(EXIT)
/* DW_CFA_expression: r12 (r12) (DW_OP_lit8; DW_OP_minus; DW_OP_const4s: -32; DW_OP_and; DW_OP_const4s: -80; DW_OP_plus) */
.cfi_escape 0x10, 0x0c, 0x0e, 0x38, 0x1c, 0x0d, 0xe0, 0xff, 0xff, 0xff, 0x1a, 0x0d, 0xb0, 0xff, 0xff, 0xff, 0x22
/* DW_CFA_expression: r13 (r13) (DW_OP_lit8; DW_OP_minus; DW_OP_const4s: -32; DW_OP_and; DW_OP_const4s: -88; DW_OP_plus) */
.cfi_escape 0x10, 0x0d, 0x0e, 0x38, 0x1c, 0x0d, 0xe0, 0xff, 0xff, 0xff, 0x1a, 0x0d, 0xa8, 0xff, 0xff, 0xff, 0x22
/* DW_CFA_expression: r14 (r14) (DW_OP_lit8; DW_OP_minus; DW_OP_const4s: -32; DW_OP_and; DW_OP_const4s: -96; DW_OP_plus) */
.cfi_escape 0x10, 0x0e, 0x0e, 0x38, 0x1c, 0x0d, 0xe0, 0xff, 0xff, 0xff, 0x1a, 0x0d, 0xa0, 0xff, 0xff, 0xff, 0x22
# LOE rbx r12 r13 r14 r15 ymm0
/* Scalar math function call
 * to process special input
 */
L(SCALAR_MATH_CALL):
movl %r12d, %r14d
/* load lane r14 of the saved input, call scalar asinf, store the
   scalar result back into lane r14 of the saved output */
movss 32(%rsp,%r14,4), %xmm0
call asinf@PLT
# LOE rbx r14 r15 r12d r13d xmm0
movss %xmm0, 64(%rsp,%r14,4)
/* Process special inputs in loop */
jmp L(SPECIAL_VALUES_LOOP)
# LOE rbx r15 r12d r13d
END(_ZGVdN8v_asinf_avx2)
.section .rodata, "a"
.align 32
/* Reference layout of the constant table below.  This typedef is
   never compiled (the guard macro is not defined anywhere); it only
   documents the one-32-byte-vector-per-entry structure that the
   offset #defines at the top of the file index into.  */
#ifdef __svml_sasin_data_internal_typedef
typedef unsigned int VUINT32;
typedef struct {
__declspec(align(32)) VUINT32 AbsMask[8][1];
__declspec(align(32)) VUINT32 OneHalf[8][1];
__declspec(align(32)) VUINT32 SmallNorm[8][1];
__declspec(align(32)) VUINT32 One[8][1];
__declspec(align(32)) VUINT32 Two[8][1];
__declspec(align(32)) VUINT32 sqrt_coeff[2][8][1];
__declspec(align(32)) VUINT32 poly_coeff[5][8][1];
__declspec(align(32)) VUINT32 Pi2H[8][1];
} __svml_sasin_data_internal;
#endif
__svml_sasin_data_internal:
/*== AbsMask ==*/
.long 0x7fffffff, 0x7fffffff, 0x7fffffff, 0x7fffffff, 0x7fffffff, 0x7fffffff, 0x7fffffff, 0x7fffffff
/*== OneHalf ==*/
.align 32
.long 0x3f000000, 0x3f000000, 0x3f000000, 0x3f000000, 0x3f000000, 0x3f000000, 0x3f000000, 0x3f000000
/*== SmallNorm ==*/
.align 32
.long 0x2f800000, 0x2f800000, 0x2f800000, 0x2f800000, 0x2f800000, 0x2f800000, 0x2f800000, 0x2f800000
/*== One ==*/
.align 32
.long 0x3f800000, 0x3f800000, 0x3f800000, 0x3f800000, 0x3f800000, 0x3f800000, 0x3f800000, 0x3f800000
/*== Two ==*/
.align 32
.long 0x40000000, 0x40000000, 0x40000000, 0x40000000, 0x40000000, 0x40000000, 0x40000000, 0x40000000
/*== sqrt_coeff[2] ==*/
/* NOTE: stored highest-order coefficient first (coeff2, then coeff1);
   the same order is used by the fma chain in the code above.  */
.align 32
.long 0xbdC00004, 0xbdC00004, 0xbdC00004, 0xbdC00004, 0xbdC00004, 0xbdC00004, 0xbdC00004, 0xbdC00004 /* sqrt_coeff2 */
.long 0x3e800001, 0x3e800001, 0x3e800001, 0x3e800001, 0x3e800001, 0x3e800001, 0x3e800001, 0x3e800001 /* sqrt_coeff1 */
/*== poly_coeff[5] ==*/
/* Stored highest-order first: poly_coeff+0 is coeff5, poly_coeff+128
   is coeff1.  */
.align 32
.long 0x3d2EDC07, 0x3d2EDC07, 0x3d2EDC07, 0x3d2EDC07, 0x3d2EDC07, 0x3d2EDC07, 0x3d2EDC07, 0x3d2EDC07 /* poly_coeff5 */
.long 0x3CC32A6B, 0x3CC32A6B, 0x3CC32A6B, 0x3CC32A6B, 0x3CC32A6B, 0x3CC32A6B, 0x3CC32A6B, 0x3CC32A6B /* poly_coeff4 */
.long 0x3d3A9AB4, 0x3d3A9AB4, 0x3d3A9AB4, 0x3d3A9AB4, 0x3d3A9AB4, 0x3d3A9AB4, 0x3d3A9AB4, 0x3d3A9AB4 /* poly_coeff3 */
.long 0x3d997C12, 0x3d997C12, 0x3d997C12, 0x3d997C12, 0x3d997C12, 0x3d997C12, 0x3d997C12, 0x3d997C12 /* poly_coeff2 */
.long 0x3e2AAAFF, 0x3e2AAAFF, 0x3e2AAAFF, 0x3e2AAAFF, 0x3e2AAAFF, 0x3e2AAAFF, 0x3e2AAAFF, 0x3e2AAAFF /* poly_coeff1 */
/*== Pi2H ==*/
/* single-precision pi/2 */
.align 32
.long 0x3fc90FDB, 0x3fc90FDB, 0x3fc90FDB, 0x3fc90FDB, 0x3fc90FDB, 0x3fc90FDB, 0x3fc90FDB, 0x3fc90FDB
.align 32
.type __svml_sasin_data_internal,@object
.size __svml_sasin_data_internal,.-__svml_sasin_data_internal

View File

@ -0,0 +1,29 @@
/* Function asin vectorized with SSE2.
Copyright (C) 2021 Free Software Foundation, Inc.
This file is part of the GNU C Library.
The GNU C Library is free software; you can redistribute it and/or
modify it under the terms of the GNU Lesser General Public
License as published by the Free Software Foundation; either
version 2.1 of the License, or (at your option) any later version.
The GNU C Library is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
Lesser General Public License for more details.
You should have received a copy of the GNU Lesser General Public
License along with the GNU C Library; if not, see
<https://www.gnu.org/licenses/>. */
#include <sysdep.h>
#include "svml_d_wrapper_impl.h"
.text
/* 2-lane double-precision asin (SSE2 ABI variant 'b'): expand the
   generic SSE2 wrapper around the scalar asin.  */
ENTRY (_ZGVbN2v_asin)
WRAPPER_IMPL_SSE2 asin
END (_ZGVbN2v_asin)
/* In non-multiarch builds this file provides the real symbol, so it
   also needs the hidden alias for internal callers.  */
#ifndef USE_MULTIARCH
libmvec_hidden_def (_ZGVbN2v_asin)
#endif

View File

@ -0,0 +1,29 @@
/* Function asin vectorized with AVX2, wrapper version.
Copyright (C) 2021 Free Software Foundation, Inc.
This file is part of the GNU C Library.
The GNU C Library is free software; you can redistribute it and/or
modify it under the terms of the GNU Lesser General Public
License as published by the Free Software Foundation; either
version 2.1 of the License, or (at your option) any later version.
The GNU C Library is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
Lesser General Public License for more details.
You should have received a copy of the GNU Lesser General Public
License along with the GNU C Library; if not, see
<https://www.gnu.org/licenses/>. */
#include <sysdep.h>
#include "svml_d_wrapper_impl.h"
.text
/* 4-lane double-precision asin (AVX2 ABI variant 'd', wrapper
   version): built on the 2-lane _ZGVbN2v_asin.  */
ENTRY (_ZGVdN4v_asin)
WRAPPER_IMPL_AVX _ZGVbN2v_asin
END (_ZGVdN4v_asin)
/* Provide the hidden alias only when this is the real (non-ifunc)
   implementation.  */
#ifndef USE_MULTIARCH
libmvec_hidden_def (_ZGVdN4v_asin)
#endif

View File

@ -0,0 +1,25 @@
/* Function asin vectorized in AVX ISA as wrapper to SSE4 ISA version.
Copyright (C) 2021 Free Software Foundation, Inc.
This file is part of the GNU C Library.
The GNU C Library is free software; you can redistribute it and/or
modify it under the terms of the GNU Lesser General Public
License as published by the Free Software Foundation; either
version 2.1 of the License, or (at your option) any later version.
The GNU C Library is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
Lesser General Public License for more details.
You should have received a copy of the GNU Lesser General Public
License along with the GNU C Library; if not, see
<https://www.gnu.org/licenses/>. */
#include <sysdep.h>
#include "svml_d_wrapper_impl.h"
.text
/* 4-lane double-precision asin (AVX ABI variant 'c'): wrapper over
   the 2-lane SSE4 version _ZGVbN2v_asin.  */
ENTRY (_ZGVcN4v_asin)
WRAPPER_IMPL_AVX _ZGVbN2v_asin
END (_ZGVcN4v_asin)

View File

@ -0,0 +1,25 @@
/* Function asin vectorized with AVX-512, wrapper to AVX2.
Copyright (C) 2021 Free Software Foundation, Inc.
This file is part of the GNU C Library.
The GNU C Library is free software; you can redistribute it and/or
modify it under the terms of the GNU Lesser General Public
License as published by the Free Software Foundation; either
version 2.1 of the License, or (at your option) any later version.
The GNU C Library is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
Lesser General Public License for more details.
You should have received a copy of the GNU Lesser General Public
License along with the GNU C Library; if not, see
<https://www.gnu.org/licenses/>. */
#include <sysdep.h>
#include "svml_d_wrapper_impl.h"
.text
/* 8-lane double-precision asin (AVX-512 ABI variant 'e'): wrapper
   over the 4-lane AVX2 version _ZGVdN4v_asin.  */
ENTRY (_ZGVeN8v_asin)
WRAPPER_IMPL_AVX512 _ZGVdN4v_asin
END (_ZGVeN8v_asin)

View File

@ -0,0 +1,25 @@
/* Function asinf vectorized with AVX-512. Wrapper to AVX2 version.
Copyright (C) 2021 Free Software Foundation, Inc.
This file is part of the GNU C Library.
The GNU C Library is free software; you can redistribute it and/or
modify it under the terms of the GNU Lesser General Public
License as published by the Free Software Foundation; either
version 2.1 of the License, or (at your option) any later version.
The GNU C Library is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
Lesser General Public License for more details.
You should have received a copy of the GNU Lesser General Public
License along with the GNU C Library; if not, see
<https://www.gnu.org/licenses/>. */
#include <sysdep.h>
#include "svml_s_wrapper_impl.h"
.text
/* 16-lane single-precision asinf (AVX-512 ABI variant 'e'): wrapper
   over the 8-lane AVX2 version _ZGVdN8v_asinf.  */
ENTRY (_ZGVeN16v_asinf)
WRAPPER_IMPL_AVX512 _ZGVdN8v_asinf
END (_ZGVeN16v_asinf)

View File

@ -0,0 +1,29 @@
/* Function asinf vectorized with SSE2, wrapper version.
Copyright (C) 2021 Free Software Foundation, Inc.
This file is part of the GNU C Library.
The GNU C Library is free software; you can redistribute it and/or
modify it under the terms of the GNU Lesser General Public
License as published by the Free Software Foundation; either
version 2.1 of the License, or (at your option) any later version.
The GNU C Library is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
Lesser General Public License for more details.
You should have received a copy of the GNU Lesser General Public
License along with the GNU C Library; if not, see
<https://www.gnu.org/licenses/>. */
#include <sysdep.h>
#include "svml_s_wrapper_impl.h"
.text
/* 4-lane single-precision asinf (SSE2 ABI variant 'b'): expand the
   generic SSE2 wrapper around the scalar asinf.  */
ENTRY (_ZGVbN4v_asinf)
WRAPPER_IMPL_SSE2 asinf
END (_ZGVbN4v_asinf)
/* In non-multiarch builds this file provides the real symbol, so it
   also needs the hidden alias for internal callers.  */
#ifndef USE_MULTIARCH
libmvec_hidden_def (_ZGVbN4v_asinf)
#endif

View File

@ -0,0 +1,29 @@
/* Function asinf vectorized with AVX2, wrapper version.
Copyright (C) 2021 Free Software Foundation, Inc.
This file is part of the GNU C Library.
The GNU C Library is free software; you can redistribute it and/or
modify it under the terms of the GNU Lesser General Public
License as published by the Free Software Foundation; either
version 2.1 of the License, or (at your option) any later version.
The GNU C Library is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
Lesser General Public License for more details.
You should have received a copy of the GNU Lesser General Public
License along with the GNU C Library; if not, see
<https://www.gnu.org/licenses/>. */
#include <sysdep.h>
#include "svml_s_wrapper_impl.h"
.text
/* 8-lane single-precision asinf (AVX2 ABI variant 'd', wrapper
   version): built on the 4-lane _ZGVbN4v_asinf.  */
ENTRY (_ZGVdN8v_asinf)
WRAPPER_IMPL_AVX _ZGVbN4v_asinf
END (_ZGVdN8v_asinf)
/* Provide the hidden alias only when this is the real (non-ifunc)
   implementation.  */
#ifndef USE_MULTIARCH
libmvec_hidden_def (_ZGVdN8v_asinf)
#endif

View File

@ -0,0 +1,25 @@
/* Function asinf vectorized in AVX ISA as wrapper to SSE4 ISA version.
Copyright (C) 2021 Free Software Foundation, Inc.
This file is part of the GNU C Library.
The GNU C Library is free software; you can redistribute it and/or
modify it under the terms of the GNU Lesser General Public
License as published by the Free Software Foundation; either
version 2.1 of the License, or (at your option) any later version.
The GNU C Library is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
Lesser General Public License for more details.
You should have received a copy of the GNU Lesser General Public
License along with the GNU C Library; if not, see
<https://www.gnu.org/licenses/>. */
#include <sysdep.h>
#include "svml_s_wrapper_impl.h"
.text
/* 8-lane single-precision asinf (AVX ABI variant 'c'): wrapper over
   the 4-lane SSE4 version _ZGVbN4v_asinf.  */
ENTRY (_ZGVcN8v_asinf)
WRAPPER_IMPL_AVX _ZGVbN4v_asinf
END (_ZGVcN8v_asinf)

View File

@ -0,0 +1 @@
#include "test-double-libmvec-asin.c"

View File

@ -0,0 +1 @@
#include "test-double-libmvec-asin.c"

View File

@ -0,0 +1 @@
#include "test-double-libmvec-asin.c"

View File

@ -0,0 +1,3 @@
/* Instantiate the generic one-argument vector-ABI test for the
   double-precision asin.  */
#define LIBMVEC_TYPE double
#define LIBMVEC_FUNC asin
#include "test-vector-abi-arg1.h"

View File

@ -29,6 +29,7 @@ VECTOR_WRAPPER (WRAPPER_NAME (exp), _ZGVbN2v_exp)
VECTOR_WRAPPER_ff (WRAPPER_NAME (pow), _ZGVbN2vv_pow) VECTOR_WRAPPER_ff (WRAPPER_NAME (pow), _ZGVbN2vv_pow)
VECTOR_WRAPPER (WRAPPER_NAME (acos), _ZGVbN2v_acos) VECTOR_WRAPPER (WRAPPER_NAME (acos), _ZGVbN2v_acos)
VECTOR_WRAPPER (WRAPPER_NAME (atan), _ZGVbN2v_atan) VECTOR_WRAPPER (WRAPPER_NAME (atan), _ZGVbN2v_atan)
VECTOR_WRAPPER (WRAPPER_NAME (asin), _ZGVbN2v_asin)
#define VEC_INT_TYPE __m128i #define VEC_INT_TYPE __m128i

View File

@ -32,6 +32,7 @@ VECTOR_WRAPPER (WRAPPER_NAME (exp), _ZGVdN4v_exp)
VECTOR_WRAPPER_ff (WRAPPER_NAME (pow), _ZGVdN4vv_pow) VECTOR_WRAPPER_ff (WRAPPER_NAME (pow), _ZGVdN4vv_pow)
VECTOR_WRAPPER (WRAPPER_NAME (acos), _ZGVdN4v_acos) VECTOR_WRAPPER (WRAPPER_NAME (acos), _ZGVdN4v_acos)
VECTOR_WRAPPER (WRAPPER_NAME (atan), _ZGVdN4v_atan) VECTOR_WRAPPER (WRAPPER_NAME (atan), _ZGVdN4v_atan)
VECTOR_WRAPPER (WRAPPER_NAME (asin), _ZGVdN4v_asin)
#ifndef __ILP32__ #ifndef __ILP32__
# define VEC_INT_TYPE __m256i # define VEC_INT_TYPE __m256i

View File

@ -29,6 +29,7 @@ VECTOR_WRAPPER (WRAPPER_NAME (exp), _ZGVcN4v_exp)
VECTOR_WRAPPER_ff (WRAPPER_NAME (pow), _ZGVcN4vv_pow) VECTOR_WRAPPER_ff (WRAPPER_NAME (pow), _ZGVcN4vv_pow)
VECTOR_WRAPPER (WRAPPER_NAME (acos), _ZGVcN4v_acos) VECTOR_WRAPPER (WRAPPER_NAME (acos), _ZGVcN4v_acos)
VECTOR_WRAPPER (WRAPPER_NAME (atan), _ZGVcN4v_atan) VECTOR_WRAPPER (WRAPPER_NAME (atan), _ZGVcN4v_atan)
VECTOR_WRAPPER (WRAPPER_NAME (asin), _ZGVcN4v_asin)
#define VEC_INT_TYPE __m128i #define VEC_INT_TYPE __m128i

View File

@ -29,6 +29,7 @@ VECTOR_WRAPPER (WRAPPER_NAME (exp), _ZGVeN8v_exp)
VECTOR_WRAPPER_ff (WRAPPER_NAME (pow), _ZGVeN8vv_pow) VECTOR_WRAPPER_ff (WRAPPER_NAME (pow), _ZGVeN8vv_pow)
VECTOR_WRAPPER (WRAPPER_NAME (acos), _ZGVeN8v_acos) VECTOR_WRAPPER (WRAPPER_NAME (acos), _ZGVeN8v_acos)
VECTOR_WRAPPER (WRAPPER_NAME (atan), _ZGVeN8v_atan) VECTOR_WRAPPER (WRAPPER_NAME (atan), _ZGVeN8v_atan)
VECTOR_WRAPPER (WRAPPER_NAME (asin), _ZGVeN8v_asin)
#ifndef __ILP32__ #ifndef __ILP32__
# define VEC_INT_TYPE __m512i # define VEC_INT_TYPE __m512i

View File

@ -0,0 +1 @@
#include "test-float-libmvec-asinf.c"

View File

@ -0,0 +1 @@
#include "test-float-libmvec-asinf.c"

View File

@ -0,0 +1 @@
#include "test-float-libmvec-asinf.c"

View File

@ -0,0 +1,3 @@
/* Instantiate the generic one-argument vector-ABI test for the
   single-precision asinf.  */
#define LIBMVEC_TYPE float
#define LIBMVEC_FUNC asinf
#include "test-vector-abi-arg1.h"

View File

@ -29,6 +29,7 @@ VECTOR_WRAPPER (WRAPPER_NAME (expf), _ZGVeN16v_expf)
VECTOR_WRAPPER_ff (WRAPPER_NAME (powf), _ZGVeN16vv_powf) VECTOR_WRAPPER_ff (WRAPPER_NAME (powf), _ZGVeN16vv_powf)
VECTOR_WRAPPER (WRAPPER_NAME (acosf), _ZGVeN16v_acosf) VECTOR_WRAPPER (WRAPPER_NAME (acosf), _ZGVeN16v_acosf)
VECTOR_WRAPPER (WRAPPER_NAME (atanf), _ZGVeN16v_atanf) VECTOR_WRAPPER (WRAPPER_NAME (atanf), _ZGVeN16v_atanf)
VECTOR_WRAPPER (WRAPPER_NAME (asinf), _ZGVeN16v_asinf)
#define VEC_INT_TYPE __m512i #define VEC_INT_TYPE __m512i

View File

@ -29,6 +29,7 @@ VECTOR_WRAPPER (WRAPPER_NAME (expf), _ZGVbN4v_expf)
VECTOR_WRAPPER_ff (WRAPPER_NAME (powf), _ZGVbN4vv_powf) VECTOR_WRAPPER_ff (WRAPPER_NAME (powf), _ZGVbN4vv_powf)
VECTOR_WRAPPER (WRAPPER_NAME (acosf), _ZGVbN4v_acosf) VECTOR_WRAPPER (WRAPPER_NAME (acosf), _ZGVbN4v_acosf)
VECTOR_WRAPPER (WRAPPER_NAME (atanf), _ZGVbN4v_atanf) VECTOR_WRAPPER (WRAPPER_NAME (atanf), _ZGVbN4v_atanf)
VECTOR_WRAPPER (WRAPPER_NAME (asinf), _ZGVbN4v_asinf)
#define VEC_INT_TYPE __m128i #define VEC_INT_TYPE __m128i

View File

@ -32,6 +32,7 @@ VECTOR_WRAPPER (WRAPPER_NAME (expf), _ZGVdN8v_expf)
VECTOR_WRAPPER_ff (WRAPPER_NAME (powf), _ZGVdN8vv_powf) VECTOR_WRAPPER_ff (WRAPPER_NAME (powf), _ZGVdN8vv_powf)
VECTOR_WRAPPER (WRAPPER_NAME (acosf), _ZGVdN8v_acosf) VECTOR_WRAPPER (WRAPPER_NAME (acosf), _ZGVdN8v_acosf)
VECTOR_WRAPPER (WRAPPER_NAME (atanf), _ZGVdN8v_atanf) VECTOR_WRAPPER (WRAPPER_NAME (atanf), _ZGVdN8v_atanf)
VECTOR_WRAPPER (WRAPPER_NAME (asinf), _ZGVdN8v_asinf)
/* Redefinition of wrapper to be compatible with _ZGVdN8vvv_sincosf. */ /* Redefinition of wrapper to be compatible with _ZGVdN8vvv_sincosf. */
#undef VECTOR_WRAPPER_fFF #undef VECTOR_WRAPPER_fFF

View File

@ -29,6 +29,7 @@ VECTOR_WRAPPER (WRAPPER_NAME (expf), _ZGVcN8v_expf)
VECTOR_WRAPPER_ff (WRAPPER_NAME (powf), _ZGVcN8vv_powf) VECTOR_WRAPPER_ff (WRAPPER_NAME (powf), _ZGVcN8vv_powf)
VECTOR_WRAPPER (WRAPPER_NAME (acosf), _ZGVcN8v_acosf) VECTOR_WRAPPER (WRAPPER_NAME (acosf), _ZGVcN8v_acosf)
VECTOR_WRAPPER (WRAPPER_NAME (atanf), _ZGVcN8v_atanf) VECTOR_WRAPPER (WRAPPER_NAME (atanf), _ZGVcN8v_atanf)
VECTOR_WRAPPER (WRAPPER_NAME (asinf), _ZGVcN8v_asinf)
#define VEC_INT_TYPE __m128i #define VEC_INT_TYPE __m128i