x86-64: Add vector exp2/exp2f implementation to libmvec

Implement vectorized exp2/exp2f with SSE, AVX, AVX2 and AVX512
versions for libmvec, as specified by the vector ABI.  Also add
accuracy and ABI tests for vector exp2/exp2f, with regenerated ulps.

Reviewed-by: H.J. Lu <hjl.tools@gmail.com>
Author: Sunil K Pandey
Date:   2021-12-29 08:41:18 -08:00
parent 37475ba883
commit 3fc9ccc20b
50 changed files with 2293 additions and 1 deletion


@@ -142,4 +142,15 @@
#define __DECL_SIMD_hypotf32x
#define __DECL_SIMD_hypotf64x
#define __DECL_SIMD_hypotf128x
#define __DECL_SIMD_exp2
#define __DECL_SIMD_exp2f
#define __DECL_SIMD_exp2l
#define __DECL_SIMD_exp2f16
#define __DECL_SIMD_exp2f32
#define __DECL_SIMD_exp2f64
#define __DECL_SIMD_exp2f128
#define __DECL_SIMD_exp2f32x
#define __DECL_SIMD_exp2f64x
#define __DECL_SIMD_exp2f128x
#endif


@@ -127,7 +127,7 @@ __MATHCALL (logb,, (_Mdouble_ __x));
#ifdef __USE_ISOC99
/* Compute base-2 exponential of X. */
__MATHCALL (exp2,, (_Mdouble_ __x));
__MATHCALL_VEC (exp2,, (_Mdouble_ __x));
/* Compute base-2 logarithm of X. */
__MATHCALL (log2,, (_Mdouble_ __x));
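Declaring exp2 through __MATHCALL_VEC marks it as having SIMD variants, so a
compiler that implements the x86_64 vector ABI can replace scalar calls inside
a vectorized loop with the _ZGV* entry points exported below.  The loop below
is an illustrative usage sketch, not part of the patch; the exact options are
a toolchain detail (for example GCC with -O2 -mavx2 and -ffast-math or
-fopenmp-simd):

#include <math.h>

/* With exp2 declared SIMD-enabled, GCC may compile this loop into calls to
   _ZGVdN4v_exp2 (AVX2) or _ZGVeN8v_exp2 (AVX-512) from libmvec instead of
   scalar exp2.  Illustrative sketch only.  */
void
exp2_array (double *restrict out, const double *restrict in, long n)
{
  for (long i = 0; i < n; i++)
    out[i] = exp2 (in[i]);
}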


@@ -49,32 +49,40 @@ GLIBC_2.22 _ZGVeN8vvv_sincos F
GLIBC_2.35 _ZGVbN2v_acos F
GLIBC_2.35 _ZGVbN2v_asin F
GLIBC_2.35 _ZGVbN2v_atan F
GLIBC_2.35 _ZGVbN2v_exp2 F
GLIBC_2.35 _ZGVbN2vv_hypot F
GLIBC_2.35 _ZGVbN4v_acosf F
GLIBC_2.35 _ZGVbN4v_asinf F
GLIBC_2.35 _ZGVbN4v_atanf F
GLIBC_2.35 _ZGVbN4v_exp2f F
GLIBC_2.35 _ZGVbN4vv_hypotf F
GLIBC_2.35 _ZGVcN4v_acos F
GLIBC_2.35 _ZGVcN4v_asin F
GLIBC_2.35 _ZGVcN4v_atan F
GLIBC_2.35 _ZGVcN4v_exp2 F
GLIBC_2.35 _ZGVcN4vv_hypot F
GLIBC_2.35 _ZGVcN8v_acosf F
GLIBC_2.35 _ZGVcN8v_asinf F
GLIBC_2.35 _ZGVcN8v_atanf F
GLIBC_2.35 _ZGVcN8v_exp2f F
GLIBC_2.35 _ZGVcN8vv_hypotf F
GLIBC_2.35 _ZGVdN4v_acos F
GLIBC_2.35 _ZGVdN4v_asin F
GLIBC_2.35 _ZGVdN4v_atan F
GLIBC_2.35 _ZGVdN4v_exp2 F
GLIBC_2.35 _ZGVdN4vv_hypot F
GLIBC_2.35 _ZGVdN8v_acosf F
GLIBC_2.35 _ZGVdN8v_asinf F
GLIBC_2.35 _ZGVdN8v_atanf F
GLIBC_2.35 _ZGVdN8v_exp2f F
GLIBC_2.35 _ZGVdN8vv_hypotf F
GLIBC_2.35 _ZGVeN16v_acosf F
GLIBC_2.35 _ZGVeN16v_asinf F
GLIBC_2.35 _ZGVeN16v_atanf F
GLIBC_2.35 _ZGVeN16v_exp2f F
GLIBC_2.35 _ZGVeN16vv_hypotf F
GLIBC_2.35 _ZGVeN8v_acos F
GLIBC_2.35 _ZGVeN8v_asin F
GLIBC_2.35 _ZGVeN8v_atan F
GLIBC_2.35 _ZGVeN8v_exp2 F
GLIBC_2.35 _ZGVeN8vv_hypot F
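The new symbols follow the x86_64 vector ABI name mangling: the _ZGV prefix, an
ISA letter (b = SSE, c = AVX, d = AVX2, e = AVX-512), N for the unmasked
"notinbranch" variant, the lane count, and one v per vector argument.  In
intrinsic-vector terms the added entry points correspond roughly to the
declarations below (an illustration of the ABI, not code from the patch):

#include <immintrin.h>

__m128d _ZGVbN2v_exp2   (__m128d);   /* SSE,     2 doubles  */
__m256d _ZGVcN4v_exp2   (__m256d);   /* AVX,     4 doubles  */
__m256d _ZGVdN4v_exp2   (__m256d);   /* AVX2,    4 doubles  */
__m512d _ZGVeN8v_exp2   (__m512d);   /* AVX-512, 8 doubles  */
__m128  _ZGVbN4v_exp2f  (__m128);    /* SSE,     4 floats   */
__m256  _ZGVcN8v_exp2f  (__m256);    /* AVX,     8 floats   */
__m256  _ZGVdN8v_exp2f  (__m256);    /* AVX2,    8 floats   */
__m512  _ZGVeN16v_exp2f (__m512);    /* AVX-512, 16 floats  */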


@@ -74,6 +74,10 @@
# define __DECL_SIMD_hypot __DECL_SIMD_x86_64
# undef __DECL_SIMD_hypotf
# define __DECL_SIMD_hypotf __DECL_SIMD_x86_64
# undef __DECL_SIMD_exp2
# define __DECL_SIMD_exp2 __DECL_SIMD_x86_64
# undef __DECL_SIMD_exp2f
# define __DECL_SIMD_exp2f __DECL_SIMD_x86_64
# endif
#endif


@@ -36,6 +36,8 @@
!GCC$ builtin (asinf) attributes simd (notinbranch) if('x86_64')
!GCC$ builtin (hypot) attributes simd (notinbranch) if('x86_64')
!GCC$ builtin (hypotf) attributes simd (notinbranch) if('x86_64')
!GCC$ builtin (exp2) attributes simd (notinbranch) if('x86_64')
!GCC$ builtin (exp2f) attributes simd (notinbranch) if('x86_64')
!GCC$ builtin (cos) attributes simd (notinbranch) if('x32')
!GCC$ builtin (cosf) attributes simd (notinbranch) if('x32')
@@ -57,3 +59,5 @@
!GCC$ builtin (asinf) attributes simd (notinbranch) if('x32')
!GCC$ builtin (hypot) attributes simd (notinbranch) if('x32')
!GCC$ builtin (hypotf) attributes simd (notinbranch) if('x32')
!GCC$ builtin (exp2) attributes simd (notinbranch) if('x32')
!GCC$ builtin (exp2f) attributes simd (notinbranch) if('x32')


@@ -27,6 +27,7 @@ libmvec-funcs = \
atan \
cos \
exp \
exp2 \
hypot \
log \
pow \


@@ -17,10 +17,12 @@ libmvec {
_ZGVbN2v_acos; _ZGVcN4v_acos; _ZGVdN4v_acos; _ZGVeN8v_acos;
_ZGVbN2v_asin; _ZGVcN4v_asin; _ZGVdN4v_asin; _ZGVeN8v_asin;
_ZGVbN2v_atan; _ZGVcN4v_atan; _ZGVdN4v_atan; _ZGVeN8v_atan;
_ZGVbN2v_exp2; _ZGVcN4v_exp2; _ZGVdN4v_exp2; _ZGVeN8v_exp2;
_ZGVbN2vv_hypot; _ZGVcN4vv_hypot; _ZGVdN4vv_hypot; _ZGVeN8vv_hypot;
_ZGVbN4v_acosf; _ZGVcN8v_acosf; _ZGVdN8v_acosf; _ZGVeN16v_acosf;
_ZGVbN4v_asinf; _ZGVcN8v_asinf; _ZGVdN8v_asinf; _ZGVeN16v_asinf;
_ZGVbN4v_atanf; _ZGVcN8v_atanf; _ZGVdN8v_atanf; _ZGVeN16v_atanf;
_ZGVbN4v_exp2f; _ZGVcN8v_exp2f; _ZGVdN8v_exp2f; _ZGVeN16v_exp2f;
_ZGVbN4vv_hypotf; _ZGVcN8vv_hypotf; _ZGVdN8vv_hypotf; _ZGVeN16vv_hypotf;
}
}


@@ -1276,6 +1276,26 @@ float: 1
float128: 2
ldouble: 1
Function: "exp2_vlen16":
float: 1
Function: "exp2_vlen2":
double: 1
Function: "exp2_vlen4":
double: 1
float: 1
Function: "exp2_vlen4_avx2":
double: 1
Function: "exp2_vlen8":
double: 1
float: 1
Function: "exp2_vlen8_avx2":
float: 1
Function: "exp_downward":
double: 1
float: 1


@@ -0,0 +1,20 @@
/* SSE2 version of vectorized exp2, vector length is 2.
Copyright (C) 2021 Free Software Foundation, Inc.
This file is part of the GNU C Library.
The GNU C Library is free software; you can redistribute it and/or
modify it under the terms of the GNU Lesser General Public
License as published by the Free Software Foundation; either
version 2.1 of the License, or (at your option) any later version.
The GNU C Library is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
Lesser General Public License for more details.
You should have received a copy of the GNU Lesser General Public
License along with the GNU C Library; if not, see
<https://www.gnu.org/licenses/>. */
#define _ZGVbN2v_exp2 _ZGVbN2v_exp2_sse2
#include "../svml_d_exp22_core.S"


@@ -0,0 +1,27 @@
/* Multiple versions of vectorized exp2, vector length is 2.
Copyright (C) 2021 Free Software Foundation, Inc.
This file is part of the GNU C Library.
The GNU C Library is free software; you can redistribute it and/or
modify it under the terms of the GNU Lesser General Public
License as published by the Free Software Foundation; either
version 2.1 of the License, or (at your option) any later version.
The GNU C Library is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
Lesser General Public License for more details.
You should have received a copy of the GNU Lesser General Public
License along with the GNU C Library; if not, see
<https://www.gnu.org/licenses/>. */
#define SYMBOL_NAME _ZGVbN2v_exp2
#include "ifunc-mathvec-sse4_1.h"
libc_ifunc_redirected (REDIRECT_NAME, SYMBOL_NAME, IFUNC_SELECTOR ());
#ifdef SHARED
__hidden_ver1 (_ZGVbN2v_exp2, __GI__ZGVbN2v_exp2, __redirect__ZGVbN2v_exp2)
__attribute__ ((visibility ("hidden")));
#endif


@@ -0,0 +1,325 @@
/* Function exp2 vectorized with SSE4.
Copyright (C) 2021 Free Software Foundation, Inc.
This file is part of the GNU C Library.
The GNU C Library is free software; you can redistribute it and/or
modify it under the terms of the GNU Lesser General Public
License as published by the Free Software Foundation; either
version 2.1 of the License, or (at your option) any later version.
The GNU C Library is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
Lesser General Public License for more details.
You should have received a copy of the GNU Lesser General Public
License along with the GNU C Library; if not, see
<https://www.gnu.org/licenses/>. */
/*
* ALGORITHM DESCRIPTION:
*
* exp2(x) = 2^n * T[j] * (1 + P(y))
* where
* x = m*(1/K) + y, y in [-1/K..1/K]
* m = n*K + j, m,n,j - signed integer, j in [-K/2..K/2]
*
* values of 2^j/K are tabulated
*
* P(y) is a minimax polynomial approximation of exp2(x)-1
* on small interval [-1/K..1/K]
*
* Special cases:
*
* exp2(NaN) = NaN
* exp2(+INF) = +INF
* exp2(-INF) = 0
* exp2(x) = 1 for subnormals
* For IEEE double
* if x >= 1024.0 then exp2(x) overflows
* if x < -1076.0 then exp2(x) underflows
*
*/
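A scalar C model of this reduction may help when reading the assembly below:
round x to the nearest multiple of 1/K (K = 128 here, done with the _dbShifter
trick in the code), split the rounded value into an exponent part n and a
table index j, and combine the tabulated 2^(j/K) with a degree-4 polynomial in
the remainder.  This is an editor's sketch under those assumptions, not the
committed code; special inputs are not handled, and the real kernel applies
2^n by adding n shifted into the exponent field (psllq $45 / paddq) rather
than calling ldexp.

#include <math.h>

static double
exp2_model (double x)
{
  const int K = 128;
  long long m = llrint (x * K);        /* the asm derives this via _dbShifter */
  int j = (int) (m & (K - 1));         /* _lIndexMask: table index 0..127 */
  long long n = (m - j) / K;           /* exponent part */
  double y = x - (double) m / K;       /* reduced argument, |y| <= 1/(2K) */
  double t = exp2 ((double) j / K);    /* stands in for the _dbT table */
  /* a1*y + a2*y^2 + a3*y^3 + a4*y^4 ~ 2^y - 1 (coefficients _dPC1.._dPC4).  */
  double p = y * (0x1.62e42fefa3685p-1
                  + y * (0x1.ebfbdff82ca48p-3
                         + y * (0x1.c6b09b180f045p-5
                                + y * 0x1.3b2ab5bb1268fp-7)));
  return ldexp (t * (1.0 + p), (int) n);
}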
/* Offsets for data table __svml_dexp2_data_internal
*/
#define _dbT 0
#define _dbShifter 1024
#define _dPC1 1040
#define _dPC2 1056
#define _dPC3 1072
#define _dPC4 1088
#define _lIndexMask 1104
#define _iAbsMask 1120
#define _iDomainRange 1136
#include <sysdep.h>
.text
.section .text.sse4,"ax",@progbits
ENTRY(_ZGVbN2v_exp2_sse4)
subq $72, %rsp
cfi_def_cfa_offset(80)
/* R */
movaps %xmm0, %xmm7
movups _dbShifter+__svml_dexp2_data_internal(%rip), %xmm1
/* out, basePtr, iIndex, iBaseOfs, iSize, iGran, iOfs */
lea __svml_dexp2_data_internal(%rip), %rsi
/* Load argument */
movaps %xmm1, %xmm10
addpd %xmm0, %xmm10
movaps %xmm10, %xmm6
subpd %xmm1, %xmm6
subpd %xmm6, %xmm7
/*
* Polynomial
* poly(dN) = a1*dR+...+a4*dR^4
*/
movups _dPC4+__svml_dexp2_data_internal(%rip), %xmm8
mulpd %xmm7, %xmm8
addpd _dPC3+__svml_dexp2_data_internal(%rip), %xmm8
mulpd %xmm7, %xmm8
addpd _dPC2+__svml_dexp2_data_internal(%rip), %xmm8
movdqu _lIndexMask+__svml_dexp2_data_internal(%rip), %xmm9
/* Index and lookup */
movdqa %xmm9, %xmm5
pandn %xmm10, %xmm9
pand %xmm10, %xmm5
/* 2^N */
psllq $45, %xmm9
movd %xmm5, %eax
movq _iAbsMask+__svml_dexp2_data_internal(%rip), %xmm2
/* Check for overflow/underflow */
pshufd $221, %xmm0, %xmm4
pextrw $4, %xmm5, %ecx
/* a1+...+a4*dR^3 ! */
mulpd %xmm7, %xmm8
shll $3, %eax
pand %xmm2, %xmm4
shll $3, %ecx
movq (%rsi,%rax), %xmm1
movhpd (%rsi,%rcx), %xmm1
/* dR=dR*dT */
mulpd %xmm1, %xmm7
addpd _dPC1+__svml_dexp2_data_internal(%rip), %xmm8
/*
* Reconstruction
* exp2 = {2^N later}*(Tj+Tj*poly)
* dN = dT+dT*dR*(a1+...+a4*dR^3)
*/
mulpd %xmm7, %xmm8
addpd %xmm8, %xmm1
movq _iDomainRange+__svml_dexp2_data_internal(%rip), %xmm3
pcmpgtd %xmm3, %xmm4
movmskps %xmm4, %edx
/* quick 2^N */
paddq %xmm9, %xmm1
andl $3, %edx
/* Finish */
/* Go to special inputs processing branch */
jne L(SPECIAL_VALUES_BRANCH)
# LOE rbx rbp r12 r13 r14 r15 edx xmm0 xmm1
/* Restore registers
* and exit the function
*/
L(EXIT):
movaps %xmm1, %xmm0
addq $72, %rsp
cfi_def_cfa_offset(8)
ret
cfi_def_cfa_offset(80)
/* Branch to process
* special inputs
*/
L(SPECIAL_VALUES_BRANCH):
movups %xmm0, 32(%rsp)
movups %xmm1, 48(%rsp)
# LOE rbx rbp r12 r13 r14 r15 edx xmm1
xorl %eax, %eax
movq %r12, 16(%rsp)
cfi_offset(12, -64)
movl %eax, %r12d
movq %r13, 8(%rsp)
cfi_offset(13, -72)
movl %edx, %r13d
movq %r14, (%rsp)
cfi_offset(14, -80)
# LOE rbx rbp r15 r12d r13d
/* Range mask
* bits check
*/
L(RANGEMASK_CHECK):
btl %r12d, %r13d
/* Call scalar math function */
jc L(SCALAR_MATH_CALL)
# LOE rbx rbp r15 r12d r13d
/* Special inputs
* processing loop
*/
L(SPECIAL_VALUES_LOOP):
incl %r12d
cmpl $2, %r12d
/* Check bits in range mask */
jl L(RANGEMASK_CHECK)
# LOE rbx rbp r15 r12d r13d
movq 16(%rsp), %r12
cfi_restore(12)
movq 8(%rsp), %r13
cfi_restore(13)
movq (%rsp), %r14
cfi_restore(14)
movups 48(%rsp), %xmm1
/* Go to exit */
jmp L(EXIT)
cfi_offset(12, -64)
cfi_offset(13, -72)
cfi_offset(14, -80)
# LOE rbx rbp r12 r13 r14 r15 xmm1
/* Scalar math function call
* to process special input
*/
L(SCALAR_MATH_CALL):
movl %r12d, %r14d
movsd 32(%rsp,%r14,8), %xmm0
call exp2@PLT
# LOE rbx rbp r14 r15 r12d r13d xmm0
movsd %xmm0, 48(%rsp,%r14,8)
/* Process special inputs in loop */
jmp L(SPECIAL_VALUES_LOOP)
# LOE rbx rbp r15 r12d r13d
END(_ZGVbN2v_exp2_sse4)
.section .rodata, "a"
.align 16
#ifdef __svml_dexp2_data_internal_typedef
typedef unsigned int VUINT32;
typedef struct
{
__declspec(align(16)) VUINT32 _dbT[(1<<7)][2];
__declspec(align(16)) VUINT32 _dbShifter[2][2];
__declspec(align(16)) VUINT32 _dPC1[2][2];
__declspec(align(16)) VUINT32 _dPC2[2][2];
__declspec(align(16)) VUINT32 _dPC3[2][2];
__declspec(align(16)) VUINT32 _dPC4[2][2];
__declspec(align(16)) VUINT32 _lIndexMask[2][2];
__declspec(align(16)) VUINT32 _iAbsMask[4][1];
__declspec(align(16)) VUINT32 _iDomainRange[4][1];
} __svml_dexp2_data_internal;
#endif
__svml_dexp2_data_internal:
/*== _dbT ==*/
.quad 0x3ff0000000000000, 0x3ff0163da9fb3335 /*2^( 0 /128),2^( 1 /128)*/
.quad 0x3ff02c9a3e778061, 0x3ff04315e86e7f85 /*2^( 2 /128),2^( 3 /128)*/
.quad 0x3ff059b0d3158574, 0x3ff0706b29ddf6de /*2^( 4 /128),2^( 5 /128)*/
.quad 0x3ff0874518759bc8, 0x3ff09e3ecac6f383 /*2^( 6 /128),2^( 7 /128)*/
.quad 0x3ff0b5586cf9890f, 0x3ff0cc922b7247f7 /*2^( 8 /128),2^( 9 /128)*/
.quad 0x3ff0e3ec32d3d1a2, 0x3ff0fb66affed31b /*2^( 10 /128),2^( 11 /128)*/
.quad 0x3ff11301d0125b51, 0x3ff12abdc06c31cc /*2^( 12 /128),2^( 13 /128)*/
.quad 0x3ff1429aaea92de0, 0x3ff15a98c8a58e51 /*2^( 14 /128),2^( 15 /128)*/
.quad 0x3ff172b83c7d517b, 0x3ff18af9388c8dea /*2^( 16 /128),2^( 17 /128)*/
.quad 0x3ff1a35beb6fcb75, 0x3ff1bbe084045cd4 /*2^( 18 /128),2^( 19 /128)*/
.quad 0x3ff1d4873168b9aa, 0x3ff1ed5022fcd91d /*2^( 20 /128),2^( 21 /128)*/
.quad 0x3ff2063b88628cd6, 0x3ff21f49917ddc96 /*2^( 22 /128),2^( 23 /128)*/
.quad 0x3ff2387a6e756238, 0x3ff251ce4fb2a63f /*2^( 24 /128),2^( 25 /128)*/
.quad 0x3ff26b4565e27cdd, 0x3ff284dfe1f56381 /*2^( 26 /128),2^( 27 /128)*/
.quad 0x3ff29e9df51fdee1, 0x3ff2b87fd0dad990 /*2^( 28 /128),2^( 29 /128)*/
.quad 0x3ff2d285a6e4030b, 0x3ff2ecafa93e2f56 /*2^( 30 /128),2^( 31 /128)*/
.quad 0x3ff306fe0a31b715, 0x3ff32170fc4cd831 /*2^( 32 /128),2^( 33 /128)*/
.quad 0x3ff33c08b26416ff, 0x3ff356c55f929ff1 /*2^( 34 /128),2^( 35 /128)*/
.quad 0x3ff371a7373aa9cb, 0x3ff38cae6d05d866 /*2^( 36 /128),2^( 37 /128)*/
.quad 0x3ff3a7db34e59ff7, 0x3ff3c32dc313a8e5 /*2^( 38 /128),2^( 39 /128)*/
.quad 0x3ff3dea64c123422, 0x3ff3fa4504ac801c /*2^( 40 /128),2^( 41 /128)*/
.quad 0x3ff4160a21f72e2a, 0x3ff431f5d950a897 /*2^( 42 /128),2^( 43 /128)*/
.quad 0x3ff44e086061892d, 0x3ff46a41ed1d0057 /*2^( 44 /128),2^( 45 /128)*/
.quad 0x3ff486a2b5c13cd0, 0x3ff4a32af0d7d3de /*2^( 46 /128),2^( 47 /128)*/
.quad 0x3ff4bfdad5362a27, 0x3ff4dcb299fddd0d /*2^( 48 /128),2^( 49 /128)*/
.quad 0x3ff4f9b2769d2ca7, 0x3ff516daa2cf6642 /*2^( 50 /128),2^( 51 /128)*/
.quad 0x3ff5342b569d4f82, 0x3ff551a4ca5d920f /*2^( 52 /128),2^( 53 /128)*/
.quad 0x3ff56f4736b527da, 0x3ff58d12d497c7fd /*2^( 54 /128),2^( 55 /128)*/
.quad 0x3ff5ab07dd485429, 0x3ff5c9268a5946b7 /*2^( 56 /128),2^( 57 /128)*/
.quad 0x3ff5e76f15ad2148, 0x3ff605e1b976dc09 /*2^( 58 /128),2^( 59 /128)*/
.quad 0x3ff6247eb03a5585, 0x3ff6434634ccc320 /*2^( 60 /128),2^( 61 /128)*/
.quad 0x3ff6623882552225, 0x3ff68155d44ca973 /*2^( 62 /128),2^( 63 /128)*/
.quad 0x3ff6a09e667f3bcd, 0x3ff6c012750bdabf /*2^( 64 /128),2^( 65 /128)*/
.quad 0x3ff6dfb23c651a2f, 0x3ff6ff7df9519484 /*2^( 66 /128),2^( 67 /128)*/
.quad 0x3ff71f75e8ec5f74, 0x3ff73f9a48a58174 /*2^( 68 /128),2^( 69 /128)*/
.quad 0x3ff75feb564267c9, 0x3ff780694fde5d3f /*2^( 70 /128),2^( 71 /128)*/
.quad 0x3ff7a11473eb0187, 0x3ff7c1ed0130c132 /*2^( 72 /128),2^( 73 /128)*/
.quad 0x3ff7e2f336cf4e62, 0x3ff80427543e1a12 /*2^( 74 /128),2^( 75 /128)*/
.quad 0x3ff82589994cce13, 0x3ff8471a4623c7ad /*2^( 76 /128),2^( 77 /128)*/
.quad 0x3ff868d99b4492ed, 0x3ff88ac7d98a6699 /*2^( 78 /128),2^( 79 /128)*/
.quad 0x3ff8ace5422aa0db, 0x3ff8cf3216b5448c /*2^( 80 /128),2^( 81 /128)*/
.quad 0x3ff8f1ae99157736, 0x3ff9145b0b91ffc6 /*2^( 82 /128),2^( 83 /128)*/
.quad 0x3ff93737b0cdc5e5, 0x3ff95a44cbc8520f /*2^( 84 /128),2^( 85 /128)*/
.quad 0x3ff97d829fde4e50, 0x3ff9a0f170ca07ba /*2^( 86 /128),2^( 87 /128)*/
.quad 0x3ff9c49182a3f090, 0x3ff9e86319e32323 /*2^( 88 /128),2^( 89 /128)*/
.quad 0x3ffa0c667b5de565, 0x3ffa309bec4a2d33 /*2^( 90 /128),2^( 91 /128)*/
.quad 0x3ffa5503b23e255d, 0x3ffa799e1330b358 /*2^( 92 /128),2^( 93 /128)*/
.quad 0x3ffa9e6b5579fdbf, 0x3ffac36bbfd3f37a /*2^( 94 /128),2^( 95 /128)*/
.quad 0x3ffae89f995ad3ad, 0x3ffb0e07298db666 /*2^( 96 /128),2^( 97 /128)*/
.quad 0x3ffb33a2b84f15fb, 0x3ffb59728de5593a /*2^( 98 /128),2^( 99 /128)*/
.quad 0x3ffb7f76f2fb5e47, 0x3ffba5b030a1064a /*2^( 100 /128),2^( 101 /128)*/
.quad 0x3ffbcc1e904bc1d2, 0x3ffbf2c25bd71e09 /*2^( 102 /128),2^( 103 /128)*/
.quad 0x3ffc199bdd85529c, 0x3ffc40ab5fffd07a /*2^( 104 /128),2^( 105 /128)*/
.quad 0x3ffc67f12e57d14b, 0x3ffc8f6d9406e7b5 /*2^( 106 /128),2^( 107 /128)*/
.quad 0x3ffcb720dcef9069, 0x3ffcdf0b555dc3fa /*2^( 108 /128),2^( 109 /128)*/
.quad 0x3ffd072d4a07897c, 0x3ffd2f87080d89f2 /*2^( 110 /128),2^( 111 /128)*/
.quad 0x3ffd5818dcfba487, 0x3ffd80e316c98398 /*2^( 112 /128),2^( 113 /128)*/
.quad 0x3ffda9e603db3285, 0x3ffdd321f301b460 /*2^( 114 /128),2^( 115 /128)*/
.quad 0x3ffdfc97337b9b5f, 0x3ffe264614f5a129 /*2^( 116 /128),2^( 117 /128)*/
.quad 0x3ffe502ee78b3ff6, 0x3ffe7a51fbc74c83 /*2^( 118 /128),2^( 119 /128)*/
.quad 0x3ffea4afa2a490da, 0x3ffecf482d8e67f1 /*2^( 120 /128),2^( 121 /128)*/
.quad 0x3ffefa1bee615a27, 0x3fff252b376bba97 /*2^( 122 /128),2^( 123 /128)*/
.quad 0x3fff50765b6e4540, 0x3fff7bfdad9cbe14 /*2^( 124 /128),2^( 125 /128)*/
.quad 0x3fffa7c1819e90d8, 0x3fffd3c22b8f71f1 /*2^( 126 /128),2^( 127 /128)*/
.align 16
.quad 0x42c8000000000000, 0x42c8000000000000 /* _dbShifter - 0x433-7=0x42c shifted right on K!*/
//log2(relerr) = -53.547756365162
.align 16
.quad 0x3fe62e42fefa3685, 0x3fe62e42fefa3685 /* _dPC1 */
.align 16
.quad 0x3fcebfbdff82ca48, 0x3fcebfbdff82ca48 /* _dPC2 */
.align 16
.quad 0x3fac6b09b180f045, 0x3fac6b09b180f045 /* _dPC3 */
.align 16
.quad 0x3f83b2ab5bb1268f, 0x3f83b2ab5bb1268f /* _dPC4 */
.align 16
.quad 0x000000000000007f, 0x000000000000007f /* _lIndexMask =(2^K-1)*/
.align 16
.long 0x7fffffff, 0x7fffffff, 0x7fffffff, 0x7fffffff /* _iAbsMask */
.align 16
.long 0x408fefff, 0x408fefff, 0x408fefff, 0x408fefff /* _iDomainRange */
.align 16
.type __svml_dexp2_data_internal,@object
.size __svml_dexp2_data_internal,.-__svml_dexp2_data_internal


@@ -0,0 +1,20 @@
/* SSE version of vectorized exp2, vector length is 4.
Copyright (C) 2021 Free Software Foundation, Inc.
This file is part of the GNU C Library.
The GNU C Library is free software; you can redistribute it and/or
modify it under the terms of the GNU Lesser General Public
License as published by the Free Software Foundation; either
version 2.1 of the License, or (at your option) any later version.
The GNU C Library is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
Lesser General Public License for more details.
You should have received a copy of the GNU Lesser General Public
License along with the GNU C Library; if not, see
<https://www.gnu.org/licenses/>. */
#define _ZGVdN4v_exp2 _ZGVdN4v_exp2_sse_wrapper
#include "../svml_d_exp24_core.S"


@@ -0,0 +1,27 @@
/* Multiple versions of vectorized exp2, vector length is 4.
Copyright (C) 2021 Free Software Foundation, Inc.
This file is part of the GNU C Library.
The GNU C Library is free software; you can redistribute it and/or
modify it under the terms of the GNU Lesser General Public
License as published by the Free Software Foundation; either
version 2.1 of the License, or (at your option) any later version.
The GNU C Library is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
Lesser General Public License for more details.
You should have received a copy of the GNU Lesser General Public
License along with the GNU C Library; if not, see
<https://www.gnu.org/licenses/>. */
#define SYMBOL_NAME _ZGVdN4v_exp2
#include "ifunc-mathvec-avx2.h"
libc_ifunc_redirected (REDIRECT_NAME, SYMBOL_NAME, IFUNC_SELECTOR ());
#ifdef SHARED
__hidden_ver1 (_ZGVdN4v_exp2, __GI__ZGVdN4v_exp2, __redirect__ZGVdN4v_exp2)
__attribute__ ((visibility ("hidden")));
#endif


@@ -0,0 +1,341 @@
/* Function exp2 vectorized with AVX2.
Copyright (C) 2021 Free Software Foundation, Inc.
This file is part of the GNU C Library.
The GNU C Library is free software; you can redistribute it and/or
modify it under the terms of the GNU Lesser General Public
License as published by the Free Software Foundation; either
version 2.1 of the License, or (at your option) any later version.
The GNU C Library is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
Lesser General Public License for more details.
You should have received a copy of the GNU Lesser General Public
License along with the GNU C Library; if not, see
<https://www.gnu.org/licenses/>. */
/*
* ALGORITHM DESCRIPTION:
*
* exp2(x) = 2^n * T[j] * (1 + P(y))
* where
* x = m*(1/K) + y, y in [-1/K..1/K]
* m = n*K + j, m,n,j - signed integer, j in [-K/2..K/2]
*
* values of 2^j/K are tabulated
*
* P(y) is a minimax polynomial approximation of exp2(x)-1
* on small interval [-1/K..1/K]
*
* Special cases:
*
* exp2(NaN) = NaN
* exp2(+INF) = +INF
* exp2(-INF) = 0
* exp2(x) = 1 for subnormals
* For IEEE double
* if x >= 1024.0 then exp2(x) overflows
* if x < -1076.0 then exp2(x) underflows
*
*/
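The AVX2 kernel implements the same scheme with FMA-based Horner evaluation;
the step that is easiest to miss when reading it is how the 2^N factor is
applied: the bits above the 7 index bits of the shifted sum are moved into the
double exponent field with vpsllq $45 and then added to the packed result with
vpaddq, which multiplies each lane by 2^N.  A small intrinsics sketch of just
that step (an editor's illustration with a made-up helper name, not code from
the patch):

#include <immintrin.h>

/* result: T[j] * (1 + poly) per lane.  m: the 64-bit lanes of x + _dbShifter
   reinterpreted as integers; their low bits hold round (x*128).  Clearing the
   7 index bits and shifting left by 45 places N = round (x*128) / 128 at bit
   52 (the shifter's own bits fall off the top), so the integer add scales
   each lane by 2^N.  No overflow handling, sketch only.  */
static __m256d
scale_by_2n (__m256d result, __m256i m)
{
  const __m256i index_mask = _mm256_set1_epi64x (0x7f);   /* _lIndexMask */
  __m256i n_bits = _mm256_slli_epi64 (_mm256_andnot_si256 (index_mask, m), 45);
  return _mm256_castsi256_pd (_mm256_add_epi64 (_mm256_castpd_si256 (result),
                                                n_bits));
}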
/* Offsets for data table __svml_dexp2_data_internal
*/
#define _dbT 0
#define _dbShifter 1024
#define _dPC1 1056
#define _dPC2 1088
#define _dPC3 1120
#define _dPC4 1152
#define _lIndexMask 1184
#define _iAbsMask 1216
#define _iDomainRange 1248
#include <sysdep.h>
.text
.section .text.avx2,"ax",@progbits
ENTRY(_ZGVdN4v_exp2_avx2)
pushq %rbp
cfi_def_cfa_offset(16)
movq %rsp, %rbp
cfi_def_cfa(6, 16)
cfi_offset(6, -16)
andq $-32, %rsp
subq $96, %rsp
/* out, basePtr, iIndex, iBaseOfs, iSize, iGran, iOfs */
lea __svml_dexp2_data_internal(%rip), %r8
vmovupd _dbShifter+__svml_dexp2_data_internal(%rip), %ymm4
vmovupd _lIndexMask+__svml_dexp2_data_internal(%rip), %ymm3
vmovapd %ymm0, %ymm1
/* Load argument */
vaddpd %ymm4, %ymm1, %ymm2
vsubpd %ymm4, %ymm2, %ymm0
/* Index and lookup */
vandps %ymm3, %ymm2, %ymm9
vpandn %ymm2, %ymm3, %ymm2
/* 2^N */
vpsllq $45, %ymm2, %ymm3
/* R */
vsubpd %ymm0, %ymm1, %ymm15
/* Check for overflow/underflow */
vextractf128 $1, %ymm1, %xmm5
/*
* Polynomial
* poly(dN) = a1*dR+...+a4*dR^4
*/
vmovupd _dPC4+__svml_dexp2_data_internal(%rip), %ymm0
vshufps $221, %xmm5, %xmm1, %xmm6
vandps _iAbsMask+__svml_dexp2_data_internal(%rip), %xmm6, %xmm7
vpcmpgtd _iDomainRange+__svml_dexp2_data_internal(%rip), %xmm7, %xmm8
vfmadd213pd _dPC3+__svml_dexp2_data_internal(%rip), %ymm15, %ymm0
vmovmskps %xmm8, %eax
vfmadd213pd _dPC2+__svml_dexp2_data_internal(%rip), %ymm15, %ymm0
/* a1+...+a4*dR^3 ! */
vfmadd213pd _dPC1+__svml_dexp2_data_internal(%rip), %ymm15, %ymm0
vextractf128 $1, %ymm9, %xmm12
vmovd %xmm9, %edx
vmovd %xmm12, %esi
shll $3, %edx
vpextrd $2, %xmm9, %ecx
shll $3, %esi
vpextrd $2, %xmm12, %edi
shll $3, %ecx
vmovq (%r8,%rdx), %xmm10
shll $3, %edi
vmovq (%r8,%rsi), %xmm13
vmovhpd (%r8,%rcx), %xmm10, %xmm11
vmovhpd (%r8,%rdi), %xmm13, %xmm14
vinsertf128 $1, %xmm14, %ymm11, %ymm4
/* dR=dR*dT */
vmulpd %ymm15, %ymm4, %ymm15
/*
* Reconstruction
* exp2 = {2^N later}*(Tj+Tj*poly)
* dN = dT+dT*dR*(a1+...+a4*dR^3)
*/
vfmadd213pd %ymm4, %ymm15, %ymm0
/* quick 2^N */
vpaddq %ymm3, %ymm0, %ymm0
/* Finish */
testl %eax, %eax
/* Go to special inputs processing branch */
jne L(SPECIAL_VALUES_BRANCH)
# LOE rbx r12 r13 r14 r15 eax ymm0 ymm1
/* Restore registers
* and exit the function
*/
L(EXIT):
movq %rbp, %rsp
popq %rbp
cfi_def_cfa(7, 8)
cfi_restore(6)
ret
cfi_def_cfa(6, 16)
cfi_offset(6, -16)
/* Branch to process
* special inputs
*/
L(SPECIAL_VALUES_BRANCH):
vmovupd %ymm1, 32(%rsp)
vmovupd %ymm0, 64(%rsp)
# LOE rbx r12 r13 r14 r15 eax ymm0
xorl %edx, %edx
# LOE rbx r12 r13 r14 r15 eax edx
vzeroupper
movq %r12, 16(%rsp)
/* DW_CFA_expression: r12 (r12) (DW_OP_lit8; DW_OP_minus; DW_OP_const4s: -32; DW_OP_and; DW_OP_const4s: -80; DW_OP_plus) */
.cfi_escape 0x10, 0x0c, 0x0e, 0x38, 0x1c, 0x0d, 0xe0, 0xff, 0xff, 0xff, 0x1a, 0x0d, 0xb0, 0xff, 0xff, 0xff, 0x22
movl %edx, %r12d
movq %r13, 8(%rsp)
/* DW_CFA_expression: r13 (r13) (DW_OP_lit8; DW_OP_minus; DW_OP_const4s: -32; DW_OP_and; DW_OP_const4s: -88; DW_OP_plus) */
.cfi_escape 0x10, 0x0d, 0x0e, 0x38, 0x1c, 0x0d, 0xe0, 0xff, 0xff, 0xff, 0x1a, 0x0d, 0xa8, 0xff, 0xff, 0xff, 0x22
movl %eax, %r13d
movq %r14, (%rsp)
/* DW_CFA_expression: r14 (r14) (DW_OP_lit8; DW_OP_minus; DW_OP_const4s: -32; DW_OP_and; DW_OP_const4s: -96; DW_OP_plus) */
.cfi_escape 0x10, 0x0e, 0x0e, 0x38, 0x1c, 0x0d, 0xe0, 0xff, 0xff, 0xff, 0x1a, 0x0d, 0xa0, 0xff, 0xff, 0xff, 0x22
# LOE rbx r15 r12d r13d
/* Range mask
* bits check
*/
L(RANGEMASK_CHECK):
btl %r12d, %r13d
/* Call scalar math function */
jc L(SCALAR_MATH_CALL)
# LOE rbx r15 r12d r13d
/* Special inputs
* processing loop
*/
L(SPECIAL_VALUES_LOOP):
incl %r12d
cmpl $4, %r12d
/* Check bits in range mask */
jl L(RANGEMASK_CHECK)
# LOE rbx r15 r12d r13d
movq 16(%rsp), %r12
cfi_restore(12)
movq 8(%rsp), %r13
cfi_restore(13)
movq (%rsp), %r14
cfi_restore(14)
vmovupd 64(%rsp), %ymm0
/* Go to exit */
jmp L(EXIT)
/* DW_CFA_expression: r12 (r12) (DW_OP_lit8; DW_OP_minus; DW_OP_const4s: -32; DW_OP_and; DW_OP_const4s: -80; DW_OP_plus) */
.cfi_escape 0x10, 0x0c, 0x0e, 0x38, 0x1c, 0x0d, 0xe0, 0xff, 0xff, 0xff, 0x1a, 0x0d, 0xb0, 0xff, 0xff, 0xff, 0x22
/* DW_CFA_expression: r13 (r13) (DW_OP_lit8; DW_OP_minus; DW_OP_const4s: -32; DW_OP_and; DW_OP_const4s: -88; DW_OP_plus) */
.cfi_escape 0x10, 0x0d, 0x0e, 0x38, 0x1c, 0x0d, 0xe0, 0xff, 0xff, 0xff, 0x1a, 0x0d, 0xa8, 0xff, 0xff, 0xff, 0x22
/* DW_CFA_expression: r14 (r14) (DW_OP_lit8; DW_OP_minus; DW_OP_const4s: -32; DW_OP_and; DW_OP_const4s: -96; DW_OP_plus) */
.cfi_escape 0x10, 0x0e, 0x0e, 0x38, 0x1c, 0x0d, 0xe0, 0xff, 0xff, 0xff, 0x1a, 0x0d, 0xa0, 0xff, 0xff, 0xff, 0x22
# LOE rbx r12 r13 r14 r15 ymm0
/* Scalar math function call
* to process special input
*/
L(SCALAR_MATH_CALL):
movl %r12d, %r14d
movsd 32(%rsp,%r14,8), %xmm0
call exp2@PLT
# LOE rbx r14 r15 r12d r13d xmm0
movsd %xmm0, 64(%rsp,%r14,8)
/* Process special inputs in loop */
jmp L(SPECIAL_VALUES_LOOP)
# LOE rbx r15 r12d r13d
END(_ZGVdN4v_exp2_avx2)
.section .rodata, "a"
.align 32
#ifdef __svml_dexp2_data_internal_typedef
typedef unsigned int VUINT32;
typedef struct
{
__declspec(align(32)) VUINT32 _dbT[(1<<7)][2];
__declspec(align(32)) VUINT32 _dbShifter[4][2];
__declspec(align(32)) VUINT32 _dPC1[4][2];
__declspec(align(32)) VUINT32 _dPC2[4][2];
__declspec(align(32)) VUINT32 _dPC3[4][2];
__declspec(align(32)) VUINT32 _dPC4[4][2];
__declspec(align(32)) VUINT32 _lIndexMask[4][2];
__declspec(align(32)) VUINT32 _iAbsMask[8][1];
__declspec(align(32)) VUINT32 _iDomainRange[8][1];
} __svml_dexp2_data_internal;
#endif
__svml_dexp2_data_internal:
/*== _dbT ==*/
.quad 0x3ff0000000000000, 0x3ff0163da9fb3335 /*2^( 0 /128),2^( 1 /128)*/
.quad 0x3ff02c9a3e778061, 0x3ff04315e86e7f85 /*2^( 2 /128),2^( 3 /128)*/
.quad 0x3ff059b0d3158574, 0x3ff0706b29ddf6de /*2^( 4 /128),2^( 5 /128)*/
.quad 0x3ff0874518759bc8, 0x3ff09e3ecac6f383 /*2^( 6 /128),2^( 7 /128)*/
.quad 0x3ff0b5586cf9890f, 0x3ff0cc922b7247f7 /*2^( 8 /128),2^( 9 /128)*/
.quad 0x3ff0e3ec32d3d1a2, 0x3ff0fb66affed31b /*2^( 10 /128),2^( 11 /128)*/
.quad 0x3ff11301d0125b51, 0x3ff12abdc06c31cc /*2^( 12 /128),2^( 13 /128)*/
.quad 0x3ff1429aaea92de0, 0x3ff15a98c8a58e51 /*2^( 14 /128),2^( 15 /128)*/
.quad 0x3ff172b83c7d517b, 0x3ff18af9388c8dea /*2^( 16 /128),2^( 17 /128)*/
.quad 0x3ff1a35beb6fcb75, 0x3ff1bbe084045cd4 /*2^( 18 /128),2^( 19 /128)*/
.quad 0x3ff1d4873168b9aa, 0x3ff1ed5022fcd91d /*2^( 20 /128),2^( 21 /128)*/
.quad 0x3ff2063b88628cd6, 0x3ff21f49917ddc96 /*2^( 22 /128),2^( 23 /128)*/
.quad 0x3ff2387a6e756238, 0x3ff251ce4fb2a63f /*2^( 24 /128),2^( 25 /128)*/
.quad 0x3ff26b4565e27cdd, 0x3ff284dfe1f56381 /*2^( 26 /128),2^( 27 /128)*/
.quad 0x3ff29e9df51fdee1, 0x3ff2b87fd0dad990 /*2^( 28 /128),2^( 29 /128)*/
.quad 0x3ff2d285a6e4030b, 0x3ff2ecafa93e2f56 /*2^( 30 /128),2^( 31 /128)*/
.quad 0x3ff306fe0a31b715, 0x3ff32170fc4cd831 /*2^( 32 /128),2^( 33 /128)*/
.quad 0x3ff33c08b26416ff, 0x3ff356c55f929ff1 /*2^( 34 /128),2^( 35 /128)*/
.quad 0x3ff371a7373aa9cb, 0x3ff38cae6d05d866 /*2^( 36 /128),2^( 37 /128)*/
.quad 0x3ff3a7db34e59ff7, 0x3ff3c32dc313a8e5 /*2^( 38 /128),2^( 39 /128)*/
.quad 0x3ff3dea64c123422, 0x3ff3fa4504ac801c /*2^( 40 /128),2^( 41 /128)*/
.quad 0x3ff4160a21f72e2a, 0x3ff431f5d950a897 /*2^( 42 /128),2^( 43 /128)*/
.quad 0x3ff44e086061892d, 0x3ff46a41ed1d0057 /*2^( 44 /128),2^( 45 /128)*/
.quad 0x3ff486a2b5c13cd0, 0x3ff4a32af0d7d3de /*2^( 46 /128),2^( 47 /128)*/
.quad 0x3ff4bfdad5362a27, 0x3ff4dcb299fddd0d /*2^( 48 /128),2^( 49 /128)*/
.quad 0x3ff4f9b2769d2ca7, 0x3ff516daa2cf6642 /*2^( 50 /128),2^( 51 /128)*/
.quad 0x3ff5342b569d4f82, 0x3ff551a4ca5d920f /*2^( 52 /128),2^( 53 /128)*/
.quad 0x3ff56f4736b527da, 0x3ff58d12d497c7fd /*2^( 54 /128),2^( 55 /128)*/
.quad 0x3ff5ab07dd485429, 0x3ff5c9268a5946b7 /*2^( 56 /128),2^( 57 /128)*/
.quad 0x3ff5e76f15ad2148, 0x3ff605e1b976dc09 /*2^( 58 /128),2^( 59 /128)*/
.quad 0x3ff6247eb03a5585, 0x3ff6434634ccc320 /*2^( 60 /128),2^( 61 /128)*/
.quad 0x3ff6623882552225, 0x3ff68155d44ca973 /*2^( 62 /128),2^( 63 /128)*/
.quad 0x3ff6a09e667f3bcd, 0x3ff6c012750bdabf /*2^( 64 /128),2^( 65 /128)*/
.quad 0x3ff6dfb23c651a2f, 0x3ff6ff7df9519484 /*2^( 66 /128),2^( 67 /128)*/
.quad 0x3ff71f75e8ec5f74, 0x3ff73f9a48a58174 /*2^( 68 /128),2^( 69 /128)*/
.quad 0x3ff75feb564267c9, 0x3ff780694fde5d3f /*2^( 70 /128),2^( 71 /128)*/
.quad 0x3ff7a11473eb0187, 0x3ff7c1ed0130c132 /*2^( 72 /128),2^( 73 /128)*/
.quad 0x3ff7e2f336cf4e62, 0x3ff80427543e1a12 /*2^( 74 /128),2^( 75 /128)*/
.quad 0x3ff82589994cce13, 0x3ff8471a4623c7ad /*2^( 76 /128),2^( 77 /128)*/
.quad 0x3ff868d99b4492ed, 0x3ff88ac7d98a6699 /*2^( 78 /128),2^( 79 /128)*/
.quad 0x3ff8ace5422aa0db, 0x3ff8cf3216b5448c /*2^( 80 /128),2^( 81 /128)*/
.quad 0x3ff8f1ae99157736, 0x3ff9145b0b91ffc6 /*2^( 82 /128),2^( 83 /128)*/
.quad 0x3ff93737b0cdc5e5, 0x3ff95a44cbc8520f /*2^( 84 /128),2^( 85 /128)*/
.quad 0x3ff97d829fde4e50, 0x3ff9a0f170ca07ba /*2^( 86 /128),2^( 87 /128)*/
.quad 0x3ff9c49182a3f090, 0x3ff9e86319e32323 /*2^( 88 /128),2^( 89 /128)*/
.quad 0x3ffa0c667b5de565, 0x3ffa309bec4a2d33 /*2^( 90 /128),2^( 91 /128)*/
.quad 0x3ffa5503b23e255d, 0x3ffa799e1330b358 /*2^( 92 /128),2^( 93 /128)*/
.quad 0x3ffa9e6b5579fdbf, 0x3ffac36bbfd3f37a /*2^( 94 /128),2^( 95 /128)*/
.quad 0x3ffae89f995ad3ad, 0x3ffb0e07298db666 /*2^( 96 /128),2^( 97 /128)*/
.quad 0x3ffb33a2b84f15fb, 0x3ffb59728de5593a /*2^( 98 /128),2^( 99 /128)*/
.quad 0x3ffb7f76f2fb5e47, 0x3ffba5b030a1064a /*2^( 100 /128),2^( 101 /128)*/
.quad 0x3ffbcc1e904bc1d2, 0x3ffbf2c25bd71e09 /*2^( 102 /128),2^( 103 /128)*/
.quad 0x3ffc199bdd85529c, 0x3ffc40ab5fffd07a /*2^( 104 /128),2^( 105 /128)*/
.quad 0x3ffc67f12e57d14b, 0x3ffc8f6d9406e7b5 /*2^( 106 /128),2^( 107 /128)*/
.quad 0x3ffcb720dcef9069, 0x3ffcdf0b555dc3fa /*2^( 108 /128),2^( 109 /128)*/
.quad 0x3ffd072d4a07897c, 0x3ffd2f87080d89f2 /*2^( 110 /128),2^( 111 /128)*/
.quad 0x3ffd5818dcfba487, 0x3ffd80e316c98398 /*2^( 112 /128),2^( 113 /128)*/
.quad 0x3ffda9e603db3285, 0x3ffdd321f301b460 /*2^( 114 /128),2^( 115 /128)*/
.quad 0x3ffdfc97337b9b5f, 0x3ffe264614f5a129 /*2^( 116 /128),2^( 117 /128)*/
.quad 0x3ffe502ee78b3ff6, 0x3ffe7a51fbc74c83 /*2^( 118 /128),2^( 119 /128)*/
.quad 0x3ffea4afa2a490da, 0x3ffecf482d8e67f1 /*2^( 120 /128),2^( 121 /128)*/
.quad 0x3ffefa1bee615a27, 0x3fff252b376bba97 /*2^( 122 /128),2^( 123 /128)*/
.quad 0x3fff50765b6e4540, 0x3fff7bfdad9cbe14 /*2^( 124 /128),2^( 125 /128)*/
.quad 0x3fffa7c1819e90d8, 0x3fffd3c22b8f71f1 /*2^( 126 /128),2^( 127 /128)*/
.align 32
.quad 0x42c8000000000000, 0x42c8000000000000, 0x42c8000000000000, 0x42c8000000000000 /* _dbShifter - 0x433-7=0x42c shifted right on K!*/
//log2(relerr) = -53.547756365162
.align 32
.quad 0x3fe62e42fefa3685, 0x3fe62e42fefa3685, 0x3fe62e42fefa3685, 0x3fe62e42fefa3685 /* _dPC1 */
.align 32
.quad 0x3fcebfbdff82ca48, 0x3fcebfbdff82ca48, 0x3fcebfbdff82ca48, 0x3fcebfbdff82ca48 /* _dPC2 */
.align 32
.quad 0x3fac6b09b180f045, 0x3fac6b09b180f045, 0x3fac6b09b180f045, 0x3fac6b09b180f045 /* _dPC3 */
.align 32
.quad 0x3f83b2ab5bb1268f, 0x3f83b2ab5bb1268f, 0x3f83b2ab5bb1268f, 0x3f83b2ab5bb1268f /* _dPC4 */
.align 32
.quad 0x000000000000007f, 0x000000000000007f, 0x000000000000007f, 0x000000000000007f /* _lIndexMask =(2^K-1)*/
.align 32
.long 0x7fffffff, 0x7fffffff, 0x7fffffff, 0x7fffffff, 0x7fffffff, 0x7fffffff, 0x7fffffff, 0x7fffffff /* _iAbsMask */
.align 32
.long 0x408fefff, 0x408fefff, 0x408fefff, 0x408fefff, 0x408fefff, 0x408fefff, 0x408fefff, 0x408fefff /* _iDomainRange */
.align 32
.type __svml_dexp2_data_internal,@object
.size __svml_dexp2_data_internal,.-__svml_dexp2_data_internal


@@ -0,0 +1,20 @@
/* AVX2 version of vectorized exp2, vector length is 8.
Copyright (C) 2021 Free Software Foundation, Inc.
This file is part of the GNU C Library.
The GNU C Library is free software; you can redistribute it and/or
modify it under the terms of the GNU Lesser General Public
License as published by the Free Software Foundation; either
version 2.1 of the License, or (at your option) any later version.
The GNU C Library is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
Lesser General Public License for more details.
You should have received a copy of the GNU Lesser General Public
License along with the GNU C Library; if not, see
<https://www.gnu.org/licenses/>. */
#define _ZGVeN8v_exp2 _ZGVeN8v_exp2_avx2_wrapper
#include "../svml_d_exp28_core.S"


@@ -0,0 +1,27 @@
/* Multiple versions of vectorized exp2, vector length is 8.
Copyright (C) 2021 Free Software Foundation, Inc.
This file is part of the GNU C Library.
The GNU C Library is free software; you can redistribute it and/or
modify it under the terms of the GNU Lesser General Public
License as published by the Free Software Foundation; either
version 2.1 of the License, or (at your option) any later version.
The GNU C Library is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
Lesser General Public License for more details.
You should have received a copy of the GNU Lesser General Public
License along with the GNU C Library; if not, see
<https://www.gnu.org/licenses/>. */
#define SYMBOL_NAME _ZGVeN8v_exp2
#include "ifunc-mathvec-avx512-skx.h"
libc_ifunc_redirected (REDIRECT_NAME, SYMBOL_NAME, IFUNC_SELECTOR ());
#ifdef SHARED
__hidden_ver1 (_ZGVeN8v_exp2, __GI__ZGVeN8v_exp2, __redirect__ZGVeN8v_exp2)
__attribute__ ((visibility ("hidden")));
#endif


@@ -0,0 +1,301 @@
/* Function exp2 vectorized with AVX-512.
Copyright (C) 2021 Free Software Foundation, Inc.
This file is part of the GNU C Library.
The GNU C Library is free software; you can redistribute it and/or
modify it under the terms of the GNU Lesser General Public
License as published by the Free Software Foundation; either
version 2.1 of the License, or (at your option) any later version.
The GNU C Library is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
Lesser General Public License for more details.
You should have received a copy of the GNU Lesser General Public
License along with the GNU C Library; if not, see
<https://www.gnu.org/licenses/>. */
/*
* ALGORITHM DESCRIPTION:
*
* Double precision mantissa represented as: 1.b1b2b3 ... b52
* Constant for double precision: S = 2^48 x 1.5
*
* 2^X = 2^Xo x 2^{X-Xo}
* 2^X = 2^K x 2^fo x 2^{X-Xo}
* 2^X = 2^K x 2^fo x 2^r
*
* 2^K --> Manual scaling
* 2^fo --> Table lookup
* r --> 1 + poly (r = X - Xo)
*
* Xo = K + fo
* Xo = K + 0.x1x2x3x4
*
* r = X - Xo
* = Vreduce(X, imm)
* = X - VRndScale(X, imm), where Xo = VRndScale(X, imm)
*
* Rnd(S + X) = S + Xo, where S is selected as S = 2^48 x 1.5
* S + X = S + floor(X) + 0.x1x2x3x4
* Rnd(S + X) = Rnd(2^48 x 1.5 + X)
* (Note: 2^exp x 1.b1b2b3 ... b52, 2^{exp-52} = 2^-4 for exp=48)
*
* exp2(x) = 2^K x 2^fo x (1 + poly(r)), where 2^r = 1 + poly(r)
*
* Scale back:
* dest = src1 x 2^floor(src2)
*
*
*/
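In intrinsics terms the flow described above looks roughly like the sketch
below: VREDUCE produces the reduced argument, the rounded-down sum with
add_const (1.5 x 2^48) leaves K plus the four fraction bits in the low
mantissa bits, a two-source permute does the 16-entry table lookup, and
VSCALEF applies the 2^K scaling at the end.  An editor's illustration under
those assumptions, with the special-input mask handling omitted:

#include <immintrin.h>

static __m512d
exp2_skx_sketch (__m512d x,
                 __m512d tbl_lo, __m512d tbl_hi,      /* Frac_PowerD0: 2^(j/16) */
                 __m512d c1, __m512d c2, __m512d c3,
                 __m512d c4, __m512d c5, __m512d c6)  /* poly_coeff1..6 */
{
  /* r = x - RndScale (x, 4 fraction bits).  */
  __m512d r = _mm512_reduce_pd (x, 65);
  /* K + 0.b1b2b3b4 as fixed point in the low bits of the sum.  */
  __m512d kf = _mm512_add_round_pd (x, _mm512_set1_pd (0x1.8p48),
                                    _MM_FROUND_TO_NEG_INF | _MM_FROUND_NO_EXC);
  __m512i idx = _mm512_and_epi64 (_mm512_castpd_si512 (kf),
                                  _mm512_set1_epi64 (0xf));     /* _lIndexMask */
  /* 16-entry table lookup: T = 2^(0.b1b2b3b4).  */
  __m512d t = _mm512_permutex2var_pd (tbl_lo, idx, tbl_hi);
  /* Horner evaluation of the degree-6 polynomial in r.  */
  __m512d p = _mm512_fmadd_pd (c6, r, c5);
  p = _mm512_fmadd_pd (p, r, c4);
  p = _mm512_fmadd_pd (p, r, c3);
  p = _mm512_fmadd_pd (p, r, c2);
  p = _mm512_fmadd_pd (p, r, c1);
  /* T + T*r*poly, then scale by 2^floor(x).  */
  __m512d res = _mm512_fmadd_pd (p, _mm512_mul_pd (t, r), t);
  return _mm512_scalef_pd (res, x);
}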
/* Offsets for data table __svml_dexp2_data_internal_avx512
*/
#define Frac_PowerD0 0
#define poly_coeff1 128
#define poly_coeff2 192
#define poly_coeff3 256
#define poly_coeff4 320
#define poly_coeff5 384
#define poly_coeff6 448
#define add_const 512
#define AbsMask 576
#define Threshold 640
#define _lIndexMask 704
#include <sysdep.h>
.text
.section .text.evex512,"ax",@progbits
ENTRY(_ZGVeN8v_exp2_skx)
pushq %rbp
cfi_def_cfa_offset(16)
movq %rsp, %rbp
cfi_def_cfa(6, 16)
cfi_offset(6, -16)
andq $-64, %rsp
subq $192, %rsp
vmovups poly_coeff5+__svml_dexp2_data_internal_avx512(%rip), %zmm14
vmovups poly_coeff6+__svml_dexp2_data_internal_avx512(%rip), %zmm6
/*
* Reduced argument
* where VREDUCE is available
*/
vreducepd $65, {sae}, %zmm0, %zmm10
vmovups poly_coeff4+__svml_dexp2_data_internal_avx512(%rip), %zmm7
vmovups add_const+__svml_dexp2_data_internal_avx512(%rip), %zmm3
vmovups poly_coeff3+__svml_dexp2_data_internal_avx512(%rip), %zmm8
vmovups __svml_dexp2_data_internal_avx512(%rip), %zmm13
/* c6*r + c5 */
vfmadd231pd {rn-sae}, %zmm10, %zmm6, %zmm14
vmovups poly_coeff2+__svml_dexp2_data_internal_avx512(%rip), %zmm9
vmovups Threshold+__svml_dexp2_data_internal_avx512(%rip), %zmm2
/*
*
* HA
* Variables and constants
* Load constants and vector(s)
*/
vmovups poly_coeff1+__svml_dexp2_data_internal_avx512(%rip), %zmm11
/* c6*r^2 + c5*r + c4 */
vfmadd213pd {rn-sae}, %zmm7, %zmm10, %zmm14
/*
* Integer form of K+0.b1b2b3b4 in lower bits - call K_plus_f0
* Mantissa of normalized double precision FP: 1.b1b2...b52
*/
vaddpd {rd-sae}, %zmm3, %zmm0, %zmm4
vandpd AbsMask+__svml_dexp2_data_internal_avx512(%rip), %zmm0, %zmm1
/* c6*r^3 + c5*r^2 + c4*r + c3 */
vfmadd213pd {rn-sae}, %zmm8, %zmm10, %zmm14
vcmppd $29, {sae}, %zmm2, %zmm1, %k0
/* c6*r^4 + c5*r^3 + c4*r^2 + c3*r + c2 */
vfmadd213pd {rn-sae}, %zmm9, %zmm10, %zmm14
kmovw %k0, %edx
/* c6*r^5 + c5*r^4 + c4*r^3 + c3*r^2 + c2*r + c1 */
vfmadd213pd {rn-sae}, %zmm11, %zmm10, %zmm14
/* Table value: 2^(0.b1b2b3b4) */
vpandq _lIndexMask+__svml_dexp2_data_internal_avx512(%rip), %zmm4, %zmm5
vpermt2pd Frac_PowerD0+64+__svml_dexp2_data_internal_avx512(%rip), %zmm5, %zmm13
/* T*r */
vmulpd {rn-sae}, %zmm10, %zmm13, %zmm12
/* T + (T*r*(c6*r^5 + c5*r^4 + c4*r^3 + c3*r^2 + c2*r + c1)) */
vfmadd213pd {rn-sae}, %zmm13, %zmm12, %zmm14
/* Scaling placed at the end to avoid accuracy loss when T*r*scale underflows */
vscalefpd {rn-sae}, %zmm0, %zmm14, %zmm1
testl %edx, %edx
/* Go to special inputs processing branch */
jne L(SPECIAL_VALUES_BRANCH)
# LOE rbx r12 r13 r14 r15 edx zmm0 zmm1
/* Restore registers
* and exit the function
*/
L(EXIT):
vmovaps %zmm1, %zmm0
movq %rbp, %rsp
popq %rbp
cfi_def_cfa(7, 8)
cfi_restore(6)
ret
cfi_def_cfa(6, 16)
cfi_offset(6, -16)
/* Branch to process
* special inputs
*/
L(SPECIAL_VALUES_BRANCH):
vmovups %zmm0, 64(%rsp)
vmovups %zmm1, 128(%rsp)
# LOE rbx r12 r13 r14 r15 edx zmm1
xorl %eax, %eax
# LOE rbx r12 r13 r14 r15 eax edx
vzeroupper
movq %r12, 16(%rsp)
/* DW_CFA_expression: r12 (r12) (DW_OP_lit8; DW_OP_minus; DW_OP_const4s: -64; DW_OP_and; DW_OP_const4s: -176; DW_OP_plus) */
.cfi_escape 0x10, 0x0c, 0x0e, 0x38, 0x1c, 0x0d, 0xc0, 0xff, 0xff, 0xff, 0x1a, 0x0d, 0x50, 0xff, 0xff, 0xff, 0x22
movl %eax, %r12d
movq %r13, 8(%rsp)
/* DW_CFA_expression: r13 (r13) (DW_OP_lit8; DW_OP_minus; DW_OP_const4s: -64; DW_OP_and; DW_OP_const4s: -184; DW_OP_plus) */
.cfi_escape 0x10, 0x0d, 0x0e, 0x38, 0x1c, 0x0d, 0xc0, 0xff, 0xff, 0xff, 0x1a, 0x0d, 0x48, 0xff, 0xff, 0xff, 0x22
movl %edx, %r13d
movq %r14, (%rsp)
/* DW_CFA_expression: r14 (r14) (DW_OP_lit8; DW_OP_minus; DW_OP_const4s: -64; DW_OP_and; DW_OP_const4s: -192; DW_OP_plus) */
.cfi_escape 0x10, 0x0e, 0x0e, 0x38, 0x1c, 0x0d, 0xc0, 0xff, 0xff, 0xff, 0x1a, 0x0d, 0x40, 0xff, 0xff, 0xff, 0x22
# LOE rbx r15 r12d r13d
/* Range mask
* bits check
*/
L(RANGEMASK_CHECK):
btl %r12d, %r13d
/* Call scalar math function */
jc L(SCALAR_MATH_CALL)
# LOE rbx r15 r12d r13d
/* Special inputs
* processing loop
*/
L(SPECIAL_VALUES_LOOP):
incl %r12d
cmpl $8, %r12d
/* Check bits in range mask */
jl L(RANGEMASK_CHECK)
# LOE rbx r15 r12d r13d
movq 16(%rsp), %r12
cfi_restore(12)
movq 8(%rsp), %r13
cfi_restore(13)
movq (%rsp), %r14
cfi_restore(14)
vmovups 128(%rsp), %zmm1
/* Go to exit */
jmp L(EXIT)
/* DW_CFA_expression: r12 (r12) (DW_OP_lit8; DW_OP_minus; DW_OP_const4s: -64; DW_OP_and; DW_OP_const4s: -176; DW_OP_plus) */
.cfi_escape 0x10, 0x0c, 0x0e, 0x38, 0x1c, 0x0d, 0xc0, 0xff, 0xff, 0xff, 0x1a, 0x0d, 0x50, 0xff, 0xff, 0xff, 0x22
/* DW_CFA_expression: r13 (r13) (DW_OP_lit8; DW_OP_minus; DW_OP_const4s: -64; DW_OP_and; DW_OP_const4s: -184; DW_OP_plus) */
.cfi_escape 0x10, 0x0d, 0x0e, 0x38, 0x1c, 0x0d, 0xc0, 0xff, 0xff, 0xff, 0x1a, 0x0d, 0x48, 0xff, 0xff, 0xff, 0x22
/* DW_CFA_expression: r14 (r14) (DW_OP_lit8; DW_OP_minus; DW_OP_const4s: -64; DW_OP_and; DW_OP_const4s: -192; DW_OP_plus) */
.cfi_escape 0x10, 0x0e, 0x0e, 0x38, 0x1c, 0x0d, 0xc0, 0xff, 0xff, 0xff, 0x1a, 0x0d, 0x40, 0xff, 0xff, 0xff, 0x22
# LOE rbx r12 r13 r14 r15 zmm1
/* Scalar math function call
* to process special input
*/
L(SCALAR_MATH_CALL):
movl %r12d, %r14d
movsd 64(%rsp,%r14,8), %xmm0
call exp2@PLT
# LOE rbx r14 r15 r12d r13d xmm0
movsd %xmm0, 128(%rsp,%r14,8)
/* Process special inputs in loop */
jmp L(SPECIAL_VALUES_LOOP)
# LOE rbx r15 r12d r13d
END(_ZGVeN8v_exp2_skx)
.section .rodata, "a"
.align 64
#ifdef __svml_dexp2_data_internal_avx512_typedef
typedef unsigned int VUINT32;
typedef struct {
__declspec(align(64)) VUINT32 Frac_PowerD0[16][2];
__declspec(align(64)) VUINT32 poly_coeff1[8][2];
__declspec(align(64)) VUINT32 poly_coeff2[8][2];
__declspec(align(64)) VUINT32 poly_coeff3[8][2];
__declspec(align(64)) VUINT32 poly_coeff4[8][2];
__declspec(align(64)) VUINT32 poly_coeff5[8][2];
__declspec(align(64)) VUINT32 poly_coeff6[8][2];
__declspec(align(64)) VUINT32 add_const[8][2];
__declspec(align(64)) VUINT32 AbsMask[8][2];
__declspec(align(64)) VUINT32 Threshold[8][2];
__declspec(align(64)) VUINT32 _lIndexMask[8][2];
} __svml_dexp2_data_internal_avx512;
#endif
__svml_dexp2_data_internal_avx512:
/*== Frac_PowerD0 ==*/
.quad 0x3FF0000000000000
.quad 0x3FF0B5586CF9890F
.quad 0x3FF172B83C7D517B
.quad 0x3FF2387A6E756238
.quad 0x3FF306FE0A31B715
.quad 0x3FF3DEA64C123422
.quad 0x3FF4BFDAD5362A27
.quad 0x3FF5AB07DD485429
.quad 0x3FF6A09E667F3BCD
.quad 0x3FF7A11473EB0187
.quad 0x3FF8ACE5422AA0DB
.quad 0x3FF9C49182A3F090
.quad 0x3FFAE89F995AD3AD
.quad 0x3FFC199BDD85529C
.quad 0x3FFD5818DCFBA487
.quad 0x3FFEA4AFA2A490DA
.align 64
.quad 0x3FE62E42FEFA398B, 0x3FE62E42FEFA398B, 0x3FE62E42FEFA398B, 0x3FE62E42FEFA398B, 0x3FE62E42FEFA398B, 0x3FE62E42FEFA398B, 0x3FE62E42FEFA398B, 0x3FE62E42FEFA398B /*== poly_coeff1 ==*/
.align 64
.quad 0x3FCEBFBDFF84555A, 0x3FCEBFBDFF84555A, 0x3FCEBFBDFF84555A, 0x3FCEBFBDFF84555A, 0x3FCEBFBDFF84555A, 0x3FCEBFBDFF84555A, 0x3FCEBFBDFF84555A, 0x3FCEBFBDFF84555A /*== poly_coeff2 ==*/
.align 64
.quad 0x3FAC6B08D4AD86B9, 0x3FAC6B08D4AD86B9, 0x3FAC6B08D4AD86B9, 0x3FAC6B08D4AD86B9, 0x3FAC6B08D4AD86B9, 0x3FAC6B08D4AD86B9, 0x3FAC6B08D4AD86B9, 0x3FAC6B08D4AD86B9 /*== poly_coeff3 ==*/
.align 64
.quad 0x3F83B2AD1B172252, 0x3F83B2AD1B172252, 0x3F83B2AD1B172252, 0x3F83B2AD1B172252, 0x3F83B2AD1B172252, 0x3F83B2AD1B172252, 0x3F83B2AD1B172252, 0x3F83B2AD1B172252 /*== poly_coeff4 ==*/
.align 64
.quad 0x3F55D7472713CD19, 0x3F55D7472713CD19, 0x3F55D7472713CD19, 0x3F55D7472713CD19, 0x3F55D7472713CD19, 0x3F55D7472713CD19, 0x3F55D7472713CD19, 0x3F55D7472713CD19 /*== poly_coeff5 ==*/
.align 64
.quad 0x3F24A1D7F526371B, 0x3F24A1D7F526371B, 0x3F24A1D7F526371B, 0x3F24A1D7F526371B, 0x3F24A1D7F526371B, 0x3F24A1D7F526371B, 0x3F24A1D7F526371B, 0x3F24A1D7F526371B /*== poly_coeff6 ==*/
.align 64
.quad 0x42F8000000000000, 0x42F8000000000000, 0x42F8000000000000, 0x42F8000000000000, 0x42F8000000000000, 0x42F8000000000000, 0x42F8000000000000, 0x42F8000000000000 /* add_const */
.align 64
.quad 0x7fffffffffffffff, 0x7fffffffffffffff, 0x7fffffffffffffff, 0x7fffffffffffffff, 0x7fffffffffffffff, 0x7fffffffffffffff, 0x7fffffffffffffff, 0x7fffffffffffffff /* AbsMask */
.align 64
.quad 0x408fefff00000000, 0x408fefff00000000, 0x408fefff00000000, 0x408fefff00000000, 0x408fefff00000000, 0x408fefff00000000, 0x408fefff00000000, 0x408fefff00000000 /* Threshold */
.align 64
.quad 0x000000000000000F, 0x000000000000000F, 0x000000000000000F, 0x000000000000000F, 0x000000000000000F, 0x000000000000000F, 0x000000000000000F, 0x000000000000000F /* _lIndexMask */
.align 64
.type __svml_dexp2_data_internal_avx512,@object
.size __svml_dexp2_data_internal_avx512,.-__svml_dexp2_data_internal_avx512


@@ -0,0 +1,20 @@
/* AVX2 version of vectorized exp2f.
Copyright (C) 2021 Free Software Foundation, Inc.
This file is part of the GNU C Library.
The GNU C Library is free software; you can redistribute it and/or
modify it under the terms of the GNU Lesser General Public
License as published by the Free Software Foundation; either
version 2.1 of the License, or (at your option) any later version.
The GNU C Library is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
Lesser General Public License for more details.
You should have received a copy of the GNU Lesser General Public
License along with the GNU C Library; if not, see
<https://www.gnu.org/licenses/>. */
#define _ZGVeN16v_exp2f _ZGVeN16v_exp2f_avx2_wrapper
#include "../svml_s_exp2f16_core.S"


@@ -0,0 +1,28 @@
/* Multiple versions of vectorized exp2f, vector length is 16.
Copyright (C) 2021 Free Software Foundation, Inc.
This file is part of the GNU C Library.
The GNU C Library is free software; you can redistribute it and/or
modify it under the terms of the GNU Lesser General Public
License as published by the Free Software Foundation; either
version 2.1 of the License, or (at your option) any later version.
The GNU C Library is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
Lesser General Public License for more details.
You should have received a copy of the GNU Lesser General Public
License along with the GNU C Library; if not, see
<https://www.gnu.org/licenses/>. */
#define SYMBOL_NAME _ZGVeN16v_exp2f
#include "ifunc-mathvec-avx512-skx.h"
libc_ifunc_redirected (REDIRECT_NAME, SYMBOL_NAME, IFUNC_SELECTOR ());
#ifdef SHARED
__hidden_ver1 (_ZGVeN16v_exp2f, __GI__ZGVeN16v_exp2f,
__redirect__ZGVeN16v_exp2f)
__attribute__ ((visibility ("hidden")));
#endif


@@ -0,0 +1,271 @@
/* Function exp2f vectorized with AVX-512.
Copyright (C) 2021 Free Software Foundation, Inc.
This file is part of the GNU C Library.
The GNU C Library is free software; you can redistribute it and/or
modify it under the terms of the GNU Lesser General Public
License as published by the Free Software Foundation; either
version 2.1 of the License, or (at your option) any later version.
The GNU C Library is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
Lesser General Public License for more details.
You should have received a copy of the GNU Lesser General Public
License along with the GNU C Library; if not, see
<https://www.gnu.org/licenses/>. */
/*
* ALGORITHM DESCRIPTION:
*
* Single precision mantissa represented as: 1.b1b2b3 ... b23
* Constant for single precision: S = 2^19 x 1.5
*
* 2^X = 2^Xo x 2^{X-Xo}
* 2^X = 2^K x 2^fo x 2^{X-Xo}
* 2^X = 2^K x 2^fo x 2^r
*
* 2^K --> Manual scaling
* 2^fo --> Table lookup
* r --> 1 + poly (r = X - Xo)
*
* Xo = K + fo
* Xo = K + 0.x1x2x3x4
*
* r = X - Xo
* = Vreduce(X, imm)
* = X - VRndScale(X, imm), where Xo = VRndScale(X, imm)
*
* Rnd(S + X) = S + Xo, where S is selected as S = 2^19 x 1.5
* S + X = S + floor(X) + 0.x1x2x3x4
* Rnd(S + X) = Rnd(2^19 x 1.5 + X)
* (Note: 2^exp x 1.b1b2b3 ... b23, 2^{exp-23} = 2^-4 for exp=19)
*
* exp2(x) = 2^K x 2^fo x (1 + poly(r)), where 2^r = 1 + poly(r)
*
* Scale back:
* dest = src1 x 2^floor(src2)
*
*
*/
/* Offsets for data table __svml_sexp2_data_internal_avx512
*/
#define Frac_PowerS0 0
#define poly_coeff1 64
#define poly_coeff2 128
#define poly_coeff3 192
#define add_const 256
#define AbsMask 320
#define Threshold 384
#include <sysdep.h>
.text
.section .text.evex512,"ax",@progbits
ENTRY(_ZGVeN16v_exp2f_skx)
pushq %rbp
cfi_def_cfa_offset(16)
movq %rsp, %rbp
cfi_def_cfa(6, 16)
cfi_offset(6, -16)
andq $-64, %rsp
subq $192, %rsp
vmovups add_const+__svml_sexp2_data_internal_avx512(%rip), %zmm3
/*
* Reduced argument
* where VREDUCE is available
*/
vreduceps $65, {sae}, %zmm0, %zmm6
vmovups poly_coeff3+__svml_sexp2_data_internal_avx512(%rip), %zmm5
vmovups poly_coeff2+__svml_sexp2_data_internal_avx512(%rip), %zmm10
vmovups Threshold+__svml_sexp2_data_internal_avx512(%rip), %zmm2
/*
*
* HA
* Variables and constants
* Load constants and vector(s)
*/
vmovups poly_coeff1+__svml_sexp2_data_internal_avx512(%rip), %zmm7
/*
* Integer form of K+0.b1b2b3b4 in lower bits - call K_plus_f0
* Mantissa of normalized single precision FP: 1.b1b2...b23
*/
vaddps {rd-sae}, %zmm3, %zmm0, %zmm4
vandps AbsMask+__svml_sexp2_data_internal_avx512(%rip), %zmm0, %zmm1
/* c3*r + c2 */
vfmadd231ps {rn-sae}, %zmm6, %zmm5, %zmm10
vcmpps $30, {sae}, %zmm2, %zmm1, %k0
/* c3*r^2 + c2*r + c1 */
vfmadd213ps {rn-sae}, %zmm7, %zmm6, %zmm10
/* Table value: 2^(0.b1b2b3b4) */
vpermps __svml_sexp2_data_internal_avx512(%rip), %zmm4, %zmm9
kmovw %k0, %edx
/* T*r */
vmulps {rn-sae}, %zmm6, %zmm9, %zmm8
/* T + (T*r*(c3*r^2 + c2*r + c1)) */
vfmadd213ps {rn-sae}, %zmm9, %zmm8, %zmm10
/* Scaling placed at the end to avoid accuracy loss when T*r*scale underflows */
vscalefps {rn-sae}, %zmm0, %zmm10, %zmm1
testl %edx, %edx
/* Go to special inputs processing branch */
jne L(SPECIAL_VALUES_BRANCH)
# LOE rbx r12 r13 r14 r15 edx zmm0 zmm1
/* Restore registers
* and exit the function
*/
L(EXIT):
vmovaps %zmm1, %zmm0
movq %rbp, %rsp
popq %rbp
cfi_def_cfa(7, 8)
cfi_restore(6)
ret
cfi_def_cfa(6, 16)
cfi_offset(6, -16)
/* Branch to process
* special inputs
*/
L(SPECIAL_VALUES_BRANCH):
vmovups %zmm0, 64(%rsp)
vmovups %zmm1, 128(%rsp)
# LOE rbx r12 r13 r14 r15 edx zmm1
xorl %eax, %eax
# LOE rbx r12 r13 r14 r15 eax edx
vzeroupper
movq %r12, 16(%rsp)
/* DW_CFA_expression: r12 (r12) (DW_OP_lit8; DW_OP_minus; DW_OP_const4s: -64; DW_OP_and; DW_OP_const4s: -176; DW_OP_plus) */
.cfi_escape 0x10, 0x0c, 0x0e, 0x38, 0x1c, 0x0d, 0xc0, 0xff, 0xff, 0xff, 0x1a, 0x0d, 0x50, 0xff, 0xff, 0xff, 0x22
movl %eax, %r12d
movq %r13, 8(%rsp)
/* DW_CFA_expression: r13 (r13) (DW_OP_lit8; DW_OP_minus; DW_OP_const4s: -64; DW_OP_and; DW_OP_const4s: -184; DW_OP_plus) */
.cfi_escape 0x10, 0x0d, 0x0e, 0x38, 0x1c, 0x0d, 0xc0, 0xff, 0xff, 0xff, 0x1a, 0x0d, 0x48, 0xff, 0xff, 0xff, 0x22
movl %edx, %r13d
movq %r14, (%rsp)
/* DW_CFA_expression: r14 (r14) (DW_OP_lit8; DW_OP_minus; DW_OP_const4s: -64; DW_OP_and; DW_OP_const4s: -192; DW_OP_plus) */
.cfi_escape 0x10, 0x0e, 0x0e, 0x38, 0x1c, 0x0d, 0xc0, 0xff, 0xff, 0xff, 0x1a, 0x0d, 0x40, 0xff, 0xff, 0xff, 0x22
# LOE rbx r15 r12d r13d
/* Range mask
* bits check
*/
L(RANGEMASK_CHECK):
btl %r12d, %r13d
/* Call scalar math function */
jc L(SCALAR_MATH_CALL)
# LOE rbx r15 r12d r13d
/* Special inputs
* processing loop
*/
L(SPECIAL_VALUES_LOOP):
incl %r12d
cmpl $16, %r12d
/* Check bits in range mask */
jl L(RANGEMASK_CHECK)
# LOE rbx r15 r12d r13d
movq 16(%rsp), %r12
cfi_restore(12)
movq 8(%rsp), %r13
cfi_restore(13)
movq (%rsp), %r14
cfi_restore(14)
vmovups 128(%rsp), %zmm1
/* Go to exit */
jmp L(EXIT)
/* DW_CFA_expression: r12 (r12) (DW_OP_lit8; DW_OP_minus; DW_OP_const4s: -64; DW_OP_and; DW_OP_const4s: -176; DW_OP_plus) */
.cfi_escape 0x10, 0x0c, 0x0e, 0x38, 0x1c, 0x0d, 0xc0, 0xff, 0xff, 0xff, 0x1a, 0x0d, 0x50, 0xff, 0xff, 0xff, 0x22
/* DW_CFA_expression: r13 (r13) (DW_OP_lit8; DW_OP_minus; DW_OP_const4s: -64; DW_OP_and; DW_OP_const4s: -184; DW_OP_plus) */
.cfi_escape 0x10, 0x0d, 0x0e, 0x38, 0x1c, 0x0d, 0xc0, 0xff, 0xff, 0xff, 0x1a, 0x0d, 0x48, 0xff, 0xff, 0xff, 0x22
/* DW_CFA_expression: r14 (r14) (DW_OP_lit8; DW_OP_minus; DW_OP_const4s: -64; DW_OP_and; DW_OP_const4s: -192; DW_OP_plus) */
.cfi_escape 0x10, 0x0e, 0x0e, 0x38, 0x1c, 0x0d, 0xc0, 0xff, 0xff, 0xff, 0x1a, 0x0d, 0x40, 0xff, 0xff, 0xff, 0x22
# LOE rbx r12 r13 r14 r15 zmm1
/* Scalar math function call
* to process special input
*/
L(SCALAR_MATH_CALL):
movl %r12d, %r14d
movss 64(%rsp,%r14,4), %xmm0
call exp2f@PLT
# LOE rbx r14 r15 r12d r13d xmm0
movss %xmm0, 128(%rsp,%r14,4)
/* Process special inputs in loop */
jmp L(SPECIAL_VALUES_LOOP)
# LOE rbx r15 r12d r13d
END(_ZGVeN16v_exp2f_skx)
.section .rodata, "a"
.align 64
#ifdef __svml_sexp2_data_internal_avx512_typedef
typedef unsigned int VUINT32;
typedef struct {
__declspec(align(64)) VUINT32 Frac_PowerS0[16][1];
__declspec(align(64)) VUINT32 poly_coeff1[16][1];
__declspec(align(64)) VUINT32 poly_coeff2[16][1];
__declspec(align(64)) VUINT32 poly_coeff3[16][1];
__declspec(align(64)) VUINT32 add_const[16][1];
__declspec(align(64)) VUINT32 AbsMask[16][1];
__declspec(align(64)) VUINT32 Threshold[16][1];
} __svml_sexp2_data_internal_avx512;
#endif
__svml_sexp2_data_internal_avx512:
/*== Frac_PowerS0 ==*/
.long 0x3F800000
.long 0x3F85AAC3
.long 0x3F8B95C2
.long 0x3F91C3D3
.long 0x3F9837F0
.long 0x3F9EF532
.long 0x3FA5FED7
.long 0x3FAD583F
.long 0x3FB504F3
.long 0x3FBD08A4
.long 0x3FC5672A
.long 0x3FCE248C
.long 0x3FD744FD
.long 0x3FE0CCDF
.long 0x3FEAC0C7
.long 0x3FF5257D
.align 64
.long 0x3F317222, 0x3F317222, 0x3F317222, 0x3F317222, 0x3F317222, 0x3F317222, 0x3F317222, 0x3F317222, 0x3F317222, 0x3F317222, 0x3F317222, 0x3F317222, 0x3F317222, 0x3F317222, 0x3F317222, 0x3F317222 /*== poly_coeff1 ==*/
.align 64
.long 0x3E75F16B, 0x3E75F16B, 0x3E75F16B, 0x3E75F16B, 0x3E75F16B, 0x3E75F16B, 0x3E75F16B, 0x3E75F16B, 0x3E75F16B, 0x3E75F16B, 0x3E75F16B, 0x3E75F16B, 0x3E75F16B, 0x3E75F16B, 0x3E75F16B, 0x3E75F16B /*== poly_coeff2 ==*/
.align 64
.long 0x3D6854CA, 0x3D6854CA, 0x3D6854CA, 0x3D6854CA, 0x3D6854CA, 0x3D6854CA, 0x3D6854CA, 0x3D6854CA, 0x3D6854CA, 0x3D6854CA, 0x3D6854CA, 0x3D6854CA, 0x3D6854CA, 0x3D6854CA, 0x3D6854CA, 0x3D6854CA /*== poly_coeff3 ==*/
.align 64
.long 0x49400000, 0x49400000, 0x49400000, 0x49400000, 0x49400000, 0x49400000, 0x49400000, 0x49400000, 0x49400000, 0x49400000, 0x49400000, 0x49400000, 0x49400000, 0x49400000, 0x49400000, 0x49400000 /* add_const */
.align 64
.long 0x7fffffff, 0x7fffffff, 0x7fffffff, 0x7fffffff, 0x7fffffff, 0x7fffffff, 0x7fffffff, 0x7fffffff, 0x7fffffff, 0x7fffffff, 0x7fffffff, 0x7fffffff, 0x7fffffff, 0x7fffffff, 0x7fffffff, 0x7fffffff /* AbsMask */
.align 64
.long 0x42fc0000, 0x42fc0000, 0x42fc0000, 0x42fc0000, 0x42fc0000, 0x42fc0000, 0x42fc0000, 0x42fc0000, 0x42fc0000, 0x42fc0000, 0x42fc0000, 0x42fc0000, 0x42fc0000, 0x42fc0000, 0x42fc0000, 0x42fc0000 /* Threshold=126.0 */
.align 64
.type __svml_sexp2_data_internal_avx512,@object
.size __svml_sexp2_data_internal_avx512,.-__svml_sexp2_data_internal_avx512


@@ -0,0 +1,20 @@
/* SSE2 version of vectorized exp2f, vector length is 4.
Copyright (C) 2021 Free Software Foundation, Inc.
This file is part of the GNU C Library.
The GNU C Library is free software; you can redistribute it and/or
modify it under the terms of the GNU Lesser General Public
License as published by the Free Software Foundation; either
version 2.1 of the License, or (at your option) any later version.
The GNU C Library is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
Lesser General Public License for more details.
You should have received a copy of the GNU Lesser General Public
License along with the GNU C Library; if not, see
<https://www.gnu.org/licenses/>. */
#define _ZGVbN4v_exp2f _ZGVbN4v_exp2f_sse2
#include "../svml_s_exp2f4_core.S"


@@ -0,0 +1,28 @@
/* Multiple versions of vectorized exp2f, vector length is 4.
Copyright (C) 2021 Free Software Foundation, Inc.
This file is part of the GNU C Library.
The GNU C Library is free software; you can redistribute it and/or
modify it under the terms of the GNU Lesser General Public
License as published by the Free Software Foundation; either
version 2.1 of the License, or (at your option) any later version.
The GNU C Library is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
Lesser General Public License for more details.
You should have received a copy of the GNU Lesser General Public
License along with the GNU C Library; if not, see
<https://www.gnu.org/licenses/>. */
#define SYMBOL_NAME _ZGVbN4v_exp2f
#include "ifunc-mathvec-sse4_1.h"
libc_ifunc_redirected (REDIRECT_NAME, SYMBOL_NAME, IFUNC_SELECTOR ());
#ifdef SHARED
__hidden_ver1 (_ZGVbN4v_exp2f, __GI__ZGVbN4v_exp2f,
__redirect__ZGVbN4v_exp2f)
__attribute__ ((visibility ("hidden")));
#endif


@@ -0,0 +1,238 @@
/* Function exp2f vectorized with SSE4.
Copyright (C) 2021 Free Software Foundation, Inc.
This file is part of the GNU C Library.
The GNU C Library is free software; you can redistribute it and/or
modify it under the terms of the GNU Lesser General Public
License as published by the Free Software Foundation; either
version 2.1 of the License, or (at your option) any later version.
The GNU C Library is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
Lesser General Public License for more details.
You should have received a copy of the GNU Lesser General Public
License along with the GNU C Library; if not, see
<https://www.gnu.org/licenses/>. */
/*
* ALGORITHM DESCRIPTION:
*
* exp2(x) = 2^n * T[j] * (1 + P(y))
* where
* x = m*(1/K) + y, y in [-1/K..1/K]
* m = n*K + j, m,n,j - signed integer, j in [-K/2..K/2]
*
* values of 2^j/K are tabulated
*
* P(y) is a minimax polynomial approximation of exp2(x)-1
* on small interval [-1/K..1/K]
*
* Special cases:
*
* exp2(NaN) = NaN
* exp2(+INF) = +INF
* exp2(-INF) = 0
* exp2(x) = 1 for subnormals
* For IEEE float
* if x >= 128.0 then exp2f(x) overflow
* if x < -151.0 then exp2f(x) underflow
*
*/
/* Offsets for data table __svml_sexp2_data_internal
*/
#define _sShifter 0
#define _sPC0 16
#define _sPC1 32
#define _sPC2 48
#define _sPC3 64
#define _sPC4 80
#define _sPC5 96
#define _sPC6 112
#define _iAbsMask 128
#define _iDomainRange 144
#include <sysdep.h>
.text
.section .text.sse4,"ax",@progbits
ENTRY(_ZGVbN4v_exp2f_sse4)
subq $72, %rsp
cfi_def_cfa_offset(80)
/* Check for overflow/underflow */
movups __svml_sexp2_data_internal(%rip), %xmm1
/* Implementation */
movaps %xmm1, %xmm5
/* Polynomial */
movups _sPC6+__svml_sexp2_data_internal(%rip), %xmm4
addps %xmm0, %xmm5
movaps %xmm5, %xmm3
/* 2^N */
pslld $23, %xmm5
/* Check for overflow/underflow */
movdqu _iAbsMask+__svml_sexp2_data_internal(%rip), %xmm2
subps %xmm1, %xmm3
/* R */
movaps %xmm0, %xmm1
pand %xmm0, %xmm2
pcmpgtd _iDomainRange+__svml_sexp2_data_internal(%rip), %xmm2
subps %xmm3, %xmm1
movmskps %xmm2, %edx
mulps %xmm1, %xmm4
addps _sPC5+__svml_sexp2_data_internal(%rip), %xmm4
mulps %xmm1, %xmm4
addps _sPC4+__svml_sexp2_data_internal(%rip), %xmm4
mulps %xmm1, %xmm4
addps _sPC3+__svml_sexp2_data_internal(%rip), %xmm4
mulps %xmm1, %xmm4
addps _sPC2+__svml_sexp2_data_internal(%rip), %xmm4
mulps %xmm1, %xmm4
addps _sPC1+__svml_sexp2_data_internal(%rip), %xmm4
mulps %xmm4, %xmm1
addps _sPC0+__svml_sexp2_data_internal(%rip), %xmm1
/* Reconstruction */
paddd %xmm5, %xmm1
testl %edx, %edx
/* Go to special inputs processing branch */
jne L(SPECIAL_VALUES_BRANCH)
# LOE rbx rbp r12 r13 r14 r15 edx xmm0 xmm1
/* Restore registers
* and exit the function
*/
L(EXIT):
movaps %xmm1, %xmm0
addq $72, %rsp
cfi_def_cfa_offset(8)
ret
cfi_def_cfa_offset(80)
/* Branch to process
* special inputs
*/
L(SPECIAL_VALUES_BRANCH):
movups %xmm0, 32(%rsp)
movups %xmm1, 48(%rsp)
# LOE rbx rbp r12 r13 r14 r15 edx
xorl %eax, %eax
movq %r12, 16(%rsp)
cfi_offset(12, -64)
movl %eax, %r12d
movq %r13, 8(%rsp)
cfi_offset(13, -72)
movl %edx, %r13d
movq %r14, (%rsp)
cfi_offset(14, -80)
# LOE rbx rbp r15 r12d r13d
/* Range mask
* bits check
*/
L(RANGEMASK_CHECK):
btl %r12d, %r13d
/* Call scalar math function */
jc L(SCALAR_MATH_CALL)
# LOE rbx rbp r15 r12d r13d
/* Special inputs
* processing loop
*/
L(SPECIAL_VALUES_LOOP):
incl %r12d
cmpl $4, %r12d
/* Check bits in range mask */
jl L(RANGEMASK_CHECK)
# LOE rbx rbp r15 r12d r13d
movq 16(%rsp), %r12
cfi_restore(12)
movq 8(%rsp), %r13
cfi_restore(13)
movq (%rsp), %r14
cfi_restore(14)
movups 48(%rsp), %xmm1
/* Go to exit */
jmp L(EXIT)
cfi_offset(12, -64)
cfi_offset(13, -72)
cfi_offset(14, -80)
# LOE rbx rbp r12 r13 r14 r15 xmm1
/* Scalar math function call
* to process special input
*/
L(SCALAR_MATH_CALL):
movl %r12d, %r14d
movss 32(%rsp,%r14,4), %xmm0
call exp2f@PLT
# LOE rbx rbp r14 r15 r12d r13d xmm0
movss %xmm0, 48(%rsp,%r14,4)
/* Process special inputs in loop */
jmp L(SPECIAL_VALUES_LOOP)
# LOE rbx rbp r15 r12d r13d
END(_ZGVbN4v_exp2f_sse4)
.section .rodata, "a"
.align 16
#ifdef __svml_sexp2_data_internal_typedef
typedef unsigned int VUINT32;
typedef struct
{
__declspec(align(16)) VUINT32 _sShifter[4][1];
__declspec(align(16)) VUINT32 _sPC0[4][1];
__declspec(align(16)) VUINT32 _sPC1[4][1];
__declspec(align(16)) VUINT32 _sPC2[4][1];
__declspec(align(16)) VUINT32 _sPC3[4][1];
__declspec(align(16)) VUINT32 _sPC4[4][1];
__declspec(align(16)) VUINT32 _sPC5[4][1];
__declspec(align(16)) VUINT32 _sPC6[4][1];
__declspec(align(16)) VUINT32 _iAbsMask[4][1];
__declspec(align(16)) VUINT32 _iDomainRange[4][1];
} __svml_sexp2_data_internal;
#endif
__svml_sexp2_data_internal:
.long 0x4b400000, 0x4b400000, 0x4b400000, 0x4b400000 /* _sShifter */
.align 16
.long 0x3F800000, 0x3F800000, 0x3F800000, 0x3F800000 /* _sPC0 */
.align 16
.long 0x3f317218, 0x3f317218, 0x3f317218, 0x3f317218 /* _sPC1 */
.align 16
.long 0x3e75fdef, 0x3e75fdef, 0x3e75fdef, 0x3e75fdef /* _sPC2 */
.align 16
.long 0x3d6357cf, 0x3d6357cf, 0x3d6357cf, 0x3d6357cf /* _sPC3 */
.align 16
.long 0x3c1d962c, 0x3c1d962c, 0x3c1d962c, 0x3c1d962c /* _sPC4 */
.align 16
.long 0x3aaf7a51, 0x3aaf7a51, 0x3aaf7a51, 0x3aaf7a51 /* _sPC5 */
.align 16
.long 0x39213c8c, 0x39213c8c, 0x39213c8c, 0x39213c8c /* _sPC6 */
//common
.align 16
.long 0x7fffffff, 0x7fffffff, 0x7fffffff, 0x7fffffff /* _iAbsMask */
.align 16
.long 0x42fc0000, 0x42fc0000, 0x42fc0000, 0x42fc0000 /* _iDomainRange=126.0 */
.align 16
.type __svml_sexp2_data_internal,@object
.size __svml_sexp2_data_internal,.-__svml_sexp2_data_internal
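
Editorial note (not part of the patch): the header comment above describes the general table-based scheme, but the data table of this SSE4 version holds only polynomial coefficients, so the reduction is to a whole integer N (effectively K = 1): round x to the nearest integer with the _sShifter constant, evaluate a degree-6 polynomial in r = x - N with |r| <= 0.5, and scale by 2^N with an integer add into the exponent field (the paddd at the end). A minimal scalar C sketch of that main path, using the bit patterns from the table above; it is illustrative only, has no special-case handling, and is meaningful only for |x| <= 126 (the _iDomainRange check):

#include <stdint.h>
#include <string.h>

/* Bit patterns of _sPC0.._sPC6 from __svml_sexp2_data_internal.  */
static const uint32_t pc[7] = {
  0x3F800000, 0x3f317218, 0x3e75fdef, 0x3d6357cf,
  0x3c1d962c, 0x3aaf7a51, 0x39213c8c
};

static float asfloat (uint32_t u) { float f; memcpy (&f, &u, 4); return f; }
static uint32_t asuint (float f) { uint32_t u; memcpy (&u, &f, 4); return u; }

/* Scalar sketch of the vector fast path (hypothetical helper name).  */
static float
exp2f_main_path (float x)
{
  float shifter = asfloat (0x4b400000);   /* _sShifter = 1.5 * 2^23      */
  float s = x + shifter;                  /* low bits of s now hold N    */
  float n = s - shifter;                  /* N = round-to-nearest (x)    */
  float r = x - n;                        /* reduced argument, |r| <= 0.5 */

  /* Horner evaluation of the degree-6 polynomial approximating 2^r.  */
  float p = asfloat (pc[6]);
  for (int i = 5; i >= 0; i--)
    p = p * r + asfloat (pc[i]);

  /* Reconstruction: adding (bits of s) << 23 to the bits of p adds N to
     p's exponent field, i.e. multiplies by 2^N, matching the paddd.  */
  return asfloat (asuint (p) + (asuint (s) << 23));
}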

View File

@ -0,0 +1,20 @@
/* SSE version of vectorized exp2f, vector length is 8.
Copyright (C) 2021 Free Software Foundation, Inc.
This file is part of the GNU C Library.
The GNU C Library is free software; you can redistribute it and/or
modify it under the terms of the GNU Lesser General Public
License as published by the Free Software Foundation; either
version 2.1 of the License, or (at your option) any later version.
The GNU C Library is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
Lesser General Public License for more details.
You should have received a copy of the GNU Lesser General Public
License along with the GNU C Library; if not, see
<https://www.gnu.org/licenses/>. */
#define _ZGVdN8v_exp2f _ZGVdN8v_exp2f_sse_wrapper
#include "../svml_s_exp2f8_core.S"

View File

@ -0,0 +1,28 @@
/* Multiple versions of vectorized exp2f, vector length is 8.
Copyright (C) 2021 Free Software Foundation, Inc.
This file is part of the GNU C Library.
The GNU C Library is free software; you can redistribute it and/or
modify it under the terms of the GNU Lesser General Public
License as published by the Free Software Foundation; either
version 2.1 of the License, or (at your option) any later version.
The GNU C Library is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
Lesser General Public License for more details.
You should have received a copy of the GNU Lesser General Public
License along with the GNU C Library; if not, see
<https://www.gnu.org/licenses/>. */
#define SYMBOL_NAME _ZGVdN8v_exp2f
#include "ifunc-mathvec-avx2.h"
libc_ifunc_redirected (REDIRECT_NAME, SYMBOL_NAME, IFUNC_SELECTOR ());
#ifdef SHARED
__hidden_ver1 (_ZGVdN8v_exp2f, __GI__ZGVdN8v_exp2f,
__redirect__ZGVdN8v_exp2f)
__attribute__ ((visibility ("hidden")));
#endif

View File

@ -0,0 +1,245 @@
/* Function exp2f vectorized with AVX2.
Copyright (C) 2021 Free Software Foundation, Inc.
This file is part of the GNU C Library.
The GNU C Library is free software; you can redistribute it and/or
modify it under the terms of the GNU Lesser General Public
License as published by the Free Software Foundation; either
version 2.1 of the License, or (at your option) any later version.
The GNU C Library is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
Lesser General Public License for more details.
You should have received a copy of the GNU Lesser General Public
License along with the GNU C Library; if not, see
<https://www.gnu.org/licenses/>. */
/*
* ALGORITHM DESCRIPTION:
*
* exp2(x) = 2^n * T[j] * (1 + P(y))
* where
* x = m*(1/K) + y, y in [-1/K..1/K]
* m = n*K + j, m,n,j - signed integer, j in [-K/2..K/2]
*
* values of 2^j/K are tabulated
*
* P(y) is a minimax polynomial approximation of exp2(x)-1
* on small interval [-1/K..1/K]
*
* Special cases:
*
* exp2(NaN) = NaN
* exp2(+INF) = +INF
* exp2(-INF) = 0
* exp2(x) = 1 for subnormals
* For IEEE float
* if x >= 128.0 then exp2f(x) overflow
* if x < -151.0 then exp2f(x) underflow
*
*/
/* Offsets for data table __svml_sexp2_data_internal
*/
#define _sShifter 0
#define _sPC0 32
#define _sPC1 64
#define _sPC2 96
#define _sPC3 128
#define _sPC4 160
#define _sPC5 192
#define _sPC6 224
#define _iAbsMask 256
#define _iDomainRange 288
#include <sysdep.h>
.text
.section .text.avx2,"ax",@progbits
ENTRY(_ZGVdN8v_exp2f_avx2)
pushq %rbp
cfi_def_cfa_offset(16)
movq %rsp, %rbp
cfi_def_cfa(6, 16)
cfi_offset(6, -16)
andq $-32, %rsp
subq $96, %rsp
vmovups __svml_sexp2_data_internal(%rip), %ymm1
/* Check for overflow/underflow */
vmovups _sPC6+__svml_sexp2_data_internal(%rip), %ymm7
/* Implementation */
vaddps %ymm1, %ymm0, %ymm6
vsubps %ymm1, %ymm6, %ymm4
/* 2^N */
vpslld $23, %ymm6, %ymm8
/* R */
vsubps %ymm4, %ymm0, %ymm5
/* Polynomial */
vfmadd213ps _sPC5+__svml_sexp2_data_internal(%rip), %ymm5, %ymm7
vfmadd213ps _sPC4+__svml_sexp2_data_internal(%rip), %ymm5, %ymm7
vfmadd213ps _sPC3+__svml_sexp2_data_internal(%rip), %ymm5, %ymm7
vfmadd213ps _sPC2+__svml_sexp2_data_internal(%rip), %ymm5, %ymm7
vfmadd213ps _sPC1+__svml_sexp2_data_internal(%rip), %ymm5, %ymm7
vfmadd213ps _sPC0+__svml_sexp2_data_internal(%rip), %ymm5, %ymm7
/* Check for overflow/underflow */
vandps _iAbsMask+__svml_sexp2_data_internal(%rip), %ymm0, %ymm2
vpcmpgtd _iDomainRange+__svml_sexp2_data_internal(%rip), %ymm2, %ymm3
vmovmskps %ymm3, %edx
/* Reconstruction */
vpaddd %ymm8, %ymm7, %ymm1
testl %edx, %edx
/* Go to special inputs processing branch */
jne L(SPECIAL_VALUES_BRANCH)
# LOE rbx r12 r13 r14 r15 edx ymm0 ymm1
/* Restore registers
* and exit the function
*/
L(EXIT):
vmovaps %ymm1, %ymm0
movq %rbp, %rsp
popq %rbp
cfi_def_cfa(7, 8)
cfi_restore(6)
ret
cfi_def_cfa(6, 16)
cfi_offset(6, -16)
/* Branch to process
* special inputs
*/
L(SPECIAL_VALUES_BRANCH):
vmovups %ymm0, 32(%rsp)
vmovups %ymm1, 64(%rsp)
# LOE rbx r12 r13 r14 r15 edx ymm1
xorl %eax, %eax
# LOE rbx r12 r13 r14 r15 eax edx
vzeroupper
movq %r12, 16(%rsp)
/* DW_CFA_expression: r12 (r12) (DW_OP_lit8; DW_OP_minus; DW_OP_const4s: -32; DW_OP_and; DW_OP_const4s: -80; DW_OP_plus) */
.cfi_escape 0x10, 0x0c, 0x0e, 0x38, 0x1c, 0x0d, 0xe0, 0xff, 0xff, 0xff, 0x1a, 0x0d, 0xb0, 0xff, 0xff, 0xff, 0x22
movl %eax, %r12d
movq %r13, 8(%rsp)
/* DW_CFA_expression: r13 (r13) (DW_OP_lit8; DW_OP_minus; DW_OP_const4s: -32; DW_OP_and; DW_OP_const4s: -88; DW_OP_plus) */
.cfi_escape 0x10, 0x0d, 0x0e, 0x38, 0x1c, 0x0d, 0xe0, 0xff, 0xff, 0xff, 0x1a, 0x0d, 0xa8, 0xff, 0xff, 0xff, 0x22
movl %edx, %r13d
movq %r14, (%rsp)
/* DW_CFA_expression: r14 (r14) (DW_OP_lit8; DW_OP_minus; DW_OP_const4s: -32; DW_OP_and; DW_OP_const4s: -96; DW_OP_plus) */
.cfi_escape 0x10, 0x0e, 0x0e, 0x38, 0x1c, 0x0d, 0xe0, 0xff, 0xff, 0xff, 0x1a, 0x0d, 0xa0, 0xff, 0xff, 0xff, 0x22
# LOE rbx r15 r12d r13d
/* Range mask
* bits check
*/
L(RANGEMASK_CHECK):
btl %r12d, %r13d
/* Call scalar math function */
jc L(SCALAR_MATH_CALL)
# LOE rbx r15 r12d r13d
/* Special inputs
* processing loop
*/
L(SPECIAL_VALUES_LOOP):
incl %r12d
cmpl $8, %r12d
/* Check bits in range mask */
jl L(RANGEMASK_CHECK)
# LOE rbx r15 r12d r13d
movq 16(%rsp), %r12
cfi_restore(12)
movq 8(%rsp), %r13
cfi_restore(13)
movq (%rsp), %r14
cfi_restore(14)
vmovups 64(%rsp), %ymm1
/* Go to exit */
jmp L(EXIT)
/* DW_CFA_expression: r12 (r12) (DW_OP_lit8; DW_OP_minus; DW_OP_const4s: -32; DW_OP_and; DW_OP_const4s: -80; DW_OP_plus) */
.cfi_escape 0x10, 0x0c, 0x0e, 0x38, 0x1c, 0x0d, 0xe0, 0xff, 0xff, 0xff, 0x1a, 0x0d, 0xb0, 0xff, 0xff, 0xff, 0x22
/* DW_CFA_expression: r13 (r13) (DW_OP_lit8; DW_OP_minus; DW_OP_const4s: -32; DW_OP_and; DW_OP_const4s: -88; DW_OP_plus) */
.cfi_escape 0x10, 0x0d, 0x0e, 0x38, 0x1c, 0x0d, 0xe0, 0xff, 0xff, 0xff, 0x1a, 0x0d, 0xa8, 0xff, 0xff, 0xff, 0x22
/* DW_CFA_expression: r14 (r14) (DW_OP_lit8; DW_OP_minus; DW_OP_const4s: -32; DW_OP_and; DW_OP_const4s: -96; DW_OP_plus) */
.cfi_escape 0x10, 0x0e, 0x0e, 0x38, 0x1c, 0x0d, 0xe0, 0xff, 0xff, 0xff, 0x1a, 0x0d, 0xa0, 0xff, 0xff, 0xff, 0x22
# LOE rbx r12 r13 r14 r15 ymm1
/* Scalar math function call
* to process special input
*/
L(SCALAR_MATH_CALL):
movl %r12d, %r14d
movss 32(%rsp,%r14,4), %xmm0
call exp2f@PLT
# LOE rbx r14 r15 r12d r13d xmm0
movss %xmm0, 64(%rsp,%r14,4)
/* Process special inputs in loop */
jmp L(SPECIAL_VALUES_LOOP)
# LOE rbx r15 r12d r13d
END(_ZGVdN8v_exp2f_avx2)
.section .rodata, "a"
.align 32
#ifdef __svml_sexp2_data_internal_typedef
typedef unsigned int VUINT32;
typedef struct
{
__declspec(align(32)) VUINT32 _sShifter[8][1];
__declspec(align(32)) VUINT32 _sPC0[8][1];
__declspec(align(32)) VUINT32 _sPC1[8][1];
__declspec(align(32)) VUINT32 _sPC2[8][1];
__declspec(align(32)) VUINT32 _sPC3[8][1];
__declspec(align(32)) VUINT32 _sPC4[8][1];
__declspec(align(32)) VUINT32 _sPC5[8][1];
__declspec(align(32)) VUINT32 _sPC6[8][1];
__declspec(align(32)) VUINT32 _iAbsMask[8][1];
__declspec(align(32)) VUINT32 _iDomainRange[8][1];
} __svml_sexp2_data_internal;
#endif
__svml_sexp2_data_internal:
.long 0x4b400000, 0x4b400000, 0x4b400000, 0x4b400000, 0x4b400000, 0x4b400000, 0x4b400000, 0x4b400000 /* _sShifter */
.align 32
.long 0x3F800000, 0x3F800000, 0x3F800000, 0x3F800000, 0x3F800000, 0x3F800000, 0x3F800000, 0x3F800000 /* _sPC0 */
.align 32
.long 0x3f317218, 0x3f317218, 0x3f317218, 0x3f317218, 0x3f317218, 0x3f317218, 0x3f317218, 0x3f317218 /* _sPC1 */
.align 32
.long 0x3e75fdef, 0x3e75fdef, 0x3e75fdef, 0x3e75fdef, 0x3e75fdef, 0x3e75fdef, 0x3e75fdef, 0x3e75fdef /* _sPC2 */
.align 32
.long 0x3d6357cf, 0x3d6357cf, 0x3d6357cf, 0x3d6357cf, 0x3d6357cf, 0x3d6357cf, 0x3d6357cf, 0x3d6357cf /* _sPC3 */
.align 32
.long 0x3c1d962c, 0x3c1d962c, 0x3c1d962c, 0x3c1d962c, 0x3c1d962c, 0x3c1d962c, 0x3c1d962c, 0x3c1d962c /* _sPC4 */
.align 32
.long 0x3aaf7a51, 0x3aaf7a51, 0x3aaf7a51, 0x3aaf7a51, 0x3aaf7a51, 0x3aaf7a51, 0x3aaf7a51, 0x3aaf7a51 /* _sPC5 */
.align 32
.long 0x39213c8c, 0x39213c8c, 0x39213c8c, 0x39213c8c, 0x39213c8c, 0x39213c8c, 0x39213c8c, 0x39213c8c /* _sPC6 */
//common
.align 32
.long 0x7fffffff, 0x7fffffff, 0x7fffffff, 0x7fffffff, 0x7fffffff, 0x7fffffff, 0x7fffffff, 0x7fffffff /* _iAbsMask */
.align 32
.long 0x42fc0000, 0x42fc0000, 0x42fc0000, 0x42fc0000, 0x42fc0000, 0x42fc0000, 0x42fc0000, 0x42fc0000 /* _iDomainRange=126.0 */
.align 32
.type __svml_sexp2_data_internal,@object
.size __svml_sexp2_data_internal,.-__svml_sexp2_data_internal
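
Editorial note (not part of the patch): the AVX2 variant performs the same reduction as the SSE4 version but evaluates the polynomial with FMA and uses a vector compare plus movemask to flag lanes that need the scalar exp2f fallback. A hedged intrinsics sketch of that fast path; the function name, BCAST macro, and special_mask out-parameter are inventions for this illustration, and the constants are the bit patterns from the table above (build with -mavx2 -mfma):

#include <immintrin.h>

/* Broadcast a float constant from its IEEE-754 bit pattern.  */
#define BCAST(hex) _mm256_castsi256_ps (_mm256_set1_epi32 (hex))

/* Sketch of the AVX2 fast path; lanes flagged in *special_mask would
   still go through the scalar fallback loop shown in the assembly.  */
static __m256
exp2f8_fast_path (__m256 x, int *special_mask)
{
  __m256 shifter = BCAST (0x4b400000);      /* _sShifter = 1.5 * 2^23 */
  __m256 s = _mm256_add_ps (x, shifter);    /* low bits of s hold N   */
  __m256 n = _mm256_sub_ps (s, shifter);    /* N = nearest int to x   */
  __m256 r = _mm256_sub_ps (x, n);          /* reduced argument       */

  /* Horner evaluation of the degree-6 polynomial with FMA.  */
  __m256 p = BCAST (0x39213c8c);                     /* _sPC6 */
  p = _mm256_fmadd_ps (p, r, BCAST (0x3aaf7a51));    /* _sPC5 */
  p = _mm256_fmadd_ps (p, r, BCAST (0x3c1d962c));    /* _sPC4 */
  p = _mm256_fmadd_ps (p, r, BCAST (0x3d6357cf));    /* _sPC3 */
  p = _mm256_fmadd_ps (p, r, BCAST (0x3e75fdef));    /* _sPC2 */
  p = _mm256_fmadd_ps (p, r, BCAST (0x3f317218));    /* _sPC1 */
  p = _mm256_fmadd_ps (p, r, BCAST (0x3F800000));    /* _sPC0 */

  /* Flag lanes with |x| > 126.0 (_iDomainRange) for the fallback.  */
  __m256i ax = _mm256_and_si256 (_mm256_castps_si256 (x),
                                 _mm256_set1_epi32 (0x7fffffff));
  __m256i big = _mm256_cmpgt_epi32 (ax, _mm256_set1_epi32 (0x42fc0000));
  *special_mask = _mm256_movemask_ps (_mm256_castsi256_ps (big));

  /* Scale by 2^N: add N (from the low bits of s, shifted into the
     exponent field) to the bit pattern of p.  */
  __m256i scale = _mm256_slli_epi32 (_mm256_castps_si256 (s), 23);
  return _mm256_castsi256_ps (_mm256_add_epi32 (_mm256_castps_si256 (p),
                                                scale));
}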

View File

@ -0,0 +1,29 @@
/* Function exp2 vectorized with SSE2.
Copyright (C) 2021 Free Software Foundation, Inc.
This file is part of the GNU C Library.
The GNU C Library is free software; you can redistribute it and/or
modify it under the terms of the GNU Lesser General Public
License as published by the Free Software Foundation; either
version 2.1 of the License, or (at your option) any later version.
The GNU C Library is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
Lesser General Public License for more details.
You should have received a copy of the GNU Lesser General Public
License along with the GNU C Library; if not, see
<https://www.gnu.org/licenses/>. */
#include <sysdep.h>
#include "svml_d_wrapper_impl.h"
.text
ENTRY (_ZGVbN2v_exp2)
WRAPPER_IMPL_SSE2 exp2
END (_ZGVbN2v_exp2)
#ifndef USE_MULTIARCH
libmvec_hidden_def (_ZGVbN2v_exp2)
#endif

View File

@ -0,0 +1,29 @@
/* Function exp2 vectorized with AVX2, wrapper version.
Copyright (C) 2021 Free Software Foundation, Inc.
This file is part of the GNU C Library.
The GNU C Library is free software; you can redistribute it and/or
modify it under the terms of the GNU Lesser General Public
License as published by the Free Software Foundation; either
version 2.1 of the License, or (at your option) any later version.
The GNU C Library is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
Lesser General Public License for more details.
You should have received a copy of the GNU Lesser General Public
License along with the GNU C Library; if not, see
<https://www.gnu.org/licenses/>. */
#include <sysdep.h>
#include "svml_d_wrapper_impl.h"
.text
ENTRY (_ZGVdN4v_exp2)
WRAPPER_IMPL_AVX _ZGVbN2v_exp2
END (_ZGVdN4v_exp2)
#ifndef USE_MULTIARCH
libmvec_hidden_def (_ZGVdN4v_exp2)
#endif

View File

@ -0,0 +1,25 @@
/* Function exp2 vectorized in AVX ISA as wrapper to SSE4 ISA version.
Copyright (C) 2021 Free Software Foundation, Inc.
This file is part of the GNU C Library.
The GNU C Library is free software; you can redistribute it and/or
modify it under the terms of the GNU Lesser General Public
License as published by the Free Software Foundation; either
version 2.1 of the License, or (at your option) any later version.
The GNU C Library is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
Lesser General Public License for more details.
You should have received a copy of the GNU Lesser General Public
License along with the GNU C Library; if not, see
<https://www.gnu.org/licenses/>. */
#include <sysdep.h>
#include "svml_d_wrapper_impl.h"
.text
ENTRY (_ZGVcN4v_exp2)
WRAPPER_IMPL_AVX _ZGVbN2v_exp2
END (_ZGVcN4v_exp2)

View File

@ -0,0 +1,25 @@
/* Function exp2 vectorized with AVX-512, wrapper to AVX2.
Copyright (C) 2021 Free Software Foundation, Inc.
This file is part of the GNU C Library.
The GNU C Library is free software; you can redistribute it and/or
modify it under the terms of the GNU Lesser General Public
License as published by the Free Software Foundation; either
version 2.1 of the License, or (at your option) any later version.
The GNU C Library is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
Lesser General Public License for more details.
You should have received a copy of the GNU Lesser General Public
License along with the GNU C Library; if not, see
<https://www.gnu.org/licenses/>. */
#include <sysdep.h>
#include "svml_d_wrapper_impl.h"
.text
ENTRY (_ZGVeN8v_exp2)
WRAPPER_IMPL_AVX512 _ZGVdN4v_exp2
END (_ZGVeN8v_exp2)

View File

@ -0,0 +1,25 @@
/* Function exp2f vectorized with AVX-512. Wrapper to AVX2 version.
Copyright (C) 2021 Free Software Foundation, Inc.
This file is part of the GNU C Library.
The GNU C Library is free software; you can redistribute it and/or
modify it under the terms of the GNU Lesser General Public
License as published by the Free Software Foundation; either
version 2.1 of the License, or (at your option) any later version.
The GNU C Library is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
Lesser General Public License for more details.
You should have received a copy of the GNU Lesser General Public
License along with the GNU C Library; if not, see
<https://www.gnu.org/licenses/>. */
#include <sysdep.h>
#include "svml_s_wrapper_impl.h"
.text
ENTRY (_ZGVeN16v_exp2f)
WRAPPER_IMPL_AVX512 _ZGVdN8v_exp2f
END (_ZGVeN16v_exp2f)

View File

@ -0,0 +1,29 @@
/* Function exp2f vectorized with SSE2, wrapper version.
Copyright (C) 2021 Free Software Foundation, Inc.
This file is part of the GNU C Library.
The GNU C Library is free software; you can redistribute it and/or
modify it under the terms of the GNU Lesser General Public
License as published by the Free Software Foundation; either
version 2.1 of the License, or (at your option) any later version.
The GNU C Library is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
Lesser General Public License for more details.
You should have received a copy of the GNU Lesser General Public
License along with the GNU C Library; if not, see
<https://www.gnu.org/licenses/>. */
#include <sysdep.h>
#include "svml_s_wrapper_impl.h"
.text
ENTRY (_ZGVbN4v_exp2f)
WRAPPER_IMPL_SSE2 exp2f
END (_ZGVbN4v_exp2f)
#ifndef USE_MULTIARCH
libmvec_hidden_def (_ZGVbN4v_exp2f)
#endif

View File

@ -0,0 +1,29 @@
/* Function exp2f vectorized with AVX2, wrapper version.
Copyright (C) 2021 Free Software Foundation, Inc.
This file is part of the GNU C Library.
The GNU C Library is free software; you can redistribute it and/or
modify it under the terms of the GNU Lesser General Public
License as published by the Free Software Foundation; either
version 2.1 of the License, or (at your option) any later version.
The GNU C Library is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
Lesser General Public License for more details.
You should have received a copy of the GNU Lesser General Public
License along with the GNU C Library; if not, see
<https://www.gnu.org/licenses/>. */
#include <sysdep.h>
#include "svml_s_wrapper_impl.h"
.text
ENTRY (_ZGVdN8v_exp2f)
WRAPPER_IMPL_AVX _ZGVbN4v_exp2f
END (_ZGVdN8v_exp2f)
#ifndef USE_MULTIARCH
libmvec_hidden_def (_ZGVdN8v_exp2f)
#endif

View File

@ -0,0 +1,25 @@
/* Function exp2f vectorized in AVX ISA as wrapper to SSE4 ISA version.
Copyright (C) 2021 Free Software Foundation, Inc.
This file is part of the GNU C Library.
The GNU C Library is free software; you can redistribute it and/or
modify it under the terms of the GNU Lesser General Public
License as published by the Free Software Foundation; either
version 2.1 of the License, or (at your option) any later version.
The GNU C Library is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
Lesser General Public License for more details.
You should have received a copy of the GNU Lesser General Public
License along with the GNU C Library; if not, see
<https://www.gnu.org/licenses/>. */
#include <sysdep.h>
#include "svml_s_wrapper_impl.h"
.text
ENTRY (_ZGVcN8v_exp2f)
WRAPPER_IMPL_AVX _ZGVbN4v_exp2f
END (_ZGVcN8v_exp2f)

View File

@ -0,0 +1 @@
#include "test-double-libmvec-exp2.c"

View File

@ -0,0 +1 @@
#include "test-double-libmvec-exp2.c"

View File

@ -0,0 +1 @@
#include "test-double-libmvec-exp2.c"

View File

@ -0,0 +1,3 @@
#define LIBMVEC_TYPE double
#define LIBMVEC_FUNC exp2
#include "test-vector-abi-arg1.h"

View File

@ -31,6 +31,7 @@ VECTOR_WRAPPER (WRAPPER_NAME (acos), _ZGVbN2v_acos)
VECTOR_WRAPPER (WRAPPER_NAME (atan), _ZGVbN2v_atan)
VECTOR_WRAPPER (WRAPPER_NAME (asin), _ZGVbN2v_asin)
VECTOR_WRAPPER_ff (WRAPPER_NAME (hypot), _ZGVbN2vv_hypot)
VECTOR_WRAPPER (WRAPPER_NAME (exp2), _ZGVbN2v_exp2)
#define VEC_INT_TYPE __m128i

View File

@ -34,6 +34,7 @@ VECTOR_WRAPPER (WRAPPER_NAME (acos), _ZGVdN4v_acos)
VECTOR_WRAPPER (WRAPPER_NAME (atan), _ZGVdN4v_atan)
VECTOR_WRAPPER (WRAPPER_NAME (asin), _ZGVdN4v_asin)
VECTOR_WRAPPER_ff (WRAPPER_NAME (hypot), _ZGVdN4vv_hypot)
VECTOR_WRAPPER (WRAPPER_NAME (exp2), _ZGVdN4v_exp2)
#ifndef __ILP32__
# define VEC_INT_TYPE __m256i

View File

@ -31,6 +31,7 @@ VECTOR_WRAPPER (WRAPPER_NAME (acos), _ZGVcN4v_acos)
VECTOR_WRAPPER (WRAPPER_NAME (atan), _ZGVcN4v_atan)
VECTOR_WRAPPER (WRAPPER_NAME (asin), _ZGVcN4v_asin)
VECTOR_WRAPPER_ff (WRAPPER_NAME (hypot), _ZGVcN4vv_hypot)
VECTOR_WRAPPER (WRAPPER_NAME (exp2), _ZGVcN4v_exp2)
#define VEC_INT_TYPE __m128i

View File

@ -31,6 +31,7 @@ VECTOR_WRAPPER (WRAPPER_NAME (acos), _ZGVeN8v_acos)
VECTOR_WRAPPER (WRAPPER_NAME (atan), _ZGVeN8v_atan)
VECTOR_WRAPPER (WRAPPER_NAME (asin), _ZGVeN8v_asin)
VECTOR_WRAPPER_ff (WRAPPER_NAME (hypot), _ZGVeN8vv_hypot)
VECTOR_WRAPPER (WRAPPER_NAME (exp2), _ZGVeN8v_exp2)
#ifndef __ILP32__
# define VEC_INT_TYPE __m512i

View File

@ -0,0 +1 @@
#include "test-float-libmvec-exp2f.c"

View File

@ -0,0 +1 @@
#include "test-float-libmvec-exp2f.c"

View File

@ -0,0 +1 @@
#include "test-float-libmvec-exp2f.c"

View File

@ -0,0 +1,3 @@
#define LIBMVEC_TYPE float
#define LIBMVEC_FUNC exp2f
#include "test-vector-abi-arg1.h"

View File

@ -31,6 +31,7 @@ VECTOR_WRAPPER (WRAPPER_NAME (acosf), _ZGVeN16v_acosf)
VECTOR_WRAPPER (WRAPPER_NAME (atanf), _ZGVeN16v_atanf)
VECTOR_WRAPPER (WRAPPER_NAME (asinf), _ZGVeN16v_asinf)
VECTOR_WRAPPER_ff (WRAPPER_NAME (hypotf), _ZGVeN16vv_hypotf)
VECTOR_WRAPPER (WRAPPER_NAME (exp2f), _ZGVeN16v_exp2f)
#define VEC_INT_TYPE __m512i

View File

@ -31,6 +31,7 @@ VECTOR_WRAPPER (WRAPPER_NAME (acosf), _ZGVbN4v_acosf)
VECTOR_WRAPPER (WRAPPER_NAME (atanf), _ZGVbN4v_atanf)
VECTOR_WRAPPER (WRAPPER_NAME (asinf), _ZGVbN4v_asinf)
VECTOR_WRAPPER_ff (WRAPPER_NAME (hypotf), _ZGVbN4vv_hypotf)
VECTOR_WRAPPER (WRAPPER_NAME (exp2f), _ZGVbN4v_exp2f)
#define VEC_INT_TYPE __m128i

View File

@ -34,6 +34,7 @@ VECTOR_WRAPPER (WRAPPER_NAME (acosf), _ZGVdN8v_acosf)
VECTOR_WRAPPER (WRAPPER_NAME (atanf), _ZGVdN8v_atanf)
VECTOR_WRAPPER (WRAPPER_NAME (asinf), _ZGVdN8v_asinf)
VECTOR_WRAPPER_ff (WRAPPER_NAME (hypotf), _ZGVdN8vv_hypotf)
VECTOR_WRAPPER (WRAPPER_NAME (exp2f), _ZGVdN8v_exp2f)
/* Redefinition of wrapper to be compatible with _ZGVdN8vvv_sincosf. */
#undef VECTOR_WRAPPER_fFF

View File

@ -31,6 +31,7 @@ VECTOR_WRAPPER (WRAPPER_NAME (acosf), _ZGVcN8v_acosf)
VECTOR_WRAPPER (WRAPPER_NAME (atanf), _ZGVcN8v_atanf)
VECTOR_WRAPPER (WRAPPER_NAME (asinf), _ZGVcN8v_asinf)
VECTOR_WRAPPER_ff (WRAPPER_NAME (hypotf), _ZGVcN8vv_hypotf)
VECTOR_WRAPPER (WRAPPER_NAME (exp2f), _ZGVcN8v_exp2f)
#define VEC_INT_TYPE __m128i