Mirror of https://sourceware.org/git/glibc.git (synced 2024-12-22 02:40:08 +00:00)
Vector powf for x86_64 and tests.
Here is the implementation of vectorized powf containing SSE, AVX, AVX2 and
AVX512 versions according to the Vector ABI
<https://groups.google.com/forum/#!topic/x86-64-abi/LmppCfN1rZ4>.

	* sysdeps/unix/sysv/linux/x86_64/libmvec.abilist: New symbols added.
	* sysdeps/x86/fpu/bits/math-vector.h: Added SIMD declaration and asm
	redirections for powf.
	* sysdeps/x86_64/fpu/Makefile (libmvec-support): Added new files.
	* sysdeps/x86_64/fpu/Versions: New versions added.
	* sysdeps/x86_64/fpu/libm-test-ulps: Regenerated.
	* sysdeps/x86_64/fpu/multiarch/Makefile (libmvec-sysdep_routines):
	Added build of SSE, AVX2 and AVX512 IFUNC versions.
	* sysdeps/x86_64/fpu/svml_s_wrapper_impl.h: Added 2 argument wrappers.
	* sysdeps/x86_64/fpu/multiarch/svml_s_powf16_core.S: New file.
	* sysdeps/x86_64/fpu/multiarch/svml_s_powf16_core_avx512.S: New file.
	* sysdeps/x86_64/fpu/multiarch/svml_s_powf4_core.S: New file.
	* sysdeps/x86_64/fpu/multiarch/svml_s_powf4_core_sse4.S: New file.
	* sysdeps/x86_64/fpu/multiarch/svml_s_powf8_core.S: New file.
	* sysdeps/x86_64/fpu/multiarch/svml_s_powf8_core_avx2.S: New file.
	* sysdeps/x86_64/fpu/svml_s_powf16_core.S: New file.
	* sysdeps/x86_64/fpu/svml_s_powf4_core.S: New file.
	* sysdeps/x86_64/fpu/svml_s_powf8_core.S: New file.
	* sysdeps/x86_64/fpu/svml_s_powf8_core_avx.S: New file.
	* sysdeps/x86_64/fpu/svml_s_powf_data.S: New file.
	* sysdeps/x86_64/fpu/svml_s_powf_data.h: New file.
	* sysdeps/x86_64/fpu/test-float-vlen16-wrappers.c: Vector powf tests.
	* sysdeps/x86_64/fpu/test-float-vlen16.c: Likewise.
	* sysdeps/x86_64/fpu/test-float-vlen4-wrappers.c: Likewise.
	* sysdeps/x86_64/fpu/test-float-vlen4.c: Likewise.
	* sysdeps/x86_64/fpu/test-float-vlen8-avx2-wrappers.c: Likewise.
	* sysdeps/x86_64/fpu/test-float-vlen8-avx2.c: Likewise.
	* sysdeps/x86_64/fpu/test-float-vlen8-wrappers.c: Likewise.
	* sysdeps/x86_64/fpu/test-float-vlen8.c: Likewise.
	* math/test-float-vlen16.h: Fixed 2 argument macro.
	* math/test-float-vlen4.h: Likewise.
	* math/test-float-vlen8.h: Likewise.
	* NEWS: Mention addition of x86_64 vector powf.
commit 8aa92022e2
parent 2f3184451d
36	ChangeLog
@@ -1,3 +1,39 @@
+2015-06-18  Andrew Senkevich  <andrew.senkevich@intel.com>
+
+	* sysdeps/unix/sysv/linux/x86_64/libmvec.abilist: New symbols added.
+	* sysdeps/x86/fpu/bits/math-vector.h: Added SIMD declaration and asm
+	redirections for powf.
+	* sysdeps/x86_64/fpu/Makefile (libmvec-support): Added new files.
+	* sysdeps/x86_64/fpu/Versions: New versions added.
+	* sysdeps/x86_64/fpu/libm-test-ulps: Regenerated.
+	* sysdeps/x86_64/fpu/multiarch/Makefile (libmvec-sysdep_routines):
+	Added build of SSE, AVX2 and AVX512 IFUNC versions.
+	* sysdeps/x86_64/fpu/svml_s_wrapper_impl.h: Added 2 argument wrappers.
+	* sysdeps/x86_64/fpu/multiarch/svml_s_powf16_core.S: New file.
+	* sysdeps/x86_64/fpu/multiarch/svml_s_powf16_core_avx512.S: New file.
+	* sysdeps/x86_64/fpu/multiarch/svml_s_powf4_core.S: New file.
+	* sysdeps/x86_64/fpu/multiarch/svml_s_powf4_core_sse4.S: New file.
+	* sysdeps/x86_64/fpu/multiarch/svml_s_powf8_core.S: New file.
+	* sysdeps/x86_64/fpu/multiarch/svml_s_powf8_core_avx2.S: New file.
+	* sysdeps/x86_64/fpu/svml_s_powf16_core.S: New file.
+	* sysdeps/x86_64/fpu/svml_s_powf4_core.S: New file.
+	* sysdeps/x86_64/fpu/svml_s_powf8_core.S: New file.
+	* sysdeps/x86_64/fpu/svml_s_powf8_core_avx.S: New file.
+	* sysdeps/x86_64/fpu/svml_s_powf_data.S: New file.
+	* sysdeps/x86_64/fpu/svml_s_powf_data.h: New file.
+	* sysdeps/x86_64/fpu/test-float-vlen16-wrappers.c: Vector powf tests.
+	* sysdeps/x86_64/fpu/test-float-vlen16.c: Likewise.
+	* sysdeps/x86_64/fpu/test-float-vlen4-wrappers.c: Likewise.
+	* sysdeps/x86_64/fpu/test-float-vlen4.c: Likewise.
+	* sysdeps/x86_64/fpu/test-float-vlen8-avx2-wrappers.c: Likewise.
+	* sysdeps/x86_64/fpu/test-float-vlen8-avx2.c: Likewise.
+	* sysdeps/x86_64/fpu/test-float-vlen8-wrappers.c: Likewise.
+	* sysdeps/x86_64/fpu/test-float-vlen8.c: Likewise.
+	* math/test-float-vlen16.h: Fixed 2 argument macro.
+	* math/test-float-vlen4.h: Likewise.
+	* math/test-float-vlen8.h: Likewise.
+	* NEWS: Mention addition of x86_64 vector powf.
+
 2015-06-17  Joseph Myers  <joseph@codesourcery.com>
 
 	* math/s_ctanhl.c [LDBL_MANT_DIG == 106] (LDBL_EPSILON): Undefine
2	NEWS
@@ -55,7 +55,7 @@ Version 2.22
   condition in some applications.
 
 * Added vector math library named libmvec with the following vectorized x86_64
-  implementations: cos, cosf, sin, sinf, log, logf, exp, expf, pow.
+  implementations: cos, cosf, sin, sinf, log, logf, exp, expf, pow, powf.
   The library can be disabled with --disable-mathvec. Use of the functions is
   enabled with -fopenmp -ffast-math starting from -O1 for GCC version >= 4.9.0.
   The library is linked in as needed when using -lm (no need to specify -lmvec
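To make the NEWS entry concrete, here is a minimal usage sketch (the file name and the data are illustrative, not part of this commit). Built with `gcc -O2 -fopenmp -ffast-math demo.c -lm` on GCC >= 4.9, the loop below is a candidate for GCC to lower the scalar powf calls to the libmvec entry points added here (e.g. _ZGVdN8vv_powf on an AVX2 machine):

/* demo.c -- hypothetical example, not from this commit.  */
#include <math.h>
#include <stdio.h>

#define N 1024
static float x[N], y[N], r[N];

int main (void)
{
  for (int i = 0; i < N; i++)
    {
      x[i] = 1.0f + i * 0.001f;
      y[i] = 0.5f + i * 0.002f;
    }
#pragma omp simd
  for (int i = 0; i < N; i++)
    r[i] = powf (x[i], y[i]);   /* may become a _ZGV*vv_powf call */
  printf ("%f\n", (double) r[N - 1]);
  return 0;
}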
math/test-float-vlen16.h
@@ -58,8 +58,8 @@ FLOAT scalar_func (FLOAT x) \
 }
 
 // Wrapper from scalar 2 argument function to vector one.
-#define VECTOR_WRAPPER_ff(scalar_func, vector_func) \
-extern VEC_TYPE vector_func (VEC_TYPE); \
+#define VECTOR_WRAPPER_ff(scalar_func, vector_func) \
+extern VEC_TYPE vector_func (VEC_TYPE, VEC_TYPE); \
 FLOAT scalar_func (FLOAT x, FLOAT y) \
 { \
   int i; \

math/test-float-vlen4.h
@@ -58,8 +58,8 @@ FLOAT scalar_func (FLOAT x) \
 }
 
 // Wrapper from scalar 2 argument function to vector one.
-#define VECTOR_WRAPPER_ff(scalar_func, vector_func) \
-extern VEC_TYPE vector_func (VEC_TYPE); \
+#define VECTOR_WRAPPER_ff(scalar_func, vector_func) \
+extern VEC_TYPE vector_func (VEC_TYPE, VEC_TYPE); \
 FLOAT scalar_func (FLOAT x, FLOAT y) \
 { \
   int i; \

math/test-float-vlen8.h
@@ -58,8 +58,8 @@ FLOAT scalar_func (FLOAT x) \
 }
 
 // Wrapper from scalar 2 argument function to vector one.
-#define VECTOR_WRAPPER_ff(scalar_func, vector_func) \
-extern VEC_TYPE vector_func (VEC_TYPE); \
+#define VECTOR_WRAPPER_ff(scalar_func, vector_func) \
+extern VEC_TYPE vector_func (VEC_TYPE, VEC_TYPE); \
 FLOAT scalar_func (FLOAT x, FLOAT y) \
 { \
   int i; \
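The hunks above fix only the extern declaration, which previously took a single vector argument. For orientation, a sketch of how the complete two-argument wrapper plausibly continues; the INIT_VEC_LOOP/TEST_VEC_LOOP helpers and the VEC_LEN placeholder are assumptions modeled on the one-argument VECTOR_WRAPPER, not lines from this diff:

/* Sketch only -- helpers and VEC_LEN are assumed, see above.  */
#define VECTOR_WRAPPER_ff(scalar_func, vector_func)	\
extern VEC_TYPE vector_func (VEC_TYPE, VEC_TYPE);	\
FLOAT scalar_func (FLOAT x, FLOAT y)			\
{							\
  int i;						\
  VEC_TYPE mx, my;					\
  INIT_VEC_LOOP (mx, x, VEC_LEN);			\
  INIT_VEC_LOOP (my, y, VEC_LEN);			\
  VEC_TYPE mr = vector_func (mx, my);			\
  TEST_VEC_LOOP (mr, VEC_LEN);				\
  return ((FLOAT) mr[0]);				\
}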
sysdeps/unix/sysv/linux/x86_64/libmvec.abilist
@@ -9,6 +9,7 @@ GLIBC_2.22
  _ZGVbN4v_expf F
  _ZGVbN4v_logf F
  _ZGVbN4v_sinf F
+ _ZGVbN4vv_powf F
  _ZGVcN4v_cos F
  _ZGVcN4v_exp F
  _ZGVcN4v_log F
@@ -18,6 +19,7 @@ GLIBC_2.22
  _ZGVcN8v_expf F
  _ZGVcN8v_logf F
  _ZGVcN8v_sinf F
+ _ZGVcN8vv_powf F
  _ZGVdN4v_cos F
  _ZGVdN4v_exp F
  _ZGVdN4v_log F
@@ -27,10 +29,12 @@ GLIBC_2.22
  _ZGVdN8v_expf F
  _ZGVdN8v_logf F
  _ZGVdN8v_sinf F
+ _ZGVdN8vv_powf F
  _ZGVeN16v_cosf F
  _ZGVeN16v_expf F
  _ZGVeN16v_logf F
  _ZGVeN16v_sinf F
+ _ZGVeN16vv_powf F
  _ZGVeN8v_cos F
  _ZGVeN8v_exp F
  _ZGVeN8v_log F
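The new symbols follow the Vector ABI mangling referenced in the commit message: _ZGV, an ISA letter (b = SSE4/xmm, c = AVX/ymm, d = AVX2/ymm, e = AVX-512/zmm), N for "notinbranch", the lane count, and one "v" per vector argument — powf(x, y) therefore gets "vv". A hedged sketch of calling one variant directly (normally the compiler emits these calls; link with -lmvec):

#include <immintrin.h>

/* SSE variant: ISA 'b', 4 lanes, two vector args -> _ZGVbN4vv_powf.  */
extern __m128 _ZGVbN4vv_powf (__m128 x, __m128 y);

__m128 pow4f (__m128 x, __m128 y)
{
  return _ZGVbN4vv_powf (x, y);
}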
sysdeps/x86/fpu/bits/math-vector.h
@@ -46,6 +46,8 @@
 #  define __DECL_SIMD_expf __DECL_SIMD_x86_64
 #  undef __DECL_SIMD_pow
 #  define __DECL_SIMD_pow __DECL_SIMD_x86_64
+#  undef __DECL_SIMD_powf
+#  define __DECL_SIMD_powf __DECL_SIMD_x86_64
 
 /* Workaround to exclude unnecessary symbol aliases in libmvec
    while GCC creates the vector names based on scalar asm name.
@@ -71,6 +73,10 @@ __asm__ ("_ZGVbN2vv___pow_finite = _ZGVbN2vv_pow");
 __asm__ ("_ZGVcN4vv___pow_finite = _ZGVcN4vv_pow");
 __asm__ ("_ZGVdN4vv___pow_finite = _ZGVdN4vv_pow");
 __asm__ ("_ZGVeN8vv___pow_finite = _ZGVeN8vv_pow");
+__asm__ ("_ZGVbN4vv___powf_finite = _ZGVbN4vv_powf");
+__asm__ ("_ZGVcN8vv___powf_finite = _ZGVcN8vv_powf");
+__asm__ ("_ZGVdN8vv___powf_finite = _ZGVdN8vv_powf");
+__asm__ ("_ZGVeN16vv___powf_finite = _ZGVeN16vv_powf");
 
 # endif
 #endif
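What __DECL_SIMD_powf ultimately does, as a simplified sketch of the mechanism rather than the verbatim header: under -fopenmp, __DECL_SIMD_x86_64 expands to an OpenMP pragma attached to the math.h prototype, which is what permits GCC to substitute the _ZGV*vv_powf symbols. The __powf_finite aliases above cover the vector names GCC derives when -ffinite-math-only redirects the scalar call to __powf_finite.

/* Simplified; the real bits/math-vector.h has additional guards.  */
#pragma omp declare simd notinbranch
extern float powf (float __x, float __y);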
sysdeps/x86_64/fpu/Makefile
@@ -15,7 +15,8 @@ libmvec-support += svml_d_cos2_core svml_d_cos4_core_avx \
 		   svml_s_expf4_core svml_s_expf8_core_avx svml_s_expf8_core \
 		   svml_s_expf16_core svml_s_expf_data svml_d_pow2_core \
 		   svml_d_pow4_core_avx svml_d_pow4_core svml_d_pow8_core \
-		   svml_d_pow_data \
+		   svml_d_pow_data svml_s_powf4_core svml_s_powf8_core_avx \
+		   svml_s_powf8_core svml_s_powf16_core svml_s_powf_data \
 		   init-arch
 endif
sysdeps/x86_64/fpu/Versions
@@ -9,5 +9,6 @@ libmvec {
     _ZGVbN4v_sinf; _ZGVcN8v_sinf; _ZGVdN8v_sinf; _ZGVeN16v_sinf;
     _ZGVbN4v_logf; _ZGVcN8v_logf; _ZGVdN8v_logf; _ZGVeN16v_logf;
     _ZGVbN4v_expf; _ZGVcN8v_expf; _ZGVdN8v_expf; _ZGVeN16v_expf;
+    _ZGVbN4vv_powf; _ZGVcN8vv_powf; _ZGVdN8vv_powf; _ZGVeN16vv_powf;
   }
 }
sysdeps/x86_64/fpu/libm-test-ulps
@@ -1947,17 +1947,25 @@ ifloat: 4
 ildouble: 2
 ldouble: 2
 
+Function: "pow_vlen16":
+float: 3
+
 Function: "pow_vlen2":
 double: 1
 
 Function: "pow_vlen4":
 double: 1
+float: 3
 
 Function: "pow_vlen4_avx2":
 double: 1
 
 Function: "pow_vlen8":
 double: 1
+float: 3
 
+Function: "pow_vlen8_avx2":
+float: 3
+
 Function: "sin":
 ildouble: 1
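The "float: 3" entries record the observed maximum error of the vector powf paths in units in the last place (ulp). A minimal sketch of what such a bound means, using a hypothetical ulp_err helper rather than glibc's libm-test machinery:

#include <math.h>
#include <stdio.h>

/* Error of GOT relative to WANT, in ulp at WANT (finite, nonzero).  */
static float ulp_err (float got, float want)
{
  float one_ulp = nextafterf (fabsf (want), INFINITY) - fabsf (want);
  return fabsf (got - want) / one_ulp;
}

int main (void)
{
  float got = powf (2.5f, 3.5f);        /* stand-in for a vector lane */
  float want = (float) pow (2.5, 3.5);  /* higher-precision reference */
  printf ("error = %.2f ulp\n", (double) ulp_err (got, want));
  return 0;
}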
sysdeps/x86_64/fpu/multiarch/Makefile
@@ -66,5 +66,7 @@ libmvec-sysdep_routines += svml_d_cos2_core_sse4 svml_d_cos4_core_avx2 \
 			   svml_d_exp4_core_avx2 svml_d_exp8_core_avx512 \
 			   svml_s_expf4_core_sse4 svml_s_expf8_core_avx2 \
 			   svml_s_expf16_core_avx512 svml_d_pow2_core_sse4 \
-			   svml_d_pow4_core_avx2 svml_d_pow8_core_avx512
+			   svml_d_pow4_core_avx2 svml_d_pow8_core_avx512 \
+			   svml_s_powf4_core_sse4 svml_s_powf8_core_avx2 \
+			   svml_s_powf16_core_avx512
 endif
39	sysdeps/x86_64/fpu/multiarch/svml_s_powf16_core.S	Normal file
@@ -0,0 +1,39 @@
/* Multiple versions of vectorized powf.
   Copyright (C) 2014-2015 Free Software Foundation, Inc.
   This file is part of the GNU C Library.

   The GNU C Library is free software; you can redistribute it and/or
   modify it under the terms of the GNU Lesser General Public
   License as published by the Free Software Foundation; either
   version 2.1 of the License, or (at your option) any later version.

   The GNU C Library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
   Lesser General Public License for more details.

   You should have received a copy of the GNU Lesser General Public
   License along with the GNU C Library; if not, see
   <http://www.gnu.org/licenses/>. */

#include <sysdep.h>
#include <init-arch.h>

        .text
ENTRY (_ZGVeN16vv_powf)
        .type _ZGVeN16vv_powf, @gnu_indirect_function
        cmpl $0, KIND_OFFSET+__cpu_features(%rip)
        jne 1f
        call __init_cpu_features
1:      leaq _ZGVeN16vv_powf_skx(%rip), %rax
        testl $bit_AVX512DQ_Usable, __cpu_features+FEATURE_OFFSET+index_AVX512DQ_Usable(%rip)
        jnz 3f
2:      leaq _ZGVeN16vv_powf_knl(%rip), %rax
        testl $bit_AVX512F_Usable, __cpu_features+FEATURE_OFFSET+index_AVX512F_Usable(%rip)
        jnz 3f
        leaq _ZGVeN16vv_powf_avx2_wrapper(%rip), %rax
3:      ret
END (_ZGVeN16vv_powf)

#define _ZGVeN16vv_powf _ZGVeN16vv_powf_avx2_wrapper
#include "../svml_s_powf16_core.S"
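The ENTRY above is an IFUNC resolver: it runs once at symbol resolution and returns the address of the best implementation for the running CPU. A C-level sketch of the same dispatch (illustrative only; the real resolver tests glibc-internal __cpu_features bits, for which __builtin_cpu_supports stands in here, GCC >= 5):

#include <stdio.h>

static const char *select_powf16 (void)
{
  if (__builtin_cpu_supports ("avx512dq"))
    return "_ZGVeN16vv_powf_skx";          /* SKX version */
  if (__builtin_cpu_supports ("avx512f"))
    return "_ZGVeN16vv_powf_knl";          /* KNL version */
  return "_ZGVeN16vv_powf_avx2_wrapper";   /* fallback via AVX2 */
}

int main (void)
{
  puts (select_powf16 ());
  return 0;
}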
653	sysdeps/x86_64/fpu/multiarch/svml_s_powf16_core_avx512.S	Normal file
@@ -0,0 +1,653 @@
/* Function powf vectorized with AVX-512. KNL and SKX versions.
   Copyright (C) 2014-2015 Free Software Foundation, Inc.
   This file is part of the GNU C Library.

   The GNU C Library is free software; you can redistribute it and/or
   modify it under the terms of the GNU Lesser General Public
   License as published by the Free Software Foundation; either
   version 2.1 of the License, or (at your option) any later version.

   The GNU C Library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
   Lesser General Public License for more details.

   You should have received a copy of the GNU Lesser General Public
   License along with the GNU C Library; if not, see
   <http://www.gnu.org/licenses/>. */

#include <sysdep.h>
#include "svml_s_powf_data.h"
#include "svml_s_wrapper_impl.h"

/*
   ALGORITHM DESCRIPTION:

   We are using the next identity : pow(x,y) = 2^(y * log2(x)).

   1) log2(x) calculation
      Here we use the following formula.
      Let |x|=2^k1*X1, where k1 is integer, 1<=X1<2.
      Let C ~= 1/ln(2),
      Rcp1 ~= 1/X1, X2=Rcp1*X1,
      Rcp2 ~= 1/X2, X3=Rcp2*X2,
      Rcp3 ~= 1/X3, Rcp3C ~= C/X3.
      Then
        log2|x| = k1 + log2(1/Rcp1) + log2(1/Rcp2) + log2(C/Rcp3C) +
                  log2(X1*Rcp1*Rcp2*Rcp3C/C),
      where X1*Rcp1*Rcp2*Rcp3C = C*(1+q), q is very small.

      The values of Rcp1, log2(1/Rcp1), Rcp2, log2(1/Rcp2),
      Rcp3C, log2(C/Rcp3C) are taken from tables.
      Values of Rcp1, Rcp2, Rcp3C are such that RcpC=Rcp1*Rcp2*Rcp3C
      is exactly represented in target precision.

      log2(X1*Rcp1*Rcp2*Rcp3C/C) = log2(1+q) = ln(1+q)/ln2 =
        = 1/(ln2)*q - 1/(2ln2)*q^2 + 1/(3ln2)*q^3 - ... =
        = 1/(C*ln2)*cq - 1/(2*C^2*ln2)*cq^2 + 1/(3*C^3*ln2)*cq^3 - ... =
        = (1 + a1)*cq + a2*cq^2 + a3*cq^3 + ...,
      where
        cq=X1*Rcp1*Rcp2*Rcp3C-C,
        a1=1/(C*ln(2))-1 is small,
        a2=1/(2*C^2*ln2),
        a3=1/(3*C^3*ln2),
        ...
      Log2 result is split by three parts: HH+HL+HLL

   2) Calculation of y*log2(x)
      Split y into YHi+YLo.
      Get high PH and medium PL parts of y*log2|x|.
      Get low PLL part of y*log2|x|.
      Now we have PH+PL+PLL ~= y*log2|x|.

   3) Calculation of 2^(y*log2(x))
      Let's represent PH+PL+PLL in the form N + j/2^expK + Z,
      where expK=7 in this implementation, N and j are integers,
      0<=j<=2^expK-1, |Z|<2^(-expK-1). Hence
        2^(PH+PL+PLL) ~= 2^N * 2^(j/2^expK) * 2^Z,
      where 2^(j/2^expK) is stored in a table, and
        2^Z ~= 1 + B1*Z + B2*Z^2 ... + B5*Z^5.
      We compute 2^(PH+PL+PLL) as follows:
        Break PH into PHH + PHL, where PHH = N + j/2^expK.
        Z = PHL + PL + PLL
        Exp2Poly = B1*Z + B2*Z^2 ... + B5*Z^5
        Get 2^(j/2^expK) from table in the form THI+TLO.
      Now we have 2^(PH+PL+PLL) ~= 2^N * (THI + TLO) * (1 + Exp2Poly).
      Get significand of 2^(PH+PL+PLL) in the form ResHi+ResLo:
        ResHi := THI
        ResLo := THI * Exp2Poly + TLO
      Get exponent ERes of the result:
        Res := ResHi + ResLo:
        Result := ex(Res) + N. */

        .text
ENTRY (_ZGVeN16vv_powf_knl)
#ifndef HAVE_AVX512_ASM_SUPPORT
WRAPPER_IMPL_AVX512_ff _ZGVdN8vv_powf
#else
        pushq %rbp
        cfi_adjust_cfa_offset (8)
        cfi_rel_offset (%rbp, 0)
        movq %rsp, %rbp
        cfi_def_cfa_register (%rbp)
        andq $-64, %rsp
        subq $1344, %rsp
        movq __svml_spow_data@GOTPCREL(%rip), %rdx
        vmovaps %zmm1, %zmm9
        vshuff32x4 $238, %zmm0, %zmm0, %zmm7
        kxnorw %k3, %k3, %k3
        vcvtps2pd %ymm0, %zmm14
        vcvtps2pd %ymm7, %zmm10
        movl $-1, %eax
        movq $-1, %rcx
        vpandd _ABSMASK(%rdx), %zmm9, %zmm4
        vmovups _ExpMask(%rdx), %zmm6

/* exponent bits selection */
        vpsrlq $20, %zmm14, %zmm13
        vshuff32x4 $238, %zmm9, %zmm9, %zmm8
        vpcmpd $5, _INF(%rdx), %zmm4, %k2
        vpsrlq $32, %zmm13, %zmm15
        vcvtps2pd %ymm8, %zmm2
        vmovups _Two10(%rdx), %zmm4
        vpmovqd %zmm15, %ymm12
        vcvtps2pd %ymm9, %zmm1
        vpsubd _NMINNORM(%rdx), %zmm0, %zmm3
        vpbroadcastd %eax, %zmm8{%k2}{z}
        vpcmpd $5, _NMAXVAL(%rdx), %zmm3, %k1

/* preserve mantissa, set input exponent to 2^(-10) */
        vmovaps %zmm6, %zmm3
        vpternlogq $248, %zmm6, %zmm10, %zmm4
        vpsrlq $20, %zmm10, %zmm10
        vpternlogq $234, _Two10(%rdx), %zmm14, %zmm3

/* reciprocal approximation good to at least 11 bits */
        vrcp28pd %zmm4, %zmm11
        vpsrlq $32, %zmm10, %zmm14
        vpbroadcastd %eax, %zmm7{%k1}{z}
        kxnorw %k1, %k1, %k1
        vrcp28pd %zmm3, %zmm5
        vpmovqd %zmm14, %ymm6
        vshufi32x4 $68, %zmm6, %zmm12, %zmm13
        vmovups _One(%rdx), %zmm6

/* round reciprocal to nearest integer, will have 1+9 mantissa bits */
        vrndscalepd $8, %zmm5, %zmm14

/* biased exponent in DP format */
        vshuff32x4 $238, %zmm13, %zmm13, %zmm5
        vrndscalepd $8, %zmm11, %zmm11
        vcmppd $30, _Threshold(%rdx), %zmm14, %k2
        vcvtdq2pd %ymm13, %zmm10
        vcvtdq2pd %ymm5, %zmm15

/* table lookup */
        vpsrlq $40, %zmm14, %zmm13
        vpxord %zmm5, %zmm5, %zmm5
        vgatherqpd _Log2Rcp_lookup(%rdx,%zmm13), %zmm5{%k3}
        vfmsub213pd %zmm6, %zmm14, %zmm3
        vfmsub213pd %zmm6, %zmm11, %zmm4
        vcmppd $30, _Threshold(%rdx), %zmm11, %k3
        vpbroadcastq %rcx, %zmm14{%k2}{z}

/* dpP= _dbT+lJ*T_ITEM_GRAN */
        kxnorw %k2, %k2, %k2
        vpsrlq $40, %zmm11, %zmm12
        vpxord %zmm6, %zmm6, %zmm6
        vpbroadcastq %rcx, %zmm11{%k3}{z}
        kxnorw %k3, %k3, %k3
        vgatherqpd _Log2Rcp_lookup(%rdx,%zmm12), %zmm6{%k1}
        vmovups _Bias1(%rdx), %zmm12
        vpternlogq $236, _Bias(%rdx), %zmm12, %zmm14
        vpternlogq $248, _Bias(%rdx), %zmm11, %zmm12
        vsubpd %zmm14, %zmm10, %zmm13
        vsubpd %zmm12, %zmm15, %zmm10
        vmovups _poly_coeff_3(%rdx), %zmm11
        vmovups _poly_coeff_4(%rdx), %zmm15
        vfmadd213pd %zmm15, %zmm4, %zmm11
        vmulpd %zmm4, %zmm4, %zmm12
        vmovaps %zmm15, %zmm14
        vmulpd %zmm3, %zmm3, %zmm15
        vfmadd231pd _poly_coeff_3(%rdx), %zmm3, %zmm14

/* reconstruction */
        vfmadd213pd %zmm4, %zmm12, %zmm11
        vfmadd213pd %zmm3, %zmm15, %zmm14
        vaddpd %zmm6, %zmm11, %zmm11
        vaddpd %zmm5, %zmm14, %zmm3
        vfmadd231pd _L2(%rdx), %zmm10, %zmm11
        vfmadd132pd _L2(%rdx), %zmm3, %zmm13
        vmulpd %zmm2, %zmm11, %zmm12
        vmulpd %zmm1, %zmm13, %zmm10
        vmulpd __dbInvLn2(%rdx), %zmm12, %zmm6

/* hi bits */
        vpsrlq $32, %zmm12, %zmm12
        vmulpd __dbInvLn2(%rdx), %zmm10, %zmm1

/* to round down; if dR is an integer we will get R = 1, which is ok */
        vsubpd __dbHALF(%rdx), %zmm6, %zmm4
        vpsrlq $32, %zmm10, %zmm11
        vpmovqd %zmm11, %ymm3
        vsubpd __dbHALF(%rdx), %zmm1, %zmm2
        vaddpd __dbShifter(%rdx), %zmm4, %zmm14
        vpmovqd %zmm12, %ymm4
        vshufi32x4 $68, %zmm4, %zmm3, %zmm5
        vpxord %zmm4, %zmm4, %zmm4
        vaddpd __dbShifter(%rdx), %zmm2, %zmm2

/* iAbsX = iAbsX&iAbsMask; */
        vpandd __iAbsMask(%rdx), %zmm5, %zmm11
        vpxord %zmm5, %zmm5, %zmm5
        vsubpd __dbShifter(%rdx), %zmm14, %zmm13

/* iRangeMask = (iAbsX>iDomainRange) */
        vpcmpgtd __iDomainRange(%rdx), %zmm11, %k1
        vsubpd __dbShifter(%rdx), %zmm2, %zmm15
        vpbroadcastd %eax, %zmm10{%k1}{z}
        vpternlogd $254, %zmm8, %zmm7, %zmm10

/* [0..1) */
        vsubpd %zmm15, %zmm1, %zmm1

/* low K bits */
        vpandq __lbLOWKBITS(%rdx), %zmm14, %zmm11
        vgatherqpd 13952(%rdx,%zmm11,8), %zmm5{%k3}
        vsubpd %zmm13, %zmm6, %zmm7
        vptestmd %zmm10, %zmm10, %k0
        vpandq __lbLOWKBITS(%rdx), %zmm2, %zmm10
        vmulpd __dbC1(%rdx), %zmm1, %zmm1
        vmulpd __dbC1(%rdx), %zmm7, %zmm3
        vpsrlq $11, %zmm2, %zmm8
        vpsrlq $11, %zmm14, %zmm2

/* NB : including +/- sign for the exponent!! */
        vpsllq $52, %zmm8, %zmm8
        kmovw %k0, %ecx
        vpsllq $52, %zmm2, %zmm6
        vfmadd213pd %zmm5, %zmm3, %zmm5
        vgatherqpd 13952(%rdx,%zmm10,8), %zmm4{%k2}
        vfmadd213pd %zmm4, %zmm1, %zmm4
        vpaddq %zmm6, %zmm5, %zmm10
        vcvtpd2ps %zmm10, %ymm12
        vpaddq %zmm8, %zmm4, %zmm7
        vcvtpd2ps %zmm7, %ymm11
        vshuff32x4 $68, %zmm12, %zmm11, %zmm1
        testl %ecx, %ecx
        jne .LBL_1_3

.LBL_1_2:
        cfi_remember_state
        vmovaps %zmm1, %zmm0
        movq %rbp, %rsp
        cfi_def_cfa_register (%rsp)
        popq %rbp
        cfi_adjust_cfa_offset (-8)
        cfi_restore (%rbp)
        ret

.LBL_1_3:
        cfi_restore_state
        vmovups %zmm0, 1152(%rsp)
        vmovups %zmm9, 1216(%rsp)
        vmovups %zmm1, 1280(%rsp)
        je .LBL_1_2

        xorb %dl, %dl
        kmovw %k4, 1048(%rsp)
        xorl %eax, %eax
        kmovw %k5, 1040(%rsp)
        kmovw %k6, 1032(%rsp)
        kmovw %k7, 1024(%rsp)
        vmovups %zmm16, 960(%rsp)
        vmovups %zmm17, 896(%rsp)
        vmovups %zmm18, 832(%rsp)
        vmovups %zmm19, 768(%rsp)
        vmovups %zmm20, 704(%rsp)
        vmovups %zmm21, 640(%rsp)
        vmovups %zmm22, 576(%rsp)
        vmovups %zmm23, 512(%rsp)
        vmovups %zmm24, 448(%rsp)
        vmovups %zmm25, 384(%rsp)
        vmovups %zmm26, 320(%rsp)
        vmovups %zmm27, 256(%rsp)
        vmovups %zmm28, 192(%rsp)
        vmovups %zmm29, 128(%rsp)
        vmovups %zmm30, 64(%rsp)
        vmovups %zmm31, (%rsp)
        movq %rsi, 1064(%rsp)
        movq %rdi, 1056(%rsp)
        movq %r12, 1096(%rsp)
        cfi_offset_rel_rsp (12, 1096)
        movb %dl, %r12b
        movq %r13, 1088(%rsp)
        cfi_offset_rel_rsp (13, 1088)
        movl %ecx, %r13d
        movq %r14, 1080(%rsp)
        cfi_offset_rel_rsp (14, 1080)
        movl %eax, %r14d
        movq %r15, 1072(%rsp)
        cfi_offset_rel_rsp (15, 1072)
        cfi_remember_state

.LBL_1_6:
        btl %r14d, %r13d
        jc .LBL_1_12

.LBL_1_7:
        lea 1(%r14), %esi
        btl %esi, %r13d
        jc .LBL_1_10

.LBL_1_8:
        addb $1, %r12b
        addl $2, %r14d
        cmpb $16, %r12b
        jb .LBL_1_6

        kmovw 1048(%rsp), %k4
        movq 1064(%rsp), %rsi
        kmovw 1040(%rsp), %k5
        movq 1056(%rsp), %rdi
        kmovw 1032(%rsp), %k6
        movq 1096(%rsp), %r12
        cfi_restore (%r12)
        movq 1088(%rsp), %r13
        cfi_restore (%r13)
        kmovw 1024(%rsp), %k7
        vmovups 960(%rsp), %zmm16
        vmovups 896(%rsp), %zmm17
        vmovups 832(%rsp), %zmm18
        vmovups 768(%rsp), %zmm19
        vmovups 704(%rsp), %zmm20
        vmovups 640(%rsp), %zmm21
        vmovups 576(%rsp), %zmm22
        vmovups 512(%rsp), %zmm23
        vmovups 448(%rsp), %zmm24
        vmovups 384(%rsp), %zmm25
        vmovups 320(%rsp), %zmm26
        vmovups 256(%rsp), %zmm27
        vmovups 192(%rsp), %zmm28
        vmovups 128(%rsp), %zmm29
        vmovups 64(%rsp), %zmm30
        vmovups (%rsp), %zmm31
        movq 1080(%rsp), %r14
        cfi_restore (%r14)
        movq 1072(%rsp), %r15
        cfi_restore (%r15)
        vmovups 1280(%rsp), %zmm1
        jmp .LBL_1_2

.LBL_1_10:
        cfi_restore_state
        movzbl %r12b, %r15d
        vmovss 1156(%rsp,%r15,8), %xmm0
        vmovss 1220(%rsp,%r15,8), %xmm1
        call powf@PLT
        vmovss %xmm0, 1284(%rsp,%r15,8)
        jmp .LBL_1_8

.LBL_1_12:
        movzbl %r12b, %r15d
        vmovss 1152(%rsp,%r15,8), %xmm0
        vmovss 1216(%rsp,%r15,8), %xmm1
        call powf@PLT
        vmovss %xmm0, 1280(%rsp,%r15,8)
        jmp .LBL_1_7
#endif
END (_ZGVeN16vv_powf_knl)

ENTRY (_ZGVeN16vv_powf_skx)
#ifndef HAVE_AVX512_ASM_SUPPORT
WRAPPER_IMPL_AVX512_ff _ZGVdN8vv_powf
#else
        pushq %rbp
        cfi_adjust_cfa_offset (8)
        cfi_rel_offset (%rbp, 0)
        movq %rsp, %rbp
        cfi_def_cfa_register (%rbp)
        andq $-64, %rsp
        subq $1344, %rsp
        movq __svml_spow_data@GOTPCREL(%rip), %rax
        vextractf32x8 $1, %zmm1, %ymm14
        vextractf32x8 $1, %zmm0, %ymm15
        vpsubd _NMINNORM(%rax), %zmm0, %zmm9
        vmovups %zmm26, 1280(%rsp)
        vmovups _ExpMask(%rax), %zmm6
        vpcmpd $1, _NMAXVAL(%rax), %zmm9, %k1
        vcvtps2pd %ymm0, %zmm5
        vcvtps2pd %ymm1, %zmm12
        kxnorw %k3, %k3, %k3

/* exponent bits selection */
        vpsrlq $20, %zmm5, %zmm3
        vpsrlq $32, %zmm3, %zmm2
        vpmovqd %zmm2, %ymm11
        vcvtps2pd %ymm14, %zmm13
        vmovups .L_2il0floatpacket.23(%rip), %zmm14
        vmovaps %zmm14, %zmm26
        vpandd _ABSMASK(%rax), %zmm1, %zmm8
        vpcmpd $1, _INF(%rax), %zmm8, %k2
        vpandnd %zmm9, %zmm9, %zmm26{%k1}
        vmovups _Two10(%rax), %zmm9
        kxnorw %k1, %k1, %k1
        vcvtps2pd %ymm15, %zmm4
        vmovaps %zmm14, %zmm15

/* preserve mantissa, set input exponent to 2^(-10) */
        vpternlogq $248, %zmm6, %zmm4, %zmm9
        vpsrlq $20, %zmm4, %zmm4

/* reciprocal approximation good to at least 11 bits */
        vrcp14pd %zmm9, %zmm10

/* round reciprocal to nearest integer, will have 1+9 mantissa bits */
        vrndscalepd $8, %zmm10, %zmm3
        vmovups _One(%rax), %zmm10
        vfmsub213pd %zmm10, %zmm3, %zmm9
        vpandnd %zmm8, %zmm8, %zmm15{%k2}
        vmovaps %zmm6, %zmm8
        vpternlogq $234, _Two10(%rax), %zmm5, %zmm8
        vpsrlq $32, %zmm4, %zmm5
        vrcp14pd %zmm8, %zmm7
        vpmovqd %zmm5, %ymm6
        vrndscalepd $8, %zmm7, %zmm2
        vfmsub213pd %zmm10, %zmm2, %zmm8

/* table lookup */
        vpsrlq $40, %zmm2, %zmm10
        vinserti32x8 $1, %ymm6, %zmm11, %zmm4
        vpsrlq $40, %zmm3, %zmm11

/* biased exponent in DP format */
        vextracti32x8 $1, %zmm4, %ymm7
        vcvtdq2pd %ymm4, %zmm6
        vpmovqd %zmm10, %ymm4
        vpmovqd %zmm11, %ymm5
        vpxord %zmm10, %zmm10, %zmm10
        vgatherdpd _Log2Rcp_lookup(%rax,%ymm4), %zmm10{%k3}
        vpbroadcastq .L_2il0floatpacket.24(%rip), %zmm4
        vpxord %zmm11, %zmm11, %zmm11
        vcvtdq2pd %ymm7, %zmm7
        vgatherdpd _Log2Rcp_lookup(%rax,%ymm5), %zmm11{%k1}
        vmovups _Threshold(%rax), %zmm5
        vcmppd $21, %zmm2, %zmm5, %k2
        vcmppd $21, %zmm3, %zmm5, %k3
        vmovups _Bias1(%rax), %zmm3
        vmovaps %zmm4, %zmm2
        vpandnq %zmm5, %zmm5, %zmm2{%k2}
        vpternlogq $236, _Bias(%rax), %zmm3, %zmm2

/* dpP= _dbT+lJ*T_ITEM_GRAN */
        kxnorw %k2, %k2, %k2
        vpandnq %zmm5, %zmm5, %zmm4{%k3}
        vpternlogq $248, _Bias(%rax), %zmm4, %zmm3
        vsubpd %zmm2, %zmm6, %zmm4
        vmovups _poly_coeff_3(%rax), %zmm6
        vmovups _poly_coeff_4(%rax), %zmm2
        vsubpd %zmm3, %zmm7, %zmm5
        vmulpd %zmm8, %zmm8, %zmm7
        vfmadd213pd %zmm2, %zmm9, %zmm6
        kxnorw %k3, %k3, %k3
        vmovaps %zmm2, %zmm3
        vmulpd %zmm9, %zmm9, %zmm2
        vfmadd231pd _poly_coeff_3(%rax), %zmm8, %zmm3

/* reconstruction */
        vfmadd213pd %zmm9, %zmm2, %zmm6
        vfmadd213pd %zmm8, %zmm7, %zmm3
        vaddpd %zmm11, %zmm6, %zmm8
        vaddpd %zmm10, %zmm3, %zmm9
        vfmadd231pd _L2(%rax), %zmm5, %zmm8
        vfmadd132pd _L2(%rax), %zmm9, %zmm4
        vmulpd %zmm13, %zmm8, %zmm13
        vmulpd %zmm12, %zmm4, %zmm3
        vmulpd __dbInvLn2(%rax), %zmm13, %zmm10
        vmulpd __dbInvLn2(%rax), %zmm3, %zmm8

/* hi bits */
        vpsrlq $32, %zmm3, %zmm4
        vpsrlq $32, %zmm13, %zmm13

/* to round down; if dR is an integer we will get R = 1, which is ok */
        vsubpd __dbHALF(%rax), %zmm8, %zmm12
        vpmovqd %zmm4, %ymm5
        vpmovqd %zmm13, %ymm2
        vsubpd __dbHALF(%rax), %zmm10, %zmm9
        vaddpd __dbShifter(%rax), %zmm12, %zmm7
        vaddpd __dbShifter(%rax), %zmm9, %zmm9
        vsubpd __dbShifter(%rax), %zmm7, %zmm11
        vsubpd __dbShifter(%rax), %zmm9, %zmm12
        vinserti32x8 $1, %ymm2, %zmm5, %zmm3

/* iAbsX = iAbsX&iAbsMask */
        vpandd __iAbsMask(%rax), %zmm3, %zmm4

/* iRangeMask = (iAbsX>iDomainRange) */
        vpcmpd $2, __iDomainRange(%rax), %zmm4, %k1
        vpandnd %zmm4, %zmm4, %zmm14{%k1}
        vpternlogd $254, %zmm15, %zmm26, %zmm14

/* [0..1) */
        vsubpd %zmm11, %zmm8, %zmm15
        vsubpd %zmm12, %zmm10, %zmm26
        vptestmd %zmm14, %zmm14, %k0
        vpsrlq $11, %zmm7, %zmm8
        vpsrlq $11, %zmm9, %zmm10
        vmulpd __dbC1(%rax), %zmm26, %zmm26
        vmulpd __dbC1(%rax), %zmm15, %zmm15

/* NB : including +/- sign for the exponent!! */
        vpsllq $52, %zmm10, %zmm13
        vpsllq $52, %zmm8, %zmm12
        kmovw %k0, %ecx

/* low K bits */
        vpandq __lbLOWKBITS(%rax), %zmm9, %zmm14
        vpandq __lbLOWKBITS(%rax), %zmm7, %zmm6
        vpmovqd %zmm14, %ymm7
        vpmovqd %zmm6, %ymm9
        vpxord %zmm2, %zmm2, %zmm2
        vgatherdpd 13952(%rax,%ymm7,8), %zmm2{%k3}
        vfmadd213pd %zmm2, %zmm26, %zmm2
        vpaddq %zmm13, %zmm2, %zmm2
        vcvtpd2ps %zmm2, %ymm4
        vpxord %zmm11, %zmm11, %zmm11
        vgatherdpd 13952(%rax,%ymm9,8), %zmm11{%k2}
        vfmadd213pd %zmm11, %zmm15, %zmm11
        vpaddq %zmm12, %zmm11, %zmm3
        vcvtpd2ps %zmm3, %ymm5
        vinsertf32x8 $1, %ymm4, %zmm5, %zmm2
        testl %ecx, %ecx
        jne .LBL_2_3

.LBL_2_2:
        cfi_remember_state
        vmovups 1280(%rsp), %zmm26
        vmovaps %zmm2, %zmm0
        movq %rbp, %rsp
        cfi_def_cfa_register (%rsp)
        popq %rbp
        cfi_adjust_cfa_offset (-8)
        cfi_restore (%rbp)
        ret

.LBL_2_3:
        cfi_restore_state
        vmovups %zmm0, 1088(%rsp)
        vmovups %zmm1, 1152(%rsp)
        vmovups %zmm2, 1216(%rsp)
        je .LBL_2_2

        xorb %dl, %dl
        xorl %eax, %eax
        kmovw %k4, 984(%rsp)
        kmovw %k5, 976(%rsp)
        kmovw %k6, 968(%rsp)
        kmovw %k7, 960(%rsp)
        vmovups %zmm16, 896(%rsp)
        vmovups %zmm17, 832(%rsp)
        vmovups %zmm18, 768(%rsp)
        vmovups %zmm19, 704(%rsp)
        vmovups %zmm20, 640(%rsp)
        vmovups %zmm21, 576(%rsp)
        vmovups %zmm22, 512(%rsp)
        vmovups %zmm23, 448(%rsp)
        vmovups %zmm24, 384(%rsp)
        vmovups %zmm25, 320(%rsp)
        vmovups %zmm27, 256(%rsp)
        vmovups %zmm28, 192(%rsp)
        vmovups %zmm29, 128(%rsp)
        vmovups %zmm30, 64(%rsp)
        vmovups %zmm31, (%rsp)
        movq %rsi, 1000(%rsp)
        movq %rdi, 992(%rsp)
        movq %r12, 1032(%rsp)
        cfi_offset_rel_rsp (12, 1032)
        movb %dl, %r12b
        movq %r13, 1024(%rsp)
        cfi_offset_rel_rsp (13, 1024)
        movl %ecx, %r13d
        movq %r14, 1016(%rsp)
        cfi_offset_rel_rsp (14, 1016)
        movl %eax, %r14d
        movq %r15, 1008(%rsp)
        cfi_offset_rel_rsp (15, 1008)
        cfi_remember_state

.LBL_2_6:
        btl %r14d, %r13d
        jc .LBL_2_12

.LBL_2_7:
        lea 1(%r14), %esi
        btl %esi, %r13d
        jc .LBL_2_10

.LBL_2_8:
        incb %r12b
        addl $2, %r14d
        cmpb $16, %r12b
        jb .LBL_2_6

        kmovw 984(%rsp), %k4
        kmovw 976(%rsp), %k5
        kmovw 968(%rsp), %k6
        kmovw 960(%rsp), %k7
        vmovups 896(%rsp), %zmm16
        vmovups 832(%rsp), %zmm17
        vmovups 768(%rsp), %zmm18
        vmovups 704(%rsp), %zmm19
        vmovups 640(%rsp), %zmm20
        vmovups 576(%rsp), %zmm21
        vmovups 512(%rsp), %zmm22
        vmovups 448(%rsp), %zmm23
        vmovups 384(%rsp), %zmm24
        vmovups 320(%rsp), %zmm25
        vmovups 256(%rsp), %zmm27
        vmovups 192(%rsp), %zmm28
        vmovups 128(%rsp), %zmm29
        vmovups 64(%rsp), %zmm30
        vmovups (%rsp), %zmm31
        vmovups 1216(%rsp), %zmm2
        movq 1000(%rsp), %rsi
        movq 992(%rsp), %rdi
        movq 1032(%rsp), %r12
        cfi_restore (%r12)
        movq 1024(%rsp), %r13
        cfi_restore (%r13)
        movq 1016(%rsp), %r14
        cfi_restore (%r14)
        movq 1008(%rsp), %r15
        cfi_restore (%r15)
        jmp .LBL_2_2

.LBL_2_10:
        cfi_restore_state
        movzbl %r12b, %r15d
        vmovss 1156(%rsp,%r15,8), %xmm1
        vzeroupper
        vmovss 1092(%rsp,%r15,8), %xmm0
        call powf@PLT
        vmovss %xmm0, 1220(%rsp,%r15,8)
        jmp .LBL_2_8

.LBL_2_12:
        movzbl %r12b, %r15d
        vmovss 1152(%rsp,%r15,8), %xmm1
        vzeroupper
        vmovss 1088(%rsp,%r15,8), %xmm0
        call powf@PLT
        vmovss %xmm0, 1216(%rsp,%r15,8)
        jmp .LBL_2_7
#endif
END (_ZGVeN16vv_powf_skx)

        .section .rodata, "a"
.L_2il0floatpacket.23:
        .long 0xffffffff,0xffffffff,0xffffffff,0xffffffff,0xffffffff,0xffffffff,0xffffffff,0xffffffff,0xffffffff,0xffffffff,0xffffffff,0xffffffff,0xffffffff,0xffffffff,0xffffffff,0xffffffff
        .type .L_2il0floatpacket.23,@object
.L_2il0floatpacket.24:
        .long 0xffffffff,0xffffffff
        .type .L_2il0floatpacket.24,@object
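A scalar C sketch of step 3 of the algorithm comment at the top of this file: 2^t is computed by splitting t = N + j/2^expK + Z with expK = 7. This is illustrative only — the 128-entry table here is filled with exact exp2 values and stands in for the one in svml_s_powf_data.S, and plain exp2(z) stands in for the degree-5 polynomial the assembly uses.

#include <math.h>
#include <stdio.h>

#define EXPK 7
#define TBL_SIZE (1 << EXPK)    /* 128 entries of 2^(j/128) */

static double tbl[TBL_SIZE];

static double exp2_sketch (double t)
{
  double scaled = t * TBL_SIZE;
  double nj = floor (scaled + 0.5);     /* nearest N*2^expK + j */
  double z = (scaled - nj) / TBL_SIZE;  /* |Z| < 2^(-expK-1) */
  int n = (int) nj >> EXPK;             /* exponent part N */
  int j = (int) nj & (TBL_SIZE - 1);    /* table index j */
  return ldexp (tbl[j] * exp2 (z), n);  /* 2^N * 2^(j/128) * 2^Z */
}

int main (void)
{
  for (int j = 0; j < TBL_SIZE; j++)
    tbl[j] = exp2 (j / (double) TBL_SIZE);
  double x = 2.5, y = 3.5;
  printf ("%.9g vs %.9g\n", exp2_sketch (y * log2 (x)), pow (x, y));
  return 0;
}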
38	sysdeps/x86_64/fpu/multiarch/svml_s_powf4_core.S	Normal file
@@ -0,0 +1,38 @@
/* Multiple versions of vectorized powf.
   Copyright (C) 2014-2015 Free Software Foundation, Inc.
   This file is part of the GNU C Library.

   The GNU C Library is free software; you can redistribute it and/or
   modify it under the terms of the GNU Lesser General Public
   License as published by the Free Software Foundation; either
   version 2.1 of the License, or (at your option) any later version.

   The GNU C Library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
   Lesser General Public License for more details.

   You should have received a copy of the GNU Lesser General Public
   License along with the GNU C Library; if not, see
   <http://www.gnu.org/licenses/>. */

#include <sysdep.h>
#include <init-arch.h>

        .text
ENTRY (_ZGVbN4vv_powf)
        .type _ZGVbN4vv_powf, @gnu_indirect_function
        cmpl $0, KIND_OFFSET+__cpu_features(%rip)
        jne 1f
        call __init_cpu_features
1:      leaq _ZGVbN4vv_powf_sse4(%rip), %rax
        testl $bit_SSE4_1, __cpu_features+CPUID_OFFSET+index_SSE4_1(%rip)
        jz 2f
        ret
2:      leaq _ZGVbN4vv_powf_sse2(%rip), %rax
        ret
END (_ZGVbN4vv_powf)
libmvec_hidden_def (_ZGVbN4vv_powf)

#define _ZGVbN4vv_powf _ZGVbN4vv_powf_sse2
#include "../svml_s_powf4_core.S"
374	sysdeps/x86_64/fpu/multiarch/svml_s_powf4_core_sse4.S	Normal file
@@ -0,0 +1,374 @@
/* Function powf vectorized with SSE4.
   Copyright (C) 2014-2015 Free Software Foundation, Inc.
   This file is part of the GNU C Library.

   The GNU C Library is free software; you can redistribute it and/or
   modify it under the terms of the GNU Lesser General Public
   License as published by the Free Software Foundation; either
   version 2.1 of the License, or (at your option) any later version.

   The GNU C Library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
   Lesser General Public License for more details.

   You should have received a copy of the GNU Lesser General Public
   License along with the GNU C Library; if not, see
   <http://www.gnu.org/licenses/>. */

#include <sysdep.h>
#include "svml_s_powf_data.h"

        .text
ENTRY (_ZGVbN4vv_powf_sse4)
/*
   ALGORITHM DESCRIPTION:

   We are using the next identity: pow(x,y) = 2^(y * log2(x)).

   1) log2(x) calculation
      Here we use the following formula.
      Let |x|=2^k1*X1, where k1 is integer, 1<=X1<2.
      Let C ~= 1/ln(2),
      Rcp1 ~= 1/X1, X2=Rcp1*X1,
      Rcp2 ~= 1/X2, X3=Rcp2*X2,
      Rcp3 ~= 1/X3, Rcp3C ~= C/X3.
      Then
        log2|x| = k1 + log2(1/Rcp1) + log2(1/Rcp2) + log2(C/Rcp3C) +
                  log2(X1*Rcp1*Rcp2*Rcp3C/C),
      where X1*Rcp1*Rcp2*Rcp3C = C*(1+q), q is very small.

      The values of Rcp1, log2(1/Rcp1), Rcp2, log2(1/Rcp2),
      Rcp3C, log2(C/Rcp3C) are taken from tables.
      Values of Rcp1, Rcp2, Rcp3C are such that RcpC=Rcp1*Rcp2*Rcp3C
      is exactly represented in target precision.

      log2(X1*Rcp1*Rcp2*Rcp3C/C) = log2(1+q) = ln(1+q)/ln2 =
        = 1/(ln2)*q - 1/(2ln2)*q^2 + 1/(3ln2)*q^3 - ... =
        = 1/(C*ln2)*cq - 1/(2*C^2*ln2)*cq^2 + 1/(3*C^3*ln2)*cq^3 - ... =
        = (1 + a1)*cq + a2*cq^2 + a3*cq^3 + ...,
      where
        cq=X1*Rcp1*Rcp2*Rcp3C-C,
        a1=1/(C*ln(2))-1 is small,
        a2=1/(2*C^2*ln2),
        a3=1/(3*C^3*ln2),
        ...
      Log2 result is split by three parts: HH+HL+HLL

   2) Calculation of y*log2(x)
      Split y into YHi+YLo.
      Get high PH and medium PL parts of y*log2|x|.
      Get low PLL part of y*log2|x|.
      Now we have PH+PL+PLL ~= y*log2|x|.

   3) Calculation of 2^(y*log2(x))
      Let's represent PH+PL+PLL in the form N + j/2^expK + Z,
      where expK=7 in this implementation, N and j are integers,
      0<=j<=2^expK-1, |Z|<2^(-expK-1). Hence
        2^(PH+PL+PLL) ~= 2^N * 2^(j/2^expK) * 2^Z,
      where 2^(j/2^expK) is stored in a table, and
        2^Z ~= 1 + B1*Z + B2*Z^2 ... + B5*Z^5.
      We compute 2^(PH+PL+PLL) as follows:
        Break PH into PHH + PHL, where PHH = N + j/2^expK.
        Z = PHL + PL + PLL
        Exp2Poly = B1*Z + B2*Z^2 ... + B5*Z^5
        Get 2^(j/2^expK) from table in the form THI+TLO.
      Now we have 2^(PH+PL+PLL) ~= 2^N * (THI + TLO) * (1 + Exp2Poly).
      Get significand of 2^(PH+PL+PLL) in the form ResHi+ResLo:
        ResHi := THI
        ResLo := THI * Exp2Poly + TLO
      Get exponent ERes of the result:
        Res := ResHi + ResLo:
        Result := ex(Res) + N. */

        pushq %rbp
        cfi_adjust_cfa_offset (8)
        cfi_rel_offset (%rbp, 0)
        movq %rsp, %rbp
        cfi_def_cfa_register (%rbp)
        andq $-64, %rsp
        subq $256, %rsp
        movaps %xmm0, %xmm3
        movhlps %xmm0, %xmm3
        movaps %xmm1, %xmm5
        movups %xmm8, 112(%rsp)
        movaps %xmm5, %xmm2
        cvtps2pd %xmm3, %xmm8
        cvtps2pd %xmm5, %xmm7
        movups %xmm9, 96(%rsp)
        movaps %xmm0, %xmm4
        cvtps2pd %xmm0, %xmm9
        movq __svml_spow_data@GOTPCREL(%rip), %rdx
        movups %xmm10, 176(%rsp)
        movups %xmm13, 48(%rsp)
        movups _ExpMask(%rdx), %xmm6

/* preserve mantissa, set input exponent to 2^(-10) */
        movaps %xmm6, %xmm10
        andps %xmm8, %xmm6
        andps %xmm9, %xmm10

/* exponent bits selection */
        psrlq $20, %xmm9
        orps _Two10(%rdx), %xmm6
        psrlq $20, %xmm8
        orps _Two10(%rdx), %xmm10

/* reciprocal approximation good to at least 11 bits */
        cvtpd2ps %xmm6, %xmm13
        cvtpd2ps %xmm10, %xmm1
        movlhps %xmm13, %xmm13
        movhlps %xmm5, %xmm2
        movlhps %xmm1, %xmm1
        movups %xmm12, 208(%rsp)
        rcpps %xmm13, %xmm12
        movups %xmm11, 80(%rsp)
        cvtps2pd %xmm2, %xmm11
        rcpps %xmm1, %xmm2
        movups %xmm14, 144(%rsp)
        cvtps2pd %xmm12, %xmm14
        movups %xmm15, 160(%rsp)
        cvtps2pd %xmm2, %xmm15
        shufps $221, %xmm8, %xmm9

/* round reciprocal to nearest integer, will have 1+9 mantissa bits */
        roundpd $0, %xmm14, %xmm14

/* biased exponent in DP format */
        pshufd $238, %xmm9, %xmm8
        roundpd $0, %xmm15, %xmm15
        cvtdq2pd %xmm8, %xmm1
        mulpd %xmm15, %xmm10
        mulpd %xmm14, %xmm6
        cvtdq2pd %xmm9, %xmm2
        subpd _One(%rdx), %xmm10
        subpd _One(%rdx), %xmm6

/* table lookup */
        movaps %xmm14, %xmm8
        movaps %xmm15, %xmm9
        psrlq $40, %xmm8
        psrlq $40, %xmm9
        movd %xmm8, %r8d
        movd %xmm9, %eax
        psubd _NMINNORM(%rdx), %xmm4
        movdqu _ABSMASK(%rdx), %xmm3
        pextrd $2, %xmm8, %r9d
        pand %xmm5, %xmm3
        movups _Threshold(%rdx), %xmm8
        pextrd $2, %xmm9, %ecx
        movaps %xmm8, %xmm9
        cmpltpd %xmm15, %xmm9
        cmpltpd %xmm14, %xmm8
        andps _Bias(%rdx), %xmm9
        movaps %xmm10, %xmm14
        andps _Bias(%rdx), %xmm8
        movaps %xmm6, %xmm15
        orps _Bias1(%rdx), %xmm9
        orps _Bias1(%rdx), %xmm8
        subpd %xmm9, %xmm2
        subpd %xmm8, %xmm1
        mulpd %xmm10, %xmm14
        mulpd %xmm6, %xmm15
        mulpd _L2(%rdx), %xmm2
        mulpd _L2(%rdx), %xmm1
        movups _poly_coeff_3(%rdx), %xmm9
        movaps %xmm9, %xmm8
        mulpd %xmm10, %xmm8
        mulpd %xmm6, %xmm9
        addpd _poly_coeff_4(%rdx), %xmm8
        addpd _poly_coeff_4(%rdx), %xmm9
        mulpd %xmm14, %xmm8
        mulpd %xmm15, %xmm9

/* reconstruction */
        addpd %xmm8, %xmm10
        addpd %xmm9, %xmm6
        movslq %eax, %rax
        movslq %r8d, %r8
        movslq %ecx, %rcx
        movslq %r9d, %r9
        movsd _Log2Rcp_lookup(%rdx,%rax), %xmm13
        movsd _Log2Rcp_lookup(%rdx,%r8), %xmm12
        movhpd _Log2Rcp_lookup(%rdx,%rcx), %xmm13
        movhpd _Log2Rcp_lookup(%rdx,%r9), %xmm12
        addpd %xmm10, %xmm13
        addpd %xmm6, %xmm12
        addpd %xmm13, %xmm2
        addpd %xmm12, %xmm1
        mulpd %xmm7, %xmm2
        mulpd %xmm11, %xmm1
        movups __dbInvLn2(%rdx), %xmm11
        movdqa %xmm4, %xmm12
        movaps %xmm11, %xmm10
        mulpd %xmm2, %xmm10
        mulpd %xmm1, %xmm11

/* to round down; if dR is an integer we will get R = 1, which is ok */
        movaps %xmm10, %xmm8
        movaps %xmm11, %xmm9
        subpd __dbHALF(%rdx), %xmm8
        subpd __dbHALF(%rdx), %xmm9
        addpd __dbShifter(%rdx), %xmm8
        addpd __dbShifter(%rdx), %xmm9
        movaps %xmm8, %xmm6
        movaps %xmm9, %xmm7
        subpd __dbShifter(%rdx), %xmm6
        subpd __dbShifter(%rdx), %xmm7

/* [0..1) */
        subpd %xmm6, %xmm10
        subpd %xmm7, %xmm11
        mulpd __dbC1(%rdx), %xmm10
        mulpd __dbC1(%rdx), %xmm11

/* hi bits */
        shufps $221, %xmm1, %xmm2
        movdqu _NMAXVAL(%rdx), %xmm1
        pcmpgtd %xmm1, %xmm12
        pcmpeqd %xmm1, %xmm4
        por %xmm4, %xmm12
        movdqa %xmm3, %xmm1
        movdqu _INF(%rdx), %xmm4
        pcmpgtd %xmm4, %xmm1
        pcmpeqd %xmm4, %xmm3

/* iAbsX = iAbsX&iAbsMask */
        pand __iAbsMask(%rdx), %xmm2
        por %xmm3, %xmm1

/* iRangeMask = (iAbsX>iDomainRange) */
        pcmpgtd __iDomainRange(%rdx), %xmm2
        por %xmm1, %xmm12
        movups __lbLOWKBITS(%rdx), %xmm3
        por %xmm2, %xmm12

/* low K bits */
        movaps %xmm3, %xmm2
        andps %xmm9, %xmm3
        andps %xmm8, %xmm2
        psrlq $11, %xmm8

/* dpP= _dbT+lJ*T_ITEM_GRAN */
        movd %xmm2, %r10d
        psrlq $11, %xmm9
        movd %xmm3, %ecx

/* NB : including +/- sign for the exponent!! */
        psllq $52, %xmm8
        psllq $52, %xmm9
        pextrw $4, %xmm2, %r11d
        pextrw $4, %xmm3, %r8d
        movmskps %xmm12, %eax
        shll $3, %r10d
        shll $3, %ecx
        shll $3, %r11d
        shll $3, %r8d
        movq 13952(%rdx,%r10), %xmm6
        movq 13952(%rdx,%rcx), %xmm7
        movhpd 13952(%rdx,%r11), %xmm6
        movhpd 13952(%rdx,%r8), %xmm7
        mulpd %xmm6, %xmm10
        mulpd %xmm7, %xmm11
        addpd %xmm10, %xmm6
        addpd %xmm11, %xmm7
        paddq %xmm8, %xmm6
        paddq %xmm9, %xmm7
        cvtpd2ps %xmm6, %xmm1
        cvtpd2ps %xmm7, %xmm4
        movlhps %xmm4, %xmm1
        testl %eax, %eax
        jne .LBL_1_3

.LBL_1_2:
        cfi_remember_state
        movups 112(%rsp), %xmm8
        movaps %xmm1, %xmm0
        movups 96(%rsp), %xmm9
        movups 176(%rsp), %xmm10
        movups 80(%rsp), %xmm11
        movups 208(%rsp), %xmm12
        movups 48(%rsp), %xmm13
        movups 144(%rsp), %xmm14
        movups 160(%rsp), %xmm15
        movq %rbp, %rsp
        cfi_def_cfa_register (%rsp)
        popq %rbp
        cfi_adjust_cfa_offset (-8)
        cfi_restore (%rbp)
        ret

.LBL_1_3:
        cfi_restore_state
        movups %xmm0, 64(%rsp)
        movups %xmm5, 128(%rsp)
        movups %xmm1, 192(%rsp)
        je .LBL_1_2

        xorb %cl, %cl
        xorl %edx, %edx
        movq %rsi, 8(%rsp)
        movq %rdi, (%rsp)
        movq %r12, 40(%rsp)
        cfi_offset_rel_rsp (12, 40)
        movb %cl, %r12b
        movq %r13, 32(%rsp)
        cfi_offset_rel_rsp (13, 32)
        movl %eax, %r13d
        movq %r14, 24(%rsp)
        cfi_offset_rel_rsp (14, 24)
        movl %edx, %r14d
        movq %r15, 16(%rsp)
        cfi_offset_rel_rsp (15, 16)
        cfi_remember_state

.LBL_1_6:
        btl %r14d, %r13d
        jc .LBL_1_12

.LBL_1_7:
        lea 1(%r14), %esi
        btl %esi, %r13d
        jc .LBL_1_10

.LBL_1_8:
        incb %r12b
        addl $2, %r14d
        cmpb $16, %r12b
        jb .LBL_1_6

        movq 8(%rsp), %rsi
        movq (%rsp), %rdi
        movq 40(%rsp), %r12
        cfi_restore (%r12)
        movq 32(%rsp), %r13
        cfi_restore (%r13)
        movq 24(%rsp), %r14
        cfi_restore (%r14)
        movq 16(%rsp), %r15
        cfi_restore (%r15)
        movups 192(%rsp), %xmm1
        jmp .LBL_1_2

.LBL_1_10:
        cfi_restore_state
        movzbl %r12b, %r15d
        movss 68(%rsp,%r15,8), %xmm0
        movss 132(%rsp,%r15,8), %xmm1

        call powf@PLT

        movss %xmm0, 196(%rsp,%r15,8)
        jmp .LBL_1_8

.LBL_1_12:
        movzbl %r12b, %r15d
        movss 64(%rsp,%r15,8), %xmm0
        movss 128(%rsp,%r15,8), %xmm1

        call powf@PLT

        movss %xmm0, 192(%rsp,%r15,8)
        jmp .LBL_1_7

END (_ZGVbN4vv_powf_sse4)
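The .LBL_1_3 .. .LBL_1_12 tail above is the slow path shared by all the variants in this commit: inputs and results are spilled to the stack, and every lane flagged in the range mask (the movmskps result) is recomputed with a call to scalar powf@PLT. In C the pattern looks roughly like this (VLEN and the mask layout are simplified, illustrative only):

#include <math.h>

#define VLEN 4   /* 4 lanes for the SSE4 variant above */

/* Recompute out-of-range lanes with the scalar fallback.  */
static void
powf_special_cases (const float *x, const float *y, float *r, unsigned mask)
{
  for (int i = 0; i < VLEN; i++)
    if (mask & (1u << i))        /* btl %r14d, %r13d in the assembly */
      r[i] = powf (x[i], y[i]);  /* call powf@PLT */
}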
38	sysdeps/x86_64/fpu/multiarch/svml_s_powf8_core.S	Normal file
@@ -0,0 +1,38 @@
/* Multiple versions of vectorized powf.
   Copyright (C) 2014-2015 Free Software Foundation, Inc.
   This file is part of the GNU C Library.

   The GNU C Library is free software; you can redistribute it and/or
   modify it under the terms of the GNU Lesser General Public
   License as published by the Free Software Foundation; either
   version 2.1 of the License, or (at your option) any later version.

   The GNU C Library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
   Lesser General Public License for more details.

   You should have received a copy of the GNU Lesser General Public
   License along with the GNU C Library; if not, see
   <http://www.gnu.org/licenses/>. */

#include <sysdep.h>
#include <init-arch.h>

        .text
ENTRY (_ZGVdN8vv_powf)
        .type _ZGVdN8vv_powf, @gnu_indirect_function
        cmpl $0, KIND_OFFSET+__cpu_features(%rip)
        jne 1f
        call __init_cpu_features
1:      leaq _ZGVdN8vv_powf_avx2(%rip), %rax
        testl $bit_AVX2_Usable, __cpu_features+FEATURE_OFFSET+index_AVX2_Usable(%rip)
        jz 2f
        ret
2:      leaq _ZGVdN8vv_powf_sse_wrapper(%rip), %rax
        ret
END (_ZGVdN8vv_powf)
libmvec_hidden_def (_ZGVdN8vv_powf)

#define _ZGVdN8vv_powf _ZGVdN8vv_powf_sse_wrapper
#include "../svml_s_powf8_core.S"
357	sysdeps/x86_64/fpu/multiarch/svml_s_powf8_core_avx2.S	Normal file
@ -0,0 +1,357 @@
|
||||
/* Function powf vectorized with AVX2.
|
||||
Copyright (C) 2014-2015 Free Software Foundation, Inc.
|
||||
This file is part of the GNU C Library.
|
||||
|
||||
The GNU C Library is free software; you can redistribute it and/or
|
||||
modify it under the terms of the GNU Lesser General Public
|
||||
License as published by the Free Software Foundation; either
|
||||
version 2.1 of the License, or (at your option) any later version.
|
||||
|
||||
The GNU C Library is distributed in the hope that it will be useful,
|
||||
but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
|
||||
Lesser General Public License for more details.
|
||||
|
||||
You should have received a copy of the GNU Lesser General Public
|
||||
License along with the GNU C Library; if not, see
|
||||
<http://www.gnu.org/licenses/>. */
|
||||
|
||||
#include <sysdep.h>
|
||||
#include "svml_s_powf_data.h"
|
||||
|
||||
.text
|
||||
ENTRY(_ZGVdN8vv_powf_avx2)
|
||||
/*
|
||||
ALGORITHM DESCRIPTION:
|
||||
|
||||
We are using the next identity : pow(x,y) = 2^(y * log2(x)).
|
||||
|
||||
1) log2(x) calculation
|
||||
Here we use the following formula.
|
||||
Let |x|=2^k1*X1, where k1 is integer, 1<=X1<2.
|
||||
Let C ~= 1/ln(2),
|
||||
Rcp1 ~= 1/X1, X2=Rcp1*X1,
|
||||
Rcp2 ~= 1/X2, X3=Rcp2*X2,
|
||||
Rcp3 ~= 1/X3, Rcp3C ~= C/X3.
|
||||
Then
|
||||
log2|x| = k1 + log2(1/Rcp1) + log2(1/Rcp2) + log2(C/Rcp3C) +
|
||||
log2(X1*Rcp1*Rcp2*Rcp3C/C),
|
||||
where X1*Rcp1*Rcp2*Rcp3C = C*(1+q), q is very small.
|
||||
|
||||
The values of Rcp1, log2(1/Rcp1), Rcp2, log2(1/Rcp2),
|
||||
Rcp3C, log2(C/Rcp3C) are taken from tables.
|
||||
Values of Rcp1, Rcp2, Rcp3C are such that RcpC=Rcp1*Rcp2*Rcp3C
|
||||
is exactly represented in target precision.
|
||||
|
||||
log2(X1*Rcp1*Rcp2*Rcp3C/C) = log2(1+q) = ln(1+q)/ln2 =
|
||||
= 1/(ln2)*q - 1/(2ln2)*q^2 + 1/(3ln2)*q^3 - ... =
|
||||
= 1/(C*ln2)*cq - 1/(2*C^2*ln2)*cq^2 + 1/(3*C^3*ln2)*cq^3 - ... =
|
||||
= (1 + a1)*cq + a2*cq^2 + a3*cq^3 + ...,
|
||||
where
|
||||
cq=X1*Rcp1*Rcp2*Rcp3C-C,
|
||||
a1=1/(C*ln(2))-1 is small,
|
||||
a2=1/(2*C^2*ln2),
|
||||
a3=1/(3*C^3*ln2),
|
||||
...
|
||||
Log2 result is split by three parts: HH+HL+HLL
|
||||
|
||||
2) Calculation of y*log2(x)
|
||||
Split y into YHi+YLo.
|
||||
Get high PH and medium PL parts of y*log2|x|.
|
||||
Get low PLL part of y*log2|x|.
|
||||
Now we have PH+PL+PLL ~= y*log2|x|.
|
||||
|
||||
3) Calculation of 2^(y*log2(x))
|
||||
Let's represent PH+PL+PLL in the form N + j/2^expK + Z,
|
||||
where expK=7 in this implementation, N and j are integers,
|
||||
0<=j<=2^expK-1, |Z|<2^(-expK-1). Hence
|
||||
2^(PH+PL+PLL) ~= 2^N * 2^(j/2^expK) * 2^Z,
|
||||
where 2^(j/2^expK) is stored in a table, and
|
||||
2^Z ~= 1 + B1*Z + B2*Z^2 ... + B5*Z^5.
|
||||
We compute 2^(PH+PL+PLL) as follows:
|
||||
Break PH into PHH + PHL, where PHH = N + j/2^expK.
|
||||
Z = PHL + PL + PLL
|
||||
Exp2Poly = B1*Z + B2*Z^2 ... + B5*Z^5
|
||||
Get 2^(j/2^expK) from table in the form THI+TLO.
|
||||
Now we have 2^(PH+PL+PLL) ~= 2^N * (THI + TLO) * (1 + Exp2Poly).
|
||||
Get significand of 2^(PH+PL+PLL) in the form ResHi+ResLo:
|
||||
ResHi := THI
|
||||
ResLo := THI * Exp2Poly + TLO
|
||||
Get exponent ERes of the result:
|
||||
Res := ResHi + ResLo:
|
||||
Result := ex(Res) + N. */
|
||||
|
||||
pushq %rbp
|
||||
cfi_adjust_cfa_offset (8)
|
||||
cfi_rel_offset (%rbp, 0)
|
||||
movq %rsp, %rbp
|
||||
cfi_def_cfa_register (%rbp)
|
||||
andq $-64, %rsp
|
||||
subq $448, %rsp
|
||||
lea __VPACK_ODD_ind.6357.0.1(%rip), %rcx
|
||||
vmovups %ymm14, 320(%rsp)
|
||||
|
||||
/* hi bits */
|
||||
lea __VPACK_ODD_ind.6358.0.1(%rip), %rax
|
||||
vmovups %ymm12, 256(%rsp)
|
||||
vmovups %ymm9, 96(%rsp)
|
||||
vmovups %ymm13, 224(%rsp)
|
||||
vmovups %ymm15, 352(%rsp)
|
||||
vmovups %ymm11, 384(%rsp)
|
||||
vmovups %ymm10, 288(%rsp)
|
||||
vmovups (%rcx), %ymm10
|
||||
vmovups %ymm8, 160(%rsp)
|
||||
vmovdqa %ymm1, %ymm9
|
||||
movq __svml_spow_data@GOTPCREL(%rip), %rdx
|
||||
vextractf128 $1, %ymm0, %xmm7
|
||||
vcvtps2pd %xmm0, %ymm14
|
||||
vcvtps2pd %xmm7, %ymm12
|
||||
vpsubd _NMINNORM(%rdx), %ymm0, %ymm7
|
||||
|
||||
/* preserve mantissa, set input exponent to 2^(-10) */
|
||||
vandpd _ExpMask(%rdx), %ymm14, %ymm3
|
||||
vandpd _ExpMask(%rdx), %ymm12, %ymm13
|
||||
|
||||
/* exponent bits selection */
|
||||
vpsrlq $20, %ymm12, %ymm12
|
||||
vpsrlq $20, %ymm14, %ymm14
|
||||
vextractf128 $1, %ymm9, %xmm2
|
||||
vcvtps2pd %xmm9, %ymm1
|
||||
vpand _ABSMASK(%rdx), %ymm9, %ymm8
|
||||
vcvtps2pd %xmm2, %ymm6
|
||||
vorpd _Two10(%rdx), %ymm3, %ymm2
|
||||
vorpd _Two10(%rdx), %ymm13, %ymm3
|
||||
|
||||
/* reciprocal approximation good to at least 11 bits */
|
||||
vcvtpd2ps %ymm2, %xmm5
|
||||
vcvtpd2ps %ymm3, %xmm15
|
||||
vrcpps %xmm5, %xmm4
|
||||
vrcpps %xmm15, %xmm11
|
||||
vcvtps2pd %xmm4, %ymm13
|
||||
vcvtps2pd %xmm11, %ymm4
|
||||
vpermps %ymm12, %ymm10, %ymm11
|
||||
|
||||
/* round reciprocal to nearest integer, will have 1+9 mantissa bits */
|
||||
vroundpd $0, %ymm13, %ymm12
|
||||
        vpermps   %ymm14, %ymm10, %ymm5
        vroundpd  $0, %ymm4, %ymm14
        vmovupd   _One(%rdx), %ymm4

/* table lookup */
        vpsrlq    $40, %ymm12, %ymm10
        vfmsub213pd %ymm4, %ymm12, %ymm2
        vfmsub213pd %ymm4, %ymm14, %ymm3
        vcmpgt_oqpd _Threshold(%rdx), %ymm12, %ymm12
        vxorpd    %ymm4, %ymm4, %ymm4
        vandpd    _Bias(%rdx), %ymm12, %ymm12

/* biased exponent in DP format */
        vcvtdq2pd %xmm11, %ymm13
        vpcmpeqd  %ymm11, %ymm11, %ymm11
        vgatherqpd %ymm11, _Log2Rcp_lookup(%rdx,%ymm10), %ymm4
        vpsrlq    $40, %ymm14, %ymm10
        vcmpgt_oqpd _Threshold(%rdx), %ymm14, %ymm14
        vpcmpeqd  %ymm11, %ymm11, %ymm11
        vandpd    _Bias(%rdx), %ymm14, %ymm14
        vcvtdq2pd %xmm5, %ymm15
        vxorpd    %ymm5, %ymm5, %ymm5
        vgatherqpd %ymm11, _Log2Rcp_lookup(%rdx,%ymm10), %ymm5
        vorpd     _Bias1(%rdx), %ymm12, %ymm11
        vorpd     _Bias1(%rdx), %ymm14, %ymm10
        vsubpd    %ymm11, %ymm15, %ymm11
        vsubpd    %ymm10, %ymm13, %ymm14
        vmovupd   _poly_coeff_4(%rdx), %ymm15
        vmovupd   _poly_coeff_3(%rdx), %ymm13
        vmulpd    %ymm3, %ymm3, %ymm10
        vfmadd213pd %ymm15, %ymm3, %ymm13
        vmovdqa   %ymm15, %ymm12
        vfmadd231pd _poly_coeff_3(%rdx), %ymm2, %ymm12
        vmulpd    %ymm2, %ymm2, %ymm15

/* reconstruction */
        vfmadd213pd %ymm3, %ymm10, %ymm13
        vfmadd213pd %ymm2, %ymm15, %ymm12
        vaddpd    %ymm5, %ymm13, %ymm13
        vaddpd    %ymm4, %ymm12, %ymm2
        vfmadd231pd _L2(%rdx), %ymm14, %ymm13
        vfmadd132pd _L2(%rdx), %ymm2, %ymm11
        vmulpd    %ymm6, %ymm13, %ymm2
        vmulpd    %ymm1, %ymm11, %ymm10
        vmulpd    __dbInvLn2(%rdx), %ymm2, %ymm6
        vmulpd    __dbInvLn2(%rdx), %ymm10, %ymm15

/* to round down; if dR is an integer we will get R = 1, which is ok */
        vsubpd    __dbHALF(%rdx), %ymm6, %ymm3
        vsubpd    __dbHALF(%rdx), %ymm15, %ymm1
        vaddpd    __dbShifter(%rdx), %ymm3, %ymm13
        vaddpd    __dbShifter(%rdx), %ymm1, %ymm14
        vsubpd    __dbShifter(%rdx), %ymm13, %ymm12
        vmovups   (%rax), %ymm1
        vsubpd    __dbShifter(%rdx), %ymm14, %ymm11

/* [0..1) */
        vsubpd    %ymm12, %ymm6, %ymm6
        vpermps   %ymm10, %ymm1, %ymm3
        vpermps   %ymm2, %ymm1, %ymm10
        vpcmpgtd  _NMAXVAL(%rdx), %ymm7, %ymm4
        vpcmpgtd  _INF(%rdx), %ymm8, %ymm1
        vpcmpeqd  _NMAXVAL(%rdx), %ymm7, %ymm7
        vpcmpeqd  _INF(%rdx), %ymm8, %ymm8
        vpor      %ymm7, %ymm4, %ymm2
        vpor      %ymm8, %ymm1, %ymm1
        vsubpd    %ymm11, %ymm15, %ymm7
        vinsertf128 $1, %xmm10, %ymm3, %ymm10
        vpor      %ymm1, %ymm2, %ymm3

/* iAbsX = iAbsX&iAbsMask */
        vandps    __iAbsMask(%rdx), %ymm10, %ymm10

/* iRangeMask = (iAbsX>iDomainRange) */
        vpcmpgtd  __iDomainRange(%rdx), %ymm10, %ymm4
        vpor      %ymm4, %ymm3, %ymm5
        vmulpd    __dbC1(%rdx), %ymm7, %ymm4
        vmovmskps %ymm5, %ecx
        vmulpd    __dbC1(%rdx), %ymm6, %ymm5

/* low K bits */
        vandps    __lbLOWKBITS(%rdx), %ymm14, %ymm6

/* dpP= _dbT+lJ*T_ITEM_GRAN */
        vxorpd    %ymm7, %ymm7, %ymm7
        vpcmpeqd  %ymm1, %ymm1, %ymm1
        vandps    __lbLOWKBITS(%rdx), %ymm13, %ymm2
        vxorpd    %ymm10, %ymm10, %ymm10
        vpcmpeqd  %ymm3, %ymm3, %ymm3
        vgatherqpd %ymm1, 13952(%rdx,%ymm6,8), %ymm7
        vgatherqpd %ymm3, 13952(%rdx,%ymm2,8), %ymm10
        vpsrlq    $11, %ymm14, %ymm14
        vpsrlq    $11, %ymm13, %ymm13
        vfmadd213pd %ymm7, %ymm4, %ymm7
        vfmadd213pd %ymm10, %ymm5, %ymm10

/* NB : including +/- sign for the exponent!! */
        vpsllq    $52, %ymm14, %ymm8
        vpsllq    $52, %ymm13, %ymm11
        vpaddq    %ymm8, %ymm7, %ymm12
        vpaddq    %ymm11, %ymm10, %ymm1
        vcvtpd2ps %ymm12, %xmm15
        vcvtpd2ps %ymm1, %xmm2
        vinsertf128 $1, %xmm2, %ymm15, %ymm1
        testl     %ecx, %ecx
        jne       .LBL_1_3
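Taken together, the arithmetic above is the classic pow decomposition powf (x, y) = 2^(y * log2 (x)): the gathers against _Log2Rcp_lookup plus the _poly_coeff_* FMAs build log2 (x) in double precision, and the __dbShifter/__lbLOWKBITS/vpsllq $52 sequence splits y * log2 (x) into an integer exponent and a tabulated 2^fraction. A minimal scalar sketch in C, assuming IEEE-754 binary64, with libm calls standing in for the kernel's private tables:

#include <math.h>
#include <stdint.h>
#include <string.h>

/* Scalar sketch of the fast path: powf (x, y) = 2^(y * log2 (x)).
   log2 () and exp2 () stand in for the _Log2Rcp_lookup and __dbT
   table-plus-polynomial evaluations done by the vector code.  */
static float
powf_fast_path_sketch (float x, float y)
{
  double t = (double) y * log2 ((double) x);

  /* Split t = n + r with r in [0, 1); the asm uses the __dbShifter
     add/subtract trick instead of floor ().  */
  double n = floor (t);
  double p = exp2 (t - n);              /* p is in [1, 2)  */

  /* Scale p by 2^n through the exponent field, as vpsllq $52 plus
     vpaddq do; lanes that could over/underflow take the slow path.  */
  uint64_t bits;
  memcpy (&bits, &p, sizeof bits);
  bits += (uint64_t) (int64_t) n << 52;
  memcpy (&p, &bits, sizeof bits);
  return (float) p;
}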

.LBL_1_2:
        cfi_remember_state
        vmovups   160(%rsp), %ymm8
        vmovups   96(%rsp), %ymm9
        vmovups   288(%rsp), %ymm10
        vmovups   384(%rsp), %ymm11
        vmovups   256(%rsp), %ymm12
        vmovups   224(%rsp), %ymm13
        vmovups   320(%rsp), %ymm14
        vmovups   352(%rsp), %ymm15
        vmovdqa   %ymm1, %ymm0
        movq      %rbp, %rsp
        cfi_def_cfa_register (%rsp)
        popq      %rbp
        cfi_adjust_cfa_offset (-8)
        cfi_restore (%rbp)
        ret

.LBL_1_3:
        cfi_restore_state
        vmovups   %ymm0, 64(%rsp)
        vmovups   %ymm9, 128(%rsp)
        vmovups   %ymm1, 192(%rsp)
        je        .LBL_1_2

        xorb      %dl, %dl
        xorl      %eax, %eax
        movq      %rsi, 8(%rsp)
        movq      %rdi, (%rsp)
        movq      %r12, 40(%rsp)
        cfi_offset_rel_rsp (12, 40)
        movb      %dl, %r12b
        movq      %r13, 32(%rsp)
        cfi_offset_rel_rsp (13, 32)
        movl      %ecx, %r13d
        movq      %r14, 24(%rsp)
        cfi_offset_rel_rsp (14, 24)
        movl      %eax, %r14d
        movq      %r15, 16(%rsp)
        cfi_offset_rel_rsp (15, 16)
        cfi_remember_state

.LBL_1_6:
        btl       %r14d, %r13d
        jc        .LBL_1_12

.LBL_1_7:
        lea       1(%r14), %esi
        btl       %esi, %r13d
        jc        .LBL_1_10

.LBL_1_8:
        incb      %r12b
        addl      $2, %r14d
        cmpb      $16, %r12b
        jb        .LBL_1_6

        movq      8(%rsp), %rsi
        movq      (%rsp), %rdi
        movq      40(%rsp), %r12
        cfi_restore (%r12)
        movq      32(%rsp), %r13
        cfi_restore (%r13)
        movq      24(%rsp), %r14
        cfi_restore (%r14)
        movq      16(%rsp), %r15
        cfi_restore (%r15)
        vmovups   192(%rsp), %ymm1
        jmp       .LBL_1_2

.LBL_1_10:
        cfi_restore_state
        movzbl    %r12b, %r15d
        vmovss    68(%rsp,%r15,8), %xmm0
        vmovss    132(%rsp,%r15,8), %xmm1
        vzeroupper

        call      powf@PLT

        vmovss    %xmm0, 196(%rsp,%r15,8)
        jmp       .LBL_1_8

.LBL_1_12:
        movzbl    %r12b, %r15d
        vmovss    64(%rsp,%r15,8), %xmm0
        vmovss    128(%rsp,%r15,8), %xmm1
        vzeroupper

        call      powf@PLT

        vmovss    %xmm0, 192(%rsp,%r15,8)
        jmp       .LBL_1_7

END(_ZGVdN8vv_powf_avx2)
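Everything after .LBL_1_3 is the slow path. vmovmskps collapsed the per-lane special-case mask into %ecx; the loop then walks the mask two lanes per iteration (%r14d is the lane index, %r12b the pair counter) and, for each set bit, reloads that lane's x and y from the spill slots at 64(%rsp)/128(%rsp) and calls scalar powf through the PLT, writing the result back into the 192(%rsp) result image. The control flow reduces to this C shape (a sketch, not the committed code):

#include <math.h>

/* Per-lane scalar fallback, as in .LBL_1_6 .. .LBL_1_12: any lane whose
   mask bit is set is recomputed with scalar powf.  The asm walks a fixed
   number of pair steps with btl, so clear bits simply fall through.  */
static void
powf_slow_path_sketch (const float x[8], const float y[8], float r[8],
                       unsigned int mask)
{
  for (int lane = 0; lane < 8; lane++)
    if (mask & (1u << lane))
      r[lane] = powf (x[lane], y[lane]);
}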

        .section .rodata, "a"
__VPACK_ODD_ind.6357.0.1:
        .long 1
        .long 3
        .long 5
        .long 7
        .long 0
        .long 0
        .long 0
        .long 0
        .space 32, 0x00
__VPACK_ODD_ind.6358.0.1:
        .long 1
        .long 3
        .long 5
        .long 7
        .long 0
        .long 0
        .long 0
        .long 0
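The two __VPACK_ODD_ind blocks are the index vectors loaded through %rax for the vpermps instructions above: indices {1, 3, 5, 7} pick the odd 32-bit lanes from a register holding four doubles, i.e. each double's high dword (sign, exponent, and top mantissa bits). That gives an integer view of y * log2 (x) for the iAbsX/iDomainRange screening without a float-to-int conversion. The same trick in C, relying on x86's little-endian lane order:

#include <stdint.h>
#include <string.h>

/* What vpermps with the {1, 3, 5, 7, ...} index vector achieves:
   pack the high 32 bits of each binary64 lane for integer tests.  */
static void
pack_odd_dwords_sketch (const double v[4], uint32_t hi[4])
{
  for (int lane = 0; lane < 4; lane++)
    {
      uint64_t bits;
      memcpy (&bits, &v[lane], sizeof bits);
      hi[lane] = (uint32_t) (bits >> 32);  /* the odd dword of the pair */
    }
}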
25
sysdeps/x86_64/fpu/svml_s_powf16_core.S
Normal file
@ -0,0 +1,25 @@
/* Function powf vectorized with AVX-512. Wrapper to AVX2 version.
   Copyright (C) 2014-2015 Free Software Foundation, Inc.
   This file is part of the GNU C Library.

   The GNU C Library is free software; you can redistribute it and/or
   modify it under the terms of the GNU Lesser General Public
   License as published by the Free Software Foundation; either
   version 2.1 of the License, or (at your option) any later version.

   The GNU C Library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   Lesser General Public License for more details.

   You should have received a copy of the GNU Lesser General Public
   License along with the GNU C Library; if not, see
   <http://www.gnu.org/licenses/>.  */

#include <sysdep.h>
#include "svml_s_wrapper_impl.h"

        .text
ENTRY (_ZGVeN16vv_powf)
WRAPPER_IMPL_AVX512_ff _ZGVdN8vv_powf
END (_ZGVeN16vv_powf)
29
sysdeps/x86_64/fpu/svml_s_powf4_core.S
Normal file
@ -0,0 +1,29 @@
/* Function powf vectorized with SSE2.
   Copyright (C) 2014-2015 Free Software Foundation, Inc.
   This file is part of the GNU C Library.

   The GNU C Library is free software; you can redistribute it and/or
   modify it under the terms of the GNU Lesser General Public
   License as published by the Free Software Foundation; either
   version 2.1 of the License, or (at your option) any later version.

   The GNU C Library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   Lesser General Public License for more details.

   You should have received a copy of the GNU Lesser General Public
   License along with the GNU C Library; if not, see
   <http://www.gnu.org/licenses/>.  */

#include <sysdep.h>
#include "svml_s_wrapper_impl.h"

        .text
ENTRY (_ZGVbN4vv_powf)
WRAPPER_IMPL_SSE2_ff powf
END (_ZGVbN4vv_powf)

#ifndef USE_MULTIARCH
libmvec_hidden_def (_ZGVbN4vv_powf)
#endif
29
sysdeps/x86_64/fpu/svml_s_powf8_core.S
Normal file
@ -0,0 +1,29 @@
/* Function powf vectorized with AVX2, wrapper version.
   Copyright (C) 2014-2015 Free Software Foundation, Inc.
   This file is part of the GNU C Library.

   The GNU C Library is free software; you can redistribute it and/or
   modify it under the terms of the GNU Lesser General Public
   License as published by the Free Software Foundation; either
   version 2.1 of the License, or (at your option) any later version.

   The GNU C Library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   Lesser General Public License for more details.

   You should have received a copy of the GNU Lesser General Public
   License along with the GNU C Library; if not, see
   <http://www.gnu.org/licenses/>.  */

#include <sysdep.h>
#include "svml_s_wrapper_impl.h"

        .text
ENTRY (_ZGVdN8vv_powf)
WRAPPER_IMPL_AVX_ff _ZGVbN4vv_powf
END (_ZGVdN8vv_powf)

#ifndef USE_MULTIARCH
libmvec_hidden_def (_ZGVdN8vv_powf)
#endif
25
sysdeps/x86_64/fpu/svml_s_powf8_core_avx.S
Normal file
@ -0,0 +1,25 @@
/* Function powf vectorized in AVX ISA as wrapper to SSE4 ISA version.
   Copyright (C) 2014-2015 Free Software Foundation, Inc.
   This file is part of the GNU C Library.

   The GNU C Library is free software; you can redistribute it and/or
   modify it under the terms of the GNU Lesser General Public
   License as published by the Free Software Foundation; either
   version 2.1 of the License, or (at your option) any later version.

   The GNU C Library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   Lesser General Public License for more details.

   You should have received a copy of the GNU Lesser General Public
   License along with the GNU C Library; if not, see
   <http://www.gnu.org/licenses/>.  */

#include <sysdep.h>
#include "svml_s_wrapper_impl.h"

        .text
ENTRY(_ZGVcN8vv_powf)
WRAPPER_IMPL_AVX_ff _ZGVbN4vv_powf
END(_ZGVcN8vv_powf)
3759
sysdeps/x86_64/fpu/svml_s_powf_data.S
Normal file
File diff suppressed because it is too large
76
sysdeps/x86_64/fpu/svml_s_powf_data.h
Normal file
@ -0,0 +1,76 @@
/* Offsets for data table for function powf.
   Copyright (C) 2014-2015 Free Software Foundation, Inc.
   This file is part of the GNU C Library.

   The GNU C Library is free software; you can redistribute it and/or
   modify it under the terms of the GNU Lesser General Public
   License as published by the Free Software Foundation; either
   version 2.1 of the License, or (at your option) any later version.

   The GNU C Library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   Lesser General Public License for more details.

   You should have received a copy of the GNU Lesser General Public
   License along with the GNU C Library; if not, see
   <http://www.gnu.org/licenses/>.  */

#ifndef S_POWF_DATA_H
#define S_POWF_DATA_H

#define _Log2Rcp_lookup -4218496
#define _NMINNORM 0
#define _NMAXVAL 64
#define _INF 128
#define _ABSMASK 192
#define _DOMAINRANGE 256
#define _Log_HA_table 320
#define _Log_LA_table 8576
#define _poly_coeff_1 12736
#define _poly_coeff_2 12800
#define _poly_coeff_3 12864
#define _poly_coeff_4 12928
#define _ExpMask 12992
#define _Two10 13056
#define _MinNorm 13120
#define _MaxNorm 13184
#define _HalfMask 13248
#define _One 13312
#define _L2H 13376
#define _L2L 13440
#define _Threshold 13504
#define _Bias 13568
#define _Bias1 13632
#define _L2 13696
#define _dInfs 13760
#define _dOnes 13824
#define _dZeros 13888
#define __dbT 13952
#define __dbInvLn2 30400
#define __dbShifter 30464
#define __dbHALF 30528
#define __dbC1 30592
#define __lbLOWKBITS 30656
#define __iAbsMask 30720
#define __iDomainRange 30784

.macro double_vector offset value
.if .-__svml_spow_data != \offset
.err
.endif
.rept 8
.quad \value
.endr
.endm

.macro float_vector offset value
.if .-__svml_spow_data != \offset
.err
.endif
.rept 16
.long \value
.endr
.endm

#endif
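These offsets are byte positions within the single __svml_spow_data blob, and the two macros emit one 64-byte row per constant (8 quads or 16 longs) while the .if/.err pair aborts the build if a row drifts from its advertised offset. The literal 13952 in the AVX2 kernel's vgatherqpd is exactly __dbT. A hypothetical C view of the addressing (the helper name here is illustrative, not part of the commit):

#include <stdint.h>

extern const char __svml_spow_data[];   /* defined in svml_s_powf_data.S */

/* _One(%rdx) in the kernels, with %rdx = &__svml_spow_data, is just a
   64-byte row of doubles at byte offset 13312.  */
static inline const double *
spow_row (long offset)
{
  return (const double *) (__svml_spow_data + offset);
}

/* e.g. spow_row (13312)[lane] is presumably 1.0 (_One) in every lane.  */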
sysdeps/x86_64/fpu/svml_s_wrapper_impl.h
@ -44,6 +44,38 @@
        ret
.endm

/* 2 argument SSE2 ISA version as wrapper to scalar.  */
.macro WRAPPER_IMPL_SSE2_ff callee
        subq      $56, %rsp
        cfi_adjust_cfa_offset(56)
        movaps    %xmm0, (%rsp)
        movaps    %xmm1, 16(%rsp)
        call      \callee@PLT
        movss     %xmm0, 32(%rsp)
        movss     4(%rsp), %xmm0
        movss     20(%rsp), %xmm1
        call      \callee@PLT
        movss     %xmm0, 36(%rsp)
        movss     8(%rsp), %xmm0
        movss     24(%rsp), %xmm1
        call      \callee@PLT
        movss     %xmm0, 40(%rsp)
        movss     12(%rsp), %xmm0
        movss     28(%rsp), %xmm1
        call      \callee@PLT
        movss     32(%rsp), %xmm3
        movss     36(%rsp), %xmm2
        movss     40(%rsp), %xmm1
        movss     %xmm0, 44(%rsp)
        unpcklps  %xmm1, %xmm3
        unpcklps  %xmm0, %xmm2
        unpcklps  %xmm2, %xmm3
        movaps    %xmm3, %xmm0
        addq      $56, %rsp
        cfi_adjust_cfa_offset(-56)
        ret
.endm
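WRAPPER_IMPL_SSE2_ff is the portable bottom layer: it spills both 4-float arguments, makes four scalar calls, then rebuilds the vector (the three unpcklps steps interleave r0/r2 and r1/r3 and merge them, restoring lane order). Functionally it is just the following, written with GNU C vector extensions:

/* C shape of WRAPPER_IMPL_SSE2_ff: a 4-lane vector op assembled from
   four scalar calls to the callee (powf here).  */
typedef float v4sf __attribute__ ((vector_size (16)));

static v4sf
wrapper_sse2_ff_sketch (float (*callee) (float, float), v4sf x, v4sf y)
{
  v4sf r;
  for (int lane = 0; lane < 4; lane++)
    r[lane] = callee (x[lane], y[lane]);
  return r;
}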

/* AVX/AVX2 ISA version as wrapper to SSE ISA version.  */
.macro WRAPPER_IMPL_AVX callee
        pushq     %rbp
@ -70,6 +102,34 @@
        ret
.endm

/* 2 argument AVX/AVX2 ISA version as wrapper to SSE ISA version.  */
.macro WRAPPER_IMPL_AVX_ff callee
        pushq     %rbp
        cfi_adjust_cfa_offset (8)
        cfi_rel_offset (%rbp, 0)
        movq      %rsp, %rbp
        cfi_def_cfa_register (%rbp)
        andq      $-32, %rsp
        subq      $64, %rsp
        vextractf128 $1, %ymm0, 16(%rsp)
        vextractf128 $1, %ymm1, (%rsp)
        vzeroupper
        call      HIDDEN_JUMPTARGET(\callee)
        vmovaps   %xmm0, 32(%rsp)
        vmovaps   16(%rsp), %xmm0
        vmovaps   (%rsp), %xmm1
        call      HIDDEN_JUMPTARGET(\callee)
        vmovaps   %xmm0, %xmm1
        vmovaps   32(%rsp), %xmm0
        vinsertf128 $1, %xmm1, %ymm0, %ymm0
        movq      %rbp, %rsp
        cfi_def_cfa_register (%rsp)
        popq      %rbp
        cfi_adjust_cfa_offset (-8)
        cfi_restore (%rbp)
        ret
.endm
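WRAPPER_IMPL_AVX_ff halves the problem instead of scalarizing it: vextractf128 parks the upper 128 bits of both ymm arguments on the stack, the first call consumes the low halves already sitting in %xmm0/%xmm1, the second call consumes the saved upper halves, and vinsertf128 glues the two 4-lane results back into %ymm0. The same split-and-recombine with GNU C vector extensions:

/* C shape of WRAPPER_IMPL_AVX_ff: one 8-lane call as two 4-lane calls.  */
typedef float v4sf __attribute__ ((vector_size (16)));
typedef float v8sf __attribute__ ((vector_size (32)));

static v8sf
wrapper_avx_ff_sketch (v4sf (*callee) (v4sf, v4sf), v8sf x, v8sf y)
{
  v4sf xlo, ylo, xhi, yhi;
  for (int lane = 0; lane < 4; lane++)
    {
      xlo[lane] = x[lane];  xhi[lane] = x[lane + 4];
      ylo[lane] = y[lane];  yhi[lane] = y[lane + 4];
    }
  v4sf rlo = callee (xlo, ylo);   /* low halves   */
  v4sf rhi = callee (xhi, yhi);   /* saved highs  */
  v8sf r;
  for (int lane = 0; lane < 4; lane++)
    {
      r[lane] = rlo[lane];
      r[lane + 4] = rhi[lane];
    }
  return r;
}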

/* AVX512 ISA version as wrapper to AVX2 ISA version.  */
.macro WRAPPER_IMPL_AVX512 callee
        pushq     %rbp
@ -109,3 +169,65 @@
        cfi_restore (%rbp)
        ret
.endm

/* 2 argument AVX512 ISA version as wrapper to AVX2 ISA version.  */
.macro WRAPPER_IMPL_AVX512_ff callee
        pushq     %rbp
        cfi_adjust_cfa_offset (8)
        cfi_rel_offset (%rbp, 0)
        movq      %rsp, %rbp
        cfi_def_cfa_register (%rbp)
        andq      $-64, %rsp
        subq      $128, %rsp
/* Below is encoding for vmovaps %zmm0, (%rsp).  */
        .byte   0x62
        .byte   0xf1
        .byte   0x7c
        .byte   0x48
        .byte   0x29
        .byte   0x04
        .byte   0x24
/* Below is encoding for vmovaps %zmm1, 64(%rsp).  */
        .byte   0x62
        .byte   0xf1
        .byte   0x7c
        .byte   0x48
        .byte   0x29
        .byte   0x4c
        .byte   0x24
        .byte   0x01
/* Below is encoding for vmovaps (%rsp), %ymm0.  */
        .byte   0xc5
        .byte   0xfc
        .byte   0x28
        .byte   0x04
        .byte   0x24
/* Below is encoding for vmovaps 64(%rsp), %ymm1.  */
        .byte   0xc5
        .byte   0xfc
        .byte   0x28
        .byte   0x4c
        .byte   0x24
        .byte   0x40
        call      HIDDEN_JUMPTARGET(\callee)
/* Below is encoding for vmovaps 32(%rsp), %ymm0.  */
        .byte   0xc5
        .byte   0xfc
        .byte   0x28
        .byte   0x44
        .byte   0x24
        .byte   0x20
/* Below is encoding for vmovaps 96(%rsp), %ymm1.  */
        .byte   0xc5
        .byte   0xfc
        .byte   0x28
        .byte   0x4c
        .byte   0x24
        .byte   0x60
        call      HIDDEN_JUMPTARGET(\callee)
        movq      %rbp, %rsp
        cfi_def_cfa_register (%rsp)
        popq      %rbp
        cfi_adjust_cfa_offset (-8)
        cfi_restore (%rbp)
        ret
.endm
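The .byte runs above are hand-assembled instruction encodings, evidently so the file still builds with assemblers that predate AVX-512 mnemonic support. The halving itself mirrors the AVX wrapper sketch above, one register class up: the zmm arguments are spilled at (%rsp) and 64(%rsp), and the callee sees their 256-bit halves, first (%rsp)/64(%rsp), then 32(%rsp)/96(%rsp). One encoding detail worth calling out, shown as a small self-contained check:

#include <assert.h>

/* Field-by-field view of the first spill, .byte 62 f1 7c 48 29 04 24:
     0x62             EVEX escape
     0xf1 0x7c 0x48   EVEX payload: 0F map, packed single, 512-bit length
     0x29             MOVAPS (store form)
     0x04 0x24        modrm + SIB addressing (%rsp)
   => vmovaps %zmm0, (%rsp).

   For the %zmm1 spill the operand is 64(%rsp); under EVEX disp8*N
   compression the 8-bit displacement is scaled by the 64-byte access
   size, so 64 encodes as the single byte 0x01:  */
int
main (void)
{
  int disp8 = 0x01, access_size = 64;
  assert (disp8 * access_size == 64);   /* 64(%rsp) really is .byte 0x01 */
  return 0;
}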
sysdeps/x86_64/fpu/test-float-vlen16-wrappers.c
@ -26,3 +26,4 @@ VECTOR_WRAPPER (WRAPPER_NAME (cosf), _ZGVeN16v_cosf)
VECTOR_WRAPPER (WRAPPER_NAME (sinf), _ZGVeN16v_sinf)
VECTOR_WRAPPER (WRAPPER_NAME (logf), _ZGVeN16v_logf)
VECTOR_WRAPPER (WRAPPER_NAME (expf), _ZGVeN16v_expf)
VECTOR_WRAPPER_ff (WRAPPER_NAME (powf), _ZGVeN16vv_powf)
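VECTOR_WRAPPER_ff is the two-argument counterpart of VECTOR_WRAPPER: it lets the generic libm-test machinery drive a vector entry point through a scalar-looking function by broadcasting both scalar arguments across all lanes and checking the result. A plausible shape for the macro, modeled on the one-argument convention (hypothetical; the exact definition lives in the math/test-float-vlen*.h headers this commit fixes):

/* Hypothetical expansion: broadcast x and y into full vectors, call the
   vector function, let TEST_VEC_LOOP check that all lanes agree, and
   return one lane as the scalar result.  */
#define VECTOR_WRAPPER_ff(scalar_func, vector_func)		\
extern VECTOR_TYPE vector_func (VECTOR_TYPE, VECTOR_TYPE);	\
FLOAT scalar_func (FLOAT x, FLOAT y)				\
{								\
  VECTOR_TYPE mx, my;						\
  INIT_VEC_LOOP (mx, x, VEC_LEN);				\
  INIT_VEC_LOOP (my, y, VEC_LEN);				\
  VECTOR_TYPE mr = vector_func (mx, my);			\
  TEST_VEC_LOOP (mr, VEC_LEN);					\
  return ((FLOAT) mr[0]);					\
}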
sysdeps/x86_64/fpu/test-float-vlen16.c
@ -22,6 +22,7 @@
#define TEST_VECTOR_sinf 1
#define TEST_VECTOR_logf 1
#define TEST_VECTOR_expf 1
#define TEST_VECTOR_powf 1

#define REQUIRE_AVX512F
sysdeps/x86_64/fpu/test-float-vlen4-wrappers.c
@ -26,3 +26,4 @@ VECTOR_WRAPPER (WRAPPER_NAME (cosf), _ZGVbN4v_cosf)
VECTOR_WRAPPER (WRAPPER_NAME (sinf), _ZGVbN4v_sinf)
VECTOR_WRAPPER (WRAPPER_NAME (logf), _ZGVbN4v_logf)
VECTOR_WRAPPER (WRAPPER_NAME (expf), _ZGVbN4v_expf)
VECTOR_WRAPPER_ff (WRAPPER_NAME (powf), _ZGVbN4vv_powf)
sysdeps/x86_64/fpu/test-float-vlen4.c
@ -22,5 +22,6 @@
#define TEST_VECTOR_sinf 1
#define TEST_VECTOR_logf 1
#define TEST_VECTOR_expf 1
#define TEST_VECTOR_powf 1

#include "libm-test.c"
sysdeps/x86_64/fpu/test-float-vlen8-avx2-wrappers.c
@ -29,3 +29,4 @@ VECTOR_WRAPPER (WRAPPER_NAME (cosf), _ZGVdN8v_cosf)
VECTOR_WRAPPER (WRAPPER_NAME (sinf), _ZGVdN8v_sinf)
VECTOR_WRAPPER (WRAPPER_NAME (logf), _ZGVdN8v_logf)
VECTOR_WRAPPER (WRAPPER_NAME (expf), _ZGVdN8v_expf)
VECTOR_WRAPPER_ff (WRAPPER_NAME (powf), _ZGVdN8vv_powf)
sysdeps/x86_64/fpu/test-float-vlen8-avx2.c
@ -25,6 +25,7 @@
#define TEST_VECTOR_sinf 1
#define TEST_VECTOR_logf 1
#define TEST_VECTOR_expf 1
#define TEST_VECTOR_powf 1

#define REQUIRE_AVX2
sysdeps/x86_64/fpu/test-float-vlen8-wrappers.c
@ -26,3 +26,4 @@ VECTOR_WRAPPER (WRAPPER_NAME (cosf), _ZGVcN8v_cosf)
VECTOR_WRAPPER (WRAPPER_NAME (sinf), _ZGVcN8v_sinf)
VECTOR_WRAPPER (WRAPPER_NAME (logf), _ZGVcN8v_logf)
VECTOR_WRAPPER (WRAPPER_NAME (expf), _ZGVcN8v_expf)
VECTOR_WRAPPER_ff (WRAPPER_NAME (powf), _ZGVcN8vv_powf)
sysdeps/x86_64/fpu/test-float-vlen8.c
@ -22,5 +22,6 @@
#define TEST_VECTOR_sinf 1
#define TEST_VECTOR_logf 1
#define TEST_VECTOR_expf 1
#define TEST_VECTOR_powf 1

#include "libm-test.c"