x86_64: Fix svml_d_asinh4_core_avx2.S code formatting

This commit contains the following formatting changes (see the
example after this list):

1. Instructions are preceded by a tab.
2. Instructions less than 8 characters in length have a tab
   between them and the first operand.
3. Instructions greater than 7 characters in length have a
   space between them and the first operand.
4. Tabs between `#define`d names and their values.
5. 8 spaces at the beginning of a line are replaced by a tab.
6. Indent comments with code.
7. Remove redundant .text section.
8. 1 space between line content and line comment.
9. Space after all commas.
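
For example, under these rules a hypothetical line written as

        vcmpnle_uqpd    %ymm1,%ymm2,%ymm3   /*compare*/

becomes

	vcmpnle_uqpd %ymm1, %ymm2, %ymm3 /* compare */

i.e. a tab instead of the 8-space indent, a single space after the
mnemonic (it is longer than 7 characters), a space after each comma,
and 1 space before the line comment. (The operands here are
illustrative, not taken from the diff below.)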

Reviewed-by: Noah Goldstein <goldstein.w.n@gmail.com>
Author: Sunil K Pandey
Date: 2022-03-07 10:47:09 -08:00
parent a7ab967662
commit b61bfd101e

@@ -67,8 +67,7 @@
 #include <sysdep.h>
-	.text
-	.section .text.avx2,"ax",@progbits
+	.section .text.avx2, "ax", @progbits
 ENTRY(_ZGVdN4v_asinh_avx2)
 	pushq	%rbp
 	cfi_def_cfa_offset(16)
@@ -81,26 +80,26 @@ ENTRY(_ZGVdN4v_asinh_avx2)
 	vmovapd	%ymm0, %ymm13
 	vmovupd	SgnMask+__svml_dasinh_data_internal(%rip), %ymm9
 	/* Load the constant 1 and a sign mask */
 	vmovupd	One+__svml_dasinh_data_internal(%rip), %ymm12
 	/* No need to split X when FMA is available in hardware. */
 	vmulpd	%ymm13, %ymm13, %ymm8
 	/*
 	 * Get the absolute value of the input, since we will exploit antisymmetry
 	 * and mostly assume X >= 0 in the core computation
 	 */
 	vandpd	%ymm9, %ymm13, %ymm10
 	/*
 	 * Check whether the input is finite, by checking |X| <= MaxFloat
 	 * Otherwise set the rangemask so that the callout will get used.
 	 * Note that this will also use the callout for NaNs since not(NaN <= MaxFloat)
 	 */
 	vcmpnle_uqpd dLargestFinite+__svml_dasinh_data_internal(%rip), %ymm10, %ymm14
 	/*
 	 * Finally, express Y + W = X^2 + 1 accurately where Y has <= 29 bits.
 	 * If |X| <= 1 then |XHi| <= 1 and so |X2Hi| <= 1, so we can treat 1
 	 * as the dominant component in the compensated summation. Otherwise,
@@ -112,7 +111,7 @@ ENTRY(_ZGVdN4v_asinh_avx2)
 	 */
 	vaddpd	%ymm8, %ymm12, %ymm5
 	/*
 	 * The following computation can go wrong for very large X, basically
 	 * because X^2 overflows. But for large X we have
 	 * asinh(X) / log(2 X) - 1 =~= 1/(4 * X^2), so for X >= 2^30
@@ -126,7 +125,7 @@ ENTRY(_ZGVdN4v_asinh_avx2)
 	vmovmskpd %ymm14, %eax
 	vandpd	dTopMask29+__svml_dasinh_data_internal(%rip), %ymm5, %ymm14
 	/*
 	 * Compute R = 1/sqrt(Y + W) * (1 + d)
 	 * Force R to <= 12 significant bits in case it isn't already
 	 * This means that R * Y and R^2 * Y are exactly representable.
@@ -140,7 +139,7 @@ ENTRY(_ZGVdN4v_asinh_avx2)
 	vcvtps2pd %xmm3, %ymm6
 	vaddpd	%ymm0, %ymm7, %ymm4
 	/*
 	 * Unfortunately, we can still be in trouble if |X| <= 2^-10, since
 	 * the absolute error 2^-(12+53)-ish in sqrt(1 + X^2) gets scaled up
 	 * by 1/X and comes close to our threshold. Hence if |X| <= 2^-9,
@@ -151,7 +150,7 @@ ENTRY(_ZGVdN4v_asinh_avx2)
 	vaddpd	%ymm7, %ymm8, %ymm7
 	vaddpd	%ymm2, %ymm4, %ymm15
 	/*
 	 * Now 1 / (1 + d)
 	 * = 1 / (1 + (sqrt(1 - e) - 1))
 	 * = 1 / sqrt(1 - e)
@@ -168,7 +167,7 @@ ENTRY(_ZGVdN4v_asinh_avx2)
 	vmovupd	dC5+__svml_dasinh_data_internal(%rip), %ymm4
 	vandpd	dTopMask12+__svml_dasinh_data_internal(%rip), %ymm6, %ymm0
 	/*
 	 * Compute S = (Y/sqrt(Y + W)) * (1 + d)
 	 * and T = (W/sqrt(Y + W)) * (1 + d)
 	 * so that S + T = sqrt(Y + W) * (1 + d)
@@ -179,7 +178,7 @@ ENTRY(_ZGVdN4v_asinh_avx2)
 	vmovupd	dHalf+__svml_dasinh_data_internal(%rip), %ymm6
 	vsubpd	%ymm12, %ymm3, %ymm14
 	/*
 	 * Obtain sqrt(1 + X^2) - 1 in two pieces
 	 * sqrt(1 + X^2) - 1
 	 * = sqrt(Y + W) - 1
@@ -192,7 +191,7 @@ ENTRY(_ZGVdN4v_asinh_avx2)
 	 */
 	vaddpd	%ymm1, %ymm3, %ymm2
 	/*
 	 * Compute e = -(2 * d + d^2)
 	 * The first FMR is exact, and the rounding error in the other is acceptable
 	 * since d and e are ~ 2^-12
@@ -207,38 +206,38 @@ ENTRY(_ZGVdN4v_asinh_avx2)
 	vmulpd	%ymm4, %ymm5, %ymm0
 	vfmadd213pd %ymm1, %ymm2, %ymm0
 	/* Now multiplex the two possible computations */
 	vcmple_oqpd dLittleThreshold+__svml_dasinh_data_internal(%rip), %ymm10, %ymm2
 	vaddpd	%ymm14, %ymm0, %ymm15
 	/* dX2over2 = X^2/2 */
 	vmulpd	%ymm7, %ymm6, %ymm0
 	/* dX4over4 = X^4/4 */
 	vmulpd	%ymm0, %ymm0, %ymm8
 	/* dX46 = -X^4/4 + X^6/8 */
 	vfmsub231pd %ymm0, %ymm8, %ymm8
 	/* dX46over2 = -X^4/8 + x^6/16 */
 	vmulpd	%ymm8, %ymm6, %ymm5
 	/* 2^ (-10-exp(X) ) */
 	vmovupd	ExpMask2+__svml_dasinh_data_internal(%rip), %ymm8
 	vaddpd	%ymm5, %ymm0, %ymm4
 	vblendvpd %ymm2, %ymm4, %ymm15, %ymm1
 	/*
 	 * Now do another compensated sum to add |X| + [sqrt(1 + X^2) - 1].
 	 * It's always safe to assume |X| is larger.
 	 * This is the final 2-part argument to the log1p function
 	 */
 	vaddpd	%ymm1, %ymm10, %ymm3
 	/* Now multiplex to the case X = 2^-30 * |input|, Xl = dL = 0 in the "big" case. */
 	vmulpd	XScale+__svml_dasinh_data_internal(%rip), %ymm10, %ymm10
 	/*
 	 * Now we feed into the log1p code, using H in place of _VARG1 and
 	 * also adding L into Xl.
 	 * compute 1+x as high, low parts
@@ -253,34 +252,34 @@ ENTRY(_ZGVdN4v_asinh_avx2)
 	vblendvpd %ymm11, %ymm1, %ymm10, %ymm5
 	vsubpd	%ymm1, %ymm6, %ymm2
 	/* exponent bits */
 	vpsrlq	$20, %ymm5, %ymm10
 	vaddpd	%ymm2, %ymm7, %ymm3
 	/*
 	 * Now resume the main code.
 	 * preserve mantissa, set input exponent to 2^(-10)
 	 */
 	vandpd	ExpMask+__svml_dasinh_data_internal(%rip), %ymm5, %ymm0
 	vorpd	Two10+__svml_dasinh_data_internal(%rip), %ymm0, %ymm2
 	/* reciprocal approximation good to at least 11 bits */
 	vcvtpd2ps %ymm2, %xmm6
 	vrcpps	%xmm6, %xmm7
 	vcvtps2pd %xmm7, %ymm15
 	/* exponent of X needed to scale Xl */
 	vandps	ExpMask0+__svml_dasinh_data_internal(%rip), %ymm5, %ymm9
 	vpsubq	%ymm9, %ymm8, %ymm0
 	vandpd	%ymm11, %ymm3, %ymm4
 	/* round reciprocal to nearest integer, will have 1+9 mantissa bits */
 	vroundpd $0, %ymm15, %ymm3
 	/* scale DblRcp */
 	vmulpd	%ymm0, %ymm3, %ymm2
 	/* argument reduction */
 	vfmsub213pd %ymm12, %ymm2, %ymm5
 	vmulpd	%ymm2, %ymm4, %ymm12
 	vmovupd	poly_coeff+64+__svml_dasinh_data_internal(%rip), %ymm2
@@ -290,17 +289,17 @@ ENTRY(_ZGVdN4v_asinh_avx2)
 	vextractf128 $1, %ymm10, %xmm14
 	vshufps	$221, %xmm14, %xmm10, %xmm1
 	/* biased exponent in DP format */
 	vcvtdq2pd %xmm1, %ymm7
 	/* exponent*log(2.0) */
 	vmovupd	Threshold+__svml_dasinh_data_internal(%rip), %ymm10
 	/* Add 31 to the exponent in the "large" case to get log(2 * input) */
 	vaddpd	dThirtyOne+__svml_dasinh_data_internal(%rip), %ymm7, %ymm6
 	vblendvpd %ymm11, %ymm7, %ymm6, %ymm1
 	/*
 	 * prepare table index
 	 * table lookup
 	 */
@@ -311,12 +310,12 @@ ENTRY(_ZGVdN4v_asinh_avx2)
 	vsubpd	%ymm15, %ymm1, %ymm1
 	vmulpd	L2+__svml_dasinh_data_internal(%rip), %ymm1, %ymm3
 	/* polynomial */
 	vmovupd	poly_coeff+__svml_dasinh_data_internal(%rip), %ymm1
 	vfmadd213pd poly_coeff+32+__svml_dasinh_data_internal(%rip), %ymm5, %ymm1
 	vfmadd213pd %ymm2, %ymm4, %ymm1
 	/* reconstruction */
 	vfmadd213pd %ymm5, %ymm4, %ymm1
 	vextractf128 $1, %ymm11, %xmm7
 	vmovd	%xmm11, %edx
@@ -327,24 +326,24 @@ ENTRY(_ZGVdN4v_asinh_avx2)
 	vpextrd	$2, %xmm7, %edi
 	movslq	%ecx, %rcx
 	movslq	%edi, %rdi
-	vmovsd	(%r8,%rdx), %xmm0
-	vmovsd	(%r8,%rsi), %xmm8
-	vmovhpd	(%r8,%rcx), %xmm0, %xmm6
-	vmovhpd	(%r8,%rdi), %xmm8, %xmm9
+	vmovsd	(%r8, %rdx), %xmm0
+	vmovsd	(%r8, %rsi), %xmm8
+	vmovhpd	(%r8, %rcx), %xmm0, %xmm6
+	vmovhpd	(%r8, %rdi), %xmm8, %xmm9
 	vinsertf128 $1, %xmm9, %ymm6, %ymm0
 	vaddpd	%ymm1, %ymm0, %ymm0
 	vaddpd	%ymm0, %ymm3, %ymm7
 	/* Finally, reincorporate the original sign. */
 	vandpd	dSign+__svml_dasinh_data_internal(%rip), %ymm13, %ymm6
 	vxorpd	%ymm7, %ymm6, %ymm0
 	testl	%eax, %eax
 	/* Go to special inputs processing branch */
 	jne	L(SPECIAL_VALUES_BRANCH)
 	# LOE rbx r12 r13 r14 r15 eax ymm0 ymm13
 	/* Restore registers
 	 * and exit the function
 	 */
@@ -357,7 +356,7 @@ L(EXIT):
 	cfi_def_cfa(6, 16)
 	cfi_offset(6, -16)
 	/* Branch to process
 	 * special inputs
 	 */
@@ -383,18 +382,18 @@ L(SPECIAL_VALUES_BRANCH):
 	.cfi_escape 0x10, 0x0e, 0x0e, 0x38, 0x1c, 0x0d, 0xe0, 0xff, 0xff, 0xff, 0x1a, 0x0d, 0xa0, 0xff, 0xff, 0xff, 0x22
 	# LOE rbx r15 r12d r13d
 	/* Range mask
 	 * bits check
 	 */
 L(RANGEMASK_CHECK):
 	btl	%r12d, %r13d
 	/* Call scalar math function */
 	jc	L(SCALAR_MATH_CALL)
 	# LOE rbx r15 r12d r13d
 	/* Special inputs
 	 * processing loop
 	 */
@@ -402,7 +401,7 @@ L(SPECIAL_VALUES_LOOP):
 	incl	%r12d
 	cmpl	$4, %r12d
 	/* Check bits in range mask */
 	jl	L(RANGEMASK_CHECK)
 	# LOE rbx r15 r12d r13d
@@ -414,7 +413,7 @@ L(SPECIAL_VALUES_LOOP):
 	cfi_restore(14)
 	vmovupd	64(%rsp), %ymm0
 	/* Go to exit */
 	jmp	L(EXIT)
 	/* DW_CFA_expression: r12 (r12) (DW_OP_lit8; DW_OP_minus; DW_OP_const4s: -32; DW_OP_and; DW_OP_const4s: -80; DW_OP_plus) */
 	.cfi_escape 0x10, 0x0c, 0x0e, 0x38, 0x1c, 0x0d, 0xe0, 0xff, 0xff, 0xff, 0x1a, 0x0d, 0xb0, 0xff, 0xff, 0xff, 0x22
@@ -424,19 +423,19 @@ L(SPECIAL_VALUES_LOOP):
 	.cfi_escape 0x10, 0x0e, 0x0e, 0x38, 0x1c, 0x0d, 0xe0, 0xff, 0xff, 0xff, 0x1a, 0x0d, 0xa0, 0xff, 0xff, 0xff, 0x22
 	# LOE rbx r12 r13 r14 r15 ymm0
 	/* Scalar math function call
 	 * to process special input
 	 */
 L(SCALAR_MATH_CALL):
 	movl	%r12d, %r14d
-	movsd	32(%rsp,%r14,8), %xmm0
+	movsd	32(%rsp, %r14, 8), %xmm0
 	call	asinh@PLT
 	# LOE rbx r14 r15 r12d r13d xmm0
-	movsd	%xmm0, 64(%rsp,%r14,8)
+	movsd	%xmm0, 64(%rsp, %r14, 8)
 	/* Process special inputs in loop */
 	jmp	L(SPECIAL_VALUES_LOOP)
 	# LOE rbx r15 r12d r13d
 END(_ZGVdN4v_asinh_avx2)
@@ -994,7 +993,7 @@ __svml_dasinh_data_internal:
 	.quad	0xc08628b36d39ec08, 0xbe1cf1abc231f7b2
 	.quad	0xc08628b56dfa36d0, 0xbe1cf2074d5ba303
 	.quad	0xc08628b76e3a4180, 0xbe1cf05cd5eed880
-	/*== Log_LA_table ==*/
+	/* Log_LA_table */
 	.align	32
 	.quad	0x8000000000000000
 	.quad	0xbf5ff802a9ab10e6
@@ -1509,93 +1508,93 @@ __svml_dasinh_data_internal:
 	.quad	0x3f60040155d5889e
 	.quad	0x3f50020055655889
 	.quad	0x0000000000000000
-	/*== poly_coeff[4] ==*/
+	/* poly_coeff[4] */
 	.align	32
 	.quad	0x3fc9999CACDB4D0A, 0x3fc9999CACDB4D0A, 0x3fc9999CACDB4D0A, 0x3fc9999CACDB4D0A /* coeff4 */
 	.quad	0xbfd0000148058EE1, 0xbfd0000148058EE1, 0xbfd0000148058EE1, 0xbfd0000148058EE1 /* coeff3 */
 	.quad	0x3fd55555555543C5, 0x3fd55555555543C5, 0x3fd55555555543C5, 0x3fd55555555543C5 /* coeff2 */
 	.quad	0xbfdFFFFFFFFFF81F, 0xbfdFFFFFFFFFF81F, 0xbfdFFFFFFFFFF81F, 0xbfdFFFFFFFFFF81F /* coeff1 */
-	/*== ExpMask ==*/
+	/* ExpMask */
 	.align	32
 	.quad	0x000fffffffffffff, 0x000fffffffffffff, 0x000fffffffffffff, 0x000fffffffffffff
-	/*== Two10 ==*/
+	/* Two10 */
 	.align	32
 	.quad	0x3f50000000000000, 0x3f50000000000000, 0x3f50000000000000, 0x3f50000000000000
-	/*== MinLog1p = -1+2^(-53) ==*/
+	/* MinLog1p = -1+2^(-53) */
 	.align	32
 	.quad	0xbfefffffffffffff, 0xbfefffffffffffff, 0xbfefffffffffffff, 0xbfefffffffffffff
-	/*== MaxLog1p ==*/
+	/* MaxLog1p */
 	.align	32
 	.quad	0x7f3ffffffffff000, 0x7f3ffffffffff000, 0x7f3ffffffffff000, 0x7f3ffffffffff000
-	/*== One ==*/
+	/* One */
 	.align	32
 	.quad	0x3ff0000000000000, 0x3ff0000000000000, 0x3ff0000000000000, 0x3ff0000000000000
-	/*== SgnMask ==*/
+	/* SgnMask */
 	.align	32
 	.quad	0x7fffffffffffffff, 0x7fffffffffffffff, 0x7fffffffffffffff, 0x7fffffffffffffff
-	/*== XThreshold ==*/
+	/* XThreshold */
 	.align	32
 	.quad	0x3e00000000000000, 0x3e00000000000000, 0x3e00000000000000, 0x3e00000000000000
-	/*== XhMask ==*/
+	/* XhMask */
 	.align	32
 	.quad	0xfffffffffffffc00, 0xfffffffffffffc00, 0xfffffffffffffc00, 0xfffffffffffffc00
-	/*== Threshold ==*/
+	/* Threshold */
 	.align	32
 	.quad	0x4086a00000000000, 0x4086a00000000000, 0x4086a00000000000, 0x4086a00000000000
-	/*== Bias ==*/
+	/* Bias */
 	.align	32
 	.quad	0x408ff80000000000, 0x408ff80000000000, 0x408ff80000000000, 0x408ff80000000000
-	/*== Bias1 ==*/
+	/* Bias1 */
 	.align	32
 	.quad	0x408ff00000000000, 0x408ff00000000000, 0x408ff00000000000, 0x408ff00000000000
-	/*== ExpMask ==*/
+	/* ExpMask */
 	.align	32
 	.quad	0x7ff0000000000000, 0x7ff0000000000000, 0x7ff0000000000000, 0x7ff0000000000000
-	/*== ExpMask2 ==*/
+	/* ExpMask2 */
 	.align	32
 	.quad	0x7f40000000000000, 0x7f40000000000000, 0x7f40000000000000, 0x7f40000000000000
-	/*== L2L ==*/
+	/* L2L */
 	.align	32
 	.quad	0x3fe62E42FEFA39EF, 0x3fe62E42FEFA39EF, 0x3fe62E42FEFA39EF, 0x3fe62E42FEFA39EF
-	/*== dBigThreshold ==*/
+	/* dBigThreshold */
 	.align	32
 	.quad	0x41D0000000000000, 0x41D0000000000000, 0x41D0000000000000, 0x41D0000000000000
-	/*== dC2 ==*/
+	/* dC2 */
 	.align	32
 	.quad	0x3FD8000000000000, 0x3FD8000000000000, 0x3FD8000000000000, 0x3FD8000000000000
-	/*== dC3 ==*/
+	/* dC3 */
 	.align	32
 	.quad	0x3FD4000000000000, 0x3FD4000000000000, 0x3FD4000000000000, 0x3FD4000000000000
-	/*== dC4 ==*/
+	/* dC4 */
 	.align	32
 	.quad	0x3FD1800000000000, 0x3FD1800000000000, 0x3FD1800000000000, 0x3FD1800000000000
-	/*== dC5 ==*/
+	/* dC5 */
 	.align	32
 	.quad	0x3FCF800000000000, 0x3FCF800000000000, 0x3FCF800000000000, 0x3FCF800000000000
-	/*== dHalf ==*/
+	/* dHalf */
 	.align	32
 	.quad	0x3FE0000000000000, 0x3FE0000000000000, 0x3FE0000000000000, 0x3FE0000000000000
-	/*== dLargestFinite ==*/
+	/* dLargestFinite */
 	.align	32
 	.quad	0x7FEFFFFFFFFFFFFF, 0x7FEFFFFFFFFFFFFF, 0x7FEFFFFFFFFFFFFF, 0x7FEFFFFFFFFFFFFF
-	/*== dLittleThreshold ==*/
+	/* dLittleThreshold */
 	.align	32
 	.quad	0x3F60000000000000, 0x3F60000000000000, 0x3F60000000000000, 0x3F60000000000000
-	/*== dSign ==*/
+	/* dSign */
 	.align	32
 	.quad	0x8000000000000000, 0x8000000000000000, 0x8000000000000000, 0x8000000000000000
-	/*== dThirtyOne ==*/
+	/* dThirtyOne */
 	.align	32
 	.quad	0x403F000000000000, 0x403F000000000000, 0x403F000000000000, 0x403F000000000000
-	/*== dTopMask12 ==*/
+	/* dTopMask12 */
 	.align	32
 	.quad	0xFFFFFE0000000000, 0xFFFFFE0000000000, 0xFFFFFE0000000000, 0xFFFFFE0000000000
-	/*== dTopMask29 ==*/
+	/* dTopMask29 */
 	.align	32
 	.quad	0xFFFFFFFFFF000000, 0xFFFFFFFFFF000000, 0xFFFFFFFFFF000000, 0xFFFFFFFFFF000000
-	/*== XScale ==*/
+	/* XScale */
 	.align	32
 	.quad	0x3E10000000000000, 0x3E10000000000000, 0x3E10000000000000, 0x3E10000000000000
 	.align	32
-	.type	__svml_dasinh_data_internal,@object
-	.size	__svml_dasinh_data_internal,.-__svml_dasinh_data_internal
+	.type	__svml_dasinh_data_internal, @object
+	.size	__svml_dasinh_data_internal, .-__svml_dasinh_data_internal