mirror of https://sourceware.org/git/glibc.git
synced 2024-12-25 20:21:07 +00:00
d96164c330
Various floating-point functions have code to force underflow exceptions
if a tiny result was computed in a way that might not have resulted in
such exceptions even though the result is inexact. This typically uses
math_force_eval to ensure that the underflowing expression is evaluated,
but sometimes uses volatile. This patch refactors such code to use three
new macros math_check_force_underflow,
math_check_force_underflow_nonneg and math_check_force_underflow_complex
(which in turn use math_force_eval). In the limited number of cases not
suited to a simple conversion to these macros, existing uses of volatile
are changed to use math_force_eval instead. The converted code does not
always execute exactly the same sequence of operations as the original
code, but the overall effects should be the same.

Tested for x86_64, x86, mips64 and powerpc.

	* sysdeps/generic/math_private.h (fabs_tg): New macro.
	(min_of_type): Likewise.
	(math_check_force_underflow): Likewise.
	(math_check_force_underflow_nonneg): Likewise.
	(math_check_force_underflow_complex): Likewise.
	* math/e_exp2l.c (__ieee754_exp2l): Use math_check_force_underflow_nonneg.
	* math/k_casinh.c (__kernel_casinh): Likewise.
	* math/k_casinhf.c (__kernel_casinhf): Likewise.
	* math/k_casinhl.c (__kernel_casinhl): Likewise.
	* math/s_catan.c (__catan): Use math_check_force_underflow_complex.
	* math/s_catanf.c (__catanf): Likewise.
	* math/s_catanh.c (__catanh): Likewise.
	* math/s_catanhf.c (__catanhf): Likewise.
	* math/s_catanhl.c (__catanhl): Likewise.
	* math/s_catanl.c (__catanl): Likewise.
	* math/s_ccosh.c (__ccosh): Likewise.
	* math/s_ccoshf.c (__ccoshf): Likewise.
	* math/s_ccoshl.c (__ccoshl): Likewise.
	* math/s_cexp.c (__cexp): Likewise.
	* math/s_cexpf.c (__cexpf): Likewise.
	* math/s_cexpl.c (__cexpl): Likewise.
	* math/s_clog.c (__clog): Use math_check_force_underflow_nonneg.
	* math/s_clog10.c (__clog10): Likewise.
	* math/s_clog10f.c (__clog10f): Likewise.
	* math/s_clog10l.c (__clog10l): Likewise.
	* math/s_clogf.c (__clogf): Likewise.
	* math/s_clogl.c (__clogl): Likewise.
	* math/s_csin.c (__csin): Use math_check_force_underflow_complex.
	* math/s_csinf.c (__csinf): Likewise.
	* math/s_csinh.c (__csinh): Likewise.
	* math/s_csinhf.c (__csinhf): Likewise.
	* math/s_csinhl.c (__csinhl): Likewise.
	* math/s_csinl.c (__csinl): Likewise.
	* math/s_csqrt.c (__csqrt): Use math_check_force_underflow.
	* math/s_csqrtf.c (__csqrtf): Likewise.
	* math/s_csqrtl.c (__csqrtl): Likewise.
	* math/s_ctan.c (__ctan): Use math_check_force_underflow_complex.
	* math/s_ctanf.c (__ctanf): Likewise.
	* math/s_ctanh.c (__ctanh): Likewise.
	* math/s_ctanhf.c (__ctanhf): Likewise.
	* math/s_ctanhl.c (__ctanhl): Likewise.
	* math/s_ctanl.c (__ctanl): Likewise.
	* stdlib/strtod_l.c (round_and_return): Use math_force_eval instead of volatile.
	* sysdeps/ieee754/dbl-64/e_asin.c (__ieee754_asin): Use math_check_force_underflow.
	* sysdeps/ieee754/dbl-64/e_atanh.c (__ieee754_atanh): Likewise.
	* sysdeps/ieee754/dbl-64/e_exp.c (__ieee754_exp): Do not use volatile when forcing underflow.
	* sysdeps/ieee754/dbl-64/e_exp2.c (__ieee754_exp2): Use math_check_force_underflow_nonneg.
	* sysdeps/ieee754/dbl-64/e_gamma_r.c (__ieee754_gamma_r): Likewise.
	* sysdeps/ieee754/dbl-64/e_j1.c (__ieee754_j1): Use math_check_force_underflow.
	* sysdeps/ieee754/dbl-64/e_jn.c (__ieee754_jn): Likewise.
	* sysdeps/ieee754/dbl-64/e_sinh.c (__ieee754_sinh): Likewise.
	* sysdeps/ieee754/dbl-64/s_asinh.c (__asinh): Likewise.
	* sysdeps/ieee754/dbl-64/s_atan.c (atan): Use math_check_force_underflow_nonneg.
	* sysdeps/ieee754/dbl-64/s_erf.c (__erf): Use math_check_force_underflow.
	* sysdeps/ieee754/dbl-64/s_expm1.c (__expm1): Likewise.
	* sysdeps/ieee754/dbl-64/s_fma.c (__fma): Use math_force_eval instead of volatile.
	* sysdeps/ieee754/dbl-64/s_log1p.c (__log1p): Use math_check_force_underflow.
	* sysdeps/ieee754/dbl-64/s_sin.c (__sin): Likewise.
	* sysdeps/ieee754/dbl-64/s_tan.c (tan): Use math_check_force_underflow_nonneg.
	* sysdeps/ieee754/dbl-64/s_tanh.c (__tanh): Use math_check_force_underflow.
	* sysdeps/ieee754/flt-32/e_asinf.c (__ieee754_asinf): Likewise.
	* sysdeps/ieee754/flt-32/e_atanhf.c (__ieee754_atanhf): Likewise.
	* sysdeps/ieee754/flt-32/e_exp2f.c (__ieee754_exp2f): Use math_check_force_underflow_nonneg.
	* sysdeps/ieee754/flt-32/e_gammaf_r.c (__ieee754_gammaf_r): Likewise.
	* sysdeps/ieee754/flt-32/e_j1f.c (__ieee754_j1f): Use math_check_force_underflow.
	* sysdeps/ieee754/flt-32/e_jnf.c (__ieee754_jnf): Likewise.
	* sysdeps/ieee754/flt-32/e_sinhf.c (__ieee754_sinhf): Likewise.
	* sysdeps/ieee754/flt-32/k_sinf.c (__kernel_sinf): Likewise.
	* sysdeps/ieee754/flt-32/k_tanf.c (__kernel_tanf): Likewise.
	* sysdeps/ieee754/flt-32/s_asinhf.c (__asinhf): Likewise.
	* sysdeps/ieee754/flt-32/s_atanf.c (__atanf): Likewise.
	* sysdeps/ieee754/flt-32/s_erff.c (__erff): Likewise.
	* sysdeps/ieee754/flt-32/s_expm1f.c (__expm1f): Likewise.
	* sysdeps/ieee754/flt-32/s_log1pf.c (__log1pf): Likewise.
	* sysdeps/ieee754/flt-32/s_tanhf.c (__tanhf): Likewise.
	* sysdeps/ieee754/ldbl-128/e_asinl.c (__ieee754_asinl): Likewise.
	* sysdeps/ieee754/ldbl-128/e_atanhl.c (__ieee754_atanhl): Likewise.
	* sysdeps/ieee754/ldbl-128/e_expl.c (__ieee754_expl): Use math_check_force_underflow_nonneg.
	* sysdeps/ieee754/ldbl-128/e_gammal_r.c (__ieee754_gammal_r): Likewise.
	* sysdeps/ieee754/ldbl-128/e_j1l.c (__ieee754_j1l): Use math_check_force_underflow.
	* sysdeps/ieee754/ldbl-128/e_jnl.c (__ieee754_jnl): Likewise.
	* sysdeps/ieee754/ldbl-128/e_sinhl.c (__ieee754_sinhl): Likewise.
	* sysdeps/ieee754/ldbl-128/k_sincosl.c (__kernel_sincosl): Likewise.
	* sysdeps/ieee754/ldbl-128/k_sinl.c (__kernel_sinl): Likewise.
	* sysdeps/ieee754/ldbl-128/k_tanl.c (__kernel_tanl): Likewise.
	* sysdeps/ieee754/ldbl-128/s_asinhl.c (__asinhl): Likewise.
	* sysdeps/ieee754/ldbl-128/s_atanl.c (__atanl): Likewise.
	* sysdeps/ieee754/ldbl-128/s_erfl.c (__erfl): Likewise.
	* sysdeps/ieee754/ldbl-128/s_expm1l.c (__expm1l): Likewise.
	* sysdeps/ieee754/ldbl-128/s_fmal.c (__fmal): Use math_force_eval instead of volatile.
	* sysdeps/ieee754/ldbl-128/s_log1pl.c (__log1pl): Use math_check_force_underflow.
	* sysdeps/ieee754/ldbl-128/s_tanhl.c (__tanhl): Likewise.
	* sysdeps/ieee754/ldbl-128ibm/e_asinl.c (__ieee754_asinl): Use math_check_force_underflow.
	* sysdeps/ieee754/ldbl-128ibm/e_atanhl.c (__ieee754_atanhl): Likewise.
	* sysdeps/ieee754/ldbl-128ibm/e_gammal_r.c (__ieee754_gammal_r): Use math_check_force_underflow_nonneg.
	* sysdeps/ieee754/ldbl-128ibm/e_jnl.c (__ieee754_jnl): Use math_check_force_underflow.
	* sysdeps/ieee754/ldbl-128ibm/e_sinhl.c (__ieee754_sinhl): Likewise.
	* sysdeps/ieee754/ldbl-128ibm/k_sincosl.c (__kernel_sincosl): Likewise.
	* sysdeps/ieee754/ldbl-128ibm/k_sinl.c (__kernel_sinl): Likewise.
	* sysdeps/ieee754/ldbl-128ibm/k_tanl.c (__kernel_tanl): Likewise.
	* sysdeps/ieee754/ldbl-128ibm/s_asinhl.c (__asinhl): Likewise.
	* sysdeps/ieee754/ldbl-128ibm/s_atanl.c (__atanl): Likewise.
	* sysdeps/ieee754/ldbl-128ibm/s_erfl.c (__erfl): Likewise.
	* sysdeps/ieee754/ldbl-128ibm/s_tanhl.c (__tanhl): Likewise.
	* sysdeps/ieee754/ldbl-96/e_asinl.c (__ieee754_asinl): Likewise.
	* sysdeps/ieee754/ldbl-96/e_atanhl.c (__ieee754_atanhl): Likewise.
	* sysdeps/ieee754/ldbl-96/e_gammal_r.c (__ieee754_gammal_r): Use math_check_force_underflow_nonneg.
	* sysdeps/ieee754/ldbl-96/e_j1l.c (__ieee754_j1l): Use math_check_force_underflow.
	* sysdeps/ieee754/ldbl-96/e_jnl.c (__ieee754_jnl): Likewise.
	* sysdeps/ieee754/ldbl-96/e_sinhl.c (__ieee754_sinhl): Likewise.
	* sysdeps/ieee754/ldbl-96/k_sinl.c (__kernel_sinl): Likewise.
	* sysdeps/ieee754/ldbl-96/k_tanl.c (__kernel_tanl): Use math_check_force_underflow_nonneg.
	* sysdeps/ieee754/ldbl-96/s_asinhl.c (__asinhl): Use math_check_force_underflow.
	* sysdeps/ieee754/ldbl-96/s_erfl.c (__erfl): Likewise.
	* sysdeps/ieee754/ldbl-96/s_fmal.c (__fmal): Use math_force_eval instead of volatile.
	* sysdeps/ieee754/ldbl-96/s_tanhl.c (__tanhl): Use math_check_force_underflow.
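All three macros share one pattern: when a computed result is smaller in
magnitude than the least normal value of its type, evaluate an
expression that is guaranteed to underflow, so the exception is raised
even where the original computation avoided it. As a sketch, the
nonnegative variant looks roughly like this (the exact definitions,
including the type-generic fabs_tg and min_of_type helpers, are in
sysdeps/generic/math_private.h):

    /* If X, known to be nonnegative and not a NaN, is below the least
       normal value of its type, evaluate X * X: the product underflows,
       and math_force_eval keeps the compiler from discarding this
       otherwise-dead computation.  */
    #define math_check_force_underflow_nonneg(x)          \
      do                                                  \
        {                                                 \
          __typeof (x) force_underflow_tmp = (x);         \
          if (force_underflow_tmp                         \
              < min_of_type (force_underflow_tmp))        \
            math_force_eval (force_underflow_tmp          \
                             * force_underflow_tmp);      \
        }                                                 \
      while (0)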
329 lines
10 KiB
C
/*
 * IBM Accurate Mathematical Library
 * written by International Business Machines Corp.
 * Copyright (C) 2001-2015 Free Software Foundation, Inc.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU Lesser General Public License as published by
 * the Free Software Foundation; either version 2.1 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public License
 * along with this program; if not, see <http://www.gnu.org/licenses/>.
 */
/************************************************************************/
/*  MODULE_NAME: atnat.c                                                */
/*                                                                      */
/*  FUNCTIONS:  uatan                                                   */
/*              atanMp                                                  */
/*              signArctan                                              */
/*                                                                      */
/*                                                                      */
/*  FILES NEEDED: dla.h endian.h mpa.h mydefs.h atnat.h                 */
/*                mpatan.c mpatan2.c mpsqrt.c                           */
/*                uatan.tbl                                             */
/*                                                                      */
/*  An ultimate atan() routine. Given an IEEE double machine number x   */
/*  it computes the correctly rounded (to nearest) value of atan(x).    */
/*                                                                      */
/*  Assumption: Machine arithmetic operations are performed in          */
/*  round to nearest mode of IEEE 754 standard.                         */
/*                                                                      */
/************************************************************************/

#include <dla.h>
#include "mpa.h"
#include "MathLib.h"
#include "uatan.tbl"
#include "atnat.h"
#include <fenv.h>
#include <float.h>
#include <math.h>
#include <math_private.h>
#include <stap-probe.h>

void __mpatan (mp_no *, mp_no *, int); /* see definition in mpatan.c */
static double atanMp (double, const int[]);

/* Fix the sign of y and return */
static double
__signArctan (double x, double y)
{
  return __copysign (y, x);
}

/* An ultimate atan() routine. Given an IEEE double machine number x,    */
/* routine computes the correctly rounded (to nearest) value of atan(x). */
double
atan (double x)
{
  double cor, s1, ss1, s2, ss2, t1, t2, t3, t7, t8, t9, t10, u, u2, u3,
         v, vv, w, ww, y, yy, z, zz;
#ifndef DLA_FMS
  double t4, t5, t6;
#endif
  int i, ux, dx;
  static const int pr[M] = { 6, 8, 10, 32 };
  number num;

  num.d = x;
  ux = num.i[HIGH_HALF];
  dx = num.i[LOW_HALF];

  /* x=NaN */
  if (((ux & 0x7ff00000) == 0x7ff00000)
      && (((ux & 0x000fffff) | dx) != 0x00000000))
    return x + x;

  /* Regular values of x, including denormals +-0 and +-INF */
  SET_RESTORE_ROUND (FE_TONEAREST);
  u = (x < 0) ? -x : x;
  if (u < C)
    {
      if (u < B)
        {
          if (u < A)
            {
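              /* atan (x) rounds to x for these arguments, but a tiny
                 result is still inexact; force the underflow exception
                 that a plain "return x" would not raise.  */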
              math_check_force_underflow_nonneg (u);
              return x;
            }
          else
            { /* A <= u < B */
              v = x * x;
              yy = d11.d + v * d13.d;
              yy = d9.d + v * yy;
              yy = d7.d + v * yy;
              yy = d5.d + v * yy;
              yy = d3.d + v * yy;
              yy *= x * v;

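              /* Ziv-style rounding test: if perturbing the result by
                 the error bound U1 in either direction rounds to the
                 same double, y is correctly rounded; otherwise retry
                 in the slower double-double arithmetic below.  */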
              if ((y = x + (yy - U1 * x)) == x + (yy + U1 * x))
                return y;

              EMULV (x, x, v, vv, t1, t2, t3, t4, t5); /* v+vv=x^2 */

              s1 = f17.d + v * f19.d;
              s1 = f15.d + v * s1;
              s1 = f13.d + v * s1;
              s1 = f11.d + v * s1;
              s1 *= v;

              ADD2 (f9.d, ff9.d, s1, 0, s2, ss2, t1, t2);
              MUL2 (v, vv, s2, ss2, s1, ss1, t1, t2, t3, t4, t5, t6, t7, t8);
              ADD2 (f7.d, ff7.d, s1, ss1, s2, ss2, t1, t2);
              MUL2 (v, vv, s2, ss2, s1, ss1, t1, t2, t3, t4, t5, t6, t7, t8);
              ADD2 (f5.d, ff5.d, s1, ss1, s2, ss2, t1, t2);
              MUL2 (v, vv, s2, ss2, s1, ss1, t1, t2, t3, t4, t5, t6, t7, t8);
              ADD2 (f3.d, ff3.d, s1, ss1, s2, ss2, t1, t2);
              MUL2 (v, vv, s2, ss2, s1, ss1, t1, t2, t3, t4, t5, t6, t7, t8);
              MUL2 (x, 0, s1, ss1, s2, ss2, t1, t2, t3, t4, t5, t6, t7, t8);
              ADD2 (x, 0, s2, ss2, s1, ss1, t1, t2);
              if ((y = s1 + (ss1 - U5 * s1)) == s1 + (ss1 + U5 * s1))
                return y;

              return atanMp (x, pr);
            }
        }
      else
        { /* B <= u < C */
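          /* Adding and subtracting TWO52 rounds TWO8 * u to an integer
             in the to-nearest mode set above; subtracting 16 turns it
             into an index into the cij/hij tables, which start at
             u = 1/16.  */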
          i = (TWO52 + TWO8 * u) - TWO52;
          i -= 16;
          z = u - cij[i][0].d;
          yy = cij[i][5].d + z * cij[i][6].d;
          yy = cij[i][4].d + z * yy;
          yy = cij[i][3].d + z * yy;
          yy = cij[i][2].d + z * yy;
          yy *= z;

          t1 = cij[i][1].d;
          if (i < 112)
            {
              if (i < 48)
                u2 = U21; /* u < 1/4 */
              else
                u2 = U22; /* 1/4 <= u < 1/2 */
            }
          else
            {
              if (i < 176)
                u2 = U23; /* 1/2 <= u < 3/4 */
              else
                u2 = U24; /* 3/4 <= u <= 1 */
            }
          if ((y = t1 + (yy - u2 * t1)) == t1 + (yy + u2 * t1))
            return __signArctan (x, y);

          z = u - hij[i][0].d;

          s1 = hij[i][14].d + z * hij[i][15].d;
          s1 = hij[i][13].d + z * s1;
          s1 = hij[i][12].d + z * s1;
          s1 = hij[i][11].d + z * s1;
          s1 *= z;

          ADD2 (hij[i][9].d, hij[i][10].d, s1, 0, s2, ss2, t1, t2);
          MUL2 (z, 0, s2, ss2, s1, ss1, t1, t2, t3, t4, t5, t6, t7, t8);
          ADD2 (hij[i][7].d, hij[i][8].d, s1, ss1, s2, ss2, t1, t2);
          MUL2 (z, 0, s2, ss2, s1, ss1, t1, t2, t3, t4, t5, t6, t7, t8);
          ADD2 (hij[i][5].d, hij[i][6].d, s1, ss1, s2, ss2, t1, t2);
          MUL2 (z, 0, s2, ss2, s1, ss1, t1, t2, t3, t4, t5, t6, t7, t8);
          ADD2 (hij[i][3].d, hij[i][4].d, s1, ss1, s2, ss2, t1, t2);
          MUL2 (z, 0, s2, ss2, s1, ss1, t1, t2, t3, t4, t5, t6, t7, t8);
          ADD2 (hij[i][1].d, hij[i][2].d, s1, ss1, s2, ss2, t1, t2);
          if ((y = s2 + (ss2 - U6 * s2)) == s2 + (ss2 + U6 * s2))
            return __signArctan (x, y);

          return atanMp (x, pr);
        }
    }
  else
    {
      if (u < D)
        { /* C <= u < D */
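          /* Use atan (u) = pi/2 - atan (1/u): compute w ~ 1/u together
             with a correction term ww, then proceed as in the
             table-driven branch; HPI and HPI1 hold pi/2 split into two
             doubles.  */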
          w = 1 / u;
          EMULV (w, u, t1, t2, t3, t4, t5, t6, t7);
          ww = w * ((1 - t1) - t2);
          i = (TWO52 + TWO8 * w) - TWO52;
          i -= 16;
          z = (w - cij[i][0].d) + ww;

          yy = cij[i][5].d + z * cij[i][6].d;
          yy = cij[i][4].d + z * yy;
          yy = cij[i][3].d + z * yy;
          yy = cij[i][2].d + z * yy;
          yy = HPI1 - z * yy;

          t1 = HPI - cij[i][1].d;
          if (i < 112)
            u3 = U31; /* w < 1/2 */
          else
            u3 = U32; /* w >= 1/2 */
          if ((y = t1 + (yy - u3)) == t1 + (yy + u3))
            return __signArctan (x, y);

          DIV2 (1, 0, u, 0, w, ww, t1, t2, t3, t4, t5, t6, t7, t8, t9, t10);
          t1 = w - hij[i][0].d;
          EADD (t1, ww, z, zz);

          s1 = hij[i][14].d + z * hij[i][15].d;
          s1 = hij[i][13].d + z * s1;
          s1 = hij[i][12].d + z * s1;
          s1 = hij[i][11].d + z * s1;
          s1 *= z;

          ADD2 (hij[i][9].d, hij[i][10].d, s1, 0, s2, ss2, t1, t2);
          MUL2 (z, zz, s2, ss2, s1, ss1, t1, t2, t3, t4, t5, t6, t7, t8);
          ADD2 (hij[i][7].d, hij[i][8].d, s1, ss1, s2, ss2, t1, t2);
          MUL2 (z, zz, s2, ss2, s1, ss1, t1, t2, t3, t4, t5, t6, t7, t8);
          ADD2 (hij[i][5].d, hij[i][6].d, s1, ss1, s2, ss2, t1, t2);
          MUL2 (z, zz, s2, ss2, s1, ss1, t1, t2, t3, t4, t5, t6, t7, t8);
          ADD2 (hij[i][3].d, hij[i][4].d, s1, ss1, s2, ss2, t1, t2);
          MUL2 (z, zz, s2, ss2, s1, ss1, t1, t2, t3, t4, t5, t6, t7, t8);
          ADD2 (hij[i][1].d, hij[i][2].d, s1, ss1, s2, ss2, t1, t2);
          SUB2 (HPI, HPI1, s2, ss2, s1, ss1, t1, t2);
          if ((y = s1 + (ss1 - U7)) == s1 + (ss1 + U7))
            return __signArctan (x, y);

          return atanMp (x, pr);
        }
      else
        {
          if (u < E)
            { /* D <= u < E */
              w = 1 / u;
              v = w * w;
              EMULV (w, u, t1, t2, t3, t4, t5, t6, t7);

              yy = d11.d + v * d13.d;
              yy = d9.d + v * yy;
              yy = d7.d + v * yy;
              yy = d5.d + v * yy;
              yy = d3.d + v * yy;
              yy *= w * v;

              ww = w * ((1 - t1) - t2);
              ESUB (HPI, w, t3, cor);
              yy = ((HPI1 + cor) - ww) - yy;
              if ((y = t3 + (yy - U4)) == t3 + (yy + U4))
                return __signArctan (x, y);

              DIV2 (1, 0, u, 0, w, ww, t1, t2, t3, t4, t5, t6, t7, t8, t9,
                    t10);
              MUL2 (w, ww, w, ww, v, vv, t1, t2, t3, t4, t5, t6, t7, t8);

              s1 = f17.d + v * f19.d;
              s1 = f15.d + v * s1;
              s1 = f13.d + v * s1;
              s1 = f11.d + v * s1;
              s1 *= v;

              ADD2 (f9.d, ff9.d, s1, 0, s2, ss2, t1, t2);
              MUL2 (v, vv, s2, ss2, s1, ss1, t1, t2, t3, t4, t5, t6, t7, t8);
              ADD2 (f7.d, ff7.d, s1, ss1, s2, ss2, t1, t2);
              MUL2 (v, vv, s2, ss2, s1, ss1, t1, t2, t3, t4, t5, t6, t7, t8);
              ADD2 (f5.d, ff5.d, s1, ss1, s2, ss2, t1, t2);
              MUL2 (v, vv, s2, ss2, s1, ss1, t1, t2, t3, t4, t5, t6, t7, t8);
              ADD2 (f3.d, ff3.d, s1, ss1, s2, ss2, t1, t2);
              MUL2 (v, vv, s2, ss2, s1, ss1, t1, t2, t3, t4, t5, t6, t7, t8);
              MUL2 (w, ww, s1, ss1, s2, ss2, t1, t2, t3, t4, t5, t6, t7, t8);
              ADD2 (w, ww, s2, ss2, s1, ss1, t1, t2);
              SUB2 (HPI, HPI1, s1, ss1, s2, ss2, t1, t2);

              if ((y = s2 + (ss2 - U8)) == s2 + (ss2 + U8))
                return __signArctan (x, y);

              return atanMp (x, pr);
            }
          else
            {
              /* u >= E */
              if (x > 0)
                return HPI;
              else
                return MHPI;
            }
        }
    }
}

/* Final stages. Compute atan(x) by multiple precision arithmetic */
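/* atan (x) is recomputed at each precision pr[i] in turn; as soon as
   adding and subtracting the error bound u9[i] rounds to the same
   double, that value is known to be correctly rounded.  */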
static double
atanMp (double x, const int pr[])
{
  mp_no mpx, mpy, mpy2, mperr, mpt1, mpy1;
  double y1, y2;
  int i, p;

  for (i = 0; i < M; i++)
    {
      p = pr[i];
      __dbl_mp (x, &mpx, p);
      __mpatan (&mpx, &mpy, p);
      __dbl_mp (u9[i].d, &mpt1, p);
      __mul (&mpy, &mpt1, &mperr, p);
      __add (&mpy, &mperr, &mpy1, p);
      __sub (&mpy, &mperr, &mpy2, p);
      __mp_dbl (&mpy1, &y1, p);
      __mp_dbl (&mpy2, &y2, p);
      if (y1 == y2)
        {
          LIBC_PROBE (slowatan, 3, &p, &x, &y1);
          return y1;
        }
    }
  LIBC_PROBE (slowatan_inexact, 3, &p, &x, &y1);
  return y1; /* If exact computation was impossible.  */
}

#ifdef NO_LONG_DOUBLE
weak_alias (atan, atanl)
#endif