Mirror of https://sourceware.org/git/glibc.git
Commit d96164c330
Various floating-point functions have code to force underflow exceptions if a tiny result was computed in a way that might not have resulted in such exceptions even though the result is inexact.  This typically uses math_force_eval to ensure that the underflowing expression is evaluated, but sometimes uses volatile.  This patch refactors such code to use three new macros math_check_force_underflow, math_check_force_underflow_nonneg and math_check_force_underflow_complex (which in turn use math_force_eval).  In the limited number of cases not suited to a simple conversion to these macros, existing uses of volatile are changed to use math_force_eval instead.  The converted code does not always execute exactly the same sequence of operations as the original code, but the overall effects should be the same.

Tested for x86_64, x86, mips64 and powerpc.

        * sysdeps/generic/math_private.h (fabs_tg): New macro.
        (min_of_type): Likewise.
        (math_check_force_underflow): Likewise.
        (math_check_force_underflow_nonneg): Likewise.
        (math_check_force_underflow_complex): Likewise.
        * math/e_exp2l.c (__ieee754_exp2l): Use
        math_check_force_underflow_nonneg.
        * math/k_casinh.c (__kernel_casinh): Likewise.
        * math/k_casinhf.c (__kernel_casinhf): Likewise.
        * math/k_casinhl.c (__kernel_casinhl): Likewise.
        * math/s_catan.c (__catan): Use
        math_check_force_underflow_complex.
        * math/s_catanf.c (__catanf): Likewise.
        * math/s_catanh.c (__catanh): Likewise.
        * math/s_catanhf.c (__catanhf): Likewise.
        * math/s_catanhl.c (__catanhl): Likewise.
        * math/s_catanl.c (__catanl): Likewise.
        * math/s_ccosh.c (__ccosh): Likewise.
        * math/s_ccoshf.c (__ccoshf): Likewise.
        * math/s_ccoshl.c (__ccoshl): Likewise.
        * math/s_cexp.c (__cexp): Likewise.
        * math/s_cexpf.c (__cexpf): Likewise.
        * math/s_cexpl.c (__cexpl): Likewise.
        * math/s_clog.c (__clog): Use math_check_force_underflow_nonneg.
        * math/s_clog10.c (__clog10): Likewise.
        * math/s_clog10f.c (__clog10f): Likewise.
        * math/s_clog10l.c (__clog10l): Likewise.
        * math/s_clogf.c (__clogf): Likewise.
        * math/s_clogl.c (__clogl): Likewise.
        * math/s_csin.c (__csin): Use math_check_force_underflow_complex.
        * math/s_csinf.c (__csinf): Likewise.
        * math/s_csinh.c (__csinh): Likewise.
        * math/s_csinhf.c (__csinhf): Likewise.
        * math/s_csinhl.c (__csinhl): Likewise.
        * math/s_csinl.c (__csinl): Likewise.
        * math/s_csqrt.c (__csqrt): Use math_check_force_underflow.
        * math/s_csqrtf.c (__csqrtf): Likewise.
        * math/s_csqrtl.c (__csqrtl): Likewise.
        * math/s_ctan.c (__ctan): Use math_check_force_underflow_complex.
        * math/s_ctanf.c (__ctanf): Likewise.
        * math/s_ctanh.c (__ctanh): Likewise.
        * math/s_ctanhf.c (__ctanhf): Likewise.
        * math/s_ctanhl.c (__ctanhl): Likewise.
        * math/s_ctanl.c (__ctanl): Likewise.
        * stdlib/strtod_l.c (round_and_return): Use math_force_eval
        instead of volatile.
        * sysdeps/ieee754/dbl-64/e_asin.c (__ieee754_asin): Use
        math_check_force_underflow.
        * sysdeps/ieee754/dbl-64/e_atanh.c (__ieee754_atanh): Likewise.
        * sysdeps/ieee754/dbl-64/e_exp.c (__ieee754_exp): Do not use
        volatile when forcing underflow.
        * sysdeps/ieee754/dbl-64/e_exp2.c (__ieee754_exp2): Use
        math_check_force_underflow_nonneg.
        * sysdeps/ieee754/dbl-64/e_gamma_r.c (__ieee754_gamma_r):
        Likewise.
        * sysdeps/ieee754/dbl-64/e_j1.c (__ieee754_j1): Use
        math_check_force_underflow.
        * sysdeps/ieee754/dbl-64/e_jn.c (__ieee754_jn): Likewise.
        * sysdeps/ieee754/dbl-64/e_sinh.c (__ieee754_sinh): Likewise.
        * sysdeps/ieee754/dbl-64/s_asinh.c (__asinh): Likewise.
        * sysdeps/ieee754/dbl-64/s_atan.c (atan): Use
        math_check_force_underflow_nonneg.
        * sysdeps/ieee754/dbl-64/s_erf.c (__erf): Use
        math_check_force_underflow.
        * sysdeps/ieee754/dbl-64/s_expm1.c (__expm1): Likewise.
        * sysdeps/ieee754/dbl-64/s_fma.c (__fma): Use math_force_eval
        instead of volatile.
        * sysdeps/ieee754/dbl-64/s_log1p.c (__log1p): Use
        math_check_force_underflow.
        * sysdeps/ieee754/dbl-64/s_sin.c (__sin): Likewise.
        * sysdeps/ieee754/dbl-64/s_tan.c (tan): Use
        math_check_force_underflow_nonneg.
        * sysdeps/ieee754/dbl-64/s_tanh.c (__tanh): Use
        math_check_force_underflow.
        * sysdeps/ieee754/flt-32/e_asinf.c (__ieee754_asinf): Likewise.
        * sysdeps/ieee754/flt-32/e_atanhf.c (__ieee754_atanhf): Likewise.
        * sysdeps/ieee754/flt-32/e_exp2f.c (__ieee754_exp2f): Use
        math_check_force_underflow_nonneg.
        * sysdeps/ieee754/flt-32/e_gammaf_r.c (__ieee754_gammaf_r):
        Likewise.
        * sysdeps/ieee754/flt-32/e_j1f.c (__ieee754_j1f): Use
        math_check_force_underflow.
        * sysdeps/ieee754/flt-32/e_jnf.c (__ieee754_jnf): Likewise.
        * sysdeps/ieee754/flt-32/e_sinhf.c (__ieee754_sinhf): Likewise.
        * sysdeps/ieee754/flt-32/k_sinf.c (__kernel_sinf): Likewise.
        * sysdeps/ieee754/flt-32/k_tanf.c (__kernel_tanf): Likewise.
        * sysdeps/ieee754/flt-32/s_asinhf.c (__asinhf): Likewise.
        * sysdeps/ieee754/flt-32/s_atanf.c (__atanf): Likewise.
        * sysdeps/ieee754/flt-32/s_erff.c (__erff): Likewise.
        * sysdeps/ieee754/flt-32/s_expm1f.c (__expm1f): Likewise.
        * sysdeps/ieee754/flt-32/s_log1pf.c (__log1pf): Likewise.
        * sysdeps/ieee754/flt-32/s_tanhf.c (__tanhf): Likewise.
        * sysdeps/ieee754/ldbl-128/e_asinl.c (__ieee754_asinl): Likewise.
        * sysdeps/ieee754/ldbl-128/e_atanhl.c (__ieee754_atanhl):
        Likewise.
        * sysdeps/ieee754/ldbl-128/e_expl.c (__ieee754_expl): Use
        math_check_force_underflow_nonneg.
        * sysdeps/ieee754/ldbl-128/e_gammal_r.c (__ieee754_gammal_r):
        Likewise.
        * sysdeps/ieee754/ldbl-128/e_j1l.c (__ieee754_j1l): Use
        math_check_force_underflow.
        * sysdeps/ieee754/ldbl-128/e_jnl.c (__ieee754_jnl): Likewise.
        * sysdeps/ieee754/ldbl-128/e_sinhl.c (__ieee754_sinhl): Likewise.
        * sysdeps/ieee754/ldbl-128/k_sincosl.c (__kernel_sincosl):
        Likewise.
        * sysdeps/ieee754/ldbl-128/k_sinl.c (__kernel_sinl): Likewise.
        * sysdeps/ieee754/ldbl-128/k_tanl.c (__kernel_tanl): Likewise.
        * sysdeps/ieee754/ldbl-128/s_asinhl.c (__asinhl): Likewise.
        * sysdeps/ieee754/ldbl-128/s_atanl.c (__atanl): Likewise.
        * sysdeps/ieee754/ldbl-128/s_erfl.c (__erfl): Likewise.
        * sysdeps/ieee754/ldbl-128/s_expm1l.c (__expm1l): Likewise.
        * sysdeps/ieee754/ldbl-128/s_fmal.c (__fmal): Use math_force_eval
        instead of volatile.
        * sysdeps/ieee754/ldbl-128/s_log1pl.c (__log1pl): Use
        math_check_force_underflow.
        * sysdeps/ieee754/ldbl-128/s_tanhl.c (__tanhl): Likewise.
        * sysdeps/ieee754/ldbl-128ibm/e_asinl.c (__ieee754_asinl): Use
        math_check_force_underflow.
        * sysdeps/ieee754/ldbl-128ibm/e_atanhl.c (__ieee754_atanhl):
        Likewise.
        * sysdeps/ieee754/ldbl-128ibm/e_gammal_r.c (__ieee754_gammal_r):
        Use math_check_force_underflow_nonneg.
        * sysdeps/ieee754/ldbl-128ibm/e_jnl.c (__ieee754_jnl): Use
        math_check_force_underflow.
        * sysdeps/ieee754/ldbl-128ibm/e_sinhl.c (__ieee754_sinhl):
        Likewise.
        * sysdeps/ieee754/ldbl-128ibm/k_sincosl.c (__kernel_sincosl):
        Likewise.
        * sysdeps/ieee754/ldbl-128ibm/k_sinl.c (__kernel_sinl): Likewise.
        * sysdeps/ieee754/ldbl-128ibm/k_tanl.c (__kernel_tanl): Likewise.
        * sysdeps/ieee754/ldbl-128ibm/s_asinhl.c (__asinhl): Likewise.
        * sysdeps/ieee754/ldbl-128ibm/s_atanl.c (__atanl): Likewise.
        * sysdeps/ieee754/ldbl-128ibm/s_erfl.c (__erfl): Likewise.
        * sysdeps/ieee754/ldbl-128ibm/s_tanhl.c (__tanhl): Likewise.
        * sysdeps/ieee754/ldbl-96/e_asinl.c (__ieee754_asinl): Likewise.
        * sysdeps/ieee754/ldbl-96/e_atanhl.c (__ieee754_atanhl):
        Likewise.
        * sysdeps/ieee754/ldbl-96/e_gammal_r.c (__ieee754_gammal_r): Use
        math_check_force_underflow_nonneg.
        * sysdeps/ieee754/ldbl-96/e_j1l.c (__ieee754_j1l): Use
        math_check_force_underflow.
        * sysdeps/ieee754/ldbl-96/e_jnl.c (__ieee754_jnl): Likewise.
        * sysdeps/ieee754/ldbl-96/e_sinhl.c (__ieee754_sinhl): Likewise.
        * sysdeps/ieee754/ldbl-96/k_sinl.c (__kernel_sinl): Likewise.
        * sysdeps/ieee754/ldbl-96/k_tanl.c (__kernel_tanl): Use
        math_check_force_underflow_nonneg.
        * sysdeps/ieee754/ldbl-96/s_asinhl.c (__asinhl): Use
        math_check_force_underflow.
        * sysdeps/ieee754/ldbl-96/s_erfl.c (__erfl): Likewise.
        * sysdeps/ieee754/ldbl-96/s_fmal.c (__fmal): Use math_force_eval
        instead of volatile.
        * sysdeps/ieee754/ldbl-96/s_tanhl.c (__tanhl): Use
        math_check_force_underflow.
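To make the mechanism concrete, here is a minimal sketch, not the actual glibc definitions (the real macros live in sysdeps/generic/math_private.h and are type-generic via fabs_tg and min_of_type; the names math_force_eval_sketch and check_force_underflow_sketch are hypothetical).  The idea is that squaring a value smaller than the least normal is certain to underflow, and an empty asm taking the product as input keeps the compiler from discarding the otherwise dead multiply:

#include <float.h>
#include <math.h>

/* Force X to be evaluated for its side effects (exception flags)
   even though its value is unused.  Assumes GNU C statement
   expressions.  */
#define math_force_eval_sketch(x) \
  ({ double __mfe = (x); __asm__ __volatile__ ("" : : "m" (__mfe)); })

/* Double-only sketch of a math_check_force_underflow-style macro:
   if |x| is below the least normal, evaluate x * x, which is
   guaranteed to raise the underflow exception.  */
#define check_force_underflow_sketch(x)           \
  do                                              \
    {                                             \
      double __cfu = (x);                         \
      if (fabs (__cfu) < DBL_MIN)                 \
        math_force_eval_sketch (__cfu * __cfu);   \
    }                                             \
  while (0)

The _nonneg variant can skip the absolute-value step when the argument is known to be non-negative, and the _complex variant applies the check to both the real and imaginary parts of its argument.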
849 lines
22 KiB
C
/*
 * IBM Accurate Mathematical Library
 * written by International Business Machines Corp.
 * Copyright (C) 2001-2015 Free Software Foundation, Inc.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU Lesser General Public License as published by
 * the Free Software Foundation; either version 2.1 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public License
 * along with this program; if not, see <http://www.gnu.org/licenses/>.
 */
/*********************************************************************/
/* MODULE_NAME: utan.c                                               */
/*                                                                   */
/* FUNCTIONS: utan                                                   */
/*            tanMp                                                  */
/*                                                                   */
/* FILES NEEDED: dla.h endian.h mpa.h mydefs.h utan.h                */
/*               branred.c sincos32.c mptan.c                        */
/*               utan.tbl                                            */
/*                                                                   */
/* An ultimate tan routine. Given an IEEE double machine number x    */
/* it computes the correctly rounded (to nearest) value of tan(x).   */
/* Assumption: Machine arithmetic operations are performed in        */
/* round to nearest mode of IEEE 754 standard.                       */
/*                                                                   */
/*********************************************************************/

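/* Structure of the implementation: each argument range below is
   handled by up to three stages of increasing precision -- an
   ordinary double polynomial, a double-double evaluation built from
   the dla.h primitives, and the multiple precision fallback tanMp ()
   -- and a stage's result is accepted only if its rounding test
   against that stage's error bound succeeds.  */
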
#include <errno.h>
#include <float.h>
#include "endian.h"
#include <dla.h>
#include "mpa.h"
#include "MathLib.h"
#include <math.h>
#include <math_private.h>
#include <fenv.h>
#include <stap-probe.h>

#ifndef SECTION
# define SECTION
#endif

static double tanMp (double);
void __mptan (double, mp_no *, int);

double
SECTION
tan (double x)
{
#include "utan.h"
#include "utan.tbl"

  int ux, i, n;
  double a, da, a2, b, db, c, dc, c1, cc1, c2, cc2, c3, cc3, fi, ffi, gi, pz,
         s, sy, t, t1, t2, t3, t4, t7, t8, t9, t10, w, x2, xn, xx2, y, ya,
         yya, z0, z, zz, z2, zz2;
#ifndef DLA_FMS
  double t5, t6;
#endif
  int p;
  number num, v;
  mp_no mpa, mpt1, mpt2;

  double retval;

  int __branred (double, double *, double *);
  int __mpranred (double, mp_no *, int);

  SET_RESTORE_ROUND_53BIT (FE_TONEAREST);

  /* x=+-INF, x=NaN */
  num.d = x;
  ux = num.i[HIGH_HALF];
  if ((ux & 0x7ff00000) == 0x7ff00000)
    {
      if ((ux & 0x7fffffff) == 0x7ff00000)
        __set_errno (EDOM);
      retval = x - x;
      goto ret;
    }

  w = (x < 0.0) ? -x : x;

  /* (I) The case abs(x) <= 1.259e-8 */
  if (w <= g1.d)
    {
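      /* tan (x) rounds to x here; when x is tiny the result is also
         tiny and inexact, so the underflow exception must be raised.
         math_check_force_underflow_nonneg (from math_private.h) does
         that without changing the value.  */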
      math_check_force_underflow_nonneg (w);
      retval = x;
      goto ret;
    }

  /* (II) The case 1.259e-8 < abs(x) <= 0.0608 */
  if (w <= g2.d)
    {
      /* First stage */
      x2 = x * x;

      t2 = d9.d + x2 * d11.d;
      t2 = d7.d + x2 * t2;
      t2 = d5.d + x2 * t2;
      t2 = d3.d + x2 * t2;
      t2 *= x * x2;

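      /* Rounding test: if adding and subtracting the stage error
         bound u1 leaves the rounded result unchanged, the first
         stage already gives the correctly rounded tan; otherwise
         fall through to a more precise stage (Ziv's technique).  */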
      if ((y = x + (t2 - u1.d * t2)) == x + (t2 + u1.d * t2))
        {
          retval = y;
          goto ret;
        }

      /* Second stage */
      c1 = a25.d + x2 * a27.d;
      c1 = a23.d + x2 * c1;
      c1 = a21.d + x2 * c1;
      c1 = a19.d + x2 * c1;
      c1 = a17.d + x2 * c1;
      c1 = a15.d + x2 * c1;
      c1 *= x2;

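      /* EMULV/ADD2/MUL2/SUB2/DIV2 come from dla.h: double-double
         ("double length") arithmetic that carries each value as an
         unevaluated sum of two doubles, e.g. c1 + cc1, giving
         roughly twice the precision of plain double.  */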
      EMULV (x, x, x2, xx2, t1, t2, t3, t4, t5);
      ADD2 (a13.d, aa13.d, c1, 0.0, c2, cc2, t1, t2);
      MUL2 (x2, xx2, c2, cc2, c1, cc1, t1, t2, t3, t4, t5, t6, t7, t8);
      ADD2 (a11.d, aa11.d, c1, cc1, c2, cc2, t1, t2);
      MUL2 (x2, xx2, c2, cc2, c1, cc1, t1, t2, t3, t4, t5, t6, t7, t8);
      ADD2 (a9.d, aa9.d, c1, cc1, c2, cc2, t1, t2);
      MUL2 (x2, xx2, c2, cc2, c1, cc1, t1, t2, t3, t4, t5, t6, t7, t8);
      ADD2 (a7.d, aa7.d, c1, cc1, c2, cc2, t1, t2);
      MUL2 (x2, xx2, c2, cc2, c1, cc1, t1, t2, t3, t4, t5, t6, t7, t8);
      ADD2 (a5.d, aa5.d, c1, cc1, c2, cc2, t1, t2);
      MUL2 (x2, xx2, c2, cc2, c1, cc1, t1, t2, t3, t4, t5, t6, t7, t8);
      ADD2 (a3.d, aa3.d, c1, cc1, c2, cc2, t1, t2);
      MUL2 (x2, xx2, c2, cc2, c1, cc1, t1, t2, t3, t4, t5, t6, t7, t8);
      MUL2 (x, 0.0, c1, cc1, c2, cc2, t1, t2, t3, t4, t5, t6, t7, t8);
      ADD2 (x, 0.0, c2, cc2, c1, cc1, t1, t2);
      if ((y = c1 + (cc1 - u2.d * c1)) == c1 + (cc1 + u2.d * c1))
        {
          retval = y;
          goto ret;
        }
      retval = tanMp (x);
      goto ret;
    }

  /* (III) The case 0.0608 < abs(x) <= 0.787 */
  if (w <= g3.d)
    {
      /* First stage */
      i = ((int) (mfftnhf.d + TWO8 * w));
      z = w - xfg[i][0].d;
      z2 = z * z;
      s = (x < 0.0) ? -1 : 1;
      pz = z + z * z2 * (e0.d + z2 * e1.d);
      fi = xfg[i][1].d;
      gi = xfg[i][2].d;
      t2 = pz * (gi + fi) / (gi - pz);
      if ((y = fi + (t2 - fi * u3.d)) == fi + (t2 + fi * u3.d))
        {
          retval = (s * y);
          goto ret;
        }
      t3 = (t2 < 0.0) ? -t2 : t2;
      t4 = fi * ua3.d + t3 * ub3.d;
      if ((y = fi + (t2 - t4)) == fi + (t2 + t4))
        {
          retval = (s * y);
          goto ret;
        }

      /* Second stage */
      ffi = xfg[i][3].d;
      c1 = z2 * (a7.d + z2 * (a9.d + z2 * a11.d));
      EMULV (z, z, z2, zz2, t1, t2, t3, t4, t5);
      ADD2 (a5.d, aa5.d, c1, 0.0, c2, cc2, t1, t2);
      MUL2 (z2, zz2, c2, cc2, c1, cc1, t1, t2, t3, t4, t5, t6, t7, t8);
      ADD2 (a3.d, aa3.d, c1, cc1, c2, cc2, t1, t2);
      MUL2 (z2, zz2, c2, cc2, c1, cc1, t1, t2, t3, t4, t5, t6, t7, t8);
      MUL2 (z, 0.0, c1, cc1, c2, cc2, t1, t2, t3, t4, t5, t6, t7, t8);
      ADD2 (z, 0.0, c2, cc2, c1, cc1, t1, t2);

      ADD2 (fi, ffi, c1, cc1, c2, cc2, t1, t2);
      MUL2 (fi, ffi, c1, cc1, c3, cc3, t1, t2, t3, t4, t5, t6, t7, t8);
      SUB2 (1.0, 0.0, c3, cc3, c1, cc1, t1, t2);
      DIV2 (c2, cc2, c1, cc1, c3, cc3, t1, t2, t3, t4, t5, t6, t7, t8, t9,
            t10);

      if ((y = c3 + (cc3 - u4.d * c3)) == c3 + (cc3 + u4.d * c3))
        {
          retval = (s * y);
          goto ret;
        }
      retval = tanMp (x);
      goto ret;
    }

  /* (---) The case 0.787 < abs(x) <= 25 */
  if (w <= g4.d)
    {
      /* Range reduction by algorithm i */
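      /* Cody-Waite style reduction: xn gets x/(pi/2) rounded to an
         integer via the toint trick, and mp1 + mp2 + mp3 represent
         pi/2 in pieces, so a + da approximates x - xn*pi/2 with the
         subtractions performed essentially exactly.  */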
      t = (x * hpinv.d + toint.d);
      xn = t - toint.d;
      v.d = t;
      t1 = (x - xn * mp1.d) - xn * mp2.d;
      n = v.i[LOW_HALF] & 0x00000001;
      da = xn * mp3.d;
      a = t1 - da;
      da = (t1 - a) - da;
      if (a < 0.0)
        {
          ya = -a;
          yya = -da;
          sy = -1;
        }
      else
        {
          ya = a;
          yya = da;
          sy = 1;
        }

      /* (IV),(V) The case 0.787 < abs(x) <= 25, abs(y) <= 1e-7 */
      if (ya <= gy1.d)
        {
          retval = tanMp (x);
          goto ret;
        }

      /* (VI) The case 0.787 < abs(x) <= 25, 1e-7 < abs(y) <= 0.0608 */
      if (ya <= gy2.d)
        {
          a2 = a * a;
          t2 = d9.d + a2 * d11.d;
          t2 = d7.d + a2 * t2;
          t2 = d5.d + a2 * t2;
          t2 = d3.d + a2 * t2;
          t2 = da + a * a2 * t2;

          if (n)
            {
              /* First stage -cot */
              EADD (a, t2, b, db);
              DIV2 (1.0, 0.0, b, db, c, dc, t1, t2, t3, t4, t5, t6, t7, t8,
                    t9, t10);
              if ((y = c + (dc - u6.d * c)) == c + (dc + u6.d * c))
                {
                  retval = (-y);
                  goto ret;
                }
            }
          else
            {
              /* First stage tan */
              if ((y = a + (t2 - u5.d * a)) == a + (t2 + u5.d * a))
                {
                  retval = y;
                  goto ret;
                }
            }
          /* Second stage */
          /* Range reduction by algorithm ii */
          t = (x * hpinv.d + toint.d);
          xn = t - toint.d;
          v.d = t;
          t1 = (x - xn * mp1.d) - xn * mp2.d;
          n = v.i[LOW_HALF] & 0x00000001;
          da = xn * pp3.d;
          t = t1 - da;
          da = (t1 - t) - da;
          t1 = xn * pp4.d;
          a = t - t1;
          da = ((t - a) - t1) + da;

          /* Second stage */
          EADD (a, da, t1, t2);
          a = t1;
          da = t2;
          MUL2 (a, da, a, da, x2, xx2, t1, t2, t3, t4, t5, t6, t7, t8);

          c1 = a25.d + x2 * a27.d;
          c1 = a23.d + x2 * c1;
          c1 = a21.d + x2 * c1;
          c1 = a19.d + x2 * c1;
          c1 = a17.d + x2 * c1;
          c1 = a15.d + x2 * c1;
          c1 *= x2;

          ADD2 (a13.d, aa13.d, c1, 0.0, c2, cc2, t1, t2);
          MUL2 (x2, xx2, c2, cc2, c1, cc1, t1, t2, t3, t4, t5, t6, t7, t8);
          ADD2 (a11.d, aa11.d, c1, cc1, c2, cc2, t1, t2);
          MUL2 (x2, xx2, c2, cc2, c1, cc1, t1, t2, t3, t4, t5, t6, t7, t8);
          ADD2 (a9.d, aa9.d, c1, cc1, c2, cc2, t1, t2);
          MUL2 (x2, xx2, c2, cc2, c1, cc1, t1, t2, t3, t4, t5, t6, t7, t8);
          ADD2 (a7.d, aa7.d, c1, cc1, c2, cc2, t1, t2);
          MUL2 (x2, xx2, c2, cc2, c1, cc1, t1, t2, t3, t4, t5, t6, t7, t8);
          ADD2 (a5.d, aa5.d, c1, cc1, c2, cc2, t1, t2);
          MUL2 (x2, xx2, c2, cc2, c1, cc1, t1, t2, t3, t4, t5, t6, t7, t8);
          ADD2 (a3.d, aa3.d, c1, cc1, c2, cc2, t1, t2);
          MUL2 (x2, xx2, c2, cc2, c1, cc1, t1, t2, t3, t4, t5, t6, t7, t8);
          MUL2 (a, da, c1, cc1, c2, cc2, t1, t2, t3, t4, t5, t6, t7, t8);
          ADD2 (a, da, c2, cc2, c1, cc1, t1, t2);

          if (n)
            {
              /* Second stage -cot */
              DIV2 (1.0, 0.0, c1, cc1, c2, cc2, t1, t2, t3, t4, t5, t6, t7,
                    t8, t9, t10);
              if ((y = c2 + (cc2 - u8.d * c2)) == c2 + (cc2 + u8.d * c2))
                {
                  retval = (-y);
                  goto ret;
                }
            }
          else
            {
              /* Second stage tan */
              if ((y = c1 + (cc1 - u7.d * c1)) == c1 + (cc1 + u7.d * c1))
                {
                  retval = y;
                  goto ret;
                }
            }
          retval = tanMp (x);
          goto ret;
        }

      /* (VII) The case 0.787 < abs(x) <= 25, 0.0608 < abs(y) <= 0.787 */

      /* First stage */
      i = ((int) (mfftnhf.d + TWO8 * ya));
      z = (z0 = (ya - xfg[i][0].d)) + yya;
      z2 = z * z;
      pz = z + z * z2 * (e0.d + z2 * e1.d);
      fi = xfg[i][1].d;
      gi = xfg[i][2].d;

      if (n)
        {
          /* -cot */
          t2 = pz * (fi + gi) / (fi + pz);
          if ((y = gi - (t2 - gi * u10.d)) == gi - (t2 + gi * u10.d))
            {
              retval = (-sy * y);
              goto ret;
            }
          t3 = (t2 < 0.0) ? -t2 : t2;
          t4 = gi * ua10.d + t3 * ub10.d;
          if ((y = gi - (t2 - t4)) == gi - (t2 + t4))
            {
              retval = (-sy * y);
              goto ret;
            }
        }
      else
        {
          /* tan */
          t2 = pz * (gi + fi) / (gi - pz);
          if ((y = fi + (t2 - fi * u9.d)) == fi + (t2 + fi * u9.d))
            {
              retval = (sy * y);
              goto ret;
            }
          t3 = (t2 < 0.0) ? -t2 : t2;
          t4 = fi * ua9.d + t3 * ub9.d;
          if ((y = fi + (t2 - t4)) == fi + (t2 + t4))
            {
              retval = (sy * y);
              goto ret;
            }
        }

      /* Second stage */
      ffi = xfg[i][3].d;
      EADD (z0, yya, z, zz);
      MUL2 (z, zz, z, zz, z2, zz2, t1, t2, t3, t4, t5, t6, t7, t8);
      c1 = z2 * (a7.d + z2 * (a9.d + z2 * a11.d));
      ADD2 (a5.d, aa5.d, c1, 0.0, c2, cc2, t1, t2);
      MUL2 (z2, zz2, c2, cc2, c1, cc1, t1, t2, t3, t4, t5, t6, t7, t8);
      ADD2 (a3.d, aa3.d, c1, cc1, c2, cc2, t1, t2);
      MUL2 (z2, zz2, c2, cc2, c1, cc1, t1, t2, t3, t4, t5, t6, t7, t8);
      MUL2 (z, zz, c1, cc1, c2, cc2, t1, t2, t3, t4, t5, t6, t7, t8);
      ADD2 (z, zz, c2, cc2, c1, cc1, t1, t2);

      ADD2 (fi, ffi, c1, cc1, c2, cc2, t1, t2);
      MUL2 (fi, ffi, c1, cc1, c3, cc3, t1, t2, t3, t4, t5, t6, t7, t8);
      SUB2 (1.0, 0.0, c3, cc3, c1, cc1, t1, t2);

      if (n)
        {
          /* -cot */
          DIV2 (c1, cc1, c2, cc2, c3, cc3, t1, t2, t3, t4, t5, t6, t7, t8, t9,
                t10);
          if ((y = c3 + (cc3 - u12.d * c3)) == c3 + (cc3 + u12.d * c3))
            {
              retval = (-sy * y);
              goto ret;
            }
        }
      else
        {
          /* tan */
          DIV2 (c2, cc2, c1, cc1, c3, cc3, t1, t2, t3, t4, t5, t6, t7, t8, t9,
                t10);
          if ((y = c3 + (cc3 - u11.d * c3)) == c3 + (cc3 + u11.d * c3))
            {
              retval = (sy * y);
              goto ret;
            }
        }

      retval = tanMp (x);
      goto ret;
    }

  /* (---) The case 25 < abs(x) <= 1e8 */
  if (w <= g5.d)
    {
      /* Range reduction by algorithm ii */
      t = (x * hpinv.d + toint.d);
      xn = t - toint.d;
      v.d = t;
      t1 = (x - xn * mp1.d) - xn * mp2.d;
      n = v.i[LOW_HALF] & 0x00000001;
      da = xn * pp3.d;
      t = t1 - da;
      da = (t1 - t) - da;
      t1 = xn * pp4.d;
      a = t - t1;
      da = ((t - a) - t1) + da;
      EADD (a, da, t1, t2);
      a = t1;
      da = t2;
      if (a < 0.0)
        {
          ya = -a;
          yya = -da;
          sy = -1;
        }
      else
        {
          ya = a;
          yya = da;
          sy = 1;
        }

      /* (+++) The case 25 < abs(x) <= 1e8, abs(y) <= 1e-7 */
      if (ya <= gy1.d)
        {
          retval = tanMp (x);
          goto ret;
        }

      /* (VIII) The case 25 < abs(x) <= 1e8, 1e-7 < abs(y) <= 0.0608 */
      if (ya <= gy2.d)
        {
          a2 = a * a;
          t2 = d9.d + a2 * d11.d;
          t2 = d7.d + a2 * t2;
          t2 = d5.d + a2 * t2;
          t2 = d3.d + a2 * t2;
          t2 = da + a * a2 * t2;

          if (n)
            {
              /* First stage -cot */
              EADD (a, t2, b, db);
              DIV2 (1.0, 0.0, b, db, c, dc, t1, t2, t3, t4, t5, t6, t7, t8,
                    t9, t10);
              if ((y = c + (dc - u14.d * c)) == c + (dc + u14.d * c))
                {
                  retval = (-y);
                  goto ret;
                }
            }
          else
            {
              /* First stage tan */
              if ((y = a + (t2 - u13.d * a)) == a + (t2 + u13.d * a))
                {
                  retval = y;
                  goto ret;
                }
            }

          /* Second stage */
          MUL2 (a, da, a, da, x2, xx2, t1, t2, t3, t4, t5, t6, t7, t8);
          c1 = a25.d + x2 * a27.d;
          c1 = a23.d + x2 * c1;
          c1 = a21.d + x2 * c1;
          c1 = a19.d + x2 * c1;
          c1 = a17.d + x2 * c1;
          c1 = a15.d + x2 * c1;
          c1 *= x2;

          ADD2 (a13.d, aa13.d, c1, 0.0, c2, cc2, t1, t2);
          MUL2 (x2, xx2, c2, cc2, c1, cc1, t1, t2, t3, t4, t5, t6, t7, t8);
          ADD2 (a11.d, aa11.d, c1, cc1, c2, cc2, t1, t2);
          MUL2 (x2, xx2, c2, cc2, c1, cc1, t1, t2, t3, t4, t5, t6, t7, t8);
          ADD2 (a9.d, aa9.d, c1, cc1, c2, cc2, t1, t2);
          MUL2 (x2, xx2, c2, cc2, c1, cc1, t1, t2, t3, t4, t5, t6, t7, t8);
          ADD2 (a7.d, aa7.d, c1, cc1, c2, cc2, t1, t2);
          MUL2 (x2, xx2, c2, cc2, c1, cc1, t1, t2, t3, t4, t5, t6, t7, t8);
          ADD2 (a5.d, aa5.d, c1, cc1, c2, cc2, t1, t2);
          MUL2 (x2, xx2, c2, cc2, c1, cc1, t1, t2, t3, t4, t5, t6, t7, t8);
          ADD2 (a3.d, aa3.d, c1, cc1, c2, cc2, t1, t2);
          MUL2 (x2, xx2, c2, cc2, c1, cc1, t1, t2, t3, t4, t5, t6, t7, t8);
          MUL2 (a, da, c1, cc1, c2, cc2, t1, t2, t3, t4, t5, t6, t7, t8);
          ADD2 (a, da, c2, cc2, c1, cc1, t1, t2);

          if (n)
            {
              /* Second stage -cot */
              DIV2 (1.0, 0.0, c1, cc1, c2, cc2, t1, t2, t3, t4, t5, t6, t7,
                    t8, t9, t10);
              if ((y = c2 + (cc2 - u16.d * c2)) == c2 + (cc2 + u16.d * c2))
                {
                  retval = (-y);
                  goto ret;
                }
            }
          else
            {
              /* Second stage tan */
              if ((y = c1 + (cc1 - u15.d * c1)) == c1 + (cc1 + u15.d * c1))
                {
                  retval = (y);
                  goto ret;
                }
            }
          retval = tanMp (x);
          goto ret;
        }

      /* (IX) The case 25 < abs(x) <= 1e8, 0.0608 < abs(y) <= 0.787 */
      /* First stage */
      i = ((int) (mfftnhf.d + TWO8 * ya));
      z = (z0 = (ya - xfg[i][0].d)) + yya;
      z2 = z * z;
      pz = z + z * z2 * (e0.d + z2 * e1.d);
      fi = xfg[i][1].d;
      gi = xfg[i][2].d;

      if (n)
        {
          /* -cot */
          t2 = pz * (fi + gi) / (fi + pz);
          if ((y = gi - (t2 - gi * u18.d)) == gi - (t2 + gi * u18.d))
            {
              retval = (-sy * y);
              goto ret;
            }
          t3 = (t2 < 0.0) ? -t2 : t2;
          t4 = gi * ua18.d + t3 * ub18.d;
          if ((y = gi - (t2 - t4)) == gi - (t2 + t4))
            {
              retval = (-sy * y);
              goto ret;
            }
        }
      else
        {
          /* tan */
          t2 = pz * (gi + fi) / (gi - pz);
          if ((y = fi + (t2 - fi * u17.d)) == fi + (t2 + fi * u17.d))
            {
              retval = (sy * y);
              goto ret;
            }
          t3 = (t2 < 0.0) ? -t2 : t2;
          t4 = fi * ua17.d + t3 * ub17.d;
          if ((y = fi + (t2 - t4)) == fi + (t2 + t4))
            {
              retval = (sy * y);
              goto ret;
            }
        }

      /* Second stage */
      ffi = xfg[i][3].d;
      EADD (z0, yya, z, zz);
      MUL2 (z, zz, z, zz, z2, zz2, t1, t2, t3, t4, t5, t6, t7, t8);
      c1 = z2 * (a7.d + z2 * (a9.d + z2 * a11.d));
      ADD2 (a5.d, aa5.d, c1, 0.0, c2, cc2, t1, t2);
      MUL2 (z2, zz2, c2, cc2, c1, cc1, t1, t2, t3, t4, t5, t6, t7, t8);
      ADD2 (a3.d, aa3.d, c1, cc1, c2, cc2, t1, t2);
      MUL2 (z2, zz2, c2, cc2, c1, cc1, t1, t2, t3, t4, t5, t6, t7, t8);
      MUL2 (z, zz, c1, cc1, c2, cc2, t1, t2, t3, t4, t5, t6, t7, t8);
      ADD2 (z, zz, c2, cc2, c1, cc1, t1, t2);

      ADD2 (fi, ffi, c1, cc1, c2, cc2, t1, t2);
      MUL2 (fi, ffi, c1, cc1, c3, cc3, t1, t2, t3, t4, t5, t6, t7, t8);
      SUB2 (1.0, 0.0, c3, cc3, c1, cc1, t1, t2);

      if (n)
        {
          /* -cot */
          DIV2 (c1, cc1, c2, cc2, c3, cc3, t1, t2, t3, t4, t5, t6, t7, t8, t9,
                t10);
          if ((y = c3 + (cc3 - u20.d * c3)) == c3 + (cc3 + u20.d * c3))
            {
              retval = (-sy * y);
              goto ret;
            }
        }
      else
        {
          /* tan */
          DIV2 (c2, cc2, c1, cc1, c3, cc3, t1, t2, t3, t4, t5, t6, t7, t8, t9,
                t10);
          if ((y = c3 + (cc3 - u19.d * c3)) == c3 + (cc3 + u19.d * c3))
            {
              retval = (sy * y);
              goto ret;
            }
        }
      retval = tanMp (x);
      goto ret;
    }

  /* (---) The case 1e8 < abs(x) < 2**1024 */
  /* Range reduction by algorithm iii */
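  /* __branred (branred.c) reduces huge arguments against a stored
     high-precision 2/pi (in the style of Payne and Hanek); it returns
     the quadrant count, whose low bit is kept in n, and leaves the
     reduced argument in a + da.  */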
  n = (__branred (x, &a, &da)) & 0x00000001;
  EADD (a, da, t1, t2);
  a = t1;
  da = t2;
  if (a < 0.0)
    {
      ya = -a;
      yya = -da;
      sy = -1;
    }
  else
    {
      ya = a;
      yya = da;
      sy = 1;
    }

  /* (+++) The case 1e8 < abs(x) < 2**1024, abs(y) <= 1e-7 */
  if (ya <= gy1.d)
    {
      retval = tanMp (x);
      goto ret;
    }

  /* (X) The case 1e8 < abs(x) < 2**1024, 1e-7 < abs(y) <= 0.0608 */
  if (ya <= gy2.d)
    {
      a2 = a * a;
      t2 = d9.d + a2 * d11.d;
      t2 = d7.d + a2 * t2;
      t2 = d5.d + a2 * t2;
      t2 = d3.d + a2 * t2;
      t2 = da + a * a2 * t2;
      if (n)
        {
          /* First stage -cot */
          EADD (a, t2, b, db);
          DIV2 (1.0, 0.0, b, db, c, dc, t1, t2, t3, t4, t5, t6, t7, t8, t9,
                t10);
          if ((y = c + (dc - u22.d * c)) == c + (dc + u22.d * c))
            {
              retval = (-y);
              goto ret;
            }
        }
      else
        {
          /* First stage tan */
          if ((y = a + (t2 - u21.d * a)) == a + (t2 + u21.d * a))
            {
              retval = y;
              goto ret;
            }
        }

      /* Second stage */
      /* Reduction by algorithm iv */
      p = 10;
      n = (__mpranred (x, &mpa, p)) & 0x00000001;
      __mp_dbl (&mpa, &a, p);
      __dbl_mp (a, &mpt1, p);
      __sub (&mpa, &mpt1, &mpt2, p);
      __mp_dbl (&mpt2, &da, p);

      MUL2 (a, da, a, da, x2, xx2, t1, t2, t3, t4, t5, t6, t7, t8);

      c1 = a25.d + x2 * a27.d;
      c1 = a23.d + x2 * c1;
      c1 = a21.d + x2 * c1;
      c1 = a19.d + x2 * c1;
      c1 = a17.d + x2 * c1;
      c1 = a15.d + x2 * c1;
      c1 *= x2;

      ADD2 (a13.d, aa13.d, c1, 0.0, c2, cc2, t1, t2);
      MUL2 (x2, xx2, c2, cc2, c1, cc1, t1, t2, t3, t4, t5, t6, t7, t8);
      ADD2 (a11.d, aa11.d, c1, cc1, c2, cc2, t1, t2);
      MUL2 (x2, xx2, c2, cc2, c1, cc1, t1, t2, t3, t4, t5, t6, t7, t8);
      ADD2 (a9.d, aa9.d, c1, cc1, c2, cc2, t1, t2);
      MUL2 (x2, xx2, c2, cc2, c1, cc1, t1, t2, t3, t4, t5, t6, t7, t8);
      ADD2 (a7.d, aa7.d, c1, cc1, c2, cc2, t1, t2);
      MUL2 (x2, xx2, c2, cc2, c1, cc1, t1, t2, t3, t4, t5, t6, t7, t8);
      ADD2 (a5.d, aa5.d, c1, cc1, c2, cc2, t1, t2);
      MUL2 (x2, xx2, c2, cc2, c1, cc1, t1, t2, t3, t4, t5, t6, t7, t8);
      ADD2 (a3.d, aa3.d, c1, cc1, c2, cc2, t1, t2);
      MUL2 (x2, xx2, c2, cc2, c1, cc1, t1, t2, t3, t4, t5, t6, t7, t8);
      MUL2 (a, da, c1, cc1, c2, cc2, t1, t2, t3, t4, t5, t6, t7, t8);
      ADD2 (a, da, c2, cc2, c1, cc1, t1, t2);

      if (n)
        {
          /* Second stage -cot */
          DIV2 (1.0, 0.0, c1, cc1, c2, cc2, t1, t2, t3, t4, t5, t6, t7, t8,
                t9, t10);
          if ((y = c2 + (cc2 - u24.d * c2)) == c2 + (cc2 + u24.d * c2))
            {
              retval = (-y);
              goto ret;
            }
        }
      else
        {
          /* Second stage tan */
          if ((y = c1 + (cc1 - u23.d * c1)) == c1 + (cc1 + u23.d * c1))
            {
              retval = y;
              goto ret;
            }
        }
      retval = tanMp (x);
      goto ret;
    }

  /* (XI) The case 1e8 < abs(x) < 2**1024, 0.0608 < abs(y) <= 0.787 */
  /* First stage */
  i = ((int) (mfftnhf.d + TWO8 * ya));
  z = (z0 = (ya - xfg[i][0].d)) + yya;
  z2 = z * z;
  pz = z + z * z2 * (e0.d + z2 * e1.d);
  fi = xfg[i][1].d;
  gi = xfg[i][2].d;

  if (n)
    {
      /* -cot */
      t2 = pz * (fi + gi) / (fi + pz);
      if ((y = gi - (t2 - gi * u26.d)) == gi - (t2 + gi * u26.d))
        {
          retval = (-sy * y);
          goto ret;
        }
      t3 = (t2 < 0.0) ? -t2 : t2;
      t4 = gi * ua26.d + t3 * ub26.d;
      if ((y = gi - (t2 - t4)) == gi - (t2 + t4))
        {
          retval = (-sy * y);
          goto ret;
        }
    }
  else
    {
      /* tan */
      t2 = pz * (gi + fi) / (gi - pz);
      if ((y = fi + (t2 - fi * u25.d)) == fi + (t2 + fi * u25.d))
        {
          retval = (sy * y);
          goto ret;
        }
      t3 = (t2 < 0.0) ? -t2 : t2;
      t4 = fi * ua25.d + t3 * ub25.d;
      if ((y = fi + (t2 - t4)) == fi + (t2 + t4))
        {
          retval = (sy * y);
          goto ret;
        }
    }

  /* Second stage */
  ffi = xfg[i][3].d;
  EADD (z0, yya, z, zz);
  MUL2 (z, zz, z, zz, z2, zz2, t1, t2, t3, t4, t5, t6, t7, t8);
  c1 = z2 * (a7.d + z2 * (a9.d + z2 * a11.d));
  ADD2 (a5.d, aa5.d, c1, 0.0, c2, cc2, t1, t2);
  MUL2 (z2, zz2, c2, cc2, c1, cc1, t1, t2, t3, t4, t5, t6, t7, t8);
  ADD2 (a3.d, aa3.d, c1, cc1, c2, cc2, t1, t2);
  MUL2 (z2, zz2, c2, cc2, c1, cc1, t1, t2, t3, t4, t5, t6, t7, t8);
  MUL2 (z, zz, c1, cc1, c2, cc2, t1, t2, t3, t4, t5, t6, t7, t8);
  ADD2 (z, zz, c2, cc2, c1, cc1, t1, t2);

  ADD2 (fi, ffi, c1, cc1, c2, cc2, t1, t2);
  MUL2 (fi, ffi, c1, cc1, c3, cc3, t1, t2, t3, t4, t5, t6, t7, t8);
  SUB2 (1.0, 0.0, c3, cc3, c1, cc1, t1, t2);

  if (n)
    {
      /* -cot */
      DIV2 (c1, cc1, c2, cc2, c3, cc3, t1, t2, t3, t4, t5, t6, t7, t8, t9,
            t10);
      if ((y = c3 + (cc3 - u28.d * c3)) == c3 + (cc3 + u28.d * c3))
        {
          retval = (-sy * y);
          goto ret;
        }
    }
  else
    {
      /* tan */
      DIV2 (c2, cc2, c1, cc1, c3, cc3, t1, t2, t3, t4, t5, t6, t7, t8, t9,
            t10);
      if ((y = c3 + (cc3 - u27.d * c3)) == c3 + (cc3 + u27.d * c3))
        {
          retval = (sy * y);
          goto ret;
        }
    }
  retval = tanMp (x);
  goto ret;

ret:
  return retval;
}

/* multiple precision stage */
/* Convert x to a multiple precision number, compute tan(x) by the */
/* mptan() routine and convert the result back to double.          */
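/* p = 32 selects the working precision of the mp routines; it is far
   more than double precision, so the conversion back to double is
   expected to round correctly.  LIBC_PROBE (slowtan, ...) lets
   systemtap observe hits on this slow path.  */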
static double
SECTION
tanMp (double x)
{
  int p;
  double y;
  mp_no mpy;
  p = 32;
  __mptan (x, &mpy, p);
  __mp_dbl (&mpy, &y, p);
  LIBC_PROBE (slowtan, 2, &x, &y);
  return y;
}

#ifdef NO_LONG_DOUBLE
weak_alias (tan, tanl)
#endif