glibc/sysdeps/ieee754/ldbl-128/e_gammal_r.c
Joseph Myers 9755bc4686 Use round functions not __round functions in glibc libm.
Continuing the move to use, within libm, public names for libm
functions that can be inlined as built-in functions on many
architectures, this patch moves calls to __round functions to call the
corresponding round names instead, with asm redirection to __round
when the calls are not inlined.
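
As a hedged sketch of what that means in practice (include/math.h does
this via a MATH_REDIRECT macro rather than literal declarations), the
redirection amounts to:

  /* Sketch only: a call to the public name can still be expanded as a
     built-in; a call that is not inlined is emitted against the
     internal symbol given by the asm label.  */
  extern double round (double) __asm__ ("__round");

  double
  round_example (double x)
  {
    /* Either a single round instruction, or a call to __round rather
       than to the public round symbol.  */
    return round (x);
  }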

An additional complication arises in
sysdeps/ieee754/ldbl-128ibm/e_expl.c, where a call to roundl, with the
result converted to int, gets converted by the compiler to call
lroundl in the case of 32-bit long, so resulting in localplt test
failures.  It's logically correct to let the compiler make such an
optimization; an appropriate asm redirection of lroundl to __lroundl
is thus added to that file (it's not needed anywhere else).
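
A hedged illustration of that pattern (the actual code in e_expl.c
differs, but the shape is the same):

  /* Sketch only: with 32-bit long, GCC may compile the (int) roundl
     call below into a call to lroundl, so lroundl needs the same asm
     redirection to its internal name as roundl itself.  */
  extern long double roundl (long double) __asm__ ("__roundl");
  extern long int lroundl (long double) __asm__ ("__lroundl");

  int
  exponent_example (long double t)
  {
    return (int) roundl (t);
  }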

Tested for x86_64, and with build-many-glibcs.py.

	* include/math.h [!_ISOMAC && !(__FINITE_MATH_ONLY__ &&
	__FINITE_MATH_ONLY__ > 0) && !NO_MATH_REDIRECT] (round): Redirect
	using MATH_REDIRECT.
	* sysdeps/aarch64/fpu/s_round.c: Define NO_MATH_REDIRECT before
	header inclusion.
	* sysdeps/aarch64/fpu/s_roundf.c: Likewise.
	* sysdeps/ieee754/dbl-64/s_round.c: Likewise.
	* sysdeps/ieee754/dbl-64/wordsize-64/s_round.c: Likewise.
	* sysdeps/ieee754/float128/s_roundf128.c: Likewise.
	* sysdeps/ieee754/flt-32/s_roundf.c: Likewise.
	* sysdeps/ieee754/ldbl-128/s_roundl.c: Likewise.
	* sysdeps/ieee754/ldbl-96/s_roundl.c: Likewise.
	* sysdeps/powerpc/powerpc32/power4/fpu/multiarch/s_round.c: Likewise.
	* sysdeps/powerpc/powerpc32/power4/fpu/multiarch/s_roundf.c: Likewise.
	* sysdeps/powerpc/powerpc64/fpu/multiarch/s_round.c: Likewise.
	* sysdeps/powerpc/powerpc64/fpu/multiarch/s_roundf.c: Likewise.
	* sysdeps/riscv/rv64/rvd/s_round.c: Likewise.
	* sysdeps/riscv/rvf/s_roundf.c: Likewise.
	* sysdeps/ieee754/ldbl-128ibm/s_roundl.c: Likewise.
	(round): Redirect to __round.
	(__roundl): Call round instead of __round.
	* sysdeps/powerpc/fpu/math_private.h [_ARCH_PWR5X] (__round):
	Remove macro.
	[_ARCH_PWR5X] (__roundf): Likewise.
	* sysdeps/ieee754/dbl-64/e_gamma_r.c (gamma_positive): Use round
	functions instead of __round variants.
	* sysdeps/ieee754/flt-32/e_gammaf_r.c (gammaf_positive): Likewise.
	* sysdeps/ieee754/ldbl-128/e_gammal_r.c (gammal_positive):
	Likewise.
	* sysdeps/ieee754/ldbl-128ibm/e_gammal_r.c (gammal_positive):
	Likewise.
	* sysdeps/ieee754/ldbl-96/e_gammal_r.c (gammal_positive):
	Likewise.
	* sysdeps/x86/fpu/powl_helper.c (__powl_helper): Likewise.
	* sysdeps/ieee754/ldbl-128ibm/e_expl.c (lroundl): Redirect to
	__lroundl.
	(__ieee754_expl): Call roundl instead of __roundl.
2018-09-27 12:35:23 +00:00

/* Implementation of gamma function according to ISO C.
   Copyright (C) 1997-2018 Free Software Foundation, Inc.
   This file is part of the GNU C Library.
   Contributed by Ulrich Drepper <drepper@cygnus.com>, 1997 and
   Jakub Jelinek <jj@ultra.linux.cz>, 1999.

   The GNU C Library is free software; you can redistribute it and/or
   modify it under the terms of the GNU Lesser General Public
   License as published by the Free Software Foundation; either
   version 2.1 of the License, or (at your option) any later version.

   The GNU C Library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
   Lesser General Public License for more details.

   You should have received a copy of the GNU Lesser General Public
   License along with the GNU C Library; if not, see
   <http://www.gnu.org/licenses/>. */

#include <math.h>
#include <math_private.h>
#include <fenv_private.h>
#include <math-underflow.h>
#include <float.h>

/* Coefficients B_2k / 2k(2k-1) of x^-(2k-1) inside exp in Stirling's
   approximation to gamma function. */

static const _Float128 gamma_coeff[] =
  {
    L(0x1.5555555555555555555555555555p-4),
    L(-0xb.60b60b60b60b60b60b60b60b60b8p-12),
    L(0x3.4034034034034034034034034034p-12),
    L(-0x2.7027027027027027027027027028p-12),
    L(0x3.72a3c5631fe46ae1d4e700dca8f2p-12),
    L(-0x7.daac36664f1f207daac36664f1f4p-12),
    L(0x1.a41a41a41a41a41a41a41a41a41ap-8),
    L(-0x7.90a1b2c3d4e5f708192a3b4c5d7p-8),
    L(0x2.dfd2c703c0cfff430edfd2c703cp-4),
    L(-0x1.6476701181f39edbdb9ce625987dp+0),
    L(0xd.672219167002d3a7a9c886459cp+0),
    L(-0x9.cd9292e6660d55b3f712eb9e07c8p+4),
    L(0x8.911a740da740da740da740da741p+8),
    L(-0x8.d0cc570e255bf59ff6eec24b49p+12),
  };

#define NCOEFF (sizeof (gamma_coeff) / sizeof (gamma_coeff[0]))
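
/* For reference: the entries above are the Bernoulli terms
   B_2k / (2k (2k-1)); e.g. B_2 / (2 * 1) = 1/12 and
   B_4 / (4 * 3) = -1/360 match the first two coefficients.  They feed
   the Stirling series
     log gamma (x) ~= (x - 1/2) log x - x + log (2 pi) / 2
                      + sum_{k >= 1} B_2k / (2k (2k-1) x^(2k-1)),
   which gammal_positive evaluates below by Horner's rule in 1/x^2.  */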

/* Return gamma (X), for positive X less than 1775, in the form R *
   2^(*EXP2_ADJ), where R is the return value and *EXP2_ADJ is set to
   avoid overflow or underflow in intermediate calculations. */

static _Float128
gammal_positive (_Float128 x, int *exp2_adj)
{
  int local_signgam;
  if (x < L(0.5))
    {
      *exp2_adj = 0;
      return __ieee754_expl (__ieee754_lgammal_r (x + 1, &local_signgam)) / x;
    }
  else if (x <= L(1.5))
    {
      *exp2_adj = 0;
      return __ieee754_expl (__ieee754_lgammal_r (x, &local_signgam));
    }
  else if (x < L(12.5))
    {
      /* Adjust into the range for using exp (lgamma). */
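      /* gamma (X) = gamma (X_ADJ) * X_ADJ * (X_ADJ + 1)
         * ... * (X_ADJ + N - 1); __gamma_productl is assumed here to
         return that trailing product together with its relative error
         in EPS, so multiplying exp (lgamma (X_ADJ)) by PROD * (1 + EPS)
         recovers gamma (X).  */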
      *exp2_adj = 0;
      _Float128 n = ceill (x - L(1.5));
      _Float128 x_adj = x - n;
      _Float128 eps;
      _Float128 prod = __gamma_productl (x_adj, 0, n, &eps);
      return (__ieee754_expl (__ieee754_lgammal_r (x_adj, &local_signgam))
              * prod * (1 + eps));
    }
  else
    {
      _Float128 eps = 0;
      _Float128 x_eps = 0;
      _Float128 x_adj = x;
      _Float128 prod = 1;
      if (x < 24)
        {
          /* Adjust into the range for applying Stirling's
             approximation. */
          _Float128 n = ceill (24 - x);
          x_adj = x + n;
          x_eps = (x - (x_adj - n));
          prod = __gamma_productl (x_adj - n, x_eps, n, &eps);
        }
      /* The result is now gamma (X_ADJ + X_EPS) / (PROD * (1 + EPS)).
         Compute gamma (X_ADJ + X_EPS) using Stirling's approximation,
         starting by computing pow (X_ADJ, X_ADJ) with a power of 2
         factored out. */
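      /* Concretely, writing X_ADJ = X_ADJ_MANT * 2^X_ADJ_LOG2 and
         X_ADJ = X_ADJ_INT + X_ADJ_FRAC (variables set just below),
           x_adj^x_adj = x_adj_mant^x_adj
                         * 2^(x_adj_log2 * x_adj_int)
                         * 2^(x_adj_log2 * x_adj_frac),
         and the exact power of two 2^(x_adj_log2 * x_adj_int) is the
         factor reported through *EXP2_ADJ.  */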
      _Float128 exp_adj = -eps;
      _Float128 x_adj_int = roundl (x_adj);
      _Float128 x_adj_frac = x_adj - x_adj_int;
      int x_adj_log2;
      _Float128 x_adj_mant = __frexpl (x_adj, &x_adj_log2);
      if (x_adj_mant < M_SQRT1_2l)
        {
          x_adj_log2--;
          x_adj_mant *= 2;
        }
      *exp2_adj = x_adj_log2 * (int) x_adj_int;
      _Float128 ret = (__ieee754_powl (x_adj_mant, x_adj)
                       * __ieee754_exp2l (x_adj_log2 * x_adj_frac)
                       * __ieee754_expl (-x_adj)
                       * sqrtl (2 * M_PIl / x_adj)
                       / prod);
      exp_adj += x_eps * __ieee754_logl (x_adj);
      _Float128 bsum = gamma_coeff[NCOEFF - 1];
      _Float128 x_adj2 = x_adj * x_adj;
      for (size_t i = 1; i <= NCOEFF - 1; i++)
        bsum = bsum / x_adj2 + gamma_coeff[NCOEFF - 1 - i];
      exp_adj += bsum / x_adj;
      return ret + ret * __expm1l (exp_adj);
    }
}

_Float128
__ieee754_gammal_r (_Float128 x, int *signgamp)
{
  int64_t hx;
  uint64_t lx;
  _Float128 ret;

  GET_LDOUBLE_WORDS64 (hx, lx, x);

  if (((hx & 0x7fffffffffffffffLL) | lx) == 0)
    {
      /* Return value for x == 0 is Inf with divide by zero exception. */
      *signgamp = 0;
      return 1.0 / x;
    }
  if (hx < 0 && (uint64_t) hx < 0xffff000000000000ULL && rintl (x) == x)
    {
      /* Return value for integer x < 0 is NaN with invalid exception. */
      *signgamp = 0;
      return (x - x) / (x - x);
    }
  if (hx == 0xffff000000000000ULL && lx == 0)
    {
      /* x == -Inf. According to ISO this is NaN. */
      *signgamp = 0;
      return x - x;
    }
  if ((hx & 0x7fff000000000000ULL) == 0x7fff000000000000ULL)
    {
      /* Positive infinity (return positive infinity) or NaN (return
         NaN). */
      *signgamp = 0;
      return x + x;
    }

  if (x >= 1756)
    {
      /* Overflow. */
      *signgamp = 0;
      return LDBL_MAX * LDBL_MAX;
    }
  else
    {
      SET_RESTORE_ROUNDL (FE_TONEAREST);
      if (x > 0)
        {
          *signgamp = 0;
          int exp2_adj;
          ret = gammal_positive (x, &exp2_adj);
          ret = __scalbnl (ret, exp2_adj);
        }
      else if (x >= -LDBL_EPSILON / 4)
        {
          *signgamp = 0;
          ret = 1 / x;
        }
      else
        {
          _Float128 tx = truncl (x);
          *signgamp = (tx == 2 * truncl (tx / 2)) ? -1 : 1;
          if (x <= -1775)
            /* Underflow. */
            ret = LDBL_MIN * LDBL_MIN;
          else
            {
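              /* Use the reflection formula: gamma (x) * gamma (1 - x)
                 = pi / sin (pi * x) and gamma (1 - x) = -x * gamma (-x),
                 so |gamma (x)| = pi / (-x * |sin (pi * x)| * gamma (-x));
                 the sign has already been stored in *SIGNGAMP above.  */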
              _Float128 frac = tx - x;
              if (frac > L(0.5))
                frac = 1 - frac;
              _Float128 sinpix = (frac <= L(0.25)
                                  ? __sinl (M_PIl * frac)
                                  : __cosl (M_PIl * (L(0.5) - frac)));
              int exp2_adj;
              ret = M_PIl / (-x * sinpix
                             * gammal_positive (-x, &exp2_adj));
              ret = __scalbnl (ret, -exp2_adj);
              math_check_force_underflow_nonneg (ret);
            }
        }
    }
  if (isinf (ret) && x != 0)
    {
      if (*signgamp < 0)
        return -(-__copysignl (LDBL_MAX, ret) * LDBL_MAX);
      else
        return __copysignl (LDBL_MAX, ret) * LDBL_MAX;
    }
  else if (ret == 0)
    {
      if (*signgamp < 0)
        return -(-__copysignl (LDBL_MIN, ret) * LDBL_MIN);
      else
        return __copysignl (LDBL_MIN, ret) * LDBL_MIN;
    }
  else
    return ret;
}
strong_alias (__ieee754_gammal_r, __gammal_r_finite)