glibc/math/math-narrow.h
Commit 70e2ba332f by Joseph Myers (2018-09-03): Do not include fenv_private.h in math_private.h.

Continuing the clean-up related to the catch-all math_private.h
header, this patch stops math_private.h from including fenv_private.h.
Instead, fenv_private.h is included directly from those users of
math_private.h that also used interfaces from fenv_private.h.  No
attempt is made to remove unused includes of math_private.h, but that
is a natural follow-up.
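
As a concrete illustration of the new include pattern (the file and
function names below are hypothetical, not taken from this patch), an
internal implementation that uses the SET_RESTORE_ROUND machinery from
fenv_private.h now includes that header itself rather than getting it
indirectly through math_private.h:

    #include <math.h>
    #include <math_private.h>
    #include <fenv_private.h>  /* No longer pulled in by math_private.h.  */

    double
    example_mul_to_nearest (double x, double y)
    {
      double r;
      SET_RESTORE_ROUND (FE_TONEAREST);
      r = x * y;
      return r;
    }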

(However, since math_private.h sometimes defines optimized versions of
math.h interfaces or __* variants thereof, as well as defining its own
interfaces, I think it might make sense to get all those optimized
versions included from include/math.h, not requiring a separate header
at all, before eliminating unused math_private.h includes; that
avoids a file quietly becoming less-optimized if someone adds a call
to one of those interfaces without restoring a math_private.h include
to that file.)

There is still a pitfall: if code uses plain fe* and __fe*
interfaces but only includes fenv.h, and not fenv_private.h or
(before this patch) math_private.h, it will compile on platforms with
exceptions and rounding modes, but on platforms without exception and
rounding mode support it will not get the optimized versions and may
not even compile.  This makes it easy to break the build for such
platforms accidentally.

I think it would be most natural to move the inlines / macros for fe*
and __fe* in the case of no exceptions and rounding modes into
include/fenv.h, so that all code including fenv.h with _ISOMAC not
defined automatically gets them.  Then fenv_private.h would be purely
the header for the libc_fe*, SET_RESTORE_ROUND etc. internal
interfaces, and a missing fenv_private.h include would be much less
likely to break the build on platforms other than the one you tested
on (and there would be some unused fenv_private.h includes to remove
along with unused math_private.h includes).

Tested for x86_64 and x86, and tested with build-many-glibcs.py that
installed stripped shared libraries are unchanged by this patch.

	* sysdeps/generic/math_private.h: Do not include <fenv_private.h>.
	* math/fromfp.h: Include <fenv_private.h>.
	* math/math-narrow.h: Likewise.
	* math/s_cexp_template.c: Likewise.
	* math/s_csin_template.c: Likewise.
	* math/s_csinh_template.c: Likewise.
	* math/s_ctan_template.c: Likewise.
	* math/s_ctanh_template.c: Likewise.
	* math/s_iseqsig_template.c: Likewise.
	* math/w_acos_compat.c: Likewise.
	* math/w_acosf_compat.c: Likewise.
	* math/w_acosl_compat.c: Likewise.
	* math/w_asin_compat.c: Likewise.
	* math/w_asinf_compat.c: Likewise.
	* math/w_asinl_compat.c: Likewise.
	* math/w_ilogb_template.c: Likewise.
	* math/w_j0_compat.c: Likewise.
	* math/w_j0f_compat.c: Likewise.
	* math/w_j0l_compat.c: Likewise.
	* math/w_j1_compat.c: Likewise.
	* math/w_j1f_compat.c: Likewise.
	* math/w_j1l_compat.c: Likewise.
	* math/w_jn_compat.c: Likewise.
	* math/w_jnf_compat.c: Likewise.
	* math/w_llogb_template.c: Likewise.
	* math/w_log10_compat.c: Likewise.
	* math/w_log10f_compat.c: Likewise.
	* math/w_log10l_compat.c: Likewise.
	* math/w_log2_compat.c: Likewise.
	* math/w_log2f_compat.c: Likewise.
	* math/w_log2l_compat.c: Likewise.
	* math/w_log_compat.c: Likewise.
	* math/w_logf_compat.c: Likewise.
	* math/w_logl_compat.c: Likewise.
	* sysdeps/aarch64/fpu/feholdexcpt.c: Likewise.
	* sysdeps/aarch64/fpu/fesetround.c: Likewise.
	* sysdeps/aarch64/fpu/fgetexcptflg.c: Likewise.
	* sysdeps/aarch64/fpu/ftestexcept.c: Likewise.
	* sysdeps/ieee754/dbl-64/e_atan2.c: Likewise.
	* sysdeps/ieee754/dbl-64/e_exp.c: Likewise.
	* sysdeps/ieee754/dbl-64/e_exp2.c: Likewise.
	* sysdeps/ieee754/dbl-64/e_gamma_r.c: Likewise.
	* sysdeps/ieee754/dbl-64/e_jn.c: Likewise.
	* sysdeps/ieee754/dbl-64/e_pow.c: Likewise.
	* sysdeps/ieee754/dbl-64/e_remainder.c: Likewise.
	* sysdeps/ieee754/dbl-64/e_sqrt.c: Likewise.
	* sysdeps/ieee754/dbl-64/gamma_product.c: Likewise.
	* sysdeps/ieee754/dbl-64/lgamma_neg.c: Likewise.
	* sysdeps/ieee754/dbl-64/s_atan.c: Likewise.
	* sysdeps/ieee754/dbl-64/s_fma.c: Likewise.
	* sysdeps/ieee754/dbl-64/s_fmaf.c: Likewise.
	* sysdeps/ieee754/dbl-64/s_llrint.c: Likewise.
	* sysdeps/ieee754/dbl-64/s_llround.c: Likewise.
	* sysdeps/ieee754/dbl-64/s_lrint.c: Likewise.
	* sysdeps/ieee754/dbl-64/s_lround.c: Likewise.
	* sysdeps/ieee754/dbl-64/s_nearbyint.c: Likewise.
	* sysdeps/ieee754/dbl-64/s_sin.c: Likewise.
	* sysdeps/ieee754/dbl-64/s_sincos.c: Likewise.
	* sysdeps/ieee754/dbl-64/s_tan.c: Likewise.
	* sysdeps/ieee754/dbl-64/wordsize-64/s_lround.c: Likewise.
	* sysdeps/ieee754/dbl-64/wordsize-64/s_nearbyint.c: Likewise.
	* sysdeps/ieee754/dbl-64/x2y2m1.c: Likewise.
	* sysdeps/ieee754/float128/float128_private.h: Likewise.
	* sysdeps/ieee754/flt-32/e_gammaf_r.c: Likewise.
	* sysdeps/ieee754/flt-32/e_j1f.c: Likewise.
	* sysdeps/ieee754/flt-32/e_jnf.c: Likewise.
	* sysdeps/ieee754/flt-32/lgamma_negf.c: Likewise.
	* sysdeps/ieee754/flt-32/s_llrintf.c: Likewise.
	* sysdeps/ieee754/flt-32/s_llroundf.c: Likewise.
	* sysdeps/ieee754/flt-32/s_lrintf.c: Likewise.
	* sysdeps/ieee754/flt-32/s_lroundf.c: Likewise.
	* sysdeps/ieee754/flt-32/s_nearbyintf.c: Likewise.
	* sysdeps/ieee754/k_standardl.c: Likewise.
	* sysdeps/ieee754/ldbl-128/e_expl.c: Likewise.
	* sysdeps/ieee754/ldbl-128/e_gammal_r.c: Likewise.
	* sysdeps/ieee754/ldbl-128/e_j1l.c: Likewise.
	* sysdeps/ieee754/ldbl-128/e_jnl.c: Likewise.
	* sysdeps/ieee754/ldbl-128/gamma_productl.c: Likewise.
	* sysdeps/ieee754/ldbl-128/lgamma_negl.c: Likewise.
	* sysdeps/ieee754/ldbl-128/s_fmal.c: Likewise.
	* sysdeps/ieee754/ldbl-128/s_llrintl.c: Likewise.
	* sysdeps/ieee754/ldbl-128/s_llroundl.c: Likewise.
	* sysdeps/ieee754/ldbl-128/s_lrintl.c: Likewise.
	* sysdeps/ieee754/ldbl-128/s_lroundl.c: Likewise.
	* sysdeps/ieee754/ldbl-128/s_nearbyintl.c: Likewise.
	* sysdeps/ieee754/ldbl-128/x2y2m1l.c: Likewise.
	* sysdeps/ieee754/ldbl-128ibm/e_expl.c: Likewise.
	* sysdeps/ieee754/ldbl-128ibm/e_gammal_r.c: Likewise.
	* sysdeps/ieee754/ldbl-128ibm/e_j1l.c: Likewise.
	* sysdeps/ieee754/ldbl-128ibm/e_jnl.c: Likewise.
	* sysdeps/ieee754/ldbl-128ibm/lgamma_negl.c: Likewise.
	* sysdeps/ieee754/ldbl-128ibm/s_fmal.c: Likewise.
	* sysdeps/ieee754/ldbl-128ibm/s_llrintl.c: Likewise.
	* sysdeps/ieee754/ldbl-128ibm/s_llroundl.c: Likewise.
	* sysdeps/ieee754/ldbl-128ibm/s_lrintl.c: Likewise.
	* sysdeps/ieee754/ldbl-128ibm/s_lroundl.c: Likewise.
	* sysdeps/ieee754/ldbl-128ibm/s_rintl.c: Likewise.
	* sysdeps/ieee754/ldbl-128ibm/x2y2m1l.c: Likewise.
	* sysdeps/ieee754/ldbl-96/e_gammal_r.c: Likewise.
	* sysdeps/ieee754/ldbl-96/e_jnl.c: Likewise.
	* sysdeps/ieee754/ldbl-96/gamma_productl.c: Likewise.
	* sysdeps/ieee754/ldbl-96/lgamma_negl.c: Likewise.
	* sysdeps/ieee754/ldbl-96/s_fma.c: Likewise.
	* sysdeps/ieee754/ldbl-96/s_fmal.c: Likewise.
	* sysdeps/ieee754/ldbl-96/s_llrintl.c: Likewise.
	* sysdeps/ieee754/ldbl-96/s_llroundl.c: Likewise.
	* sysdeps/ieee754/ldbl-96/s_lrintl.c: Likewise.
	* sysdeps/ieee754/ldbl-96/s_lroundl.c: Likewise.
	* sysdeps/ieee754/ldbl-96/x2y2m1l.c: Likewise.
	* sysdeps/powerpc/fpu/e_sqrt.c: Likewise.
	* sysdeps/powerpc/fpu/e_sqrtf.c: Likewise.
	* sysdeps/riscv/rv64/rvd/s_ceil.c: Likewise.
	* sysdeps/riscv/rv64/rvd/s_floor.c: Likewise.
	* sysdeps/riscv/rv64/rvd/s_nearbyint.c: Likewise.
	* sysdeps/riscv/rv64/rvd/s_round.c: Likewise.
	* sysdeps/riscv/rv64/rvd/s_roundeven.c: Likewise.
	* sysdeps/riscv/rv64/rvd/s_trunc.c: Likewise.
	* sysdeps/riscv/rvd/s_finite.c: Likewise.
	* sysdeps/riscv/rvd/s_fmax.c: Likewise.
	* sysdeps/riscv/rvd/s_fmin.c: Likewise.
	* sysdeps/riscv/rvd/s_fpclassify.c: Likewise.
	* sysdeps/riscv/rvd/s_isinf.c: Likewise.
	* sysdeps/riscv/rvd/s_isnan.c: Likewise.
	* sysdeps/riscv/rvd/s_issignaling.c: Likewise.
	* sysdeps/riscv/rvf/fegetround.c: Likewise.
	* sysdeps/riscv/rvf/feholdexcpt.c: Likewise.
	* sysdeps/riscv/rvf/fesetenv.c: Likewise.
	* sysdeps/riscv/rvf/fesetround.c: Likewise.
	* sysdeps/riscv/rvf/feupdateenv.c: Likewise.
	* sysdeps/riscv/rvf/fgetexcptflg.c: Likewise.
	* sysdeps/riscv/rvf/ftestexcept.c: Likewise.
	* sysdeps/riscv/rvf/s_ceilf.c: Likewise.
	* sysdeps/riscv/rvf/s_finitef.c: Likewise.
	* sysdeps/riscv/rvf/s_floorf.c: Likewise.
	* sysdeps/riscv/rvf/s_fmaxf.c: Likewise.
	* sysdeps/riscv/rvf/s_fminf.c: Likewise.
	* sysdeps/riscv/rvf/s_fpclassifyf.c: Likewise.
	* sysdeps/riscv/rvf/s_isinff.c: Likewise.
	* sysdeps/riscv/rvf/s_isnanf.c: Likewise.
	* sysdeps/riscv/rvf/s_issignalingf.c: Likewise.
	* sysdeps/riscv/rvf/s_nearbyintf.c: Likewise.
	* sysdeps/riscv/rvf/s_roundevenf.c: Likewise.
	* sysdeps/riscv/rvf/s_roundf.c: Likewise.
	* sysdeps/riscv/rvf/s_truncf.c: Likewise.

/* Helper macros for functions returning a narrower type.
   Copyright (C) 2018 Free Software Foundation, Inc.
   This file is part of the GNU C Library.

   The GNU C Library is free software; you can redistribute it and/or
   modify it under the terms of the GNU Lesser General Public
   License as published by the Free Software Foundation; either
   version 2.1 of the License, or (at your option) any later version.

   The GNU C Library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   Lesser General Public License for more details.

   You should have received a copy of the GNU Lesser General Public
   License along with the GNU C Library; if not, see
   <http://www.gnu.org/licenses/>.  */

#ifndef _MATH_NARROW_H
#define _MATH_NARROW_H 1

#include <bits/floatn.h>
#include <bits/long-double.h>
#include <errno.h>
#include <fenv.h>
#include <ieee754.h>
#include <math-barriers.h>
#include <math_private.h>
#include <fenv_private.h>

/* Carry out a computation using round-to-odd.  The computation is
   EXPR; the union type in which to store the result is UNION and the
   subfield of the "ieee" field of that union with the low part of the
   mantissa is MANTISSA; SUFFIX is the suffix for the libc_fe* macros
   to ensure that the correct rounding mode is used, for platforms
   with multiple rounding modes where those macros set only the
   relevant mode.  This macro does not work correctly if the sign of
   an exact zero result depends on the rounding mode, so that case
   must be checked for separately.  */
#define ROUND_TO_ODD(EXPR, UNION, SUFFIX, MANTISSA) \
  ({ \
    fenv_t env; \
    UNION u; \
    \
    libc_feholdexcept_setround ## SUFFIX (&env, FE_TOWARDZERO); \
    u.d = (EXPR); \
    math_force_eval (u.d); \
    u.ieee.MANTISSA \
      |= libc_feupdateenv_test ## SUFFIX (&env, FE_INEXACT) != 0; \
    \
    u.d; \
  })
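
/* Illustrative sketch (added for exposition, not part of the original
   header): the same round-to-odd technique expressed with only the
   public <fenv.h> interfaces, narrowing a double sum to float.  It is
   guarded out so that this header continues to define only macros.  */
#if 0
# include <fenv.h>
# include <stdint.h>
# include <string.h>

static float
fadd_round_to_odd_sketch (double x, double y)
{
  fenv_t env;

  /* Save the environment, then compute the sum truncated toward zero,
     so any rounding error shows up as the inexact flag.  */
  feholdexcept (&env);
  fesetround (FE_TOWARDZERO);
  double d = x + y;

  /* Round-to-odd: if the truncated result was inexact, force the low
     mantissa bit on so the final narrowing conversion cannot
     double-round.  */
  uint64_t bits;
  memcpy (&bits, &d, sizeof bits);
  if (fetestexcept (FE_INEXACT))
    bits |= 1;
  memcpy (&d, &bits, sizeof d);

  /* Restore the caller's environment; the conversion below then
     rounds exactly once, in the caller's rounding mode.  */
  feupdateenv (&env);
  return (float) d;
}
#endif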

/* Check for error conditions from a narrowing add function returning
   RET with arguments X and Y and set errno as needed.  Overflow and
   underflow can occur for finite arguments and a domain error for
   infinite ones.  */
#define CHECK_NARROW_ADD(RET, X, Y) \
  do \
    { \
      if (!isfinite (RET)) \
        { \
          if (isnan (RET)) \
            { \
              if (!isnan (X) && !isnan (Y)) \
                __set_errno (EDOM); \
            } \
          else if (isfinite (X) && isfinite (Y)) \
            __set_errno (ERANGE); \
        } \
      else if ((RET) == 0 && (X) != -(Y)) \
        __set_errno (ERANGE); \
    } \
  while (0)

/* Implement narrowing add using round-to-odd.  The arguments are X
   and Y, the return type is TYPE and UNION, MANTISSA and SUFFIX are
   as for ROUND_TO_ODD.  */
#define NARROW_ADD_ROUND_TO_ODD(X, Y, TYPE, UNION, SUFFIX, MANTISSA) \
  do \
    { \
      TYPE ret; \
      \
      /* Ensure a zero result is computed in the original rounding \
         mode.  */ \
      if ((X) == -(Y)) \
        ret = (TYPE) ((X) + (Y)); \
      else \
        ret = (TYPE) ROUND_TO_ODD (math_opt_barrier (X) + (Y), \
                                   UNION, SUFFIX, MANTISSA); \
      \
      CHECK_NARROW_ADD (ret, (X), (Y)); \
      return ret; \
    } \
  while (0)
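
/* Illustrative usage (added for exposition; the union member and the
   empty SUFFIX argument are assumptions about the binary64 format, not
   something this header mandates): a double-to-float narrowing add
   built on the macro above looks roughly like

     float
     __fadd (double x, double y)
     {
       NARROW_ADD_ROUND_TO_ODD (x, y, float, union ieee754_double, ,
                                mantissa1);
     }
     libm_alias_float_double (add);

   where the empty SUFFIX selects the plain (double) libc_fe* macros.  */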

/* Implement a narrowing add function that is not actually narrowing
   or where no attempt is made to be correctly rounding (the latter
   only applies to IBM long double).  The arguments are X and Y and
   the return type is TYPE.  */
#define NARROW_ADD_TRIVIAL(X, Y, TYPE) \
  do \
    { \
      TYPE ret; \
      \
      ret = (TYPE) ((X) + (Y)); \
      CHECK_NARROW_ADD (ret, (X), (Y)); \
      return ret; \
    } \
  while (0)

/* Check for error conditions from a narrowing subtract function
   returning RET with arguments X and Y and set errno as needed.
   Overflow and underflow can occur for finite arguments and a domain
   error for infinite ones.  */
#define CHECK_NARROW_SUB(RET, X, Y) \
  do \
    { \
      if (!isfinite (RET)) \
        { \
          if (isnan (RET)) \
            { \
              if (!isnan (X) && !isnan (Y)) \
                __set_errno (EDOM); \
            } \
          else if (isfinite (X) && isfinite (Y)) \
            __set_errno (ERANGE); \
        } \
      else if ((RET) == 0 && (X) != (Y)) \
        __set_errno (ERANGE); \
    } \
  while (0)

/* Implement narrowing subtract using round-to-odd.  The arguments are
   X and Y, the return type is TYPE and UNION, MANTISSA and SUFFIX are
   as for ROUND_TO_ODD.  */
#define NARROW_SUB_ROUND_TO_ODD(X, Y, TYPE, UNION, SUFFIX, MANTISSA) \
  do \
    { \
      TYPE ret; \
      \
      /* Ensure a zero result is computed in the original rounding \
         mode.  */ \
      if ((X) == (Y)) \
        ret = (TYPE) ((X) - (Y)); \
      else \
        ret = (TYPE) ROUND_TO_ODD (math_opt_barrier (X) - (Y), \
                                   UNION, SUFFIX, MANTISSA); \
      \
      CHECK_NARROW_SUB (ret, (X), (Y)); \
      return ret; \
    } \
  while (0)

/* Implement a narrowing subtract function that is not actually
   narrowing or where no attempt is made to be correctly rounding (the
   latter only applies to IBM long double).  The arguments are X and Y
   and the return type is TYPE.  */
#define NARROW_SUB_TRIVIAL(X, Y, TYPE) \
  do \
    { \
      TYPE ret; \
      \
      ret = (TYPE) ((X) - (Y)); \
      CHECK_NARROW_SUB (ret, (X), (Y)); \
      return ret; \
    } \
  while (0)

/* Check for error conditions from a narrowing multiply function
   returning RET with arguments X and Y and set errno as needed.
   Overflow and underflow can occur for finite arguments and a domain
   error for Inf * 0.  */
#define CHECK_NARROW_MUL(RET, X, Y) \
  do \
    { \
      if (!isfinite (RET)) \
        { \
          if (isnan (RET)) \
            { \
              if (!isnan (X) && !isnan (Y)) \
                __set_errno (EDOM); \
            } \
          else if (isfinite (X) && isfinite (Y)) \
            __set_errno (ERANGE); \
        } \
      else if ((RET) == 0 && (X) != 0 && (Y) != 0) \
        __set_errno (ERANGE); \
    } \
  while (0)

/* Implement narrowing multiply using round-to-odd.  The arguments are
   X and Y, the return type is TYPE and UNION, MANTISSA and SUFFIX are
   as for ROUND_TO_ODD.  */
#define NARROW_MUL_ROUND_TO_ODD(X, Y, TYPE, UNION, SUFFIX, MANTISSA) \
  do \
    { \
      TYPE ret; \
      \
      ret = (TYPE) ROUND_TO_ODD (math_opt_barrier (X) * (Y), \
                                 UNION, SUFFIX, MANTISSA); \
      \
      CHECK_NARROW_MUL (ret, (X), (Y)); \
      return ret; \
    } \
  while (0)

/* Implement a narrowing multiply function that is not actually
   narrowing or where no attempt is made to be correctly rounding (the
   latter only applies to IBM long double).  The arguments are X and Y
   and the return type is TYPE.  */
#define NARROW_MUL_TRIVIAL(X, Y, TYPE) \
  do \
    { \
      TYPE ret; \
      \
      ret = (TYPE) ((X) * (Y)); \
      CHECK_NARROW_MUL (ret, (X), (Y)); \
      return ret; \
    } \
  while (0)

/* Check for error conditions from a narrowing divide function
   returning RET with arguments X and Y and set errno as needed.
   Overflow, underflow and divide-by-zero can occur for finite
   arguments and a domain error for Inf / Inf and 0 / 0.  */
#define CHECK_NARROW_DIV(RET, X, Y) \
  do \
    { \
      if (!isfinite (RET)) \
        { \
          if (isnan (RET)) \
            { \
              if (!isnan (X) && !isnan (Y)) \
                __set_errno (EDOM); \
            } \
          else if (isfinite (X)) \
            __set_errno (ERANGE); \
        } \
      else if ((RET) == 0 && (X) != 0 && !isinf (Y)) \
        __set_errno (ERANGE); \
    } \
  while (0)

/* Implement narrowing divide using round-to-odd.  The arguments are
   X and Y, the return type is TYPE and UNION, MANTISSA and SUFFIX are
   as for ROUND_TO_ODD.  */
#define NARROW_DIV_ROUND_TO_ODD(X, Y, TYPE, UNION, SUFFIX, MANTISSA) \
  do \
    { \
      TYPE ret; \
      \
      ret = (TYPE) ROUND_TO_ODD (math_opt_barrier (X) / (Y), \
                                 UNION, SUFFIX, MANTISSA); \
      \
      CHECK_NARROW_DIV (ret, (X), (Y)); \
      return ret; \
    } \
  while (0)

/* Implement a narrowing divide function that is not actually
   narrowing or where no attempt is made to be correctly rounding (the
   latter only applies to IBM long double).  The arguments are X and Y
   and the return type is TYPE.  */
#define NARROW_DIV_TRIVIAL(X, Y, TYPE) \
  do \
    { \
      TYPE ret; \
      \
      ret = (TYPE) ((X) / (Y)); \
      CHECK_NARROW_DIV (ret, (X), (Y)); \
      return ret; \
    } \
  while (0)

/* The following macros declare aliases for a narrowing function.  The
   sole argument is the base name of a family of functions, such as
   "add".  If any platform changes long double format after the
   introduction of narrowing functions, in a way requiring symbol
   versioning compatibility, additional variants of these macros will
   be needed.  */

#define libm_alias_float_double_main(func) \
  weak_alias (__f ## func, f ## func) \
  weak_alias (__f ## func, f32 ## func ## f64) \
  weak_alias (__f ## func, f32 ## func ## f32x)

#ifdef NO_LONG_DOUBLE
# define libm_alias_float_double(func) \
  libm_alias_float_double_main (func) \
  weak_alias (__f ## func, f ## func ## l)
#else
# define libm_alias_float_double(func) \
  libm_alias_float_double_main (func)
#endif
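
/* Illustrative expansion (added for exposition): with func == add, on
   a configuration where NO_LONG_DOUBLE is not defined,
   libm_alias_float_double (add) produces roughly

     weak_alias (__fadd, fadd)
     weak_alias (__fadd, f32addf64)
     weak_alias (__fadd, f32addf32x)

   so a single __fadd implementation also serves the corresponding
   _Float32 / _Float64 narrowing names.  */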

#define libm_alias_float32x_float64_main(func) \
  weak_alias (__f32x ## func ## f64, f32x ## func ## f64)

#ifdef NO_LONG_DOUBLE
# define libm_alias_float32x_float64(func) \
  libm_alias_float32x_float64_main (func) \
  weak_alias (__f32x ## func ## f64, d ## func ## l)
#elif defined __LONG_DOUBLE_MATH_OPTIONAL
# define libm_alias_float32x_float64(func) \
  libm_alias_float32x_float64_main (func) \
  weak_alias (__f32x ## func ## f64, __nldbl_d ## func ## l)
#else
# define libm_alias_float32x_float64(func) \
  libm_alias_float32x_float64_main (func)
#endif

#if __HAVE_FLOAT128 && !__HAVE_DISTINCT_FLOAT128
# define libm_alias_float_ldouble_f128(func) \
  weak_alias (__f ## func ## l, f32 ## func ## f128)
# define libm_alias_double_ldouble_f128(func) \
  weak_alias (__d ## func ## l, f32x ## func ## f128) \
  weak_alias (__d ## func ## l, f64 ## func ## f128)
#else
# define libm_alias_float_ldouble_f128(func)
# define libm_alias_double_ldouble_f128(func)
#endif

#if __HAVE_FLOAT64X_LONG_DOUBLE
# define libm_alias_float_ldouble_f64x(func) \
  weak_alias (__f ## func ## l, f32 ## func ## f64x)
# define libm_alias_double_ldouble_f64x(func) \
  weak_alias (__d ## func ## l, f32x ## func ## f64x) \
  weak_alias (__d ## func ## l, f64 ## func ## f64x)
#else
# define libm_alias_float_ldouble_f64x(func)
# define libm_alias_double_ldouble_f64x(func)
#endif

#define libm_alias_float_ldouble(func) \
  weak_alias (__f ## func ## l, f ## func ## l) \
  libm_alias_float_ldouble_f128 (func) \
  libm_alias_float_ldouble_f64x (func)

#define libm_alias_double_ldouble(func) \
  weak_alias (__d ## func ## l, d ## func ## l) \
  libm_alias_double_ldouble_f128 (func) \
  libm_alias_double_ldouble_f64x (func)
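
/* Illustrative expansion (an assumption about a configuration such as
   x86_64, where _Float64x has the long double format and _Float128 is
   a distinct type): libm_alias_double_ldouble (add) produces roughly

     weak_alias (__daddl, daddl)
     weak_alias (__daddl, f32xaddf64x)
     weak_alias (__daddl, f64addf64x)

   while the f128 aliases above are only generated on configurations
   where _Float128 is not a distinct type from long double.  */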

#define libm_alias_float64x_float128(func) \
  weak_alias (__f64x ## func ## f128, f64x ## func ## f128)

#define libm_alias_float32_float128_main(func) \
  weak_alias (__f32 ## func ## f128, f32 ## func ## f128)

#define libm_alias_float64_float128_main(func) \
  weak_alias (__f64 ## func ## f128, f64 ## func ## f128) \
  weak_alias (__f64 ## func ## f128, f32x ## func ## f128)

#if __HAVE_FLOAT64X_LONG_DOUBLE
# define libm_alias_float32_float128(func) \
  libm_alias_float32_float128_main (func)
# define libm_alias_float64_float128(func) \
  libm_alias_float64_float128_main (func)
#else
# define libm_alias_float32_float128(func) \
  libm_alias_float32_float128_main (func) \
  weak_alias (__f32 ## func ## f128, f32 ## func ## f64x)
# define libm_alias_float64_float128(func) \
  libm_alias_float64_float128_main (func) \
  weak_alias (__f64 ## func ## f128, f64 ## func ## f64x) \
  weak_alias (__f64 ## func ## f128, f32x ## func ## f64x)
#endif

#endif /* math-narrow.h.  */