1996-03-05 21:41:30 +00:00
|
|
|
/*
|
|
|
|
* ====================================================
|
|
|
|
* Copyright (C) 1993 by Sun Microsystems, Inc. All rights reserved.
|
|
|
|
*
|
|
|
|
* Developed at SunPro, a Sun Microsystems, Inc. business.
|
|
|
|
* Permission to use, copy, modify, and distribute this
|
|
|
|
* software is freely granted, provided that this notice
|
|
|
|
* is preserved.
|
|
|
|
* ====================================================
|
|
|
|
*/
|
|
|
|
|
|
|
|
/*
|
|
|
|
* from: @(#)fdlibm.h 5.1 93/09/24
|
|
|
|
*/
|
|
|
|
|
|
|
|
#ifndef _MATH_PRIVATE_H_
|
|
|
|
#define _MATH_PRIVATE_H_
|
|
|
|
|
1999-07-14 00:54:57 +00:00
|
|
|
#include <endian.h>
|
2009-08-25 01:05:48 +00:00
|
|
|
#include <stdint.h>
|
1996-03-05 21:41:30 +00:00
|
|
|
#include <sys/types.h>
|
2012-03-09 20:51:27 +00:00
|
|
|
#include <fenv.h>
|
2015-09-18 20:00:48 +00:00
|
|
|
#include <float.h>
|
2014-06-23 16:15:41 +00:00
|
|
|
#include <get-rounding-mode.h>
|
1996-03-05 21:41:30 +00:00
|
|
|
|
2017-05-04 20:00:33 +00:00
|
|
|
/* Gather machine dependent _Floatn support. */
|
|
|
|
#include <bits/floatn.h>
|
|
|
|
|
1996-03-05 21:41:30 +00:00
|
|
|
/* The original fdlibm code used statements like:
|
|
|
|
n0 = ((*(int*)&one)>>29)^1; * index of high word *
|
|
|
|
ix0 = *(n0+(int*)&x); * high word of x *
|
|
|
|
ix1 = *((1-n0)+(int*)&x); * low word of x *
|
|
|
|
to dig two 32 bit words out of the 64 bit IEEE floating point
|
|
|
|
value. That is non-ANSI, and, moreover, the gcc instruction
|
|
|
|
scheduler gets it wrong. We instead use the following macros.
|
|
|
|
Unlike the original code, we determine the endianness at compile
|
|
|
|
time, not at run time; I don't see much benefit to selecting
|
|
|
|
endianness at run time. */
|
|
|
|
|
|
|
|
/* A union which permits us to convert between a double and two 32 bit
|
|
|
|
ints. */
|
|
|
|
|
2016-11-21 01:46:30 +00:00
|
|
|
#if __FLOAT_WORD_ORDER == __BIG_ENDIAN

/* Big-endian layout: the more significant 32-bit word of a double is
   first in memory.  The union lets us type-pun between the double,
   its two halves, and the whole 64-bit pattern without aliasing UB.  */
typedef union
{
  double value;
  struct
  {
    uint32_t msw;
    uint32_t lsw;
  } parts;
  uint64_t word;
} ieee_double_shape_type;

#endif
|
|
|
|
|
2016-11-21 01:46:30 +00:00
|
|
|
#if __FLOAT_WORD_ORDER == __LITTLE_ENDIAN

/* Little-endian layout: the less significant 32-bit word of a double
   is first in memory.  The union lets us type-pun between the double,
   its two halves, and the whole 64-bit pattern without aliasing UB.  */
typedef union
{
  double value;
  struct
  {
    uint32_t lsw;
    uint32_t msw;
  } parts;
  uint64_t word;
} ieee_double_shape_type;

#endif
|
|
|
|
|
|
|
|
/* Get two 32 bit ints from a double. */
|
|
|
|
|
|
|
|
/* Round-trip through the union so the pun is well defined.  */
#define EXTRACT_WORDS(ix0,ix1,d)				\
do {								\
  ieee_double_shape_type ew_u;					\
  ew_u.value = (d);						\
  (ix0) = ew_u.parts.msw;					\
  (ix1) = ew_u.parts.lsw;					\
} while (0)
|
|
|
|
|
|
|
|
/* Get the more significant 32 bit int from a double. */
|
|
|
|
|
2012-03-09 20:38:23 +00:00
|
|
|
#ifndef GET_HIGH_WORD
/* Default definition; machine-specific files may pre-define a
   cheaper version.  */
# define GET_HIGH_WORD(i,d)			\
do {						\
  ieee_double_shape_type gh_u;			\
  gh_u.value = (d);				\
  (i) = gh_u.parts.msw;				\
} while (0)
#endif
|
1996-03-05 21:41:30 +00:00
|
|
|
|
|
|
|
/* Get the less significant 32 bit int from a double. */
|
|
|
|
|
2012-03-09 20:38:23 +00:00
|
|
|
#ifndef GET_LOW_WORD
/* Default definition; machine-specific files may pre-define a
   cheaper version.  */
# define GET_LOW_WORD(i,d)			\
do {						\
  ieee_double_shape_type gl_u;			\
  gl_u.value = (d);				\
  (i) = gl_u.parts.lsw;				\
} while (0)
#endif
|
1996-03-05 21:41:30 +00:00
|
|
|
|
2009-08-25 01:05:48 +00:00
|
|
|
/* Get all in one, efficient on 64-bit machines. */
|
2012-03-09 20:38:23 +00:00
|
|
|
#ifndef EXTRACT_WORDS64
/* Get all 64 bits at once; efficient on 64-bit machines.  */
# define EXTRACT_WORDS64(i,d)			\
do {						\
  ieee_double_shape_type gh_u;			\
  gh_u.value = (d);				\
  (i) = gh_u.word;				\
} while (0)
#endif
|
2009-08-25 01:05:48 +00:00
|
|
|
|
1996-03-05 21:41:30 +00:00
|
|
|
/* Set a double from two 32 bit ints. */
|
2012-03-09 20:38:23 +00:00
|
|
|
#ifndef INSERT_WORDS
/* Set a double from two 32 bit ints (high word first).  */
# define INSERT_WORDS(d,ix0,ix1)		\
do {						\
  ieee_double_shape_type iw_u;			\
  iw_u.parts.msw = (ix0);			\
  iw_u.parts.lsw = (ix1);			\
  (d) = iw_u.value;				\
} while (0)
#endif
|
1996-03-05 21:41:30 +00:00
|
|
|
|
2009-08-25 01:05:48 +00:00
|
|
|
/* Get all in one, efficient on 64-bit machines. */
|
2012-03-09 20:38:23 +00:00
|
|
|
#ifndef INSERT_WORDS64
/* Set a double from one 64 bit int; efficient on 64-bit machines.  */
# define INSERT_WORDS64(d,i)			\
do {						\
  ieee_double_shape_type iw_u;			\
  iw_u.word = (i);				\
  (d) = iw_u.value;				\
} while (0)
#endif
|
2009-08-25 01:05:48 +00:00
|
|
|
|
1996-03-05 21:41:30 +00:00
|
|
|
/* Set the more significant 32 bits of a double from an int. */
|
2012-03-09 20:38:23 +00:00
|
|
|
#ifndef SET_HIGH_WORD
/* Replace only the more significant 32 bits of D, preserving the low
   word.  (Indentation of the define normalized to match the other
   overridable macros in this header.)  */
# define SET_HIGH_WORD(d,v)			\
do {						\
  ieee_double_shape_type sh_u;			\
  sh_u.value = (d);				\
  sh_u.parts.msw = (v);				\
  (d) = sh_u.value;				\
} while (0)
#endif
|
1996-03-05 21:41:30 +00:00
|
|
|
|
|
|
|
/* Set the less significant 32 bits of a double from an int. */
|
2012-03-09 20:38:23 +00:00
|
|
|
#ifndef SET_LOW_WORD
/* Replace only the less significant 32 bits of D, preserving the high
   word.  */
# define SET_LOW_WORD(d,v)			\
do {						\
  ieee_double_shape_type sl_u;			\
  sl_u.value = (d);				\
  sl_u.parts.lsw = (v);				\
  (d) = sl_u.value;				\
} while (0)
#endif
|
1996-03-05 21:41:30 +00:00
|
|
|
|
|
|
|
/* A union which permits us to convert between a float and a 32 bit
|
|
|
|
int. */
|
|
|
|
|
|
|
|
/* Pun between a float and its 32-bit representation.  */
typedef union
{
  float value;
  uint32_t word;
} ieee_float_shape_type;
|
|
|
|
|
|
|
|
/* Get a 32 bit int from a float. */
|
2012-03-09 20:38:23 +00:00
|
|
|
#ifndef GET_FLOAT_WORD
/* Get a 32 bit int from a float.  */
# define GET_FLOAT_WORD(i,d)			\
do {						\
  ieee_float_shape_type gf_u;			\
  gf_u.value = (d);				\
  (i) = gf_u.word;				\
} while (0)
#endif
|
1996-03-05 21:41:30 +00:00
|
|
|
|
|
|
|
/* Set a float from a 32 bit int. */
|
2012-03-09 20:38:23 +00:00
|
|
|
#ifndef SET_FLOAT_WORD
/* Set a float from a 32 bit int.  */
# define SET_FLOAT_WORD(d,i)			\
do {						\
  ieee_float_shape_type sf_u;			\
  sf_u.word = (i);				\
  (d) = sf_u.value;				\
} while (0)
#endif
|
1996-03-05 21:41:30 +00:00
|
|
|
|
2017-05-04 17:47:27 +00:00
|
|
|
/* We need to guarantee an expansion of name when building
   ldbl-128 files as another type (e.g _Float128).  */
#define mathx_hidden_def(name) hidden_def(name)
|
|
|
|
|
1999-07-14 00:54:57 +00:00
|
|
|
/* Get long double macros from a separate header. */
|
|
|
|
#include <math_ldbl.h>
|
Thu May 30 11:24:05 1996 Roland McGrath <roland@delasyd.gnu.ai.mit.edu>
* po/header.pot: Replace with exact boilerplate pinard dictates.
* sysdeps/i386/strtok.S (Lillegal_argument): Remove this code to set
errno and the check that jumped to it.
* sysdeps/mach/hurd/Makefile (errnos.d): Use $(sed-remove-objpfx).
Thu May 30 03:21:57 1996 Ulrich Drepper <drepper@cygnus.com>
* FAQ: Document need of gperf program for developers.
* elf/elf.h: Fix typos in comments.
* libio/stdio.h [!__STRICT_ANSI__ || _POSIX_SOURCE]: Add
prototypes for `ctermid' and `cuserid'.
* locale/programs/locale.c: Switch to user selected locale
before printing variables.
* math/Makefile [$(long-double-fcts)==yes]: Define long-m-routines
and long-c-routines. Only if the `long double' data type is
available we need to compile the functions.
(libm-routines): Add $(long-m-routines).
(routines): Remove isinfl, isnanl. Use new file s_isinfl and
s_isnanl instead if `long double' is available.
* math/math.h: Include <mathcalls.h> again to define `long double'
functions.
* math/math_private.h: Define data types, prototypes and access
macros for `long double'.
* stdlib/stdlib.h: Add prototypes for `strtoll' and `strtoull'.
[GCC2 && OPTIMIZE]: Define strto{,u}ll as inline function which
calls __strto{,u}q_internal.
* stdlib/strfmon.c: Replace PTR by `void *'.
* stdlib/strtoq.c: Define strtoll as weak alias.
* stdlib/strtouq.c: Define strtoull as weak alias.
* string/tester.c: Correct `strsep' test.
* sysdeps/generic/strsep.c: Make compatible with BSD version.
Trailing characters of skip set are not skipped. In this case
empty tokens are returned.
* sysdeps/i386/isinfl.c, sysdeps/i386/isnanl.c,
sysdeps/ieee754/isinf.c, sysdeps/ieee754/isinfl.c,
sysdeps/ieee754/isnan.c, sysdeps/ieee754/isnanl.c: Removed. We
now use the versions part of libm.
* sysdeps/i386/strsep.S: Removed. Generic C version is of
similar speed.
* sysdeps/i386/strtok.S: Remove support for `strsep'.
* sysdeps/libm-i387/e_acosl.S, sysdeps/libm-i387/s_ceill.S,
sysdeps/libm-i387/s_copysignl.S, sysdeps/libm-i387/s_finitel.S,
sysdeps/libm-i387/s_floorl.S, sysdeps/libm-i387/s_isinfl.c,
sysdeps/libm-i387/s_isnanl.c, sysdeps/libm-i387/s_nextafterl.c,
sysdeps/libm-i387/s_rintl.S, sysdeps/libm-i387/s_significandl.S:
New i387 specific math functions implementing `long double'
versions.
* sysdeps/libm-ieee754/s_ceill.c,
sysdeps/libm-ieee754/s_copysignl.c,
sysdeps/libm-ieee754/s_fabsl.c, sysdeps/libm-ieee754/s_finitel.c,
sysdeps/libm-ieee754/s_floorl.c, sysdeps/libm-ieee754/s_isinfl.c,
sysdeps/libm-ieee754/s_isnanl.c,
sysdeps/libm-ieee754/s_nextafterl.c,
sysdeps/libm-ieee754/s_rintl.c, sysdeps/libm-ieee754/s_scalbnl.c,
sysdeps/libm-ieee754/s_significandl.c: New generic `long double'
versions of libm functions.
* sysdeps/libm-i387/e_exp.S: Add a few comments to explain the
Intel FPU nonsense.
* sysdeps/libm-i387/s_ceil.S, sysdeps/libm-i387/s_ceilf.S,
sysdeps/libm-i387/s_floor.S, sysdeps/libm-i387/s_floorf.S: Correct
handling of local variables. The old version created a stack
frame but stored the values outside.
* sysdeps/libm-ieee754/s_isinf.c, sysdeps/libm-ieee754/s_isnan.c
[!NO_LONG_DOUBLE]: Define alias with `long double' versions name.
* login/pututline_r.c: Include sys/stat.h. Fix typos.
according to currently used locale for category LC_CTYPE by
inet_nsap_ntoa. Now in <arpa/inet.h>.
_IO_dup2 to contain complete parameter list.
1996-05-30 16:12:42 +00:00
|
|
|
|
2017-03-28 12:48:42 +00:00
|
|
|
/* Include function declarations for each floating-point type.
   math_private_calls.h declares the internal entry points for the
   type named by _Mdouble_ with the suffix _MSUF_.  */
#define _Mdouble_ double
#define _MSUF_
#include <math_private_calls.h>
#undef _MSUF_
#undef _Mdouble_

#define _Mdouble_ float
#define _MSUF_ f
#define __MATH_DECLARING_FLOAT
#include <math_private_calls.h>
#undef __MATH_DECLARING_FLOAT
#undef _MSUF_
#undef _Mdouble_

#define _Mdouble_ long double
#define _MSUF_ l
#define __MATH_DECLARING_LONG_DOUBLE
#include <math_private_calls.h>
#undef __MATH_DECLARING_LONG_DOUBLE
#undef _MSUF_
#undef _Mdouble_
|
1996-03-05 21:41:30 +00:00
|
|
|
|
2017-05-04 20:00:33 +00:00
|
|
|
/* Likewise for _Float128, when it is a distinct type.  */
#if __HAVE_DISTINCT_FLOAT128
# define _Mdouble_ _Float128
# define _MSUF_ f128
# define __MATH_DECLARING_FLOATN
# include <math_private_calls.h>
# undef __MATH_DECLARING_FLOATN
# undef _MSUF_
# undef _Mdouble_
#endif
|
|
|
|
|
|
|
|
#if __HAVE_DISTINCT_FLOAT128
|
|
|
|
|
|
|
|
/* __builtin_isinf_sign is broken in GCC < 7 for float128. */
|
|
|
|
# if ! __GNUC_PREREQ (7, 0)
|
|
|
|
# include <ieee754_float128.h>
|
|
|
|
/* Return 1 for +Inf, -1 for -Inf, 0 otherwise, without calling the
   (broken in GCC < 7) builtin.  Logic: after the XOR, LX is zero iff
   the magnitude bits of X exactly match the Inf pattern; "lx |= -lx"
   pushes any nonzero bit into the sign bit, so ~(lx >> 63) is all-ones
   only for Inf; hx >> 62 then supplies +-1 from the sign bit.  */
extern inline int
__isinff128 (_Float128 x)
{
  int64_t hx, lx;
  GET_FLOAT128_WORDS64 (hx, lx, x);
  lx |= (hx & 0x7fffffffffffffffLL) ^ 0x7fff000000000000LL;
  lx |= -lx;
  return ~(lx >> 63) & (hx >> 62);
}
|
|
|
|
# endif
|
|
|
|
|
|
|
|
/* Absolute value of a _Float128, via the compiler builtin.  */
extern inline _Float128
fabsf128 (_Float128 x)
{
  return __builtin_fabsf128 (x);
}
|
|
|
|
#endif
|
|
|
|
|
|
|
|
|
|
|
|
|
2001-05-12 20:15:01 +00:00
|
|
|
/* Prototypes for functions of the IBM Accurate Mathematical Library.  */
extern double __exp1 (double __x, double __xx, double __error);
extern double __sin (double __x);
extern double __cos (double __x);
extern int __branred (double __x, double *__a, double *__aa);
extern void __doasin (double __x, double __dx, double __v[]);
extern void __dubsin (double __x, double __dx, double __v[]);
extern void __dubcos (double __x, double __dx, double __v[]);
extern double __halfulp (double __x, double __y);
extern double __sin32 (double __x, double __res, double __res1);
extern double __cos32 (double __x, double __res, double __res1);
extern double __mpsin (double __x, double __dx, bool __range_reduce);
extern double __mpcos (double __x, double __dx, bool __range_reduce);
extern double __slowexp (double __x);
extern double __slowpow (double __x, double __y, double __z);
extern void __docos (double __x, double __dx, double __v[]);
|
|
|
|
|
2007-04-16 20:41:42 +00:00
|
|
|
#ifndef math_opt_barrier
/* Force X through memory so the compiler cannot const-fold it or move
   the evaluation across the barrier; needed to get FP exceptions and
   rounding-sensitive results right.  */
# define math_opt_barrier(x) \
({ __typeof (x) __x = (x); __asm ("" : "+m" (__x)); __x; })
/* Evaluate X for its side effects (exceptions) even if the value is
   otherwise unused.  */
# define math_force_eval(x) \
({ __typeof (x) __x = (x); __asm __volatile__ ("" : : "m" (__x)); })
#endif
|
|
|
|
|
2015-09-18 20:00:48 +00:00
|
|
|
/* math_narrow_eval reduces its floating-point argument to the range
   and precision of its semantic type.  (The original evaluation may
   still occur with excess range and precision, so the result may be
   affected by double rounding.)  */
#if FLT_EVAL_METHOD == 0
# define math_narrow_eval(x) (x)
#else
# if FLT_EVAL_METHOD == 1
#  define excess_precision(type) __builtin_types_compatible_p (type, float)
# else
#  define excess_precision(type) (__builtin_types_compatible_p (type, float) \
				  || __builtin_types_compatible_p (type, \
								   double))
# endif
# define math_narrow_eval(x)					\
  ({								\
    __typeof (x) math_narrow_eval_tmp = (x);			\
    if (excess_precision (__typeof (math_narrow_eval_tmp)))	\
      __asm__ ("" : "+m" (math_narrow_eval_tmp));		\
    math_narrow_eval_tmp;					\
  })
#endif
|
|
|
|
|
2016-11-10 21:41:56 +00:00
|
|
|
/* fabs of X, dispatched on X's type via __MATH_TG.  */
#define fabs_tg(x) __MATH_TG ((x), (__typeof (x)) __builtin_fabs, (x))

/* Smallest normal value of each floating type; min_of_type(x) selects
   the one matching X's type via __MATH_TG suffixing.  */
#define min_of_type_f FLT_MIN
#define min_of_type_ DBL_MIN
#define min_of_type_l LDBL_MIN
#define min_of_type_f128 FLT128_MIN

#define min_of_type(x) __MATH_TG ((x), (__typeof (x)) min_of_type_, )
|
2015-09-23 22:42:30 +00:00
|
|
|
|
|
|
|
/* If X (which is not a NaN) is subnormal, force an underflow
   exception: squaring a subnormal yields a result too small to
   represent, and math_force_eval makes sure the multiply is not
   optimized away.  */
#define math_check_force_underflow(x)				\
  do								\
    {								\
      __typeof (x) force_underflow_tmp = (x);			\
      if (fabs_tg (force_underflow_tmp)				\
	  < min_of_type (force_underflow_tmp))			\
	{							\
	  __typeof (force_underflow_tmp) force_underflow_tmp2	\
	    = force_underflow_tmp * force_underflow_tmp;	\
	  math_force_eval (force_underflow_tmp2);		\
	}							\
    }								\
  while (0)
|
|
|
|
/* Likewise, but X is also known to be nonnegative, so the fabs can be
   skipped.  */
#define math_check_force_underflow_nonneg(x)			\
  do								\
    {								\
      __typeof (x) force_underflow_tmp = (x);			\
      if (force_underflow_tmp					\
	  < min_of_type (force_underflow_tmp))			\
	{							\
	  __typeof (force_underflow_tmp) force_underflow_tmp2	\
	    = force_underflow_tmp * force_underflow_tmp;	\
	  math_force_eval (force_underflow_tmp2);		\
	}							\
    }								\
  while (0)
|
|
|
|
/* Likewise, for both real and imaginary parts of a complex
   result.  */
#define math_check_force_underflow_complex(x)				\
  do									\
    {									\
      __typeof (x) force_underflow_complex_tmp = (x);			\
      math_check_force_underflow (__real__ force_underflow_complex_tmp); \
      math_check_force_underflow (__imag__ force_underflow_complex_tmp); \
    }									\
  while (0)
|
2011-10-18 13:00:46 +00:00
|
|
|
|
|
|
|
/* The standards only specify one variant of the fenv.h interfaces.
|
|
|
|
But at least for some architectures we can be more efficient if we
|
|
|
|
know what operations are going to be performed. Therefore we
|
|
|
|
define additional interfaces. By default they refer to the normal
|
|
|
|
interfaces. */
|
|
|
|
|
2012-03-09 20:51:27 +00:00
|
|
|
/* Default: save the FP environment and mask all exceptions.
   Architectures may pre-define the libc_feholdexcept* macros to a
   cheaper version when only part of the state matters.  */
static __always_inline void
default_libc_feholdexcept (fenv_t *e)
{
  (void) __feholdexcept (e);
}

#ifndef libc_feholdexcept
# define libc_feholdexcept  default_libc_feholdexcept
#endif
#ifndef libc_feholdexceptf
# define libc_feholdexceptf default_libc_feholdexcept
#endif
#ifndef libc_feholdexceptl
# define libc_feholdexceptl default_libc_feholdexcept
#endif
|
|
|
|
|
2012-11-03 19:48:53 +00:00
|
|
|
/* Default: set the rounding mode, ignoring failure.  */
static __always_inline void
default_libc_fesetround (int r)
{
  (void) __fesetround (r);
}

#ifndef libc_fesetround
# define libc_fesetround  default_libc_fesetround
#endif
#ifndef libc_fesetroundf
# define libc_fesetroundf default_libc_fesetround
#endif
#ifndef libc_fesetroundl
# define libc_fesetroundl default_libc_fesetround
#endif
|
|
|
|
|
2012-03-09 20:51:27 +00:00
|
|
|
/* Default: save the environment, mask exceptions, then set the
   rounding mode.  */
static __always_inline void
default_libc_feholdexcept_setround (fenv_t *e, int r)
{
  __feholdexcept (e);
  __fesetround (r);
}

#ifndef libc_feholdexcept_setround
# define libc_feholdexcept_setround  default_libc_feholdexcept_setround
#endif
#ifndef libc_feholdexcept_setroundf
# define libc_feholdexcept_setroundf default_libc_feholdexcept_setround
#endif
#ifndef libc_feholdexcept_setroundl
# define libc_feholdexcept_setroundl default_libc_feholdexcept_setround
#endif
|
|
|
|
|
2013-06-05 08:26:19 +00:00
|
|
|
/* By default 53-bit-precision variants fall back to the generic
   hold/set; x86 overrides to also force double precision.  */
#ifndef libc_feholdsetround_53bit
# define libc_feholdsetround_53bit libc_feholdsetround
#endif
|
2011-10-18 13:59:04 +00:00
|
|
|
|
2012-03-09 20:51:27 +00:00
|
|
|
#ifndef libc_fetestexcept
|
|
|
|
# define libc_fetestexcept fetestexcept
|
|
|
|
#endif
|
|
|
|
#ifndef libc_fetestexceptf
|
|
|
|
# define libc_fetestexceptf fetestexcept
|
|
|
|
#endif
|
|
|
|
#ifndef libc_fetestexceptl
|
|
|
|
# define libc_fetestexceptl fetestexcept
|
|
|
|
#endif
|
2012-03-14 16:20:10 +00:00
|
|
|
|
2012-03-09 20:51:27 +00:00
|
|
|
/* Default: install an FP environment, ignoring failure.  */
static __always_inline void
default_libc_fesetenv (fenv_t *e)
{
  (void) __fesetenv (e);
}

#ifndef libc_fesetenv
# define libc_fesetenv  default_libc_fesetenv
#endif
#ifndef libc_fesetenvf
# define libc_fesetenvf default_libc_fesetenv
#endif
#ifndef libc_fesetenvl
# define libc_fesetenvl default_libc_fesetenv
#endif
|
2011-10-18 13:00:46 +00:00
|
|
|
|
2012-03-09 20:51:27 +00:00
|
|
|
/* Default: restore an FP environment, re-raising any exceptions that
   were set in the meantime.  */
static __always_inline void
default_libc_feupdateenv (fenv_t *e)
{
  (void) __feupdateenv (e);
}

#ifndef libc_feupdateenv
# define libc_feupdateenv  default_libc_feupdateenv
#endif
#ifndef libc_feupdateenvf
# define libc_feupdateenvf default_libc_feupdateenv
#endif
#ifndef libc_feupdateenvl
# define libc_feupdateenvl default_libc_feupdateenv
#endif
|
2011-10-18 19:11:31 +00:00
|
|
|
|
2013-06-05 08:26:19 +00:00
|
|
|
/* Companion of libc_feholdsetround_53bit; falls back to the generic
   reset by default.  */
#ifndef libc_feresetround_53bit
# define libc_feresetround_53bit libc_feresetround
#endif
|
2012-03-14 16:20:10 +00:00
|
|
|
|
2012-03-10 16:53:05 +00:00
|
|
|
/* Default: test the given exceptions, then restore the environment
   (merging any exceptions raised since it was saved).  Returns the
   result of the test taken BEFORE the restore.  */
static __always_inline int
default_libc_feupdateenv_test (fenv_t *e, int ex)
{
  int ret = fetestexcept (ex);
  __feupdateenv (e);
  return ret;
}

#ifndef libc_feupdateenv_test
# define libc_feupdateenv_test  default_libc_feupdateenv_test
#endif
#ifndef libc_feupdateenv_testf
# define libc_feupdateenv_testf default_libc_feupdateenv_test
#endif
#ifndef libc_feupdateenv_testl
# define libc_feupdateenv_testl default_libc_feupdateenv_test
#endif
|
|
|
|
|
2012-03-10 16:55:53 +00:00
|
|
|
/* Save and set the rounding mode.  The use of fenv_t to store the old mode
   allows a target-specific version of this function to avoid converting the
   rounding mode from the fpu format.  By default we have no choice but to
   manipulate the entire env.  */

#ifndef libc_feholdsetround
# define libc_feholdsetround  libc_feholdexcept_setround
#endif
#ifndef libc_feholdsetroundf
# define libc_feholdsetroundf libc_feholdexcept_setroundf
#endif
#ifndef libc_feholdsetroundl
# define libc_feholdsetroundl libc_feholdexcept_setroundl
#endif
|
|
|
|
|
|
|
|
/* ... and the reverse: restore the rounding mode saved by
   libc_feholdsetround*, preserving newly raised exceptions.  */

#ifndef libc_feresetround
# define libc_feresetround  libc_feupdateenv
#endif
#ifndef libc_feresetroundf
# define libc_feresetroundf libc_feupdateenvf
#endif
#ifndef libc_feresetroundl
# define libc_feresetroundl libc_feupdateenvl
#endif
|
|
|
|
|
|
|
|
/* ... and a version that may also discard exceptions raised since the
   environment was saved.  */

#ifndef libc_feresetround_noex
# define libc_feresetround_noex  libc_fesetenv
#endif
#ifndef libc_feresetround_noexf
# define libc_feresetround_noexf libc_fesetenvf
#endif
#ifndef libc_feresetround_noexl
# define libc_feresetround_noexl libc_fesetenvl
#endif
|
|
|
|
|
2014-06-23 16:15:41 +00:00
|
|
|
/* Architectures providing optimized rounding-mode context primitives
   pre-define HAVE_RM_CTX to 1; everyone else gets the generic path.  */
#ifndef HAVE_RM_CTX
# define HAVE_RM_CTX 0
#endif
|
|
|
|
|
2014-03-17 20:36:06 +00:00
|
|
|
#if HAVE_RM_CTX
/* Set/Restore Rounding Modes only when necessary.  If defined, these functions
   set/restore floating point state only if the state needed within the lexical
   block is different from the current state.  This saves a lot of time when
   the floating point unit is much slower than the fixed point units.  */

# ifndef libc_feholdsetround_noex_ctx
#  define libc_feholdsetround_noex_ctx  libc_feholdsetround_ctx
# endif
# ifndef libc_feholdsetround_noexf_ctx
#  define libc_feholdsetround_noexf_ctx libc_feholdsetroundf_ctx
# endif
# ifndef libc_feholdsetround_noexl_ctx
#  define libc_feholdsetround_noexl_ctx libc_feholdsetroundl_ctx
# endif

# ifndef libc_feresetround_noex_ctx
#  define libc_feresetround_noex_ctx  libc_fesetenv_ctx
# endif
# ifndef libc_feresetround_noexf_ctx
#  define libc_feresetround_noexf_ctx libc_fesetenvf_ctx
# endif
# ifndef libc_feresetround_noexl_ctx
#  define libc_feresetround_noexl_ctx libc_fesetenvl_ctx
# endif
|
|
|
|
|
2014-06-23 16:15:41 +00:00
|
|
|
#else
|
Set/restore rounding mode only when needed
The most common use case of math functions is with default rounding
mode, i.e. rounding to nearest. Setting and restoring rounding mode
is an unnecessary overhead for this, so I've added support for a
context, which does the set/restore only if the FP status needs a
change. The code is written such that only x86 uses these. Other
architectures should be unaffected by it, but would definitely benefit
if the set/restore has as much overhead relative to the rest of the
code, as the x86 bits do.
Here's a summary of the performance improvement due to these
improvements; I've only mentioned functions that use the set/restore
and have benchmark inputs for x86_64:
Before:
cos(): ITERS:4.69335e+08: TOTAL:28884.6Mcy, MAX:4080.28cy, MIN:57.562cy, 16248.6 calls/Mcy
exp(): ITERS:4.47604e+08: TOTAL:28796.2Mcy, MAX:207.721cy, MIN:62.385cy, 15543.9 calls/Mcy
pow(): ITERS:1.63485e+08: TOTAL:28879.9Mcy, MAX:362.255cy, MIN:172.469cy, 5660.86 calls/Mcy
sin(): ITERS:3.89578e+08: TOTAL:28900Mcy, MAX:704.859cy, MIN:47.583cy, 13480.2 calls/Mcy
tan(): ITERS:7.0971e+07: TOTAL:28902.2Mcy, MAX:1357.79cy, MIN:388.58cy, 2455.55 calls/Mcy
After:
cos(): ITERS:6.0014e+08: TOTAL:28875.9Mcy, MAX:364.283cy, MIN:45.716cy, 20783.4 calls/Mcy
exp(): ITERS:5.48578e+08: TOTAL:28764.9Mcy, MAX:191.617cy, MIN:51.011cy, 19071.1 calls/Mcy
pow(): ITERS:1.70013e+08: TOTAL:28873.6Mcy, MAX:689.522cy, MIN:163.989cy, 5888.18 calls/Mcy
sin(): ITERS:4.64079e+08: TOTAL:28891.5Mcy, MAX:6959.3cy, MIN:36.189cy, 16062.8 calls/Mcy
tan(): ITERS:7.2354e+07: TOTAL:28898.9Mcy, MAX:1295.57cy, MIN:380.698cy, 2503.7 calls/Mcy
So the improvements are:
cos: 27.9089%
exp: 22.6919%
pow: 4.01564%
sin: 19.1585%
tan: 1.96086%
The downside of the change is that it will have an adverse performance
impact on non-default rounding modes, but I think the tradeoff is
justified.
2013-06-12 05:06:48 +00:00
|
|
|
|
2014-06-23 16:15:41 +00:00
|
|
|
/* Default implementation using standard fenv functions.
|
|
|
|
Avoid unnecessary rounding mode changes by first checking the
|
|
|
|
current rounding mode. Note the use of __glibc_unlikely is
|
|
|
|
important for performance. */
|
Set/restore rounding mode only when needed
The most common use case of math functions is with default rounding
mode, i.e. rounding to nearest. Setting and restoring rounding mode
is an unnecessary overhead for this, so I've added support for a
context, which does the set/restore only if the FP status needs a
change. The code is written such that only x86 uses these. Other
architectures should be unaffected by it, but would definitely benefit
if the set/restore has as much overhead relative to the rest of the
code, as the x86 bits do.
Here's a summary of the performance improvement due to these
improvements; I've only mentioned functions that use the set/restore
and have benchmark inputs for x86_64:
Before:
cos(): ITERS:4.69335e+08: TOTAL:28884.6Mcy, MAX:4080.28cy, MIN:57.562cy, 16248.6 calls/Mcy
exp(): ITERS:4.47604e+08: TOTAL:28796.2Mcy, MAX:207.721cy, MIN:62.385cy, 15543.9 calls/Mcy
pow(): ITERS:1.63485e+08: TOTAL:28879.9Mcy, MAX:362.255cy, MIN:172.469cy, 5660.86 calls/Mcy
sin(): ITERS:3.89578e+08: TOTAL:28900Mcy, MAX:704.859cy, MIN:47.583cy, 13480.2 calls/Mcy
tan(): ITERS:7.0971e+07: TOTAL:28902.2Mcy, MAX:1357.79cy, MIN:388.58cy, 2455.55 calls/Mcy
After:
cos(): ITERS:6.0014e+08: TOTAL:28875.9Mcy, MAX:364.283cy, MIN:45.716cy, 20783.4 calls/Mcy
exp(): ITERS:5.48578e+08: TOTAL:28764.9Mcy, MAX:191.617cy, MIN:51.011cy, 19071.1 calls/Mcy
pow(): ITERS:1.70013e+08: TOTAL:28873.6Mcy, MAX:689.522cy, MIN:163.989cy, 5888.18 calls/Mcy
sin(): ITERS:4.64079e+08: TOTAL:28891.5Mcy, MAX:6959.3cy, MIN:36.189cy, 16062.8 calls/Mcy
tan(): ITERS:7.2354e+07: TOTAL:28898.9Mcy, MAX:1295.57cy, MIN:380.698cy, 2503.7 calls/Mcy
So the improvements are:
cos: 27.9089%
exp: 22.6919%
pow: 4.01564%
sin: 19.1585%
tan: 1.96086%
The downside of the change is that it will have an adverse performance
impact on non-default rounding modes, but I think the tradeoff is
justified.
2013-06-12 05:06:48 +00:00
|
|
|
|
2014-06-23 16:15:41 +00:00
|
|
|
static __always_inline void
|
|
|
|
libc_feholdsetround_ctx (struct rm_ctx *ctx, int round)
|
|
|
|
{
|
|
|
|
ctx->updated_status = false;
|
|
|
|
|
|
|
|
/* Update rounding mode only if different. */
|
|
|
|
if (__glibc_unlikely (round != get_rounding_mode ()))
|
|
|
|
{
|
|
|
|
ctx->updated_status = true;
|
2014-12-31 22:07:52 +00:00
|
|
|
__fegetenv (&ctx->env);
|
2015-01-07 00:41:23 +00:00
|
|
|
__fesetround (round);
|
2014-06-23 16:15:41 +00:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
static __always_inline void
|
|
|
|
libc_feresetround_ctx (struct rm_ctx *ctx)
|
|
|
|
{
|
|
|
|
/* Restore the rounding mode if updated. */
|
|
|
|
if (__glibc_unlikely (ctx->updated_status))
|
2015-01-07 19:01:20 +00:00
|
|
|
__feupdateenv (&ctx->env);
|
2014-06-23 16:15:41 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
static __always_inline void
|
|
|
|
libc_feholdsetround_noex_ctx (struct rm_ctx *ctx, int round)
|
|
|
|
{
|
|
|
|
/* Save exception flags and rounding mode. */
|
2014-12-31 22:07:52 +00:00
|
|
|
__fegetenv (&ctx->env);
|
2014-06-23 16:15:41 +00:00
|
|
|
|
|
|
|
/* Update rounding mode only if different. */
|
|
|
|
if (__glibc_unlikely (round != get_rounding_mode ()))
|
2015-01-07 00:41:23 +00:00
|
|
|
__fesetround (round);
|
2014-06-23 16:15:41 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
static __always_inline void
|
|
|
|
libc_feresetround_noex_ctx (struct rm_ctx *ctx)
|
|
|
|
{
|
|
|
|
/* Restore exception flags and rounding mode. */
|
2015-01-06 23:36:20 +00:00
|
|
|
__fesetenv (&ctx->env);
|
2014-06-23 16:15:41 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
/* The float and long double context helpers are the same functions as
   the double ones here; formats share one environment
   representation.  */
# define libc_feholdsetroundf_ctx libc_feholdsetround_ctx
# define libc_feholdsetroundl_ctx libc_feholdsetround_ctx
# define libc_feresetroundf_ctx libc_feresetround_ctx
# define libc_feresetroundl_ctx libc_feresetround_ctx

# define libc_feholdsetround_noexf_ctx libc_feholdsetround_noex_ctx
# define libc_feholdsetround_noexl_ctx libc_feholdsetround_noex_ctx
# define libc_feresetround_noexf_ctx libc_feresetround_noex_ctx
# define libc_feresetround_noexl_ctx libc_feresetround_noex_ctx

#endif
|
|
|
|
|
|
|
|
/* Default the 53-bit-precision context helpers to the plain rounding
   helpers; a port may predefine them before this point to get real
   precision control (the #ifndef guards allow that override).  */
#ifndef libc_feholdsetround_53bit_ctx
# define libc_feholdsetround_53bit_ctx libc_feholdsetround_ctx
#endif
#ifndef libc_feresetround_53bit_ctx
# define libc_feresetround_53bit_ctx libc_feresetround_ctx
#endif
|
|
|
|
|
2014-06-23 16:15:41 +00:00
|
|
|
/* Declare a rounding-mode context in the enclosing scope and enter it
   via ROUNDFUNC##_ctx; the GCC `cleanup' attribute runs
   CLEANUPFUNC##_ctx automatically when the scope is left on any path,
   restoring the previous FP state.  */
#define SET_RESTORE_ROUND_GENERIC(RM,ROUNDFUNC,CLEANUPFUNC) \
  struct rm_ctx ctx __attribute__((cleanup (CLEANUPFUNC ## _ctx))); \
  ROUNDFUNC ## _ctx (&ctx, (RM))
|
|
|
|
|
|
|
|
/* Set the rounding mode within a lexical block.  Restore the rounding mode to
   the value at the start of the block.  The exception mode must be preserved.
   Exceptions raised within the block must be set in the exception flags.
   Non-stop mode may be enabled inside the block.  */
#define SET_RESTORE_ROUND(RM) \
  SET_RESTORE_ROUND_GENERIC (RM, libc_feholdsetround, libc_feresetround)
|
2012-03-10 16:55:53 +00:00
|
|
|
/* As SET_RESTORE_ROUND, for float functions.  */
#define SET_RESTORE_ROUNDF(RM) \
  SET_RESTORE_ROUND_GENERIC (RM, libc_feholdsetroundf, libc_feresetroundf)
|
2012-03-10 16:55:53 +00:00
|
|
|
/* As SET_RESTORE_ROUND, for long double functions.  */
#define SET_RESTORE_ROUNDL(RM) \
  SET_RESTORE_ROUND_GENERIC (RM, libc_feholdsetroundl, libc_feresetroundl)
|
2012-03-10 16:55:53 +00:00
|
|
|
|
2014-06-23 16:15:41 +00:00
|
|
|
/* Set the rounding mode within a lexical block.  Restore the rounding mode to
   the value at the start of the block.  The exception mode must be preserved.
   Exceptions raised within the block must be discarded, and exception flags
   are restored to the value at the start of the block.
   Non-stop mode may be enabled inside the block.  */
#define SET_RESTORE_ROUND_NOEX(RM) \
  SET_RESTORE_ROUND_GENERIC (RM, libc_feholdsetround_noex, \
			     libc_feresetround_noex)
|
2012-03-10 16:55:53 +00:00
|
|
|
/* As SET_RESTORE_ROUND_NOEX, for float functions.  */
#define SET_RESTORE_ROUND_NOEXF(RM) \
  SET_RESTORE_ROUND_GENERIC (RM, libc_feholdsetround_noexf, \
			     libc_feresetround_noexf)
|
2012-03-10 16:55:53 +00:00
|
|
|
/* As SET_RESTORE_ROUND_NOEX, for long double functions.  */
#define SET_RESTORE_ROUND_NOEXL(RM) \
  SET_RESTORE_ROUND_GENERIC (RM, libc_feholdsetround_noexl, \
			     libc_feresetround_noexl)
|
2012-03-10 16:55:53 +00:00
|
|
|
|
|
|
|
/* Like SET_RESTORE_ROUND, but also set rounding precision to 53 bits.  */
#define SET_RESTORE_ROUND_53BIT(RM) \
  SET_RESTORE_ROUND_GENERIC (RM, libc_feholdsetround_53bit, \
			     libc_feresetround_53bit)
|
2012-03-10 16:55:53 +00:00
|
|
|
|
1996-03-05 21:41:30 +00:00
|
|
|
#endif /* _MATH_PRIVATE_H_ */
|