#ifndef FENV_PRIVATE_H
#define FENV_PRIVATE_H 1

#include <bits/floatn.h>
#include <fenv.h>
#include <fpu_control.h>

/* This file is used by both the 32- and 64-bit ports.  The 64-bit port
   has a field in the fenv_t for the mxcsr; the 32-bit port does not.
   Instead, we (ab)use the only 32-bit field extant in the struct.  */
#ifndef __x86_64__
# define __mxcsr __eip
#endif

/* All of these functions are private to libm, and are all used in pairs
   to save+change the fp state and restore the original state.  Thus we
   need not care for both the 387 and the sse unit, only the one we're
   actually using.  */
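
/* Illustrative sketch only (not part of the original interface): a libm
   routine built for SSE math pairs these helpers roughly like

     fenv_t env;
     libc_feholdexcept_setround_sse (&env, FE_TONEAREST);
     ... do the computation ...
     int raised = libc_feupdateenv_test_sse (&env, FE_UNDERFLOW | FE_INEXACT);

   one "hold" call saves and changes the state, one "update" call restores
   it and merges whatever exceptions were raised in between.  */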

#if defined __AVX__ || defined SSE2AVX
# define STMXCSR "vstmxcsr"
# define LDMXCSR "vldmxcsr"
#else
# define STMXCSR "stmxcsr"
# define LDMXCSR "ldmxcsr"
#endif
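
/* For reference, the MXCSR bits behind the literal masks used below:
   bits 0-5 (0x3f) are the exception flags, bits 7-12 (0x1f80) are the
   exception mask bits, and bits 13-14 (0x6000) are the rounding control.
   The x86 FE_* rounding macros encode the x87 control-word RC field
   (bits 10-11), so shifting them left by 3 yields the matching MXCSR
   rounding-control bits.  */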

static __always_inline void
libc_feholdexcept_sse (fenv_t *e)
{
  unsigned int mxcsr;
  asm (STMXCSR " %0" : "=m" (*&mxcsr));
  e->__mxcsr = mxcsr;
  /* Mask all exceptions and clear the exception flags.  */
  mxcsr = (mxcsr | 0x1f80) & ~0x3f;
  asm volatile (LDMXCSR " %0" : : "m" (*&mxcsr));
}

static __always_inline void
libc_feholdexcept_387 (fenv_t *e)
{
  /* Recall that fnstenv has a side-effect of masking exceptions.
     Clobber all of the fp registers so that the TOS field is 0.  */
  asm volatile ("fnstenv %0; fnclex"
                : "=m" (*e)
                : : "st", "st(1)", "st(2)", "st(3)",
                    "st(4)", "st(5)", "st(6)", "st(7)");
}

static __always_inline void
libc_fesetround_sse (int r)
{
  unsigned int mxcsr;
  asm (STMXCSR " %0" : "=m" (*&mxcsr));
  /* Replace the rounding-control bits with the new mode.  */
  mxcsr = (mxcsr & ~0x6000) | (r << 3);
  asm volatile (LDMXCSR " %0" : : "m" (*&mxcsr));
}

static __always_inline void
libc_fesetround_387 (int r)
{
  fpu_control_t cw;
  _FPU_GETCW (cw);
  /* Replace the rounding-control field (bits 10-11) of the control word.  */
  cw = (cw & ~0xc00) | r;
  _FPU_SETCW (cw);
}

static __always_inline void
libc_feholdexcept_setround_sse (fenv_t *e, int r)
{
  unsigned int mxcsr;
  asm (STMXCSR " %0" : "=m" (*&mxcsr));
  e->__mxcsr = mxcsr;
  mxcsr = ((mxcsr | 0x1f80) & ~0x603f) | (r << 3);
  asm volatile (LDMXCSR " %0" : : "m" (*&mxcsr));
}

/* Set both rounding mode and precision.  A convenience function for use
   by libc_feholdexcept_setround and libc_feholdexcept_setround_53bit.  */
static __always_inline void
libc_feholdexcept_setround_387_prec (fenv_t *e, int r)
{
  libc_feholdexcept_387 (e);

  fpu_control_t cw = e->__control_word;
  cw &= ~(_FPU_RC_ZERO | _FPU_EXTENDED);
  /* 0x3f sets all the exception mask bits in the x87 control word.  */
  cw |= r | 0x3f;
  _FPU_SETCW (cw);
}

static __always_inline void
libc_feholdexcept_setround_387 (fenv_t *e, int r)
{
  libc_feholdexcept_setround_387_prec (e, r | _FPU_EXTENDED);
}

static __always_inline void
libc_feholdexcept_setround_387_53bit (fenv_t *e, int r)
{
  libc_feholdexcept_setround_387_prec (e, r | _FPU_DOUBLE);
}

static __always_inline int
libc_fetestexcept_sse (int e)
{
  unsigned int mxcsr;
  asm volatile (STMXCSR " %0" : "=m" (*&mxcsr));
  return mxcsr & e & FE_ALL_EXCEPT;
}

static __always_inline int
libc_fetestexcept_387 (int ex)
{
  fexcept_t temp;
  asm volatile ("fnstsw %0" : "=a" (temp));
  return temp & ex & FE_ALL_EXCEPT;
}

static __always_inline void
libc_fesetenv_sse (fenv_t *e)
{
  asm volatile (LDMXCSR " %0" : : "m" (e->__mxcsr));
}

static __always_inline void
libc_fesetenv_387 (fenv_t *e)
{
  /* Clobber all fp registers so that the TOS value we saved earlier is
     compatible with the current state of the compiler.  */
  asm volatile ("fldenv %0"
                : : "m" (*e)
                : "st", "st(1)", "st(2)", "st(3)",
                  "st(4)", "st(5)", "st(6)", "st(7)");
}

static __always_inline int
libc_feupdateenv_test_sse (fenv_t *e, int ex)
{
  unsigned int mxcsr, old_mxcsr, cur_ex;
  asm volatile (STMXCSR " %0" : "=m" (*&mxcsr));
  cur_ex = mxcsr & FE_ALL_EXCEPT;

  /* Merge current exceptions with the old environment.  */
  old_mxcsr = e->__mxcsr;
  mxcsr = old_mxcsr | cur_ex;
  asm volatile (LDMXCSR " %0" : : "m" (*&mxcsr));

  /* Raise SIGFPE for any new exceptions since the hold.  Expect that
     the normal environment has all exceptions masked.  The mask bits
     sit 7 bits above the flag bits, so ~(old_mxcsr >> 7) & cur_ex
     selects raised exceptions that are unmasked in the saved
     environment.  */
  if (__glibc_unlikely (~(old_mxcsr >> 7) & cur_ex))
    __feraiseexcept (cur_ex);

  /* Test for exceptions raised since the hold.  */
  return cur_ex & ex;
}

static __always_inline int
libc_feupdateenv_test_387 (fenv_t *e, int ex)
{
  fexcept_t cur_ex;

  /* Save current exceptions.  */
  asm volatile ("fnstsw %0" : "=a" (cur_ex));
  cur_ex &= FE_ALL_EXCEPT;

  /* Reload original environment.  */
  libc_fesetenv_387 (e);

  /* Merge current exceptions.  */
  __feraiseexcept (cur_ex);

  /* Test for exceptions raised since the hold.  */
  return cur_ex & ex;
}

static __always_inline void
libc_feupdateenv_sse (fenv_t *e)
{
  libc_feupdateenv_test_sse (e, 0);
}

static __always_inline void
libc_feupdateenv_387 (fenv_t *e)
{
  libc_feupdateenv_test_387 (e, 0);
}

static __always_inline void
libc_feholdsetround_sse (fenv_t *e, int r)
{
  unsigned int mxcsr;
  asm (STMXCSR " %0" : "=m" (*&mxcsr));
  e->__mxcsr = mxcsr;
  mxcsr = (mxcsr & ~0x6000) | (r << 3);
  asm volatile (LDMXCSR " %0" : : "m" (*&mxcsr));
}

static __always_inline void
libc_feholdsetround_387_prec (fenv_t *e, int r)
{
  fpu_control_t cw;

  _FPU_GETCW (cw);
  e->__control_word = cw;
  cw &= ~(_FPU_RC_ZERO | _FPU_EXTENDED);
  cw |= r;
  _FPU_SETCW (cw);
}

static __always_inline void
libc_feholdsetround_387 (fenv_t *e, int r)
{
  libc_feholdsetround_387_prec (e, r | _FPU_EXTENDED);
}

static __always_inline void
libc_feholdsetround_387_53bit (fenv_t *e, int r)
{
  libc_feholdsetround_387_prec (e, r | _FPU_DOUBLE);
}

static __always_inline void
libc_feresetround_sse (fenv_t *e)
{
  unsigned int mxcsr;
  asm (STMXCSR " %0" : "=m" (*&mxcsr));
  mxcsr = (mxcsr & ~0x6000) | (e->__mxcsr & 0x6000);
  asm volatile (LDMXCSR " %0" : : "m" (*&mxcsr));
}

static __always_inline void
libc_feresetround_387 (fenv_t *e)
{
  _FPU_SETCW (e->__control_word);
}

#ifdef __SSE_MATH__
# define libc_feholdexceptf libc_feholdexcept_sse
# define libc_fesetroundf libc_fesetround_sse
# define libc_feholdexcept_setroundf libc_feholdexcept_setround_sse
# define libc_fetestexceptf libc_fetestexcept_sse
# define libc_fesetenvf libc_fesetenv_sse
# define libc_feupdateenv_testf libc_feupdateenv_test_sse
# define libc_feupdateenvf libc_feupdateenv_sse
# define libc_feholdsetroundf libc_feholdsetround_sse
# define libc_feresetroundf libc_feresetround_sse
#else
# define libc_feholdexceptf libc_feholdexcept_387
# define libc_fesetroundf libc_fesetround_387
# define libc_feholdexcept_setroundf libc_feholdexcept_setround_387
# define libc_fetestexceptf libc_fetestexcept_387
# define libc_fesetenvf libc_fesetenv_387
# define libc_feupdateenv_testf libc_feupdateenv_test_387
# define libc_feupdateenvf libc_feupdateenv_387
# define libc_feholdsetroundf libc_feholdsetround_387
# define libc_feresetroundf libc_feresetround_387
#endif /* __SSE_MATH__ */

#ifdef __SSE2_MATH__
# define libc_feholdexcept libc_feholdexcept_sse
# define libc_fesetround libc_fesetround_sse
# define libc_feholdexcept_setround libc_feholdexcept_setround_sse
# define libc_fetestexcept libc_fetestexcept_sse
# define libc_fesetenv libc_fesetenv_sse
# define libc_feupdateenv_test libc_feupdateenv_test_sse
# define libc_feupdateenv libc_feupdateenv_sse
# define libc_feholdsetround libc_feholdsetround_sse
# define libc_feresetround libc_feresetround_sse
#else
# define libc_feholdexcept libc_feholdexcept_387
# define libc_fesetround libc_fesetround_387
# define libc_feholdexcept_setround libc_feholdexcept_setround_387
# define libc_fetestexcept libc_fetestexcept_387
# define libc_fesetenv libc_fesetenv_387
# define libc_feupdateenv_test libc_feupdateenv_test_387
# define libc_feupdateenv libc_feupdateenv_387
# define libc_feholdsetround libc_feholdsetround_387
# define libc_feresetround libc_feresetround_387
#endif /* __SSE2_MATH__ */

#define libc_feholdexceptl libc_feholdexcept_387
#define libc_fesetroundl libc_fesetround_387
#define libc_feholdexcept_setroundl libc_feholdexcept_setround_387
#define libc_fetestexceptl libc_fetestexcept_387
#define libc_fesetenvl libc_fesetenv_387
#define libc_feupdateenv_testl libc_feupdateenv_test_387
#define libc_feupdateenvl libc_feupdateenv_387
#define libc_feholdsetroundl libc_feholdsetround_387
#define libc_feresetroundl libc_feresetround_387

#ifndef __SSE2_MATH__
# define libc_feholdexcept_setround_53bit libc_feholdexcept_setround_387_53bit
# define libc_feholdsetround_53bit libc_feholdsetround_387_53bit
#endif

#ifdef __x86_64__
/* The SSE rounding mode is used by soft-fp (libgcc and glibc) on
   x86_64, so that must be set for float128 computations.  */
# define SET_RESTORE_ROUNDF128(RM) \
  SET_RESTORE_ROUND_GENERIC (RM, libc_feholdsetround_sse, libc_feresetround_sse)
# define libc_feholdexcept_setroundf128 libc_feholdexcept_setround_sse
# define libc_feupdateenv_testf128 libc_feupdateenv_test_sse
#else
/* The 387 rounding mode is used by soft-fp for 32-bit, but whether
   387 or SSE exceptions are used depends on whether libgcc was built
   for SSE math, which is not known when glibc is being built.  */
# define libc_feholdexcept_setroundf128 default_libc_feholdexcept_setround
# define libc_feupdateenv_testf128 default_libc_feupdateenv_test
#endif
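
/* Illustrative use only: a float128 routine that must run under a known
   rounding mode does

     SET_RESTORE_ROUNDF128 (FE_TONEAREST);

   at the top of its block; the generic SET_RESTORE_ROUND_GENERIC macro
   restores the previous mode automatically via a cleanup handler when
   the block is left.  */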

/* We have support for rounding mode context.  */
#define HAVE_RM_CTX 1
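
/* Illustrative only: the SET_RESTORE_ROUND* macros in the generic fenv
   support expand to roughly

     struct rm_ctx ctx __attribute__ ((cleanup (libc_feresetround_ctx)));
     libc_feholdsetround_ctx (&ctx, FE_TONEAREST);
     ... caller's computation ...

   so the hardware state is only written when the rounding mode actually
   has to change (ctx->updated_status records whether it did), which keeps
   the common round-to-nearest case cheap.  */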

static __always_inline void
libc_feholdexcept_setround_sse_ctx (struct rm_ctx *ctx, int r)
{
  unsigned int mxcsr, new_mxcsr;
  asm (STMXCSR " %0" : "=m" (*&mxcsr));
  new_mxcsr = ((mxcsr | 0x1f80) & ~0x603f) | (r << 3);

  ctx->env.__mxcsr = mxcsr;
  if (__glibc_unlikely (mxcsr != new_mxcsr))
    {
      asm volatile (LDMXCSR " %0" : : "m" (*&new_mxcsr));
      ctx->updated_status = true;
    }
  else
    ctx->updated_status = false;
}

/* Unconditional since we want to overwrite any exceptions that occurred in the
   context.  This is also why all fehold* functions unconditionally write into
   ctx->env.  */
static __always_inline void
libc_fesetenv_sse_ctx (struct rm_ctx *ctx)
{
  libc_fesetenv_sse (&ctx->env);
}

static __always_inline void
libc_feupdateenv_sse_ctx (struct rm_ctx *ctx)
{
  if (__glibc_unlikely (ctx->updated_status))
    libc_feupdateenv_test_sse (&ctx->env, 0);
}

static __always_inline void
libc_feholdexcept_setround_387_prec_ctx (struct rm_ctx *ctx, int r)
{
  libc_feholdexcept_387 (&ctx->env);

  fpu_control_t cw = ctx->env.__control_word;
  fpu_control_t old_cw = cw;
  cw &= ~(_FPU_RC_ZERO | _FPU_EXTENDED);
  cw |= r | 0x3f;

  if (__glibc_unlikely (old_cw != cw))
    {
      _FPU_SETCW (cw);
      ctx->updated_status = true;
    }
  else
    ctx->updated_status = false;
}

static __always_inline void
libc_feholdexcept_setround_387_ctx (struct rm_ctx *ctx, int r)
{
  libc_feholdexcept_setround_387_prec_ctx (ctx, r | _FPU_EXTENDED);
}

static __always_inline void
libc_feholdexcept_setround_387_53bit_ctx (struct rm_ctx *ctx, int r)
{
  libc_feholdexcept_setround_387_prec_ctx (ctx, r | _FPU_DOUBLE);
}

static __always_inline void
libc_feholdsetround_387_prec_ctx (struct rm_ctx *ctx, int r)
{
  fpu_control_t cw, new_cw;

  _FPU_GETCW (cw);
  new_cw = cw;
  new_cw &= ~(_FPU_RC_ZERO | _FPU_EXTENDED);
  new_cw |= r;

  ctx->env.__control_word = cw;
  if (__glibc_unlikely (new_cw != cw))
    {
      _FPU_SETCW (new_cw);
      ctx->updated_status = true;
    }
  else
    ctx->updated_status = false;
}

static __always_inline void
libc_feholdsetround_387_ctx (struct rm_ctx *ctx, int r)
{
  libc_feholdsetround_387_prec_ctx (ctx, r | _FPU_EXTENDED);
}

static __always_inline void
libc_feholdsetround_387_53bit_ctx (struct rm_ctx *ctx, int r)
{
  libc_feholdsetround_387_prec_ctx (ctx, r | _FPU_DOUBLE);
}

static __always_inline void
libc_feholdsetround_sse_ctx (struct rm_ctx *ctx, int r)
{
  unsigned int mxcsr, new_mxcsr;

  asm (STMXCSR " %0" : "=m" (*&mxcsr));
  new_mxcsr = (mxcsr & ~0x6000) | (r << 3);

  ctx->env.__mxcsr = mxcsr;
  if (__glibc_unlikely (new_mxcsr != mxcsr))
    {
      asm volatile (LDMXCSR " %0" : : "m" (*&new_mxcsr));
      ctx->updated_status = true;
    }
  else
    ctx->updated_status = false;
}

static __always_inline void
libc_feresetround_sse_ctx (struct rm_ctx *ctx)
{
  if (__glibc_unlikely (ctx->updated_status))
    libc_feresetround_sse (&ctx->env);
}

static __always_inline void
libc_feresetround_387_ctx (struct rm_ctx *ctx)
{
  if (__glibc_unlikely (ctx->updated_status))
    _FPU_SETCW (ctx->env.__control_word);
}

static __always_inline void
libc_feupdateenv_387_ctx (struct rm_ctx *ctx)
{
  if (__glibc_unlikely (ctx->updated_status))
    libc_feupdateenv_test_387 (&ctx->env, 0);
}

#ifdef __SSE_MATH__
# define libc_feholdexcept_setroundf_ctx libc_feholdexcept_setround_sse_ctx
# define libc_fesetenvf_ctx libc_fesetenv_sse_ctx
# define libc_feupdateenvf_ctx libc_feupdateenv_sse_ctx
# define libc_feholdsetroundf_ctx libc_feholdsetround_sse_ctx
# define libc_feresetroundf_ctx libc_feresetround_sse_ctx
#else
# define libc_feholdexcept_setroundf_ctx libc_feholdexcept_setround_387_ctx
# define libc_feupdateenvf_ctx libc_feupdateenv_387_ctx
# define libc_feholdsetroundf_ctx libc_feholdsetround_387_ctx
# define libc_feresetroundf_ctx libc_feresetround_387_ctx
#endif /* __SSE_MATH__ */

#ifdef __SSE2_MATH__
# if defined (__x86_64__) || !defined (MATH_SET_BOTH_ROUNDING_MODES)
#  define libc_feholdexcept_setround_ctx libc_feholdexcept_setround_sse_ctx
#  define libc_fesetenv_ctx libc_fesetenv_sse_ctx
#  define libc_feupdateenv_ctx libc_feupdateenv_sse_ctx
#  define libc_feholdsetround_ctx libc_feholdsetround_sse_ctx
#  define libc_feresetround_ctx libc_feresetround_sse_ctx
# else
#  define libc_feholdexcept_setround_ctx default_libc_feholdexcept_setround_ctx
#  define libc_fesetenv_ctx default_libc_fesetenv_ctx
#  define libc_feupdateenv_ctx default_libc_feupdateenv_ctx
#  define libc_feholdsetround_ctx default_libc_feholdsetround_ctx
#  define libc_feresetround_ctx default_libc_feresetround_ctx
# endif
#else
# define libc_feholdexcept_setround_ctx libc_feholdexcept_setround_387_ctx
# define libc_feupdateenv_ctx libc_feupdateenv_387_ctx
# define libc_feholdsetround_ctx libc_feholdsetround_387_ctx
# define libc_feresetround_ctx libc_feresetround_387_ctx
#endif /* __SSE2_MATH__ */

#define libc_feholdexcept_setroundl_ctx libc_feholdexcept_setround_387_ctx
#define libc_feupdateenvl_ctx libc_feupdateenv_387_ctx
#define libc_feholdsetroundl_ctx libc_feholdsetround_387_ctx
#define libc_feresetroundl_ctx libc_feresetround_387_ctx

#ifndef __SSE2_MATH__
# define libc_feholdsetround_53bit_ctx libc_feholdsetround_387_53bit_ctx
# define libc_feresetround_53bit_ctx libc_feresetround_387_ctx
#endif

#undef __mxcsr

#endif /* FENV_PRIVATE_H */