Optimize private 387 fenv access; share code between i386 and x86_64.

This commit is contained in:
Richard Henderson 2012-03-18 15:58:00 -07:00
parent d0adc92230
commit bd37f2ee31
4 changed files with 321 additions and 163 deletions

View File

@ -1,5 +1,19 @@
2012-03-19 Richard Henderson <rth@twiddle.net>
* sysdeps/i386/fpu/fenv_private.h: New file.
* sysdeps/i386/fpu/math_private.h: Use it.
(math_opt_barrier, math_force_eval): Remove.
(libc_feholdexcept_setround_53bit): Remove.
(libc_feupdateenv_53bit): Remove.
* sysdeps/x86_64/fpu/math_private.h: Likewise.
(math_opt_barrier, math_force_eval): Remove.
(libc_feholdexcept): Remove.
(libc_feholdexcept_setround): Remove.
(libc_fetestexcept, libc_fesetenv): Remove.
(libc_feupdateenv_test): Remove.
(libc_feupdateenv, libc_feholdsetround): Remove.
(libc_feresetround): Remove.
* sysdeps/i386/fpu/feholdexcpt.c (feholdexcept): Avoid the fldenv.
* sysdeps/x86_64/fpu/feholdexcpt.c (feholdexcept): Likewise.

View File

@ -0,0 +1,304 @@
#ifndef FENV_PRIVATE_H
#define FENV_PRIVATE_H 1
#include <fenv.h>
#include <fpu_control.h>
#ifdef __SSE2_MATH__
/* With SSE2 math, float and double are computed in xmm registers; only
   long double uses the 387 stack.  The empty asm is purely an
   optimization barrier: "x" pins the value in an SSE register, "t"/"f"
   on the 387 stack.  */
# define math_opt_barrier(x) \
  ({ __typeof(x) __x; \
     if (sizeof (x) <= sizeof (double)) \
       __asm ("" : "=x" (__x) : "0" (x)); \
     else \
       __asm ("" : "=t" (__x) : "0" (x)); \
     __x; })
/* Force (x) to be evaluated -- e.g. for the fp exceptions it raises --
   even if its value is otherwise unused.  */
# define math_force_eval(x) \
  do { \
    if (sizeof (x) <= sizeof (double)) \
      __asm __volatile ("" : : "x" (x)); \
    else \
      __asm __volatile ("" : : "f" (x)); \
  } while (0)
#else
/* Without SSE2 math everything is computed on the 387 stack.  */
# define math_opt_barrier(x) \
  ({ __typeof (x) __x; \
     __asm ("" : "=t" (__x) : "0" (x)); \
     __x; })
/* Forcing through memory ("m") for float/double also discards any
   excess 387 precision, which is part of the intended effect.  */
# define math_force_eval(x) \
  do { \
    __typeof (x) __x = (x); \
    if (sizeof (x) <= sizeof (double)) \
      __asm __volatile ("" : : "m" (__x)); \
    else \
      __asm __volatile ("" : : "f" (__x)); \
  } while (0)
#endif
/* This file is used by both the 32- and 64-bit ports.  The 64-bit port
   has a field in the fenv_t for the mxcsr; the 32-bit port does not.
   Instead, we (ab)use the only 32-bit field extant in the struct.  */
#ifndef __x86_64__
# define __mxcsr __eip
#endif

/* All of these functions are private to libm, and are all used in pairs
   to save+change the fp state and restore the original state.  Thus we
   need not care for both the 387 and the sse unit, only the one we're
   actually using.  */

/* Spell the mxcsr load/store mnemonics with the VEX prefix when the
   compilation target uses AVX encodings.  */
#if defined __AVX__ || defined SSE2AVX
# define STMXCSR "vstmxcsr"
# define LDMXCSR "vldmxcsr"
#else
# define STMXCSR "stmxcsr"
# define LDMXCSR "ldmxcsr"
#endif
/* Save the SSE environment in *E, then mask all exceptions and clear
   the sticky exception flags in the mxcsr.  */
static __always_inline void
libc_feholdexcept_sse (fenv_t *e)
{
  unsigned int mxcsr;
  asm (STMXCSR " %0" : "=m" (*&mxcsr));
  e->__mxcsr = mxcsr;
  /* 0x1f80 sets all the exception mask bits; ~0x3f clears the six
     sticky exception flag bits.  */
  mxcsr = (mxcsr | 0x1f80) & ~0x3f;
  asm volatile (LDMXCSR " %0" : : "m" (*&mxcsr));
}
/* Save the 387 environment in *E; fnstenv masks all exceptions as a
   side effect, and fnclex clears the sticky exception flags.  */
static __always_inline void
libc_feholdexcept_387 (fenv_t *e)
{
  /* Recall that fnstenv has a side-effect of masking exceptions.
     Clobber all of the fp registers so that the TOS field is 0.  */
  asm volatile ("fnstenv %0; fnclex"
		: "=m"(*e)
		: : "st", "st(1)", "st(2)", "st(3)",
		    "st(4)", "st(5)", "st(6)", "st(7)");
}
/* As libc_feholdexcept_sse, but additionally install rounding mode R.
   R is one of the FE_* rounding constants; shifted left by 3 it lands
   in the mxcsr rounding-control field (bits 13-14).  */
static __always_inline void
libc_feholdexcept_setround_sse (fenv_t *e, int r)
{
  unsigned int mxcsr;
  asm (STMXCSR " %0" : "=m" (*&mxcsr));
  e->__mxcsr = mxcsr;
  /* Mask all exceptions (|0x1f80), clear the sticky flags and the old
     rounding control (&~0x603f), then install R.  */
  mxcsr = ((mxcsr | 0x1f80) & ~0x603f) | (r << 3);
  asm volatile (LDMXCSR " %0" : : "m" (*&mxcsr));
}
/* Set both rounding mode and precision.  A convenience function for use
   by libc_feholdexcept_setround_387 and
   libc_feholdexcept_setround_387_53bit.  */
static __always_inline void
libc_feholdexcept_setround_387_prec (fenv_t *e, int r)
{
  libc_feholdexcept_387 (e);

  /* Start from the control word just saved in *E.  */
  fpu_control_t cw = e->__control_word;
  /* Clear the rounding-control and precision fields, then install R
     (rounding mode | precision) and all six exception mask bits.  */
  cw &= ~(_FPU_RC_ZERO | _FPU_EXTENDED);
  cw |= r | 0x3f;
  _FPU_SETCW (cw);
}
/* Hold exceptions and set rounding mode R at full extended precision.  */
static __always_inline void
libc_feholdexcept_setround_387 (fenv_t *e, int r)
{
  libc_feholdexcept_setround_387_prec (e, _FPU_EXTENDED | r);
}
/* Hold exceptions and set rounding mode R at 53-bit (double) precision.  */
static __always_inline void
libc_feholdexcept_setround_387_53bit (fenv_t *e, int r)
{
  libc_feholdexcept_setround_387_prec (e, _FPU_DOUBLE | r);
}
/* Return the subset of E currently raised in the SSE unit; the sticky
   exception flags are the low bits of the mxcsr.  */
static __always_inline int
libc_fetestexcept_sse (int e)
{
  unsigned int csr;
  asm volatile (STMXCSR " %0" : "=m" (*&csr));
  return e & csr & FE_ALL_EXCEPT;
}
/* Return the subset of EX currently raised in the 387; fnstsw reads the
   status word, whose low bits are the sticky exception flags.  */
static __always_inline int
libc_fetestexcept_387 (int ex)
{
  fexcept_t sw;
  asm volatile ("fnstsw %0" : "=a" (sw));
  return ex & sw & FE_ALL_EXCEPT;
}
/* Install the SSE environment saved in *E by reloading its mxcsr.  */
static __always_inline void
libc_fesetenv_sse (fenv_t *e)
{
  asm volatile (LDMXCSR " %0" : : "m" (e->__mxcsr));
}
/* Install the 387 environment saved in *E with fldenv.  */
static __always_inline void
libc_fesetenv_387 (fenv_t *e)
{
  /* Clobber all fp registers so that the TOS value we saved earlier is
     compatible with the current state of the compiler.  */
  asm volatile ("fldenv %0"
		: : "m" (*e)
		: "st", "st(1)", "st(2)", "st(3)",
		  "st(4)", "st(5)", "st(6)", "st(7)");
}
/* Restore the SSE environment saved in *E, merging in any exceptions
   raised meanwhile.  Return the subset of EX raised since the hold.  */
static __always_inline int
libc_feupdateenv_test_sse (fenv_t *e, int ex)
{
  unsigned int mxcsr, old_mxcsr, cur_ex;
  asm volatile (STMXCSR " %0" : "=m" (*&mxcsr));
  cur_ex = mxcsr & FE_ALL_EXCEPT;

  /* Merge current exceptions with the old environment.  */
  old_mxcsr = e->__mxcsr;
  mxcsr = old_mxcsr | cur_ex;
  asm volatile (LDMXCSR " %0" : : "m" (*&mxcsr));

  /* Raise SIGFPE for any new exceptions since the hold.  Expect that
     the normal environment has all exceptions masked.  Shifting the
     mask bits (mxcsr bits 7-12) right by 7 aligns them with the flag
     bits; an exception must be raised by hand iff it is set in CUR_EX
     but UNMASKED in the restored environment -- merely loading flag
     bits via ldmxcsr never delivers a trap.  (The previous test lacked
     the complement and so fired for masked exceptions instead.)  */
  if (__builtin_expect (~(old_mxcsr >> 7) & cur_ex, 0))
    __feraiseexcept (cur_ex);

  /* Test for exceptions raised since the hold.  */
  return cur_ex & ex;
}
/* Restore the 387 environment saved in *E, merging in any exceptions
   raised meanwhile.  Return the subset of EX raised since the hold.  */
static __always_inline int
libc_feupdateenv_test_387 (fenv_t *e, int ex)
{
  fexcept_t cur_ex;

  /* Save current exceptions.  */
  asm volatile ("fnstsw %0" : "=a" (cur_ex));
  cur_ex &= FE_ALL_EXCEPT;

  /* Reload original environment.  */
  libc_fesetenv_387 (e);

  /* Merge current exceptions.  */
  __feraiseexcept (cur_ex);

  /* Test for exceptions raised since the hold.  */
  return cur_ex & ex;
}
/* Restore *E and merge currently raised exceptions; the test result is
   not wanted here.  */
static __always_inline void
libc_feupdateenv_sse (fenv_t *e)
{
  (void) libc_feupdateenv_test_sse (e, 0);
}
/* Restore *E and merge currently raised exceptions; the test result is
   not wanted here.  */
static __always_inline void
libc_feupdateenv_387 (fenv_t *e)
{
  (void) libc_feupdateenv_test_387 (e, 0);
}
/* Save the mxcsr in *E and install rounding mode R; (r << 3) lands in
   the mxcsr rounding-control field (bits 13-14).  The exception state
   is left untouched.  */
static __always_inline void
libc_feholdsetround_sse (fenv_t *e, int r)
{
  unsigned int mxcsr;
  asm (STMXCSR " %0" : "=m" (*&mxcsr));
  e->__mxcsr = mxcsr;
  mxcsr = (mxcsr & ~0x6000) | (r << 3);
  asm volatile (LDMXCSR " %0" : : "m" (*&mxcsr));
}
/* Save the current 387 control word in *E, then install the requested
   rounding mode and precision R.  Exception masks are left alone.  */
static __always_inline void
libc_feholdsetround_387_prec (fenv_t *e, int r)
{
  fpu_control_t ctrl;

  _FPU_GETCW (ctrl);
  e->__control_word = ctrl;
  ctrl = (ctrl & ~(_FPU_RC_ZERO | _FPU_EXTENDED)) | r;
  _FPU_SETCW (ctrl);
}
/* Hold the rounding state and set rounding mode R at full extended
   precision.  */
static __always_inline void
libc_feholdsetround_387 (fenv_t *e, int r)
{
  libc_feholdsetround_387_prec (e, _FPU_EXTENDED | r);
}
/* Hold the rounding state and set rounding mode R at 53-bit (double)
   precision.  */
static __always_inline void
libc_feholdsetround_387_53bit (fenv_t *e, int r)
{
  libc_feholdsetround_387_prec (e, _FPU_DOUBLE | r);
}
/* Restore only the rounding mode (mxcsr bits 13-14) from *E, leaving
   the exception flags and masks as they are now.  */
static __always_inline void
libc_feresetround_sse (fenv_t *e)
{
  unsigned int mxcsr;
  asm (STMXCSR " %0" : "=m" (*&mxcsr));
  mxcsr = (mxcsr & ~0x6000) | (e->__mxcsr & 0x6000);
  asm volatile (LDMXCSR " %0" : : "m" (*&mxcsr));
}
/* Restore the rounding state by reloading the control word saved in *E
   (rounding mode and precision both live there).  */
static __always_inline void
libc_feresetround_387 (fenv_t *e)
{
  fpu_control_t saved_cw = e->__control_word;
  _FPU_SETCW (saved_cw);
}
/* Bind the float (f-suffixed) entry points to the unit the compiler
   uses for float arithmetic.  */
#ifdef __SSE_MATH__
# define libc_feholdexceptf libc_feholdexcept_sse
# define libc_feholdexcept_setroundf libc_feholdexcept_setround_sse
# define libc_fetestexceptf libc_fetestexcept_sse
# define libc_fesetenvf libc_fesetenv_sse
# define libc_feupdateenv_testf libc_feupdateenv_test_sse
# define libc_feupdateenvf libc_feupdateenv_sse
# define libc_feholdsetroundf libc_feholdsetround_sse
# define libc_feresetroundf libc_feresetround_sse
#else
# define libc_feholdexceptf libc_feholdexcept_387
# define libc_feholdexcept_setroundf libc_feholdexcept_setround_387
# define libc_fetestexceptf libc_fetestexcept_387
# define libc_fesetenvf libc_fesetenv_387
# define libc_feupdateenv_testf libc_feupdateenv_test_387
# define libc_feupdateenvf libc_feupdateenv_387
# define libc_feholdsetroundf libc_feholdsetround_387
# define libc_feresetroundf libc_feresetround_387
#endif /* __SSE_MATH__ */

/* Likewise for the double (unsuffixed) entry points.  */
#ifdef __SSE2_MATH__
# define libc_feholdexcept libc_feholdexcept_sse
# define libc_feholdexcept_setround libc_feholdexcept_setround_sse
# define libc_fetestexcept libc_fetestexcept_sse
# define libc_fesetenv libc_fesetenv_sse
# define libc_feupdateenv_test libc_feupdateenv_test_sse
# define libc_feupdateenv libc_feupdateenv_sse
# define libc_feholdsetround libc_feholdsetround_sse
# define libc_feresetround libc_feresetround_sse
#else
# define libc_feholdexcept libc_feholdexcept_387
# define libc_feholdexcept_setround libc_feholdexcept_setround_387
# define libc_fetestexcept libc_fetestexcept_387
# define libc_fesetenv libc_fesetenv_387
# define libc_feupdateenv_test libc_feupdateenv_test_387
# define libc_feupdateenv libc_feupdateenv_387
# define libc_feholdsetround libc_feholdsetround_387
# define libc_feresetround libc_feresetround_387
#endif /* __SSE2_MATH__ */

/* long double is always computed on the 387.  */
#define libc_feholdexceptl libc_feholdexcept_387
#define libc_feholdexcept_setroundl libc_feholdexcept_setround_387
#define libc_fetestexceptl libc_fetestexcept_387
#define libc_fesetenvl libc_fesetenv_387
#define libc_feupdateenv_testl libc_feupdateenv_test_387
#define libc_feupdateenvl libc_feupdateenv_387
#define libc_feholdsetroundl libc_feholdsetround_387
#define libc_feresetroundl libc_feresetround_387

/* The 53-bit precision helpers only make sense when double math runs
   on the 387.  */
#ifndef __SSE2_MATH__
# define libc_feholdexcept_setround_53bit libc_feholdexcept_setround_387_53bit
# define libc_feholdsetround_53bit libc_feholdsetround_387_53bit
#endif

#undef __mxcsr

View File

@ -1,48 +1,6 @@
#ifndef _MATH_PRIVATE_H
#include <fenv.h>
#include <fpu_control.h>
/* Optimization barrier: force (x) through a 387 register so the
   compiler cannot fold or move the computation across it.  */
#define math_opt_barrier(x) \
  ({ __typeof (x) __x; \
     __asm ("" : "=t" (__x) : "0" (x)); \
     __x; })
/* Force (x) to be evaluated -- e.g. for the fp exceptions it raises --
   even if its value is unused; float/double go through memory, long
   double through the 387.  */
#define math_force_eval(x) \
  do \
    { \
      __typeof (x) __x = (x); \
      if (sizeof (x) <= sizeof (double)) \
	__asm __volatile ("" : : "m" (__x)); \
      else \
	__asm __volatile ("" : : "f" (__x)); \
    } \
  while (0)
/* Save the environment in *E, mask and clear exceptions, set rounding
   mode R, and narrow the 387 to 53-bit (double) precision.  */
static __always_inline void
libc_feholdexcept_setround_53bit (fenv_t *e, int r)
{
  feholdexcept (e);
  fesetround (r);

  /* feholdexcept/fesetround leave the 387 precision field alone;
     narrow it to double precision by hand.  */
  fpu_control_t cw;
  _FPU_GETCW (cw);
  cw &= ~(fpu_control_t) _FPU_EXTENDED;
  cw |= _FPU_DOUBLE;
  _FPU_SETCW (cw);
}
#define libc_feholdexcept_setround_53bit libc_feholdexcept_setround_53bit
/* Restore the environment saved in *E (merging currently raised
   exceptions) and put the 387 precision back as it was at the hold.  */
static __always_inline void
libc_feupdateenv_53bit (fenv_t *e)
{
  feupdateenv (e);
  /* Unfortunately, feupdateenv fails to affect the rounding precision.
     We can get that back by restoring the exact control word we saved.  */
  _FPU_SETCW (e->__control_word);
}
#define libc_feupdateenv_53bit libc_feupdateenv_53bit
#include "fenv_private.h"
#include_next <math_private.h>
#endif /* _MATH_PRIVATE_H */
#endif

View File

@ -1,33 +1,12 @@
#ifndef X86_64_MATH_PRIVATE_H
#define X86_64_MATH_PRIVATE_H 1
#include <fenv.h>
/* Optimization barrier: pin the value in a register of the unit that
   computes it -- xmm ("x") for float/double, the 387 stack ("t") for
   long double.  The empty asm has no code of its own.  */
#define math_opt_barrier(x) \
  ({ __typeof(x) __x; \
     if (sizeof (x) <= sizeof (double)) \
       __asm ("" : "=x" (__x) : "0" (x)); \
     else \
       __asm ("" : "=t" (__x) : "0" (x)); \
     __x; })
/* Force (x) to be evaluated -- e.g. for the fp exceptions it raises --
   even if its value is otherwise unused.  */
#define math_force_eval(x) \
  do { \
    if (sizeof (x) <= sizeof (double)) \
      __asm __volatile ("" : : "x" (x)); \
    else \
      __asm __volatile ("" : : "f" (x)); \
  } while (0)
/* We can do a few things better on x86-64.  */

/* Spell the instruction mnemonics with the VEX prefix when the
   compilation target uses AVX encodings.  */
#if defined __AVX__ || defined SSE2AVX
# define MOVD "vmovd"
# define STMXCSR "vstmxcsr"
# define LDMXCSR "vldmxcsr"
#else
# define MOVD "movd"
# define STMXCSR "stmxcsr"
# define LDMXCSR "ldmxcsr"
#endif
/* Direct movement of float into integer register. */
@ -64,104 +43,7 @@
f = f__; \
} while (0)
/* Specialized variants of the <fenv.h> interfaces which only handle
either the FPU or the SSE unit. */
/* Save the SSE environment in *E, then mask all exceptions and clear
   the sticky exception flags in the mxcsr.  */
static __always_inline void
libc_feholdexcept (fenv_t *e)
{
  unsigned int mxcsr;
  asm (STMXCSR " %0" : "=m" (*&mxcsr));
  e->__mxcsr = mxcsr;
  /* 0x1f80 sets all the exception mask bits; ~0x3f clears the six
     sticky exception flag bits.  */
  mxcsr = (mxcsr | 0x1f80) & ~0x3f;
  asm volatile (LDMXCSR " %0" : : "m" (*&mxcsr));
}
#define libc_feholdexcept libc_feholdexcept
#define libc_feholdexceptf libc_feholdexcept
/* As libc_feholdexcept, but additionally install rounding mode R; R is
   one of the FE_* constants, and (r << 3) lands in the mxcsr
   rounding-control field (bits 13-14).  */
static __always_inline void
libc_feholdexcept_setround (fenv_t *e, int r)
{
  unsigned int mxcsr;
  asm (STMXCSR " %0" : "=m" (*&mxcsr));
  e->__mxcsr = mxcsr;
  /* Mask all exceptions (|0x1f80), clear the sticky flags and the old
     rounding control (&~0x603f), then install R.  */
  mxcsr = ((mxcsr | 0x1f80) & ~0x603f) | (r << 3);
  asm volatile (LDMXCSR " %0" : : "m" (*&mxcsr));
}
#define libc_feholdexcept_setround libc_feholdexcept_setround
#define libc_feholdexcept_setroundf libc_feholdexcept_setround
/* Return the subset of E currently raised in the SSE unit; the sticky
   exception flags are the low bits of the mxcsr.  */
static __always_inline int
libc_fetestexcept (int e)
{
  unsigned int csr;
  asm volatile (STMXCSR " %0" : "=m" (*&csr));
  return e & csr & FE_ALL_EXCEPT;
}
#define libc_fetestexcept libc_fetestexcept
#define libc_fetestexceptf libc_fetestexcept
/* Install the SSE environment saved in *E by reloading its mxcsr.  */
static __always_inline void
libc_fesetenv (fenv_t *e)
{
  asm volatile (LDMXCSR " %0" : : "m" (e->__mxcsr));
}
#define libc_fesetenv libc_fesetenv
#define libc_fesetenvf libc_fesetenv
/* Restore the SSE environment saved in *E, merging in any exceptions
   raised meanwhile.  Return the subset of EX raised since the hold.  */
static __always_inline int
libc_feupdateenv_test (fenv_t *e, int ex)
{
  unsigned int mxcsr, old_mxcsr, cur_ex;
  asm volatile (STMXCSR " %0" : "=m" (*&mxcsr));
  cur_ex = mxcsr & FE_ALL_EXCEPT;

  /* Merge current exceptions with the old environment.  */
  old_mxcsr = e->__mxcsr;
  mxcsr = old_mxcsr | cur_ex;
  asm volatile (LDMXCSR " %0" : : "m" (*&mxcsr));

  /* Raise SIGFPE for any new exceptions since the hold.  Expect that
     the normal environment has all exceptions masked.  Shifting the
     mask bits (mxcsr bits 7-12) right by 7 aligns them with the flag
     bits; an exception must be raised by hand iff it is set in CUR_EX
     but UNMASKED in the restored environment -- merely loading flag
     bits via ldmxcsr never delivers a trap.  (The previous test lacked
     the complement and so fired for masked exceptions instead.)  */
  if (__builtin_expect (~(old_mxcsr >> 7) & cur_ex, 0))
    __feraiseexcept (cur_ex);

  /* Test for exceptions raised since the hold.  */
  return cur_ex & ex;
}
#define libc_feupdateenv_test libc_feupdateenv_test
#define libc_feupdateenv_testf libc_feupdateenv_test
/* Restore *E and merge currently raised exceptions; the test result is
   not wanted here.  */
static __always_inline void
libc_feupdateenv (fenv_t *e)
{
  (void) libc_feupdateenv_test (e, 0);
}
#define libc_feupdateenv libc_feupdateenv
#define libc_feupdateenvf libc_feupdateenv
/* Save the mxcsr in *E and install rounding mode R; (r << 3) lands in
   the mxcsr rounding-control field (bits 13-14).  The exception state
   is left untouched.  */
static __always_inline void
libc_feholdsetround (fenv_t *e, int r)
{
  unsigned int mxcsr;
  asm (STMXCSR " %0" : "=m" (*&mxcsr));
  e->__mxcsr = mxcsr;
  mxcsr = (mxcsr & ~0x6000) | (r << 3);
  asm volatile (LDMXCSR " %0" : : "m" (*&mxcsr));
}
#define libc_feholdsetround libc_feholdsetround
#define libc_feholdsetroundf libc_feholdsetround
/* Restore only the rounding mode (mxcsr bits 13-14) from *E, leaving
   the exception flags and masks as they are now.  */
static __always_inline void
libc_feresetround (fenv_t *e)
{
  unsigned int mxcsr;
  asm (STMXCSR " %0" : "=m" (*&mxcsr));
  mxcsr = (mxcsr & ~0x6000) | (e->__mxcsr & 0x6000);
  asm volatile (LDMXCSR " %0" : : "m" (*&mxcsr));
}
#define libc_feresetround libc_feresetround
#define libc_feresetroundf libc_feresetround
#include <sysdeps/i386/fpu/fenv_private.h>
#include_next <math_private.h>
extern __always_inline double