#ifndef X86_64_MATH_PRIVATE_H
#define X86_64_MATH_PRIVATE_H 1

#include <fenv.h>
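
/* Optimization barriers.  math_opt_barrier returns X while keeping the
   compiler from moving its evaluation across the barrier;
   math_force_eval forces X to be evaluated for its side effects, such
   as raising floating-point exceptions.  The "x" constraint pins the
   value in an SSE register, "t"/"f" in an x87 register for long
   double.  */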
#define math_opt_barrier(x) \
  ({ __typeof(x) __x; \
     if (sizeof (x) <= sizeof (double)) \
       __asm ("" : "=x" (__x) : "0" (x)); \
     else \
       __asm ("" : "=t" (__x) : "0" (x)); \
     __x; })
#define math_force_eval(x) \
  do { \
    if (sizeof (x) <= sizeof (double)) \
      __asm __volatile ("" : : "x" (x)); \
    else \
      __asm __volatile ("" : : "f" (x)); \
  } while (0)

/* We can do a few things better on x86-64.  */

#if defined __AVX__ || defined SSE2AVX
# define MOVD "vmovd"
# define STMXCSR "vstmxcsr"
# define LDMXCSR "vldmxcsr"
#else
# define MOVD "movd"
# define STMXCSR "stmxcsr"
# define LDMXCSR "ldmxcsr"
#endif
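
/* The macros below move the bit pattern between floating-point and
   integer registers with a single movd/movq instead of going through
   memory as the generic C versions do.  Illustrative use (the names
   are hypothetical):

     long int bits;
     EXTRACT_WORDS64 (bits, d);                get the bits of double D
     INSERT_WORDS64 (d, bits & 0x7fffffffffffffffL);    clear sign bit
*/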

/* Direct movement of a double into an integer register.  */
#define EXTRACT_WORDS64(i, d) \
  do { \
    long int i_; \
    asm (MOVD " %1, %0" : "=rm" (i_) : "x" ((double) (d))); \
    (i) = i_; \
  } while (0)

/* And the reverse.  */
#define INSERT_WORDS64(d, i) \
  do { \
    long int i_ = i; \
    double d__; \
    asm (MOVD " %1, %0" : "=x" (d__) : "rm" (i_)); \
    d = d__; \
  } while (0)

/* Direct movement of a float into an integer register.  */
#define GET_FLOAT_WORD(i, d) \
  do { \
    int i_; \
    asm (MOVD " %1, %0" : "=rm" (i_) : "x" ((float) (d))); \
    (i) = i_; \
  } while (0)

/* And the reverse.  */
#define SET_FLOAT_WORD(f, i) \
  do { \
    int i_ = i; \
    float f__; \
    asm (MOVD " %1, %0" : "=x" (f__) : "rm" (i_)); \
    f = f__; \
  } while (0)
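
/* The MXCSR bits used below: bits 0-5 are the exception flags, bits
   7-12 the exception mask bits (1 = masked), and bits 13-14 the
   rounding mode.  Hence 0x3f covers the flags, 0x1f80 sets all masks,
   and 0x6000 isolates the rounding mode.  */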

/* Specialized variants of the <fenv.h> interfaces which only handle
   either the FPU or the SSE unit.  */
static __always_inline void
libc_feholdexcept (fenv_t *e)
{
  unsigned int mxcsr;
  asm (STMXCSR " %0" : "=m" (*&mxcsr));
  e->__mxcsr = mxcsr;
  /* Mask all exceptions and clear any pending flags.  */
  mxcsr = (mxcsr | 0x1f80) & ~0x3f;
  asm volatile (LDMXCSR " %0" : : "m" (*&mxcsr));
}
#define libc_feholdexcept libc_feholdexcept
#define libc_feholdexceptf libc_feholdexcept
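
/* Defining each name to itself lets the generic math_private.h detect
   with #ifdef that an optimized variant of the interface exists.  */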

static __always_inline void
libc_feholdexcept_setround (fenv_t *e, int r)
{
  unsigned int mxcsr;
  asm (STMXCSR " %0" : "=m" (*&mxcsr));
  e->__mxcsr = mxcsr;
  /* Mask all exceptions, clear the flags, and install the rounding
     mode R, shifting it from the x87 position (bits 10-11 of the
     FE_* constants) to the SSE position (bits 13-14).  */
  mxcsr = ((mxcsr | 0x1f80) & ~0x603f) | (r << 3);
  asm volatile (LDMXCSR " %0" : : "m" (*&mxcsr));
}
#define libc_feholdexcept_setround libc_feholdexcept_setround
#define libc_feholdexcept_setroundf libc_feholdexcept_setround

static __always_inline int
libc_fetestexcept (int e)
{
  unsigned int mxcsr;
  asm volatile (STMXCSR " %0" : "=m" (*&mxcsr));
  return mxcsr & e & FE_ALL_EXCEPT;
}
#define libc_fetestexcept libc_fetestexcept
#define libc_fetestexceptf libc_fetestexcept

static __always_inline void
libc_fesetenv (fenv_t *e)
{
  asm volatile (LDMXCSR " %0" : : "m" (e->__mxcsr));
}
#define libc_fesetenv libc_fesetenv
#define libc_fesetenvf libc_fesetenv

static __always_inline int
libc_feupdateenv_test (fenv_t *e, int ex)
{
  unsigned int mxcsr, old_mxcsr, cur_ex;
  asm volatile (STMXCSR " %0" : "=m" (*&mxcsr));
  cur_ex = mxcsr & FE_ALL_EXCEPT;

  /* Merge current exceptions with the old environment.  */
  old_mxcsr = e->__mxcsr;
  mxcsr = old_mxcsr | cur_ex;
  asm volatile (LDMXCSR " %0" : : "m" (*&mxcsr));

  /* Raise SIGFPE for any new exceptions that the old environment
     leaves unmasked (mask bit clear).  Expect that the normal
     environment has all exceptions masked, so this branch is
     normally not taken.  */
  if (__builtin_expect (~(old_mxcsr >> 7) & cur_ex, 0))
    __feraiseexcept (cur_ex);

  /* Test for exceptions raised since the hold.  */
  return cur_ex & ex;
}
#define libc_feupdateenv_test libc_feupdateenv_test
#define libc_feupdateenv_testf libc_feupdateenv_test
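
/* A sketch of the intended hold/update pattern (illustrative only;
   compute_result and overflow_fallback are hypothetical):

     fenv_t env;
     libc_feholdexcept_setround (&env, FE_TONEAREST);
     double r = compute_result (x);
     if (libc_feupdateenv_test (&env, FE_OVERFLOW))
       r = overflow_fallback (x);
*/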

static __always_inline void
libc_feupdateenv (fenv_t *e)
{
  libc_feupdateenv_test (e, 0);
}
#define libc_feupdateenv libc_feupdateenv
#define libc_feupdateenvf libc_feupdateenv
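
/* The following pair saves and restores only the rounding mode, which
   is cheaper than a full feholdexcept/feupdateenv cycle when the
   exception state does not need to change.  */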

static __always_inline void
libc_feholdsetround (fenv_t *e, int r)
{
  unsigned int mxcsr;
  asm (STMXCSR " %0" : "=m" (*&mxcsr));
  e->__mxcsr = mxcsr;
  /* Install the new rounding mode, leaving exception state alone.  */
  mxcsr = (mxcsr & ~0x6000) | (r << 3);
  asm volatile (LDMXCSR " %0" : : "m" (*&mxcsr));
}
#define libc_feholdsetround libc_feholdsetround
#define libc_feholdsetroundf libc_feholdsetround

static __always_inline void
libc_feresetround (fenv_t *e)
{
  unsigned int mxcsr;
  asm (STMXCSR " %0" : "=m" (*&mxcsr));
  /* Restore only the rounding mode from the saved environment.  */
  mxcsr = (mxcsr & ~0x6000) | (e->__mxcsr & 0x6000);
  asm volatile (LDMXCSR " %0" : : "m" (*&mxcsr));
}
#define libc_feresetround libc_feresetround
#define libc_feresetroundf libc_feresetround

#include_next <math_private.h>
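
/* Under GNU inline semantics, these extern __always_inline definitions
   override the generic versions just declared: every call inlines to
   the hardware instruction and no out-of-line copy is emitted.  */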
extern __always_inline double
__ieee754_sqrt (double d)
{
  double res;
#if defined __AVX__ || defined SSE2AVX
  asm ("vsqrtsd %1, %0, %0" : "=x" (res) : "xm" (d));
#else
  asm ("sqrtsd %1, %0" : "=x" (res) : "xm" (d));
#endif
  return res;
}

extern __always_inline float
__ieee754_sqrtf (float d)
{
  float res;
#if defined __AVX__ || defined SSE2AVX
  asm ("vsqrtss %1, %0, %0" : "=x" (res) : "xm" (d));
#else
  asm ("sqrtss %1, %0" : "=x" (res) : "xm" (d));
#endif
  return res;
}

extern __always_inline long double
__ieee754_sqrtl (long double d)
{
  long double res;
  asm ("fsqrt" : "=t" (res) : "0" (d));
  return res;
}
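
/* SSE4.1 roundsd/roundss encode the rounding in the immediate: bit 2
   set ($4) selects the current MXCSR rounding mode, as rint requires;
   $1 rounds toward minus infinity, as floor requires.  */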
#ifdef __SSE4_1__
extern __always_inline double
__rint (double d)
{
  double res;
# if defined __AVX__ || defined SSE2AVX
  asm ("vroundsd $4, %1, %0, %0" : "=x" (res) : "xm" (d));
# else
  asm ("roundsd $4, %1, %0" : "=x" (res) : "xm" (d));
# endif
  return res;
}

extern __always_inline float
__rintf (float d)
{
  float res;
# if defined __AVX__ || defined SSE2AVX
  asm ("vroundss $4, %1, %0, %0" : "=x" (res) : "xm" (d));
# else
  asm ("roundss $4, %1, %0" : "=x" (res) : "xm" (d));
# endif
  return res;
}

extern __always_inline double
__floor (double d)
{
  double res;
# if defined __AVX__ || defined SSE2AVX
  asm ("vroundsd $1, %1, %0, %0" : "=x" (res) : "xm" (d));
# else
  asm ("roundsd $1, %1, %0" : "=x" (res) : "xm" (d));
# endif
  return res;
}

extern __always_inline float
__floorf (float d)
{
  float res;
# if defined __AVX__ || defined SSE2AVX
  asm ("vroundss $1, %1, %0, %0" : "=x" (res) : "xm" (d));
# else
  asm ("roundss $1, %1, %0" : "=x" (res) : "xm" (d));
# endif
  return res;
}
#endif /* __SSE4_1__ */

#endif /* X86_64_MATH_PRIVATE_H */