#ifndef X86_64_MATH_PRIVATE_H
#define X86_64_MATH_PRIVATE_H 1

/* We can do a few things better on x86-64.  */
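
/* With AVX (or with SSE2AVX, where the assembler emits the VEX-encoded
   forms of SSE instructions), use the VEX-encoded moves; otherwise fall
   back to the legacy SSE encodings.  */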

#if defined __AVX__ || defined SSE2AVX
# define MOVD "vmovd"
# define MOVQ "vmovq"
#else
# define MOVD "movd"
# define MOVQ "movq"
#endif

/* Direct movement of a double into an integer register.  */
#define EXTRACT_WORDS64(i, d) \
  do { \
    int64_t i_; \
    asm (MOVQ " %1, %0" : "=rm" (i_) : "x" ((double) (d))); \
    (i) = i_; \
  } while (0)

/* And the reverse.  */
#define INSERT_WORDS64(d, i) \
  do { \
    int64_t i_ = i; \
    double d__; \
    asm (MOVQ " %1, %0" : "=x" (d__) : "rm" (i_)); \
    d = d__; \
  } while (0)
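
/* Illustrative usage, not part of the original header; a minimal sketch
   assuming the IEEE 754 binary64 representation of double and the
   <stdint.h> types:

     double x = 2.0;
     int64_t w;
     EXTRACT_WORDS64 (w, x);                       (w is now 0x4000000000000000)
     INSERT_WORDS64 (x, w + ((int64_t) 1 << 52));  (exponent + 1: x is now 4.0)  */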

/* Direct movement of a float into an integer register.  */
#define GET_FLOAT_WORD(i, d) \
  do { \
    int i_; \
    asm (MOVD " %1, %0" : "=rm" (i_) : "x" ((float) (d))); \
    (i) = i_; \
  } while (0)

/* And the reverse.  */
#define SET_FLOAT_WORD(f, i) \
  do { \
    int i_ = i; \
    float f__; \
    asm (MOVD " %1, %0" : "=x" (f__) : "rm" (i_)); \
    f = f__; \
  } while (0)
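
/* Illustrative usage, not part of the original header; a minimal sketch
   assuming the IEEE 754 binary32 representation of float:

     float f = 1.0f;
     int w;
     GET_FLOAT_WORD (w, f);                (w is now 0x3f800000)
     SET_FLOAT_WORD (f, w + 0x00800000);   (exponent + 1: f is now 2.0f)  */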

#include <sysdeps/i386/fpu/fenv_private.h>
#include_next <math_private.h>

#ifdef __SSE4_1__
extern __always_inline double
__rint (double d)
{
  double res;
# if defined __AVX__ || defined SSE2AVX
  asm ("vroundsd $4, %1, %0, %0" : "=x" (res) : "xm" (d));
# else
  asm ("roundsd $4, %1, %0" : "=x" (res) : "xm" (d));
# endif
  return res;
}

extern __always_inline float
__rintf (float d)
{
  float res;
# if defined __AVX__ || defined SSE2AVX
  asm ("vroundss $4, %1, %0, %0" : "=x" (res) : "xm" (d));
# else
  asm ("roundss $4, %1, %0" : "=x" (res) : "xm" (d));
# endif
  return res;
}
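
/* The rounding immediate 4 (binary 0100) tells ROUNDSD/ROUNDSS to take the
   rounding mode from MXCSR.RC, which is what rint/rintf need.  A sketch of
   the intended behaviour, not part of the original header and assuming the
   default round-to-nearest mode:

     __rint (2.5)     yields 2.0    (ties round to even)
     __rintf (-1.5f)  yields -2.0f  */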

extern __always_inline double
__floor (double d)
{
  double res;
# if defined __AVX__ || defined SSE2AVX
  asm ("vroundsd $1, %1, %0, %0" : "=x" (res) : "xm" (d));
# else
  asm ("roundsd $1, %1, %0" : "=x" (res) : "xm" (d));
# endif
  return res;
}

extern __always_inline float
__floorf (float d)
{
  float res;
# if defined __AVX__ || defined SSE2AVX
  asm ("vroundss $1, %1, %0, %0" : "=x" (res) : "xm" (d));
# else
  asm ("roundss $1, %1, %0" : "=x" (res) : "xm" (d));
# endif
  return res;
}
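
/* The rounding immediate 1 (binary 0001) encodes rounding toward negative
   infinity in the instruction itself, independent of MXCSR.RC, which gives
   floor semantics.  A sketch of the intended behaviour, not part of the
   original header:

     __floor (2.9)     yields 2.0
     __floorf (-0.5f)  yields -1.0f  */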

#endif /* __SSE4_1__ */

#endif /* X86_64_MATH_PRIVATE_H */