From bdd74070cc94ca50f1096808977268981308d7d6 Mon Sep 17 00:00:00 2001 From: Andreas Jaeger Date: Wed, 9 May 2012 20:17:21 +0200 Subject: [PATCH] Add volatiles for x86-64 bits/mathinline.h [BZ #14053] GCC 4.7 might remove consecutive calls to e.g. lrintf since the assembler instructions are the same and GCC does not know that the result is different depending on the rounding mode. For SSE instructions, the control register is not available so there is no way to inform GCC about this. Therefore the asms are marked as volatile. --- ChangeLog | 13 ++++++++ NEWS | 2 +- sysdeps/x86_64/fpu/bits/mathinline.h | 48 +++++++++++++++++++++++----- 3 files changed, 54 insertions(+), 9 deletions(-) diff --git a/ChangeLog b/ChangeLog index 6d7243b27b..a89b191988 100644 --- a/ChangeLog +++ b/ChangeLog @@ -1,3 +1,16 @@ +2012-05-09 Andreas Jaeger + + [BZ #14053] + * sysdeps/x86_64/fpu/bits/mathinline.h (lrintf): Add __volatile + to asm. + (lrint): Likewise. + (llrintf): Likewise. + (llrint): Likewise. + (rint): Likewise. + (rintf): Likewise. + (nearbyint): Likewise. + (nearbyintf): Likewise. 
+ 2012-05-09 Andreas Jaeger Pedro Alves diff --git a/NEWS b/NEWS index a10e8868a7..2cf47c3921 100644 --- a/NEWS +++ b/NEWS @@ -24,7 +24,7 @@ Version 2.16 13895, 13908, 13910, 13911, 13912, 13913, 13914, 13915, 13916, 13917, 13918, 13919, 13920, 13921, 13922, 13923, 13924, 13926, 13927, 13928, 13938, 13941, 13942, 13963, 13967, 13970, 13973, 13979, 13983, 14027, - 14033, 14034, 14040, 14049, 14055, 14064, 14080, 14083 + 14033, 14034, 14040, 14049, 14053, 14055, 14064, 14080, 14083 * ISO C11 support: diff --git a/sysdeps/x86_64/fpu/bits/mathinline.h b/sysdeps/x86_64/fpu/bits/mathinline.h index c072f16a21..49a199b60b 100644 --- a/sysdeps/x86_64/fpu/bits/mathinline.h +++ b/sysdeps/x86_64/fpu/bits/mathinline.h @@ -79,7 +79,11 @@ __MATH_INLINE long int __NTH (lrintf (float __x)) { long int __res; - __asm ("cvtss2si %1, %0" : "=r" (__res) : "xm" (__x)); + /* Mark as volatile since the result is dependent on the state of + the SSE control register (the rounding mode). Otherwise GCC might + remove these assembler instructions since it does not know about + the rounding mode change and cannot currently be told. */ + __asm __volatile__ ("cvtss2si %1, %0" : "=r" (__res) : "xm" (__x)); return __res; } # endif @@ -88,7 +92,11 @@ __MATH_INLINE long int __NTH (lrint (double __x)) { long int __res; - __asm ("cvtsd2si %1, %0" : "=r" (__res) : "xm" (__x)); + /* Mark as volatile since the result is dependent on the state of + the SSE control register (the rounding mode). Otherwise GCC might + remove these assembler instructions since it does not know about + the rounding mode change and cannot currently be told. */ + __asm __volatile__ ("cvtsd2si %1, %0" : "=r" (__res) : "xm" (__x)); return __res; } # endif @@ -97,14 +105,22 @@ __MATH_INLINE long long int __NTH (llrintf (float __x)) { long long int __res; - __asm ("cvtss2si %1, %0" : "=r" (__res) : "xm" (__x)); + /* Mark as volatile since the result is dependent on the state of + the SSE control register (the rounding mode). 
Otherwise GCC might + remove these assembler instructions since it does not know about + the rounding mode change and cannot currently be told. */ + __asm __volatile__ ("cvtss2si %1, %0" : "=r" (__res) : "xm" (__x)); return __res; } __MATH_INLINE long long int __NTH (llrint (double __x)) { long long int __res; - __asm ("cvtsd2si %1, %0" : "=r" (__res) : "xm" (__x)); + /* Mark as volatile since the result is dependent on the state of + the SSE control register (the rounding mode). Otherwise GCC might + remove these assembler instructions since it does not know about + the rounding mode change and cannot currently be told. */ + __asm __volatile__ ("cvtsd2si %1, %0" : "=r" (__res) : "xm" (__x)); return __res; } # endif @@ -176,14 +192,22 @@ __MATH_INLINE double __NTH (rint (double __x)) { double __res; - __asm ("roundsd $4, %1, %0" : "=x" (__res) : "xm" (__x)); + /* Mark as volatile since the result is dependent on the state of + the SSE control register (the rounding mode). Otherwise GCC might + remove these assembler instructions since it does not know about + the rounding mode change and cannot currently be told. */ + __asm __volatile__ ("roundsd $4, %1, %0" : "=x" (__res) : "xm" (__x)); return __res; } __MATH_INLINE float __NTH (rintf (float __x)) { float __res; - __asm ("roundss $4, %1, %0" : "=x" (__res) : "xm" (__x)); + /* Mark as volatile since the result is dependent on the state of + the SSE control register (the rounding mode). Otherwise GCC might + remove these assembler instructions since it does not know about + the rounding mode change and cannot currently be told. */ + __asm __volatile__ ("roundss $4, %1, %0" : "=x" (__res) : "xm" (__x)); return __res; } @@ -193,14 +217,22 @@ __MATH_INLINE double __NTH (nearbyint (double __x)) { double __res; - __asm ("roundsd $0xc, %1, %0" : "=x" (__res) : "xm" (__x)); + /* Mark as volatile since the result is dependent on the state of + the SSE control register (the rounding mode). 
Otherwise GCC might + remove these assembler instructions since it does not know about + the rounding mode change and cannot currently be told. */ + __asm __volatile__ ("roundsd $0xc, %1, %0" : "=x" (__res) : "xm" (__x)); return __res; } __MATH_INLINE float __NTH (nearbyintf (float __x)) { float __res; - __asm ("roundss $0xc, %1, %0" : "=x" (__res) : "xm" (__x)); + /* Mark as volatile since the result is dependent on the state of + the SSE control register (the rounding mode). Otherwise GCC might + remove these assembler instructions since it does not know about + the rounding mode change and cannot currently be told. */ + __asm __volatile__ ("roundss $0xc, %1, %0" : "=x" (__res) : "xm" (__x)); return __res; } # endif