Fix the C++11 and GCC-atomic intrinsics when not using GCC

Both Clang and ICC complain when these atomics are used with a
forward-declared pointee. GCC doesn't, which makes me think it's a GCC
bug.

When using QBasicAtomicPointer<Foo> with these atomics, the _q_value
member causes the instantiation of QAtomicOps<Foo>, which causes the
instantiation of the regular member function
QAtomicOps<Foo>::fetchAndAddRelaxed. The problem is that this function
takes a QAtomicAdditiveType<Foo>::AdditiveT as a parameter, which
requires sizeof(Foo). Clang 3.3 and ICC 14 correctly expand the type
and complain; GCC 4.7-4.9 apparently don't.

The fix is to apply the same trick we used for the other atomics:
change all ops functions (including fetchAndAddRelaxed) to be member
templates. That way, they can't be expanded until they are actually
used.
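
Under the same hypothetical names, a sketch of the fixed shape: since
the op is now a member template, the parameter type is only formed at
the call site, so the class itself can be instantiated with an
incomplete pointee:

// Same hypothetical AdditiveType, repeated so this sketch stands alone:
template <typename T> struct AdditiveType
{
    typedef T AdditiveT;
    static const int AddScale = 1;
};
template <typename T> struct AdditiveType<T *>
{
    typedef long AdditiveT;
    static const int AddScale = sizeof(T);   // still needs a complete pointee...
};

template <typename X> struct FixedOps
{
    template <typename T>   // ...but the lookup is now deferred to the call site
    static T fetchAndAddRelaxed(T &value, typename AdditiveType<T>::AdditiveT toAdd);
};

struct Incomplete;
FixedOps<Incomplete *> ok;   // compiles with GCC, Clang and ICC alike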

Clang errors:
qgenericatomic.h:73:33: error: invalid application of 'sizeof' to an incomplete type 'QMutexData'
qatomic_gcc.h:136:48: note: in instantiation of template class 'QAtomicAdditiveType<QMutexData *>' requested here
qbasicatomic.h:272:22: note: in instantiation of template class 'QAtomicOps<QMutexData *>' requested here

ICC errors:
qgenericatomic.h(73): error: incomplete type is not allowed
    detected during:
      instantiation of class "QAtomicAdditiveType<T *> [with T=QMutexData]" at line 111 of "qatomic_cxx11.h"
      instantiation of class "QAtomicOps<T> [with T=QMutexData *]" at line 272 of "qbasicatomic.h"

Found-by: Tor Arne
Change-Id: I9b10648cd47109a943b34a4c9926d77cd0c4fe12
Reviewed-by: Tor Arne Vestbø <tor.arne.vestbo@digia.com>
Reviewed-by: Olivier Goffart <ogoffart@woboq.com>
commit cfa5c1698d (parent a33d9351a8)
Author: Thiago Macieira, 2013-09-16 10:59:31 -05:00; committed by The Qt Project
2 changed files with 51 additions and 38 deletions

qatomic_cxx11.h

@@ -105,56 +105,56 @@ template<> struct QAtomicIntegerTraits<char32_t> { enum { IsInteger = 1 }; };
 #define Q_ATOMIC_INT64_FETCH_AND_STORE_IS_ALWAYS_NATIVE
 #define Q_ATOMIC_INT64_FETCH_AND_ADD_IS_ALWAYS_NATIVE
 
-template <typename T> struct QAtomicOps
+template <typename X> struct QAtomicOps
 {
-    typedef std::atomic<T> Type;
-    typedef typename QAtomicAdditiveType<T>::AdditiveT _AdditiveType;
-    static const int AddScale = QAtomicAdditiveType<T>::AddScale;
+    typedef std::atomic<X> Type;
 
-    static inline
-    T load(const Type &_q_value) Q_DECL_NOTHROW
+    template <typename T> static inline
+    T load(const std::atomic<T> &_q_value) Q_DECL_NOTHROW
     {
         return _q_value.load(std::memory_order_relaxed);
     }
 
-    static inline
-    T load(const volatile Type &_q_value) Q_DECL_NOTHROW
+    template <typename T> static inline
+    T load(const volatile std::atomic<T> &_q_value) Q_DECL_NOTHROW
     {
         return _q_value.load(std::memory_order_relaxed);
     }
 
-    static inline
-    T loadAcquire(const Type &_q_value) Q_DECL_NOTHROW
+    template <typename T> static inline
+    T loadAcquire(const std::atomic<T> &_q_value) Q_DECL_NOTHROW
     {
         return _q_value.load(std::memory_order_acquire);
     }
 
-    static inline
-    T loadAcquire(const volatile Type &_q_value) Q_DECL_NOTHROW
+    template <typename T> static inline
+    T loadAcquire(const volatile std::atomic<T> &_q_value) Q_DECL_NOTHROW
     {
         return _q_value.load(std::memory_order_acquire);
     }
 
-    static inline
-    void store(Type &_q_value, T newValue) Q_DECL_NOTHROW
+    template <typename T> static inline
+    void store(std::atomic<T> &_q_value, T newValue) Q_DECL_NOTHROW
     {
         _q_value.store(newValue, std::memory_order_relaxed);
     }
 
-    static inline
-    void storeRelease(Type &_q_value, T newValue) Q_DECL_NOTHROW
+    template <typename T> static inline
+    void storeRelease(std::atomic<T> &_q_value, T newValue) Q_DECL_NOTHROW
     {
         _q_value.store(newValue, std::memory_order_release);
     }
 
     static inline Q_DECL_CONSTEXPR bool isReferenceCountingNative() Q_DECL_NOTHROW { return true; }
     static inline Q_DECL_CONSTEXPR bool isReferenceCountingWaitFree() Q_DECL_NOTHROW { return false; }
-    static inline bool ref(Type &_q_value)
+    template <typename T>
+    static inline bool ref(std::atomic<T> &_q_value)
     {
         return ++_q_value != 0;
     }
 
-    static inline bool deref(Type &_q_value) Q_DECL_NOTHROW
+    template <typename T>
+    static inline bool deref(std::atomic<T> &_q_value) Q_DECL_NOTHROW
     {
         return --_q_value != 0;
     }
@@ -162,22 +162,25 @@ template <typename T> struct QAtomicOps
     static inline Q_DECL_CONSTEXPR bool isTestAndSetNative() Q_DECL_NOTHROW { return false; }
     static inline Q_DECL_CONSTEXPR bool isTestAndSetWaitFree() Q_DECL_NOTHROW { return false; }
 
-    static
+    template <typename T> static
     bool testAndSetRelaxed(Type &_q_value, T expectedValue, T newValue) Q_DECL_NOTHROW
     {
         return _q_value.compare_exchange_strong(expectedValue, newValue, std::memory_order_relaxed);
     }
 
+    template <typename T>
     static bool testAndSetAcquire(Type &_q_value, T expectedValue, T newValue) Q_DECL_NOTHROW
     {
         return _q_value.compare_exchange_strong(expectedValue, newValue, std::memory_order_acquire);
     }
 
+    template <typename T>
     static bool testAndSetRelease(Type &_q_value, T expectedValue, T newValue) Q_DECL_NOTHROW
     {
         return _q_value.compare_exchange_strong(expectedValue, newValue, std::memory_order_release);
     }
 
+    template <typename T>
     static bool testAndSetOrdered(Type &_q_value, T expectedValue, T newValue) Q_DECL_NOTHROW
     {
         return _q_value.compare_exchange_strong(expectedValue, newValue, std::memory_order_acq_rel);
@@ -186,22 +189,26 @@ template <typename T> struct QAtomicOps
     static inline Q_DECL_CONSTEXPR bool isFetchAndStoreNative() Q_DECL_NOTHROW { return false; }
     static inline Q_DECL_CONSTEXPR bool isFetchAndStoreWaitFree() Q_DECL_NOTHROW { return false; }
 
-    static T fetchAndStoreRelaxed(Type &_q_value, T newValue) Q_DECL_NOTHROW
+    template <typename T>
+    static T fetchAndStoreRelaxed(std::atomic<T> &_q_value, T newValue) Q_DECL_NOTHROW
     {
         return _q_value.exchange(newValue, std::memory_order_relaxed);
     }
 
-    static T fetchAndStoreAcquire(Type &_q_value, T newValue) Q_DECL_NOTHROW
+    template <typename T>
+    static T fetchAndStoreAcquire(std::atomic<T> &_q_value, T newValue) Q_DECL_NOTHROW
    {
         return _q_value.exchange(newValue, std::memory_order_acquire);
     }
 
-    static T fetchAndStoreRelease(Type &_q_value, T newValue) Q_DECL_NOTHROW
+    template <typename T>
+    static T fetchAndStoreRelease(std::atomic<T> &_q_value, T newValue) Q_DECL_NOTHROW
     {
         return _q_value.exchange(newValue, std::memory_order_release);
     }
 
-    static T fetchAndStoreOrdered(Type &_q_value, T newValue) Q_DECL_NOTHROW
+    template <typename T>
+    static T fetchAndStoreOrdered(std::atomic<T> &_q_value, T newValue) Q_DECL_NOTHROW
     {
         return _q_value.exchange(newValue, std::memory_order_acq_rel);
     }
@@ -209,31 +216,31 @@ template <typename T> struct QAtomicOps
     static inline Q_DECL_CONSTEXPR bool isFetchAndAddNative() Q_DECL_NOTHROW { return false; }
     static inline Q_DECL_CONSTEXPR bool isFetchAndAddWaitFree() Q_DECL_NOTHROW { return false; }
 
-    static
-    T fetchAndAddRelaxed(Type &_q_value, _AdditiveType valueToAdd) Q_DECL_NOTHROW
+    template <typename T> static inline
+    T fetchAndAddRelaxed(std::atomic<T> &_q_value, typename QAtomicAdditiveType<T>::AdditiveT valueToAdd) Q_DECL_NOTHROW
     {
-        return _q_value.fetch_add(valueToAdd * AddScale,
+        return _q_value.fetch_add(valueToAdd * QAtomicAdditiveType<T>::AddScale,
                                   std::memory_order_relaxed);
     }
 
-    static
-    T fetchAndAddAcquire(Type &_q_value, _AdditiveType valueToAdd) Q_DECL_NOTHROW
+    template <typename T> static inline
+    T fetchAndAddAcquire(std::atomic<T> &_q_value, typename QAtomicAdditiveType<T>::AdditiveT valueToAdd) Q_DECL_NOTHROW
     {
-        return _q_value.fetch_add(valueToAdd * AddScale,
+        return _q_value.fetch_add(valueToAdd * QAtomicAdditiveType<T>::AddScale,
                                   std::memory_order_acquire);
     }
 
-    static
-    T fetchAndAddRelease(Type &_q_value, _AdditiveType valueToAdd) Q_DECL_NOTHROW
+    template <typename T> static inline
+    T fetchAndAddRelease(std::atomic<T> &_q_value, typename QAtomicAdditiveType<T>::AdditiveT valueToAdd) Q_DECL_NOTHROW
     {
-        return _q_value.fetch_add(valueToAdd * AddScale,
+        return _q_value.fetch_add(valueToAdd * QAtomicAdditiveType<T>::AddScale,
                                   std::memory_order_release);
     }
 
-    static
-    T fetchAndAddOrdered(Type &_q_value, _AdditiveType valueToAdd) Q_DECL_NOTHROW
+    template <typename T> static inline
+    T fetchAndAddOrdered(std::atomic<T> &_q_value, typename QAtomicAdditiveType<T>::AdditiveT valueToAdd) Q_DECL_NOTHROW
     {
-        return _q_value.fetch_add(valueToAdd * AddScale,
+        return _q_value.fetch_add(valueToAdd * QAtomicAdditiveType<T>::AddScale,
                                   std::memory_order_acq_rel);
     }
 };

qatomic_gcc.h

@@ -75,16 +75,17 @@ template<> struct QAtomicIntegerTraits<char32_t> { enum { IsInteger = 1 }; };
 #define Q_ATOMIC_POINTER_FETCH_AND_STORE_IS_SOMETIMES_NATIVE
 #define Q_ATOMIC_POINTER_FETCH_AND_ADD_IS_SOMETIMES_NATIVE
 
-template <typename T> struct QAtomicOps: QGenericAtomicOps<QAtomicOps<T> >
+template <typename X> struct QAtomicOps: QGenericAtomicOps<QAtomicOps<X> >
 {
     // The GCC intrinsics all have fully-ordered memory semantics, so we define
     // only the xxxRelaxed functions. The exception is __sync_lock_and_test,
     // which has acquire semantics, so we need to define the Release and
     // Ordered versions too.
 
-    typedef T Type;
+    typedef X Type;
 
 #ifndef __ia64__
+    template <typename T>
     static T loadAcquire(const T &_q_value) Q_DECL_NOTHROW
     {
         T tmp = _q_value;
@@ -92,6 +93,7 @@ template <typename T> struct QAtomicOps: QGenericAtomicOps<QAtomicOps<T> >
         return tmp;
     }
 
+    template <typename T>
     static void storeRelease(T &_q_value, T newValue) Q_DECL_NOTHROW
     {
         __sync_synchronize();
@@ -101,28 +103,32 @@ template <typename T> struct QAtomicOps: QGenericAtomicOps<QAtomicOps<T> >
     static Q_DECL_CONSTEXPR bool isTestAndSetNative() Q_DECL_NOTHROW { return false; }
     static Q_DECL_CONSTEXPR bool isTestAndSetWaitFree() Q_DECL_NOTHROW { return false; }
 
+    template <typename T>
     static bool testAndSetRelaxed(T &_q_value, T expectedValue, T newValue) Q_DECL_NOTHROW
     {
         return __sync_bool_compare_and_swap(&_q_value, expectedValue, newValue);
     }
 
+    template <typename T>
     static T fetchAndStoreRelaxed(T &_q_value, T newValue) Q_DECL_NOTHROW
     {
         return __sync_lock_test_and_set(&_q_value, newValue);
     }
 
+    template <typename T>
     static T fetchAndStoreRelease(T &_q_value, T newValue) Q_DECL_NOTHROW
     {
         __sync_synchronize();
         return __sync_lock_test_and_set(&_q_value, newValue);
     }
 
+    template <typename T>
     static T fetchAndStoreOrdered(T &_q_value, T newValue) Q_DECL_NOTHROW
     {
         return fetchAndStoreRelease(_q_value, newValue);
     }
 
-    static
+    template <typename T> static
     T fetchAndAddRelaxed(T &_q_value, typename QAtomicAdditiveType<T>::AdditiveT valueToAdd) Q_DECL_NOTHROW
     {
         return __sync_fetch_and_add(&_q_value, valueToAdd * QAtomicAdditiveType<T>::AddScale);