Remove yet-unused 64-bit atomic hooks.

For now, I think we'll only be using sk_atomic_inc on 64-bit values, so let's cut out the dead code.
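
For reference, the only 64-bit hook that survives is sk_atomic_inc. A minimal sketch of its __sync-based port, following the same fetch-and-add pattern as the 32-bit hooks in the diff below (the exact definition lives in the ports header, not here):

static inline __attribute__((always_inline)) int64_t sk_atomic_inc(int64_t* addr) {
    // Atomically add one and return the previous value.
    return __sync_fetch_and_add(addr, 1);
}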

NOTREECHECKS=true

BUG=skia:
R=bsalomon@google.com

Review URL: https://codereview.chromium.org/398003004
Mike Klein, 2014-07-16 15:20:58 -04:00
parent 122f9ecef8, commit e7fd6db41a
3 changed files with 0 additions and 30 deletions


@@ -22,20 +22,17 @@ static int64_t sk_atomic_inc(int64_t* addr);
  * No additional memory barrier is required; this must act as a compiler barrier.
  */
 static int32_t sk_atomic_add(int32_t* addr, int32_t inc);
-static int64_t sk_atomic_add(int64_t* addr, int64_t inc);
 
 /** Atomically subtracts one from the int referenced by addr and returns the previous value.
  * This must act as a release (SL/S) memory barrier and as a compiler barrier.
  */
 static int32_t sk_atomic_dec(int32_t* addr);
-static int64_t sk_atomic_dec(int64_t* addr);
 
 /** Atomic compare and set.
  * If *addr == before, set *addr to after and return true, otherwise return false.
  * This must act as a release (SL/S) memory barrier and as a compiler barrier.
  */
 static bool sk_atomic_cas(int32_t* addr, int32_t before, int32_t after);
-static bool sk_atomic_cas(int64_t* addr, int64_t before, int64_t after);
 
 /** If sk_atomic_dec does not act as an acquire (L/SL) barrier,
  * this must act as an acquire (L/SL) memory barrier and as a compiler barrier.
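
These barrier contracts exist for reference counting: the release barrier on the decrement keeps all writes to a shared object ordered before the count reaches zero, and the acquire barrier afterwards makes those writes visible to whichever thread deletes it. A toy sketch of the consuming pattern (not Skia's actual SkRefCnt), assuming the sk_atomic_inc declared above:

class ToyRefCnt {
public:
    ToyRefCnt() : fRefCnt(1) {}
    void ref() { sk_atomic_inc(&fRefCnt); }
    void unref() {
        // sk_atomic_dec returns the old value, so 1 means we were the last owner.
        if (1 == sk_atomic_dec(&fRefCnt)) {
            // Pairs with the release semantics of the decrement above.
            sk_membar_acquire__after_atomic_dec();
            delete this;
        }
    }
private:
    int32_t fRefCnt;
};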


@@ -24,18 +24,10 @@ static inline __attribute__((always_inline)) int32_t sk_atomic_add(int32_t* addr
     return __sync_fetch_and_add(addr, inc);
 }
 
-static inline __attribute__((always_inline)) int64_t sk_atomic_add(int64_t* addr, int64_t inc) {
-    return __sync_fetch_and_add(addr, inc);
-}
-
 static inline __attribute__((always_inline)) int32_t sk_atomic_dec(int32_t* addr) {
     return __sync_fetch_and_add(addr, -1);
 }
 
-static inline __attribute__((always_inline)) int64_t sk_atomic_dec(int64_t* addr) {
-    return __sync_fetch_and_add(addr, -1);
-}
-
 static inline __attribute__((always_inline)) void sk_membar_acquire__after_atomic_dec() { }
 
 static inline __attribute__((always_inline)) bool sk_atomic_cas(int32_t* addr,
@@ -44,12 +36,6 @@ static inline __attribute__((always_inline)) bool sk_atomic_cas(int32_t* addr,
     return __sync_bool_compare_and_swap(addr, before, after);
 }
 
-static inline __attribute__((always_inline)) bool sk_atomic_cas(int64_t* addr,
-                                                                int64_t before,
-                                                                int64_t after) {
-    return __sync_bool_compare_and_swap(addr, before, after);
-}
-
 static inline __attribute__((always_inline)) void* sk_atomic_cas(void** addr,
                                                                  void* before,
                                                                  void* after) {
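
Each of these __sync builtins is a full barrier, which is stronger than the release-only contract the header demands. In practice sk_atomic_cas is consumed in a retry loop; a hedged sketch using an invented helper name (sk_atomic_setmax is not a Skia function):

// Hypothetical helper: atomically raise *addr to at least `value`.
static void sk_atomic_setmax(int32_t* addr, int32_t value) {
    for (;;) {
        int32_t old = *addr;                   // racy read; the CAS below validates it
        if (old >= value) {
            return;                            // already at least `value`
        }
        if (sk_atomic_cas(addr, old, value)) {
            return;                            // installed the new maximum
        }
        // *addr changed between the read and the CAS; retry.
    }
}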


@@ -34,30 +34,17 @@ static inline int32_t sk_atomic_add(int32_t* addr, int32_t inc) {
     return _InterlockedExchangeAdd(reinterpret_cast<long*>(addr), static_cast<long>(inc));
 }
 
-static inline int64_t sk_atomic_add(int64_t* addr, int64_t inc) {
-    return InterlockedExchangeAdd64(addr, inc);
-}
-
 static inline int32_t sk_atomic_dec(int32_t* addr) {
     // InterlockedDecrement returns the new value, we want to return the old.
     return _InterlockedDecrement(reinterpret_cast<long*>(addr)) + 1;
 }
 
-static inline int64_t sk_atomic_dec(int64_t* addr) {
-    // InterlockedDecrement returns the new value, we want to return the old.
-    return InterlockedDecrement64(addr) + 1;
-}
-
 static inline void sk_membar_acquire__after_atomic_dec() { }
 
 static inline bool sk_atomic_cas(int32_t* addr, int32_t before, int32_t after) {
     return _InterlockedCompareExchange(reinterpret_cast<long*>(addr), after, before) == before;
 }
 
-static inline bool sk_atomic_cas(int64_t* addr, int64_t before, int64_t after) {
-    return _InterlockedCompareExchange64(addr, after, before) == before;
-}
-
 static inline void* sk_atomic_cas(void** addr, void* before, void* after) {
     return InterlockedCompareExchangePointer(addr, after, before);
 }
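
One convention worth noting across both ports: every hook returns the value from before the operation, which is why the Interlocked decrements, which report the new value, get a +1. A small single-threaded sanity sketch (the asserts are illustrative, not part of Skia):

#include <assert.h>
#include <stdint.h>

int main() {
    int32_t n = 5;
    assert(5 == sk_atomic_add(&n, 2));   // old value returned...
    assert(7 == n);                      // ...sum stored
    assert(7 == sk_atomic_dec(&n));      // old value again, despite Interlocked
    assert(6 == n);                      //   returning the new one internally
    assert(sk_atomic_cas(&n, 6, 42));    // succeeds: *addr == before
    assert(!sk_atomic_cas(&n, 6, 0));    // fails: *addr is now 42
    return 0;
}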