skia2/include/core/SkThread.h
bungeman@google.com d9947f605a Split atomic and mutex implementations and make inlinable.
Skia cannot use Chromium's implementation of mutex (Lock) due to static
initializers. However, we would like to be able to use Chromium's
implementation of atomics. This motivates the split of implementation.

Skia's atomic and mutex calls should be inlinable, especially the atomics.
These calls often compile down to very few instructions, and we currently have
the overhead of a function call. This motivates the header implementation.

There is still a desire for the build system to select the implementation, so
the SK_XXX_PLATFORM_H pattern for header files is introduced. This allows the
build system to control which platform-specific header files are chosen.
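
As a sketch of the mechanism (the flag syntax and the SkAtomics_sync.h /
SkMutex_pthread.h file names below are illustrative, not a claim about which
ports this change adds), the build could pass something like

    -DSK_ATOMICS_PLATFORM_H="\"SkAtomics_sync.h\""
    -DSK_MUTEX_PLATFORM_H="\"SkMutex_pthread.h\""

so that the header's

    #include SK_ATOMICS_PLATFORM_H
    #include SK_MUTEX_PLATFORM_H

pull in whichever implementation the build system selected.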

The Chromium side changes (most of which will need to go in before this change)
can be found at https://codereview.chromium.org/19477005/ .
The Chromium side changes after this lands can be seen at 
https://codereview.chromium.org/98073013 .

Review URL: https://codereview.chromium.org/19808007

git-svn-id: http://skia.googlecode.com/svn/trunk@12738 2bbb7eff-a529-9590-31e7-b0007b416f81
2013-12-18 15:27:39 +00:00


/*
 * Copyright 2006 The Android Open Source Project
 *
 * Use of this source code is governed by a BSD-style license that can be
 * found in the LICENSE file.
 */

#ifndef SkThread_DEFINED
#define SkThread_DEFINED

#include "SkTypes.h"

// SK_ATOMICS_PLATFORM_H must provide inline implementations for the following declarations.

/** Atomically adds one to the int referenced by addr and returns the previous value.
 *  No additional memory barrier is required; this must act as a compiler barrier.
 */
static int32_t sk_atomic_inc(int32_t* addr);

/** Atomically adds inc to the int referenced by addr and returns the previous value.
 *  No additional memory barrier is required; this must act as a compiler barrier.
 */
static int32_t sk_atomic_add(int32_t* addr, int32_t inc);

/** Atomically subtracts one from the int referenced by addr and returns the previous value.
 *  This must act as a release (SL/S) memory barrier and as a compiler barrier.
 */
static int32_t sk_atomic_dec(int32_t* addr);

/** Atomically adds one to the int referenced by addr iff the referenced int was not 0
 *  and returns the previous value.
 *  No additional memory barrier is required; this must act as a compiler barrier.
 */
static int32_t sk_atomic_conditional_inc(int32_t* addr);

/** If sk_atomic_dec does not act as an acquire (L/SL) barrier,
 *  this must act as an acquire (L/SL) memory barrier and as a compiler barrier.
 */
static void sk_membar_acquire__after_atomic_dec();

/** If sk_atomic_conditional_inc does not act as an acquire (L/SL) barrier,
 *  this must act as an acquire (L/SL) memory barrier and as a compiler barrier.
 */
static void sk_membar_acquire__after_atomic_conditional_inc();
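
/* For illustration only: a sketch of what a GCC/Clang port of SK_ATOMICS_PLATFORM_H could
 * look like, built on the __sync intrinsics. This is an assumption about one possible port,
 * not necessarily the implementation any platform header ships.
 *
 *   static inline int32_t sk_atomic_inc(int32_t* addr) {
 *       return __sync_fetch_and_add(addr, 1);         // full barrier, returns the old value
 *   }
 *   static inline int32_t sk_atomic_add(int32_t* addr, int32_t inc) {
 *       return __sync_fetch_and_add(addr, inc);
 *   }
 *   static inline int32_t sk_atomic_dec(int32_t* addr) {
 *       return __sync_fetch_and_add(addr, -1);        // full barrier, so release is covered
 *   }
 *   static inline int32_t sk_atomic_conditional_inc(int32_t* addr) {
 *       int32_t value = *addr;
 *       while (true) {
 *           if (value == 0) {
 *               return 0;                             // do not revive a zero count
 *           }
 *           int32_t before = __sync_val_compare_and_swap(addr, value, value + 1);
 *           if (before == value) {
 *               return value;
 *           }
 *           value = before;                           // lost the race; retry with the new value
 *       }
 *   }
 *   // __sync operations are full barriers, so the membar hooks can be no-ops in this port.
 *   static inline void sk_membar_acquire__after_atomic_dec() {}
 *   static inline void sk_membar_acquire__after_atomic_conditional_inc() {}
 */
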
#include SK_ATOMICS_PLATFORM_H
/** SK_MUTEX_PLATFORM_H must provide the following (or equivalent) declarations.

    class SkBaseMutex {
    public:
        void acquire();
        void release();
    };

    class SkMutex : SkBaseMutex {
    public:
        SkMutex();
        ~SkMutex();
    };

    #define SK_DECLARE_STATIC_MUTEX(name) static SkBaseMutex name = ...
    #define SK_DECLARE_GLOBAL_MUTEX(name) SkBaseMutex name = ...
*/
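
/* For illustration only: a sketch of a pthread-based SK_MUTEX_PLATFORM_H. The point of
 * SkBaseMutex is that SK_DECLARE_STATIC_MUTEX can expand to aggregate initialization with
 * PTHREAD_MUTEX_INITIALIZER, so no static initializer (constructor) runs at load time.
 * This is an assumed shape, not necessarily the port that accompanies this header.
 *
 *   #include <pthread.h>
 *
 *   struct SkBaseMutex {
 *       void acquire() { pthread_mutex_lock(&fMutex); }
 *       void release() { pthread_mutex_unlock(&fMutex); }
 *       pthread_mutex_t fMutex;
 *   };
 *
 *   // A fully functional mutex for non-static use; the constructor/destructor manage the handle.
 *   class SkMutex : public SkBaseMutex {
 *   public:
 *       SkMutex() { pthread_mutex_init(&fMutex, NULL); }
 *       ~SkMutex() { pthread_mutex_destroy(&fMutex); }
 *   };
 *
 *   #define SK_DECLARE_STATIC_MUTEX(name) static SkBaseMutex name = { PTHREAD_MUTEX_INITIALIZER }
 *   #define SK_DECLARE_GLOBAL_MUTEX(name) SkBaseMutex name = { PTHREAD_MUTEX_INITIALIZER }
 */
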
#include SK_MUTEX_PLATFORM_H
class SkAutoMutexAcquire : SkNoncopyable {
public:
    explicit SkAutoMutexAcquire(SkBaseMutex& mutex) : fMutex(&mutex) {
        SkASSERT(fMutex != NULL);
        mutex.acquire();
    }

    explicit SkAutoMutexAcquire(SkBaseMutex* mutex) : fMutex(mutex) {
        if (mutex) {
            mutex->acquire();
        }
    }

    /** If the mutex has not been released, release it now. */
    ~SkAutoMutexAcquire() {
        if (fMutex) {
            fMutex->release();
        }
    }

    /** If the mutex has not been released, release it now. */
    void release() {
        if (fMutex) {
            fMutex->release();
            fMutex = NULL;
        }
    }

private:
    SkBaseMutex* fMutex;
};
#define SkAutoMutexAcquire(...) SK_REQUIRE_LOCAL_VAR(SkAutoMutexAcquire)
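
/* For illustration only: typical usage of the declarations above (gCacheMutex and
 * touch_cache are hypothetical names, not part of this header).
 *
 *   SK_DECLARE_STATIC_MUTEX(gCacheMutex);      // no static initializer runs for this
 *
 *   void touch_cache() {
 *       SkAutoMutexAcquire lock(gCacheMutex);  // acquired here
 *       // ... mutate the shared cache ...
 *   }                                          // released when lock goes out of scope
 *
 * The macro above exists so that a misuse like SkAutoMutexAcquire(gCacheMutex); (an unnamed
 * temporary, which would release the mutex immediately) fails to compile.
 */
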
#endif