/*
 * Copyright 2012 Google Inc.
 *
 * Use of this source code is governed by a BSD-style license that can be
 * found in the LICENSE file.
 */
#ifndef SkThreadPool_DEFINED
|
|
|
|
#define SkThreadPool_DEFINED
|
|
|
|
|
|
|
|
#include "SkCondVar.h"
|
2013-10-10 18:49:04 +00:00
|
|
|
#include "SkRunnable.h"
|
2012-10-31 15:52:16 +00:00
|
|
|
#include "SkTDArray.h"
|
2012-12-03 14:54:59 +00:00
|
|
|
#include "SkTInternalLList.h"
|
2014-02-28 20:31:31 +00:00
|
|
|
#include "SkThreadUtils.h"
|
|
|
|
#include "SkTypes.h"
|
2012-10-31 15:52:16 +00:00
|
|
|
|
2014-02-28 20:31:31 +00:00
|
|
|
#if defined(SK_BUILD_FOR_UNIX) || defined(SK_BUILD_FOR_MAC) || defined(SK_BUILD_FOR_ANDROID)
|
|
|
|
# include <unistd.h>
|
|
|
|
#endif
|
2012-10-31 15:52:16 +00:00
|
|
|
|
2014-02-28 20:31:31 +00:00
|
|
|
// Returns the number of cores on this machine.
static inline int num_cores() {
#if defined(SK_BUILD_FOR_WIN32)
    // Windows: ask the OS via GetSystemInfo.
    SYSTEM_INFO info;
    GetSystemInfo(&info);
    return info.dwNumberOfProcessors;
#elif defined(SK_BUILD_FOR_UNIX) || defined(SK_BUILD_FOR_MAC) || defined(SK_BUILD_FOR_ANDROID)
    // POSIX: number of processors currently online.
    return (int) sysconf(_SC_NPROCESSORS_ONLN);
#else
    // Unknown platform: conservatively assume a single core.
    return 1;
#endif
}
|
2012-10-31 15:52:16 +00:00
|
|
|
|
2014-02-28 20:31:31 +00:00
|
|
|
template <typename T>
class SkTThreadPool {
public:
    /**
     * Create a threadpool with count threads, or one thread per core if kThreadPerCore.
     */
    static const int kThreadPerCore = -1;
    explicit SkTThreadPool(int count);
    ~SkTThreadPool();

    /**
     * Queues up an SkRunnable to run when a thread is available, or synchronously if count is 0.
     * Does not take ownership. NULL is a safe no-op. If T is not void, the runnable will be passed
     * a reference to a T on the thread's local stack.
     */
    void add(SkTRunnable<T>*);

    /**
     * Block until all added SkRunnables have completed. Once called, calling add() is undefined.
     */
    void wait();

private:
    // Intrusive-list node wrapping a queued runnable so it can live in fQueue.
    struct LinkedRunnable {
        SkTRunnable<T>* fRunnable;  // Unowned.
        SK_DECLARE_INTERNAL_LLIST_INTERFACE(LinkedRunnable);
    };

    // Pool lifecycle.  Transitions are one-way: running -> waiting -> halting.
    enum State {
        kRunning_State, // Normal case. We've been constructed and no one has called wait().
        kWaiting_State, // wait has been called, but there still might be work to do or being done.
        kHalting_State, // There's no work to do and no thread is busy. All threads can shut down.
    };

    // fQueue, fState, and fBusyThreads are only read/written while holding fReady's lock.
    // fThreads is only touched by the constructor and wait(), never by the worker threads.
    SkTInternalLList<LinkedRunnable> fQueue;       // Pending work; consumed from the tail.
    SkCondVar fReady;                              // Lock + condition variable guarding the above.
    SkTDArray<SkThread*> fThreads;                 // Owned worker threads.
    State fState;
    int fBusyThreads;                              // Count of threads currently running a runnable.

    static void Loop(void*);  // Static because we pass in this.
};
|
|
|
|
|
2014-02-28 20:31:31 +00:00
|
|
|
template <typename T>
|
|
|
|
SkTThreadPool<T>::SkTThreadPool(int count) : fState(kRunning_State), fBusyThreads(0) {
|
|
|
|
if (count < 0) {
|
|
|
|
count = num_cores();
|
|
|
|
}
|
|
|
|
// Create count threads, all running SkTThreadPool::Loop.
|
|
|
|
for (int i = 0; i < count; i++) {
|
|
|
|
SkThread* thread = SkNEW_ARGS(SkThread, (&SkTThreadPool::Loop, this));
|
|
|
|
*fThreads.append() = thread;
|
|
|
|
thread->start();
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
template <typename T>
|
|
|
|
SkTThreadPool<T>::~SkTThreadPool() {
|
|
|
|
if (kRunning_State == fState) {
|
|
|
|
this->wait();
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
namespace SkThreadPoolPrivate {
|
|
|
|
|
|
|
|
template <typename T>
|
|
|
|
struct ThreadLocal {
|
|
|
|
void run(SkTRunnable<T>* r) { r->run(data); }
|
|
|
|
T data;
|
|
|
|
};
|
|
|
|
|
|
|
|
template <>
|
|
|
|
struct ThreadLocal<void> {
|
|
|
|
void run(SkTRunnable<void>* r) { r->run(); }
|
|
|
|
};
|
|
|
|
|
|
|
|
} // namespace SkThreadPoolPrivate
|
|
|
|
|
|
|
|
template <typename T>
|
|
|
|
void SkTThreadPool<T>::add(SkTRunnable<T>* r) {
|
|
|
|
if (r == NULL) {
|
|
|
|
return;
|
|
|
|
}
|
|
|
|
|
|
|
|
if (fThreads.isEmpty()) {
|
|
|
|
SkThreadPoolPrivate::ThreadLocal<T> threadLocal;
|
|
|
|
threadLocal.run(r);
|
|
|
|
return;
|
|
|
|
}
|
|
|
|
|
|
|
|
LinkedRunnable* linkedRunnable = SkNEW(LinkedRunnable);
|
|
|
|
linkedRunnable->fRunnable = r;
|
|
|
|
fReady.lock();
|
|
|
|
SkASSERT(fState != kHalting_State); // Shouldn't be able to add work when we're halting.
|
|
|
|
fQueue.addToHead(linkedRunnable);
|
|
|
|
fReady.signal();
|
|
|
|
fReady.unlock();
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
|
|
template <typename T>
|
|
|
|
void SkTThreadPool<T>::wait() {
|
|
|
|
fReady.lock();
|
|
|
|
fState = kWaiting_State;
|
|
|
|
fReady.broadcast();
|
|
|
|
fReady.unlock();
|
|
|
|
|
|
|
|
// Wait for all threads to stop.
|
|
|
|
for (int i = 0; i < fThreads.count(); i++) {
|
|
|
|
fThreads[i]->join();
|
|
|
|
SkDELETE(fThreads[i]);
|
|
|
|
}
|
|
|
|
SkASSERT(fQueue.isEmpty());
|
|
|
|
}
|
|
|
|
|
|
|
|
// Worker-thread entry point.  Pulls runnables off the tail of fQueue until the
// pool moves into kHalting_State.  The halting decision is two-phase: a thread
// that sees (waiting && queue empty && no busy threads) flips the pool to
// halting and broadcasts, so every worker (including itself) exits.  Checking
// fBusyThreads matters because a busy runnable may still call add().
template <typename T>
/*static*/ void SkTThreadPool<T>::Loop(void* arg) {
    // The SkTThreadPool passes itself as arg to each thread as they're created.
    SkTThreadPool<T>* pool = static_cast<SkTThreadPool<T>*>(arg);
    SkThreadPoolPrivate::ThreadLocal<T> threadLocal;

    while (true) {
        // We have to be holding the lock to read the queue and to call wait.
        pool->fReady.lock();
        while(pool->fQueue.isEmpty()) {
            // Does the client want to stop and are all the threads ready to stop?
            // If so, we move into the halting state, and whack all the threads so they notice.
            if (kWaiting_State == pool->fState && pool->fBusyThreads == 0) {
                pool->fState = kHalting_State;
                pool->fReady.broadcast();
            }
            // Any time we find ourselves in the halting state, it's quitting time.
            if (kHalting_State == pool->fState) {
                pool->fReady.unlock();
                return;
            }
            // wait yields the lock while waiting, but will have it again when awoken.
            pool->fReady.wait();
        }
        // We've got the lock back here, no matter if we ran wait or not.

        // The queue is not empty, so we have something to run. Claim it.
        LinkedRunnable* r = pool->fQueue.tail();

        pool->fQueue.remove(r);

        // Having claimed our SkRunnable, we now give up the lock while we run it.
        // Otherwise, we'd only ever do work on one thread at a time, which rather
        // defeats the point of this code.
        pool->fBusyThreads++;  // Marks us "busy" so no one halts the pool under us.
        pool->fReady.unlock();

        // OK, now really do the work.
        threadLocal.run(r->fRunnable);
        SkDELETE(r);  // We own the node (allocated in add()), not the runnable.

        // Let everyone know we're not busy.
        pool->fReady.lock();
        pool->fBusyThreads--;
        pool->fReady.unlock();
    }

    SkASSERT(false); // Unreachable. The only exit happens when pool->fState is kHalting_State.
}
|
|
|
|
|
|
|
|
// The common case: a pool whose runnables need no per-thread local data.
typedef SkTThreadPool<void> SkThreadPool;
|
|
|
|
|
2012-10-31 15:52:16 +00:00
|
|
|
#endif
|