replace SkSharedMutex

I am debugging an issue with SkSharedMutex and noticed
how sparsely it is used.  That got me curious to see if
we can replace it with a std::shared_mutex (from C++17).

Bug: skia:10177
Change-Id: I1ce4d2a5897af198d6ae5fb850548ff917a58f50
Reviewed-on: https://skia-review.googlesource.com/c/skia/+/285691
Reviewed-by: Ben Wagner <bungeman@google.com>
Commit-Queue: Mike Klein <mtklein@google.com>
This commit is contained in:
Mike Klein 2020-04-27 12:46:40 -05:00 committed by Skia Commit-Bot
parent a9379fb8ed
commit cfdc07aa0e
7 changed files with 6 additions and 554 deletions

View File

@ -8,7 +8,6 @@
#include "include/core/SkString.h"
#include "include/private/SkMutex.h"
#include "include/private/SkSpinlock.h"
#include "src/core/SkSharedMutex.h"
template <typename Mutex>
class MutexBench : public Benchmark {
@ -36,32 +35,5 @@ private:
Mutex fMu;
};
// Measures the uncontended cost of taking and dropping SkSharedMutex in
// shared (reader) mode from a single thread.
class SharedBench : public Benchmark {
public:
    bool isSuitableFor(Backend backend) override {
        // Pure CPU benchmark; needs no rendering backend.
        return backend == kNonRendering_Backend;
    }

protected:
    const char* onGetName() override {
        return "SkSharedMutexSharedUncontendedBenchmark";
    }

    // Each iteration acquires and releases the shared lock exactly once.
    void onDraw(int loops, SkCanvas*) override {
        for (int i = 0; i < loops; i++) {
            fMu.acquireShared();
            fMu.releaseShared();
        }
    }

private:
    typedef Benchmark INHERITED;
    SkSharedMutex fMu;
};
///////////////////////////////////////////////////////////////////////////////

// Register an uncontended exclusive lock/unlock benchmark for each mutex
// flavor, plus the shared-mode benchmark above.
DEF_BENCH( return new MutexBench<SkSharedMutex>(SkString("SkSharedMutex")); )
DEF_BENCH( return new MutexBench<SkMutex>(SkString("SkMutex")); )
DEF_BENCH( return new MutexBench<SkSpinlock>(SkString("SkSpinlock")); )
DEF_BENCH( return new SharedBench; )

View File

@ -333,8 +333,6 @@ skia_core_sources = [
"$_src/core/SkScan_Path.cpp",
"$_src/core/SkScopeExit.h",
"$_src/core/SkSemaphore.cpp",
"$_src/core/SkSharedMutex.cpp",
"$_src/core/SkSharedMutex.h",
"$_src/core/SkSpan.h",
"$_src/core/SkSpecialImage.cpp",
"$_src/core/SkSpecialImage.h",

View File

@ -262,7 +262,6 @@ tests_sources = [
"$_tests/SkSLSPIRVTest.cpp",
"$_tests/SkScalerCacheTest.cpp",
"$_tests/SkShaperJSONWriterTest.cpp",
"$_tests/SkSharedMutexTest.cpp",
"$_tests/SkStrikeCacheTest.cpp",
"$_tests/SkUTFTest.cpp",
"$_tests/SkVMTest.cpp",

View File

@ -1,358 +0,0 @@
/*
* Copyright 2015 Google Inc.
*
* Use of this source code is governed by a BSD-style license that can be
* found in the LICENSE file.
*/
#include "src/core/SkSharedMutex.h"
#include "include/core/SkTypes.h"
#include "include/private/SkSemaphore.h"
// Under ThreadSanitizer, report reader/writer lock events through TSAN's
// dynamic annotation hooks so it can model this custom lock; in all other
// builds the ANNOTATE_RWLOCK_* macros expand to nothing.
#if !defined(__has_feature)
#define __has_feature(x) 0
#endif

#if __has_feature(thread_sanitizer)

/* Report that a lock has been created at address "lock". */
#define ANNOTATE_RWLOCK_CREATE(lock) \
    AnnotateRWLockCreate(__FILE__, __LINE__, lock)

/* Report that the lock at address "lock" is about to be destroyed. */
#define ANNOTATE_RWLOCK_DESTROY(lock) \
    AnnotateRWLockDestroy(__FILE__, __LINE__, lock)

/* Report that the lock at address "lock" has been acquired.
   is_w=1 for writer lock, is_w=0 for reader lock. */
#define ANNOTATE_RWLOCK_ACQUIRED(lock, is_w) \
    AnnotateRWLockAcquired(__FILE__, __LINE__, lock, is_w)

/* Report that the lock at address "lock" is about to be released. */
#define ANNOTATE_RWLOCK_RELEASED(lock, is_w) \
    AnnotateRWLockReleased(__FILE__, __LINE__, lock, is_w)

#if defined(DYNAMIC_ANNOTATIONS_WANT_ATTRIBUTE_WEAK)
#if defined(__GNUC__)
#define DYNAMIC_ANNOTATIONS_ATTRIBUTE_WEAK __attribute__((weak))
#else
/* TODO(glider): for Windows support we may want to change this macro in order
   to prepend __declspec(selectany) to the annotations' declarations. */
#error weak annotations are not supported for your compiler
#endif
#else
#define DYNAMIC_ANNOTATIONS_ATTRIBUTE_WEAK
#endif

/* Hook declarations; optionally weak so builds can link even when the
   annotation runtime does not supply them. */
extern "C" {
void AnnotateRWLockCreate(
    const char *file, int line,
    const volatile void *lock) DYNAMIC_ANNOTATIONS_ATTRIBUTE_WEAK;
void AnnotateRWLockDestroy(
    const char *file, int line,
    const volatile void *lock) DYNAMIC_ANNOTATIONS_ATTRIBUTE_WEAK;
void AnnotateRWLockAcquired(
    const char *file, int line,
    const volatile void *lock, long is_w) DYNAMIC_ANNOTATIONS_ATTRIBUTE_WEAK;
void AnnotateRWLockReleased(
    const char *file, int line,
    const volatile void *lock, long is_w) DYNAMIC_ANNOTATIONS_ATTRIBUTE_WEAK;
}

#else

#define ANNOTATE_RWLOCK_CREATE(lock)
#define ANNOTATE_RWLOCK_DESTROY(lock)
#define ANNOTATE_RWLOCK_ACQUIRED(lock, is_w)
#define ANNOTATE_RWLOCK_RELEASED(lock, is_w)

#endif
#ifdef SK_DEBUG
#include "include/private/SkTDArray.h"
#include "include/private/SkThreadID.h"
// Debug-only helper: a small, unordered set of thread ids used to validate
// lock usage (double-acquire, release-without-acquire, and the like).
class SkSharedMutex::ThreadIDSet {
public:
    // Returns true if threadID is in the set.
    bool find(SkThreadID threadID) const {
        for (int i = 0; i < fThreadIDs.count(); ++i) {
            if (fThreadIDs[i] == threadID) {
                return true;
            }
        }
        return false;
    }

    // Adds threadID; returns true if it was not already present.
    bool tryAdd(SkThreadID threadID) {
        if (this->find(threadID)) {
            return false;
        }
        fThreadIDs.append(1, &threadID);
        return true;
    }

    // Removes threadID; returns true if it was present.
    bool tryRemove(SkThreadID threadID) {
        const int n = fThreadIDs.count();
        for (int index = 0; index < n; ++index) {
            if (fThreadIDs[index] == threadID) {
                fThreadIDs.remove(index);
                return true;
            }
        }
        return false;
    }

    void swap(ThreadIDSet& other) {
        fThreadIDs.swap(other.fThreadIDs);
    }

    int count() const {
        return fThreadIDs.count();
    }

private:
    SkTDArray<SkThreadID> fThreadIDs;
};
// All three id sets start empty: no thread holds or waits for the lock.
SkSharedMutex::SkSharedMutex()
    : fCurrentShared(new ThreadIDSet)
    , fWaitingExclusive(new ThreadIDSet)
    , fWaitingShared(new ThreadIDSet){
    ANNOTATE_RWLOCK_CREATE(this);
}

SkSharedMutex::~SkSharedMutex() {  ANNOTATE_RWLOCK_DESTROY(this); }
// Acquire the lock for exclusive (writer) use.
//
// Under fMu, the calling thread registers itself as a waiting exclusive; if
// any readers are running, or another writer holds/waits for the lock, it
// then blocks on fExclusiveQueue until release()/releaseShared() signals it.
void SkSharedMutex::acquire() {
    SkThreadID threadID(SkGetThreadID());
    int currentSharedCount;
    int waitingExclusiveCount;
    {
        SkAutoMutexExclusive l(fMu);

        // A thread may not take the exclusive lock while it already holds the
        // shared lock; that would self-deadlock.
        SkASSERTF(!fCurrentShared->find(threadID),
                  "Thread %lx already has a shared lock\n", threadID);

        if (!fWaitingExclusive->tryAdd(threadID)) {
            SkDEBUGFAILF("Thread %lx already has an exclusive lock\n", threadID);
        }

        currentSharedCount = fCurrentShared->count();
        waitingExclusiveCount = fWaitingExclusive->count();
    }

    // waitingExclusiveCount includes this thread, so > 1 means another writer
    // is ahead of us.
    if (currentSharedCount > 0 || waitingExclusiveCount > 1) {
        fExclusiveQueue.wait();
    }
    ANNOTATE_RWLOCK_ACQUIRED(this, 1);
}
// Implementation Detail:
// The shared threads need two separate queues to keep the threads that were
// added after the exclusive lock separate from the threads added before.

// Release the exclusive (writer) lock, waking either all queued readers or,
// if none queued, one waiting writer.
void SkSharedMutex::release() {
    ANNOTATE_RWLOCK_RELEASED(this, 1);
    SkThreadID threadID(SkGetThreadID());
    int sharedWaitingCount;
    int exclusiveWaitingCount;
    int sharedQueueSelect;
    {
        SkAutoMutexExclusive l(fMu);
        // No reader may be running while a writer holds the lock.
        SkASSERT(0 == fCurrentShared->count());
        if (!fWaitingExclusive->tryRemove(threadID)) {
            SkDEBUGFAILF("Thread %lx did not have the lock held.\n", threadID);
        }
        exclusiveWaitingCount = fWaitingExclusive->count();
        sharedWaitingCount = fWaitingShared->count();
        // Promote all parked readers to "current"; they run next.
        fWaitingShared.swap(fCurrentShared);
        sharedQueueSelect = fSharedQueueSelect;
        if (sharedWaitingCount > 0) {
            // Flip the queue selector so readers arriving after this release
            // park on the other semaphore.
            fSharedQueueSelect = 1 - fSharedQueueSelect;
        }
    }
    // Queued readers take priority over the next writer.
    if (sharedWaitingCount > 0) {
        fSharedQueue[sharedQueueSelect].signal(sharedWaitingCount);
    } else if (exclusiveWaitingCount > 0) {
        fExclusiveQueue.signal();
    }
}
// Assert that the calling thread holds the exclusive lock.
void SkSharedMutex::assertHeld() const {
    SkThreadID threadID(SkGetThreadID());
    SkAutoMutexExclusive l(fMu);
    // An exclusive holder excludes all readers, and it stays in the
    // exclusive set (acquire() adds it; release() removes it).
    SkASSERT(0 == fCurrentShared->count());
    SkASSERT(fWaitingExclusive->find(threadID));
}
// Acquire a shared (reader) lock.
//
// If a writer is waiting, the new reader parks in fWaitingShared and blocks
// on the currently-selected shared semaphore; otherwise it runs immediately.
void SkSharedMutex::acquireShared() {
    SkThreadID threadID(SkGetThreadID());
    int exclusiveWaitingCount;
    int sharedQueueSelect;
    {
        SkAutoMutexExclusive l(fMu);
        exclusiveWaitingCount = fWaitingExclusive->count();
        if (exclusiveWaitingCount > 0) {
            // Defer to the pending writer: park instead of running.
            if (!fWaitingShared->tryAdd(threadID)) {
                SkDEBUGFAILF("Thread %lx was already waiting!\n", threadID);
            }
        } else {
            if (!fCurrentShared->tryAdd(threadID)) {
                SkDEBUGFAILF("Thread %lx already holds a shared lock!\n", threadID);
            }
        }
        // Snapshot the queue selector; release() may flip it once fMu drops.
        sharedQueueSelect = fSharedQueueSelect;
    }
    if (exclusiveWaitingCount > 0) {
        fSharedQueue[sharedQueueSelect].wait();
    }
    ANNOTATE_RWLOCK_ACQUIRED(this, 0);
}
// Release a shared (reader) lock.
void SkSharedMutex::releaseShared() {
    ANNOTATE_RWLOCK_RELEASED(this, 0);
    SkThreadID threadID(SkGetThreadID());
    int currentSharedCount;
    int waitingExclusiveCount;
    {
        SkAutoMutexExclusive l(fMu);
        if (!fCurrentShared->tryRemove(threadID)) {
            SkDEBUGFAILF("Thread %lx does not hold a shared lock.\n", threadID);
        }
        currentSharedCount = fCurrentShared->count();
        waitingExclusiveCount = fWaitingExclusive->count();
    }
    // The last reader out wakes a single waiting writer.
    if (0 == currentSharedCount && waitingExclusiveCount > 0) {
        fExclusiveQueue.signal();
    }
}
// Assert that the calling thread holds a shared (reader) lock.
void SkSharedMutex::assertHeldShared() const {
    SkThreadID threadID(SkGetThreadID());
    SkAutoMutexExclusive l(fMu);
    SkASSERT(fCurrentShared->find(threadID));
}
#else
// The fQueueCounts field packs three counts into a single int32_t so all of
// them can be updated in one atomic operation. Each count gets
// kLogThreadCount (10) bits, so each can represent up to 2^10 = 1024 threads.
//
// The three counts held in fQueueCounts are:
// * Shared - the number of shared lock holders currently running.
// * WaitingExclusive - the number of threads waiting for an exclusive lock.
// * WaitingShared - the number of threads waiting to run while waiting for an
//   exclusive thread to finish.
static const int kLogThreadCount = 10;

enum {
    // Note: "Exlusive" misspelling is retained in the offset name; both
    // spellings below refer to the same 10-bit field.
    kSharedOffset          = (0 * kLogThreadCount),
    kWaitingExlusiveOffset = (1 * kLogThreadCount),
    kWaitingSharedOffset   = (2 * kLogThreadCount),
    kSharedMask            = ((1 << kLogThreadCount) - 1) << kSharedOffset,
    kWaitingExclusiveMask  = ((1 << kLogThreadCount) - 1) << kWaitingExlusiveOffset,
    kWaitingSharedMask     = ((1 << kLogThreadCount) - 1) << kWaitingSharedOffset,
};

// All counts start at zero: no holders, no waiters.
SkSharedMutex::SkSharedMutex() : fQueueCounts(0) { ANNOTATE_RWLOCK_CREATE(this); }
SkSharedMutex::~SkSharedMutex() { ANNOTATE_RWLOCK_DESTROY(this); }
// Acquire the exclusive (writer) lock (lock-free bookkeeping; may block on
// fExclusiveQueue).
void SkSharedMutex::acquire() {
    // Increment the count of exclusive queue waiters.
    int32_t oldQueueCounts = fQueueCounts.fetch_add(1 << kWaitingExlusiveOffset,
                                                    std::memory_order_acquire);

    // If there are no other exclusive waiters and no shared threads are running
    // then run, else wait to be signaled by a release.
    if ((oldQueueCounts & kWaitingExclusiveMask) > 0 || (oldQueueCounts & kSharedMask) > 0) {
        fExclusiveQueue.wait();
    }
    ANNOTATE_RWLOCK_ACQUIRED(this, 1);
}
// Release the exclusive (writer) lock.
//
// A single CAS loop both decrements the writer count and, if readers queued
// up while this writer ran, converts all of them to running readers.
void SkSharedMutex::release() {
    ANNOTATE_RWLOCK_RELEASED(this, 1);
    int32_t oldQueueCounts = fQueueCounts.load(std::memory_order_relaxed);
    int32_t waitingShared;
    int32_t newQueueCounts;
    do {
        newQueueCounts = oldQueueCounts;

        // Decrement exclusive waiters.
        newQueueCounts -= 1 << kWaitingExlusiveOffset;

        // The number of threads waiting to acquire a shared lock.
        waitingShared = (oldQueueCounts & kWaitingSharedMask) >> kWaitingSharedOffset;

        // If there are any, move the counts of all the shared waiters to actual
        // shared. They are going to run next.
        if (waitingShared > 0) {
            // Set waiting shared to zero.
            newQueueCounts &= ~kWaitingSharedMask;

            // Because this is the exclusive release, there are zero readers. So, the bits
            // for shared locks should be zero. Since those bits are zero, we can just |= in the
            // waitingShared count instead of clearing with an &= and then |= the count.
            newQueueCounts |= waitingShared << kSharedOffset;
        }
    } while (!fQueueCounts.compare_exchange_strong(oldQueueCounts, newQueueCounts,
                                                   std::memory_order_release,
                                                   std::memory_order_relaxed));

    if (waitingShared > 0) {
        // Run all the shared.
        fSharedQueue.signal(waitingShared);
    } else if ((newQueueCounts & kWaitingExclusiveMask) > 0) {
        // Run a single exclusive waiter.
        fExclusiveQueue.signal();
    }
}
// Acquire a shared (reader) lock.
void SkSharedMutex::acquireShared() {
    int32_t oldQueueCounts = fQueueCounts.load(std::memory_order_relaxed);
    int32_t newQueueCounts;
    do {
        newQueueCounts = oldQueueCounts;
        // If there are waiting exclusives then this shared lock waits, else it runs.
        if ((newQueueCounts & kWaitingExclusiveMask) > 0) {
            newQueueCounts += 1 << kWaitingSharedOffset;
        } else {
            newQueueCounts += 1 << kSharedOffset;
        }
    } while (!fQueueCounts.compare_exchange_strong(oldQueueCounts, newQueueCounts,
                                                   std::memory_order_acquire,
                                                   std::memory_order_relaxed));

    // If a writer was pending, this reader registered as "waiting shared" above
    // and must block until the writer's release() promotes and signals it.
    if ((newQueueCounts & kWaitingExclusiveMask) > 0) {
        fSharedQueue.wait();
    }
    ANNOTATE_RWLOCK_ACQUIRED(this, 0);
}
// Release a shared (reader) lock.
void SkSharedMutex::releaseShared() {
    ANNOTATE_RWLOCK_RELEASED(this, 0);

    // Decrement the shared count.
    int32_t oldQueueCounts = fQueueCounts.fetch_sub(1 << kSharedOffset,
                                                    std::memory_order_release);

    // If the shared count is going to zero (because the old count == 1) and
    // there are exclusive waiters, then run a single exclusive waiter.
    if (((oldQueueCounts & kSharedMask) >> kSharedOffset) == 1
        && (oldQueueCounts & kWaitingExclusiveMask) > 0) {
        fExclusiveQueue.signal();
    }
}
#endif

View File

@ -1,106 +0,0 @@
/*
* Copyright 2015 Google Inc.
*
* Use of this source code is governed by a BSD-style license that can be
* found in the LICENSE file.
*/
#ifndef SkSharedLock_DEFINED
#define SkSharedLock_DEFINED
#include "include/core/SkTypes.h"
#include "include/private/SkMacros.h"
#include "include/private/SkSemaphore.h"
#include "include/private/SkThreadAnnotations.h"
#include <atomic>
#ifdef SK_DEBUG
#include "include/private/SkMutex.h"
#include <memory>
#endif // SK_DEBUG
// There are two shared lock implementations: one for debug builds and one
// high-performance version. Both implement an interface similar to pthread's
// rwlocks. The high-performance implementation is cribbed from Preshing's article:
// http://preshing.com/20150316/semaphores-are-surprisingly-versatile/
//
// This lock does not obey strict queue ordering. It will always alternate between readers and
// a single writer.
class SK_CAPABILITY("mutex") SkSharedMutex {
public:
    SkSharedMutex();
    ~SkSharedMutex();

    // Acquire lock for exclusive use.
    void acquire() SK_ACQUIRE();

    // Release lock for exclusive use.
    void release() SK_RELEASE_CAPABILITY();

    // Fail if exclusive is not held.
    void assertHeld() const SK_ASSERT_CAPABILITY(this);

    // Acquire lock for shared use.
    void acquireShared() SK_ACQUIRE_SHARED();

    // Release lock for shared use.
    void releaseShared() SK_RELEASE_SHARED_CAPABILITY();

    // Fail if shared lock not held.
    void assertHeldShared() const SK_ASSERT_SHARED_CAPABILITY(this);

private:
#ifdef SK_DEBUG
    // Debug build tracks the exact thread ids in each state so that misuse
    // (double acquire, wrong-mode release) can be asserted on.
    class ThreadIDSet;
    std::unique_ptr<ThreadIDSet> fCurrentShared;     // readers currently running
    std::unique_ptr<ThreadIDSet> fWaitingExclusive;  // writers waiting or holding
    std::unique_ptr<ThreadIDSet> fWaitingShared;     // readers parked behind a writer
    int fSharedQueueSelect{0};                       // which fSharedQueue entry new waiters park on
    mutable SkMutex fMu;                             // guards all of the above
    SkSemaphore fSharedQueue[2];
    SkSemaphore fExclusiveQueue;
#else
    // Release build packs three 10-bit counts into a single atomic; see the
    // implementation file for the layout.
    std::atomic<int32_t> fQueueCounts;
    SkSemaphore fSharedQueue;
    SkSemaphore fExclusiveQueue;
#endif  // SK_DEBUG
};
#ifndef SK_DEBUG
// Release builds keep no ownership records, so the assertions are no-ops.
// (No trailing semicolons: `{};` is an empty declaration and trips -Wextra-semi.)
inline void SkSharedMutex::assertHeld() const {}
inline void SkSharedMutex::assertHeldShared() const {}
#endif  // SK_DEBUG
// RAII guard: holds the exclusive (writer) lock for the lifetime of the object.
class SK_SCOPED_CAPABILITY SkAutoSharedMutexExclusive {
public:
    explicit SkAutoSharedMutexExclusive(SkSharedMutex& lock) SK_ACQUIRE(lock)
            : fLock(lock) {
        lock.acquire();
    }
    ~SkAutoSharedMutexExclusive() SK_RELEASE_CAPABILITY() { fLock.release(); }

private:
    SkSharedMutex& fLock;
};

// Guard against `SkAutoSharedMutexExclusive(mu);` with no variable name — an
// unnamed temporary would release the lock immediately. SK_REQUIRE_LOCAL_VAR
// appears intended to turn that misuse into a compile error (see SkMacros.h).
#define SkAutoSharedMutexExclusive(...) SK_REQUIRE_LOCAL_VAR(SkAutoSharedMutexExclusive)
// RAII guard: holds a shared (reader) lock for the lifetime of the object.
class SK_SCOPED_CAPABILITY SkAutoSharedMutexShared {
public:
    explicit SkAutoSharedMutexShared(SkSharedMutex& lock) SK_ACQUIRE_SHARED(lock)
            : fLock(lock) {
        lock.acquireShared();
    }

    // You would think this should be SK_RELEASE_SHARED_CAPABILITY, but SK_SCOPED_CAPABILITY
    // doesn't fully understand the difference between shared and exclusive.
    // Please review https://reviews.llvm.org/D52578 for more information.
    ~SkAutoSharedMutexShared() SK_RELEASE_CAPABILITY() { fLock.releaseShared(); }

private:
    SkSharedMutex& fLock;
};

// Guard against `SkAutoSharedMutexShared(mu);` with no variable name — an
// unnamed temporary would release the lock immediately. SK_REQUIRE_LOCAL_VAR
// appears intended to turn that misuse into a compile error (see SkMacros.h).
#define SkAutoSharedMutexShared(...) SK_REQUIRE_LOCAL_VAR(SkAutoSharedMutexShared)
#endif // SkSharedLock_DEFINED

View File

@ -22,7 +22,6 @@
#include "src/core/SkMaskGamma.h"
#include "src/core/SkRasterClip.h"
#include "src/core/SkScalerContext.h"
#include "src/core/SkSharedMutex.h"
#include "src/ports/SkScalerContext_win_dw.h"
#include "src/ports/SkTypeface_win_dw.h"
#include "src/sfnt/SkOTTable_EBLC.h"
@ -38,19 +37,21 @@
#include <dwrite.h>
#include <dwrite_1.h>
#include <dwrite_3.h>
#include <mutex>
#include <shared_mutex>
/* Note:
* In versions 8 and 8.1 of Windows, some calls in DWrite are not thread safe.
* The mutex returned from get_dwrite_factory_mutex() protects the calls that are
* problematic.
*/
static SkSharedMutex& get_dwrite_factory_mutex() {
static SkSharedMutex mutex;
static std::shared_mutex& get_dwrite_factory_mutex() {
static std::shared_mutex mutex;
return mutex;
}
typedef SkAutoSharedMutexExclusive Exclusive;
typedef SkAutoSharedMutexShared Shared;
typedef std::unique_lock<std::shared_mutex> Exclusive;
typedef std::shared_lock<std::shared_mutex> Shared;
static bool isLCD(const SkScalerContextRec& rec) {
return SkMask::kLCD16_Format == rec.fMaskFormat;

View File

@ -1,54 +0,0 @@
/*
* Copyright 2015 Google Inc.
*
* Use of this source code is governed by a BSD-style license that can be
* found in the LICENSE file.
*/
#include "src/core/SkSharedMutex.h"
#include "src/core/SkTaskGroup.h"
#include "tests/Test.h"
// Smoke test: one exclusive and one shared acquire/release cycle on a single
// thread, with the matching assertHeld checks in between.
DEF_TEST(SkSharedMutexBasic, r) {
    SkSharedMutex sm;
    sm.acquire();
    sm.assertHeld();
    sm.release();
    sm.acquireShared();
    sm.assertHeldShared();
    sm.releaseShared();
}
// Stress test with 8 threads: indices 0 and 4 are writers, the other six are
// readers. Writers bump `value` and copy it into every slot of `shared` under
// the exclusive lock; readers verify under the shared lock that all slots are
// equal. A torn or unprotected write would trip the reader's REPORTER_ASSERT.
DEF_TEST(SkSharedMutexMultiThreaded, r) {
    SkSharedMutex sm;
    static const int kSharedSize = 10;
    int shared[kSharedSize];
    int value = 0;
    for (int i = 0; i < kSharedSize; ++i) {
        shared[i] = 0;
    }
    SkTaskGroup().batch(8, [&](int threadIndex) {
        if (threadIndex % 4 != 0) {
            // Reader thread: all elements must agree while the shared lock is held.
            for (int c = 0; c < 100000; ++c) {
                sm.acquireShared();
                sm.assertHeldShared();
                int v = shared[0];
                for (int i = 1; i < kSharedSize; ++i) {
                    REPORTER_ASSERT(r, v == shared[i]);
                }
                sm.releaseShared();
            }
        } else {
            // Writer thread: rewrite every element under the exclusive lock.
            for (int c = 0; c < 100000; ++c) {
                sm.acquire();
                sm.assertHeld();
                value += 1;
                for (int i = 0; i < kSharedSize; ++i) {
                    shared[i] = value;
                }
                sm.release();
            }
        }
    });
}