From 1d24eb20e77f0b291a6dcf6aa054b74f41fa1af2 Mon Sep 17 00:00:00 2001
From: "commit-bot@chromium.org" <commit-bot@chromium.org>
Date: Thu, 24 Apr 2014 15:33:54 +0000
Subject: [PATCH] teach TSAN about SkSpinlock, SkRefCnt, and SkOnce

BUG=skia:
R=bsalomon@google.com, bungeman@google.com, mtklein@google.com

Author: mtklein@chromium.org

Review URL: https://codereview.chromium.org/247813005

git-svn-id: http://skia.googlecode.com/svn/trunk@14353 2bbb7eff-a529-9590-31e7-b0007b416f81
---
 include/core/SkDynamicAnnotations.h | 14 ++++++++++++++
 include/core/SkOnce.h               |  4 ++++
 include/core/SkRefCnt.h             | 14 ++++++++++----
 3 files changed, 28 insertions(+), 4 deletions(-)

diff --git a/include/core/SkDynamicAnnotations.h b/include/core/SkDynamicAnnotations.h
index 6d21cddb94..e4493c8e65 100644
--- a/include/core/SkDynamicAnnotations.h
+++ b/include/core/SkDynamicAnnotations.h
@@ -19,6 +19,10 @@ extern "C" {
 // TSAN provides these hooks.
 void AnnotateIgnoreReadsBegin(const char* file, int line);
 void AnnotateIgnoreReadsEnd(const char* file, int line);
+void AnnotateHappensBefore(const char* file, int line, const volatile void* ptr);
+void AnnotateHappensAfter(const char* file, int line, const volatile void* ptr);
+void AnnotateRWLockAcquired(const char* file, int line, const volatile void* lock, long is_w);
+void AnnotateRWLockReleased(const char* file, int line, const volatile void* lock, long is_w);
 }  // extern "C"
 
 // SK_ANNOTATE_UNPROTECTED_READ can wrap any variable read to tell TSAN to ignore that it appears to
@@ -37,9 +41,19 @@ inline T SK_ANNOTATE_UNPROTECTED_READ(const volatile T& x) {
     return read;
 }
 
+#define SK_ANNOTATE_HAPPENS_BEFORE(obj) AnnotateHappensBefore(__FILE__, __LINE__, obj)
+#define SK_ANNOTATE_HAPPENS_AFTER(obj) AnnotateHappensAfter(__FILE__, __LINE__, obj)
+
+#define SK_ANNOTATE_RWLOCK_ACQUIRED(lock, w) AnnotateRWLockAcquired(__FILE__, __LINE__, lock, w)
+#define SK_ANNOTATE_RWLOCK_RELEASED(lock, w) AnnotateRWLockReleased(__FILE__, __LINE__, lock, w)
+
 #else  // !DYNAMIC_ANNOTATIONS_ENABLED
 
 #define SK_ANNOTATE_UNPROTECTED_READ(x) (x)
+#define SK_ANNOTATE_HAPPENS_BEFORE(obj)
+#define SK_ANNOTATE_HAPPENS_AFTER(obj)
+#define SK_ANNOTATE_RWLOCK_ACQUIRED(lock, w)
+#define SK_ANNOTATE_RWLOCK_RELEASED(lock, w)
 
 #endif
 
diff --git a/include/core/SkOnce.h b/include/core/SkOnce.h
index d5330b9408..a42e7022f5 100644
--- a/include/core/SkOnce.h
+++ b/include/core/SkOnce.h
@@ -54,10 +54,12 @@ struct SkSpinlock {
         while (!sk_atomic_cas(&thisIsPrivate, 0, 1)) {
             // spin
         }
+        SK_ANNOTATE_RWLOCK_ACQUIRED(this, true);
     }
 
     void release() {
         SkASSERT(shouldBeZero == 0);
+        SK_ANNOTATE_RWLOCK_RELEASED(this, true);
         // This requires a release memory barrier before storing, which sk_atomic_cas guarantees.
         SkAssertResult(sk_atomic_cas(&thisIsPrivate, 1, 0));
     }
@@ -145,6 +147,7 @@ static void sk_once_slow(bool* done, Lock* lock, Func f, Arg arg, void (*atExit)()) {
         // observable whenever we observe *done == true.
         release_barrier();
         *done = true;
+        SK_ANNOTATE_HAPPENS_BEFORE(done);
     }
 }
 
@@ -165,6 +168,7 @@ inline void SkOnce(bool* done, Lock* lock, Func f, Arg arg, void(*atExit)()) {
     // happens after f(arg), so by syncing to once->done = true here we're
     // forcing ourselves to also wait until the effects of f(arg) are readable.
     acquire_barrier();
+    SK_ANNOTATE_HAPPENS_AFTER(done);
 }
 
 template <typename Func, typename Arg>
diff --git a/include/core/SkRefCnt.h b/include/core/SkRefCnt.h
index 41c78293fa..2ac68ddbcd 100644
--- a/include/core/SkRefCnt.h
+++ b/include/core/SkRefCnt.h
@@ -37,7 +37,7 @@ public:
      */
     virtual ~SkRefCntBase() {
 #ifdef SK_DEBUG
-        SkASSERT(fRefCnt == 1);
+        SkASSERT(this->unique());
         fRefCnt = 0;    // illegal value, to catch us if we reuse after delete
 #endif
     }
@@ -53,6 +53,7 @@ public:
         // an unprotected read. Generally, don't read fRefCnt, and don't stifle this warning.
         bool const unique = (1 == SK_ANNOTATE_UNPROTECTED_READ(fRefCnt));
         if (unique) {
+            SK_ANNOTATE_HAPPENS_AFTER(this);
             // Acquire barrier (L/SL), if not provided by load of fRefCnt.
             // Prevents user's 'unique' code from happening before decrements.
             //TODO: issue the barrier.
@@ -63,7 +64,7 @@ public:
     /** Increment the reference count. Must be balanced by a call to unref().
     */
     void ref() const {
-        SkASSERT(fRefCnt > 0);
+        SkASSERT(this->unsafeGetRefCnt() > 0);
         sk_atomic_inc(&fRefCnt);  // No barrier required.
     }
 
@@ -72,9 +73,11 @@ public:
         the object needs to have been allocated via new, and not on the stack.
     */
     void unref() const {
-        SkASSERT(fRefCnt > 0);
+        SkASSERT(this->unsafeGetRefCnt() > 0);
+        SK_ANNOTATE_HAPPENS_BEFORE(this);
         // Release barrier (SL/S), if not provided below.
         if (sk_atomic_dec(&fRefCnt) == 1) {
+            SK_ANNOTATE_HAPPENS_AFTER(this);
             // Acquire barrier (L/SL), if not provided above.
             // Prevents code in dispose from happening before the decrement.
             sk_membar_acquire__after_atomic_dec();
@@ -84,7 +87,7 @@ public:
 
 #ifdef SK_DEBUG
     void validate() const {
-        SkASSERT(fRefCnt > 0);
+        SkASSERT(this->unsafeGetRefCnt() > 0);
     }
 #endif
 
@@ -103,6 +106,9 @@ protected:
     }
 
 private:
+    // OK for use in asserts, but not much else.
+    int32_t unsafeGetRefCnt() const { return SK_ANNOTATE_UNPROTECTED_READ(fRefCnt); }
+
     /**
      *  Called when the ref count goes to 0.
      */
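
Context for reviewers: the RWLock annotations above tell TSAN to treat a homemade lock like SkSpinlock as a real lock, so accesses it guards stop being reported as races. The sketch below shows the same pattern outside of Skia, using C++11 atomics in place of sk_atomic_cas; the Spinlock type and the DEMO_* macro names are invented for illustration, while the Annotate* declarations match the hooks the TSAN runtime actually provides.

#include <atomic>

// Only declare the annotations when building under TSAN; the TSAN runtime
// supplies the Annotate* symbols. GCC defines __SANITIZE_THREAD__ under
// -fsanitize=thread; Clang users would check __has_feature(thread_sanitizer).
#if defined(__SANITIZE_THREAD__)
extern "C" {
void AnnotateRWLockAcquired(const char* file, int line, const volatile void* lock, long is_w);
void AnnotateRWLockReleased(const char* file, int line, const volatile void* lock, long is_w);
}
#define DEMO_RWLOCK_ACQUIRED(lock, w) AnnotateRWLockAcquired(__FILE__, __LINE__, lock, w)
#define DEMO_RWLOCK_RELEASED(lock, w) AnnotateRWLockReleased(__FILE__, __LINE__, lock, w)
#else
#define DEMO_RWLOCK_ACQUIRED(lock, w)
#define DEMO_RWLOCK_RELEASED(lock, w)
#endif

struct Spinlock {
    std::atomic<int> fLocked{0};

    void acquire() {
        int expected = 0;
        // Spin until we swing fLocked from 0 to 1, as sk_atomic_cas does above.
        while (!fLocked.compare_exchange_weak(expected, 1, std::memory_order_acquire)) {
            expected = 0;  // compare_exchange_weak rewrites 'expected' on failure
        }
        DEMO_RWLOCK_ACQUIRED(this, 1);  // annotate only once the lock is actually held
    }

    void release() {
        DEMO_RWLOCK_RELEASED(this, 1);  // annotate before the releasing store
        fLocked.store(0, std::memory_order_release);
    }
};

Note the ordering mirrors the patch: acquired is announced after the CAS succeeds, released before the store that drops the lock. The is_w argument is 1 because a spinlock is exclusive; a reader/writer lock would pass 0 for shared acquisitions.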
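The happens-before/after pair used in SkRefCnt and SkOnce works the same way: the BEFORE annotation on each unref() publishes everything that thread did to the object, and the AFTER annotation at the final decrement (or after the done flag in SkOnce) pulls those effects in before the object is torn down. Below is a minimal, hypothetical equivalent built on std::atomic; the RefCounted class is invented for illustration and is not Skia's SkRefCnt.

#include <atomic>
#include <cstdint>

#if defined(__SANITIZE_THREAD__)
extern "C" {
void AnnotateHappensBefore(const char* file, int line, const volatile void* ptr);
void AnnotateHappensAfter(const char* file, int line, const volatile void* ptr);
}
#define DEMO_HAPPENS_BEFORE(obj) AnnotateHappensBefore(__FILE__, __LINE__, obj)
#define DEMO_HAPPENS_AFTER(obj) AnnotateHappensAfter(__FILE__, __LINE__, obj)
#else
#define DEMO_HAPPENS_BEFORE(obj)
#define DEMO_HAPPENS_AFTER(obj)
#endif

class RefCounted {
public:
    RefCounted() : fRefCnt(1) {}

    void ref() const { fRefCnt.fetch_add(1, std::memory_order_relaxed); }

    void unref() const {
        // Publish this thread's writes to the object before giving up its ref.
        DEMO_HAPPENS_BEFORE(this);
        if (fRefCnt.fetch_sub(1, std::memory_order_acq_rel) == 1) {
            // Last ref gone: observe every other thread's unref() before deleting.
            DEMO_HAPPENS_AFTER(this);
            delete this;
        }
    }

private:
    ~RefCounted() {}  // heap-only, like SkRefCnt: deletion happens via unref()
    mutable std::atomic<int32_t> fRefCnt;
};

With real <atomic> operations the acq_rel ordering already gives TSAN enough to see the synchronization, so the annotations are redundant in this sketch; Skia needed them because its sk_atomic_* and sk_membar_* wrappers predate <atomic> and are opaque to the tool.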