SkPath::fFirstDirection: seq-cst -> relaxed
We landed this originally with lazily-correct sequentially-consistent memory order. It turns out that's regressed performance, we think particularly when recording paths. We also think there's no need for anything but relaxed memory order here.

We should see this chart go down if all goes well: https://perf.skia.org/#4329

There are also Chrome performance charts to watch in the linked bug.

BUG=chromium:537700
CQ_EXTRA_TRYBOTS=client.skia:Test-Ubuntu-GCC-GCE-CPU-AVX2-x86_64-Release-TSAN-Trybot,Test-Ubuntu-GCC-Golo-GPU-GT610-x86_64-Release-TSAN

No public API changes.
TBR=reed@google.com

Review URL: https://codereview.chromium.org/1393833003
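For readers unfamiliar with the tradeoff, here is a minimal sketch of the cost difference this change targets. It uses plain std::atomic rather than SkAtomic, and the names are hypothetical, not Skia source:

    // Minimal sketch of seq-cst vs relaxed store cost; std::atomic and
    // all names here are hypothetical stand-ins, not Skia source.
    #include <atomic>
    #include <cstdint>

    std::atomic<uint8_t> cachedDir{0};  // stand-in for fFirstDirection

    void storeSeqCst(uint8_t dir) {
        // Sequentially consistent store: a full barrier on every store
        // (typically xchg or mov+mfence on x86-64). This is what every
        // write to the cache paid before this change.
        cachedDir.store(dir, std::memory_order_seq_cst);
    }

    void storeRelaxed(uint8_t dir) {
        // Relaxed store: still atomic (no torn writes, no data-race UB),
        // but no ordering guarantee; typically a plain mov.
        cachedDir.store(dir, std::memory_order_relaxed);
    }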
commit bf90520f63
parent ab8296b48a
@@ -938,13 +938,12 @@ private:
         kCurrent_Version = 1
     };
 
-    SkAutoTUnref<SkPathRef> fPathRef;
-
-    int fLastMoveToIndex;
-    uint8_t fFillType;
-    mutable uint8_t fConvexity;
-    mutable SkAtomic<uint8_t> fFirstDirection;  // SkPathPriv::FirstDirection
-    mutable SkBool8 fIsVolatile;
+    SkAutoTUnref<SkPathRef>                            fPathRef;
+    int                                                fLastMoveToIndex;
+    uint8_t                                            fFillType;
+    mutable uint8_t                                    fConvexity;
+    mutable SkAtomic<uint8_t, sk_memory_order_relaxed> fFirstDirection;// SkPathPriv::FirstDirection
+    mutable SkBool8                                    fIsVolatile;
 
     /** Resets all fields other than fPathRef to their initial 'empty' values.
      *  Assumes the caller has already emptied fPathRef.
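Relaxed ordering is sufficient for this field because it is an idempotent cache: every thread derives the same value from the same immutable geometry, so it never matters which racing store wins or when other threads observe it. A sketch of that pattern (std::atomic and hypothetical names; not the actual SkPath code):

    // Sketch of the lazily-computed, idempotent cache pattern; std::atomic
    // and all names are hypothetical stand-ins, not the actual SkPath code.
    #include <atomic>
    #include <cstdint>

    struct LazyDir {
        enum : uint8_t { kUnknown = 0, kCW = 1, kCCW = 2 };

        bool fClockwise = true;                       // immutable geometry stand-in
        mutable std::atomic<uint8_t> fDir{kUnknown};  // stand-in for fFirstDirection

        uint8_t dir() const {
            uint8_t d = fDir.load(std::memory_order_relaxed);
            if (d == kUnknown) {
                d = fClockwise ? kCW : kCCW;  // pure function of immutable state
                // Racing threads all compute the same d, so any interleaving
                // of these relaxed stores leaves the cache correct.
                fDir.store(d, std::memory_order_relaxed);
            }
            return d;
        }
    };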
@@ -42,43 +42,44 @@ T sk_atomic_exchange(T*, T, sk_memory_order = sk_memory_order_seq_cst);
 
 // A little wrapper class for small T (think, builtins: int, float, void*) to
 // ensure they're always used atomically. This is our stand-in for std::atomic<T>.
-template <typename T>
+// !!! Please _really_ know what you're doing if you change default_memory_order. !!!
+template <typename T, sk_memory_order default_memory_order = sk_memory_order_seq_cst>
 class SkAtomic : SkNoncopyable {
 public:
     SkAtomic() {}
     explicit SkAtomic(const T& val) : fVal(val) {}
 
     // It is essential we return by value rather than by const&. fVal may change at any time.
-    T load(sk_memory_order mo = sk_memory_order_seq_cst) const {
+    T load(sk_memory_order mo = default_memory_order) const {
         return sk_atomic_load(&fVal, mo);
     }
 
-    void store(const T& val, sk_memory_order mo = sk_memory_order_seq_cst) {
+    void store(const T& val, sk_memory_order mo = default_memory_order) {
         sk_atomic_store(&fVal, val, mo);
     }
 
-    // Alias for .load(sk_memory_order_seq_cst).
+    // Alias for .load(default_memory_order).
     operator T() const {
         return this->load();
     }
 
-    // Alias for .store(v, sk_memory_order_seq_cst).
+    // Alias for .store(v, default_memory_order).
     T operator=(const T& v) {
         this->store(v);
         return v;
     }
 
-    T fetch_add(const T& val, sk_memory_order mo = sk_memory_order_seq_cst) {
+    T fetch_add(const T& val, sk_memory_order mo = default_memory_order) {
         return sk_atomic_fetch_add(&fVal, val, mo);
     }
 
-    T fetch_sub(const T& val, sk_memory_order mo = sk_memory_order_seq_cst) {
+    T fetch_sub(const T& val, sk_memory_order mo = default_memory_order) {
         return sk_atomic_fetch_sub(&fVal, val, mo);
     }
 
     bool compare_exchange(T* expected, const T& desired,
-                          sk_memory_order success = sk_memory_order_seq_cst,
-                          sk_memory_order failure = sk_memory_order_seq_cst) {
+                          sk_memory_order success = default_memory_order,
+                          sk_memory_order failure = default_memory_order) {
         return sk_atomic_compare_exchange(&fVal, expected, desired, success, failure);
     }
 private:
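With the new default template argument, call sites bake the ordering into the type once, and the sugar operators pick it up automatically; any call site can still pass an explicit sk_memory_order to override it. A usage sketch (hypothetical struct and method names, assuming the SkAtomic shown above):

    // Usage sketch for the new default_memory_order parameter; the struct
    // and method names are hypothetical.
    struct DirCache {
        mutable SkAtomic<uint8_t, sk_memory_order_relaxed> fFirstDirection;

        void set(uint8_t d) const {
            fFirstDirection = d;     // operator=: store(sk_memory_order_relaxed)
        }
        uint8_t get() const {
            return fFirstDirection;  // operator T(): load(sk_memory_order_relaxed)
        }
    };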