Fewer atomic ops in debug with SkBufferHead.

In debug builds an assert would do an atomic load to assert the
reference count was greater than zero, then a fetch_add would access the
value again to do the reference counting. Instead just assert in debug
on the value produced by the reference counting. This both improves
debug performance and (more importantly) makes the debug asserts correct
instead of merely opportunistic.

Change-Id: Ic4ce788930d2564b5f86ab0e09fcd66006c8b73d
Reviewed-on: https://skia-review.googlesource.com/55880
Reviewed-by: Mike Klein <mtklein@chromium.org>
Commit-Queue: Ben Wagner <bungeman@google.com>
This commit is contained in:
Ben Wagner 2017-10-05 13:10:51 -04:00 committed by Skia Commit-Bot
parent 38ace8a133
commit 04eb02f405

View File

@@ -82,14 +82,14 @@ struct SkBufferHead {
}
// NOTE(review): this is a rendered diff hunk whose +/- markers were stripped,
// so the removed (old) lines and the added (new) line both appear below.
// Per the commit message above, the change replaces a separate debug-only
// atomic load with an assert on the value returned by the refcount increment.
void ref() const {
// Old (removed) form: a standalone atomic load just to assert the count is
// positive, followed by the actual increment — two atomic accesses in debug,
// and the assert only samples the count rather than checking the value the
// increment actually observed.
SkASSERT(fRefCnt.load(std::memory_order_relaxed) > 0);
(void)fRefCnt.fetch_add(+1, std::memory_order_relaxed);
// New (added) form: fetch_add returns the previous count; asserting on that
// returned value (nonzero in debug via SkAssertResult) removes the extra
// load and checks exactly the value involved in the increment.
SkAssertResult(fRefCnt.fetch_add(+1, std::memory_order_relaxed));
}
void unref() const {
SkASSERT(fRefCnt.load(std::memory_order_relaxed) > 0);
// A release here acts in place of all releases we "should" have been doing in ref().
if (1 == fRefCnt.fetch_add(-1, std::memory_order_acq_rel)) {
int32_t oldRefCnt = fRefCnt.fetch_add(-1, std::memory_order_acq_rel);
SkASSERT(oldRefCnt);
if (1 == oldRefCnt) {
// Like unique(), the acquire is only needed on success.
SkBufferBlock* block = fBlock.fNext;
sk_free((void*)this);