Revert of Parallel cache - preliminary (patchset #24 id:460001 of https://codereview.chromium.org/1264103003/ )

Reason for revert:
Breaks DrMemory in the Chrome roll.

Original issue's description:
> Parallel cache.
>
> TBR=reed@google.com
>
> BUG=skia:1330,528560
>
> Committed: https://skia.googlesource.com/skia/+/6f2a486040cb25465990196c229feb47e668e87f
>
> Committed: https://skia.googlesource.com/skia/+/bf2988833e5a36c6b430da6fdd2cfebd0015adec
>
> Committed: https://skia.googlesource.com/skia/+/014ffdb01ea5317614a1569efc30c50f06434222

TBR=reed@google.com,mtklein@google.com,mtklein@chromium.org
NOPRESUBMIT=true
NOTREECHECKS=true
NOTRY=true
BUG=skia:1330,528560

Review URL: https://codereview.chromium.org/1345903002
Author: herb, 2015-09-15 15:15:40 -07:00 (committed by Commit bot)
parent 013e9e3bb0
commit cd7f035974
9 changed files with 190 additions and 334 deletions

View File

@@ -29,9 +29,6 @@ void sk_atomic_store(T*, T, sk_memory_order = sk_memory_order_seq_cst);
template <typename T>
T sk_atomic_fetch_add(T*, T, sk_memory_order = sk_memory_order_seq_cst);
template <typename T>
T sk_atomic_fetch_sub(T*, T, sk_memory_order = sk_memory_order_seq_cst);
template <typename T>
bool sk_atomic_compare_exchange(T*, T* expected, T desired,
sk_memory_order success = sk_memory_order_seq_cst,
@@ -61,10 +58,6 @@ public:
return sk_atomic_fetch_add(&fVal, val, mo);
}
T fetch_sub(const T& val, sk_memory_order mo = sk_memory_order_seq_cst) {
return sk_atomic_fetch_sub(&fVal, val, mo);
}
bool compare_exchange(T* expected, const T& desired,
sk_memory_order success = sk_memory_order_seq_cst,
sk_memory_order failure = sk_memory_order_seq_cst) {
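
For context, the sk_atomic_fetch_sub wrapper removed above existed so the parallel cache could keep its global byte counter in sync without a lock. A minimal sketch of that usage, assuming the SkAtomic<T> interface shown in this hunk (the counter name and call sites are illustrative, not Skia source):

    SkAtomic<size_t> totalMemoryUsed;  // shared across all caches

    void onAttach(size_t bytes) {
        totalMemoryUsed.fetch_add(bytes);  // seq_cst by default
    }

    void onDetach(size_t bytes) {
        totalMemoryUsed.fetch_sub(bytes);  // the operation this revert removes
    }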

View File

@@ -31,12 +31,6 @@ T sk_atomic_fetch_add(T* ptr, T val, sk_memory_order mo) {
return __atomic_fetch_add(ptr, val, mo);
}
template <typename T>
T sk_atomic_fetch_sub(T* ptr, T val, sk_memory_order mo) {
// All values of mo are valid.
return __atomic_fetch_sub(ptr, val, mo);
}
template <typename T>
bool sk_atomic_compare_exchange(T* ptr, T* expected, T desired,
sk_memory_order success,

View File

@@ -38,13 +38,6 @@ T sk_atomic_fetch_add(T* ptr, T val, sk_memory_order mo) {
return std::atomic_fetch_add_explicit(ap, val, (std::memory_order)mo);
}
template <typename T>
T sk_atomic_fetch_sub(T* ptr, T val, sk_memory_order mo) {
// All values of mo are valid.
std::atomic<T>* ap = reinterpret_cast<std::atomic<T>*>(ptr);
return std::atomic_fetch_sub_explicit(ap, val, (std::memory_order)mo);
}
template <typename T>
bool sk_atomic_compare_exchange(T* ptr, T* expected, T desired,
sk_memory_order success,

View File

@@ -45,11 +45,6 @@ T sk_atomic_fetch_add(T* ptr, T val, sk_memory_order) {
return __sync_fetch_and_add(ptr, val);
}
template <typename T>
T sk_atomic_fetch_sub(T* ptr, T val, sk_memory_order) {
return __sync_fetch_and_sub(ptr, val);
}
template <typename T>
bool sk_atomic_compare_exchange(T* ptr, T* expected, T desired, sk_memory_order, sk_memory_order) {
T prev = __sync_val_compare_and_swap(ptr, *expected, desired);

View File

@@ -1477,12 +1477,14 @@ static void D1G_RectClip(const SkDraw1Glyph& state, Sk48Dot16 fx, Sk48Dot16 fy,
bounds = &storage;
}
uint8_t* aa = (uint8_t*)state.fCache->findImage(glyph);
uint8_t* aa = (uint8_t*)glyph.fImage;
if (nullptr == aa) {
return; // can't rasterize glyph
aa = (uint8_t*)state.fCache->findImage(glyph);
if (nullptr == aa) {
return; // can't rasterize glyph
}
}
mask.fRowBytes = glyph.rowBytes();
mask.fFormat = static_cast<SkMask::Format>(glyph.fMaskFormat);
mask.fImage = aa;
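
The restored drawing path above first consults the glyph's cached image pointer and only falls back to the cache's findImage() (which may rasterize under the hood) when it is null. Condensed, the restored control flow reads:

    uint8_t* aa = (uint8_t*)glyph.fImage;                   // fast path
    if (nullptr == aa) {
        aa = (uint8_t*)state.fCache->findImage(glyph);      // lazy rasterize
        if (nullptr == aa) {
            return;  // glyph can't be rasterized; skip it
        }
    }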

View File

@@ -47,8 +47,6 @@ class SkGlyph {
uint8_t fMaskFormat;
int8_t fRsbDelta, fLsbDelta; // used by auto-kerning
int8_t fForceBW;
mutable bool fImageIsSet;
mutable bool fPathIsSet;
void initWithGlyphID(uint32_t glyph_id) {
this->initCommon(MakeID(glyph_id));
@@ -137,8 +135,6 @@ class SkGlyph {
fPath = nullptr;
fMaskFormat = MASK_FORMAT_UNKNOWN;
fForceBW = 0;
fImageIsSet = false;
fPathIsSet = false;
}
static unsigned ID2Code(uint32_t id) {
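
The fImageIsSet/fPathIsSet flags deleted in this file were the per-glyph once-state consumed by the SkOnce-based findImage/findPath shown in the next file. A minimal sketch of that double-checked once-init shape in standard C++ (illustrative only; Skia's SkOnce differs in detail):

    #include <atomic>
    #include <mutex>

    struct Glyph {
        mutable std::atomic<bool> imageIsSet{false};
        mutable const void*       image = nullptr;
    };

    const void* find_image(const Glyph& g, std::mutex& scalerMutex) {
        if (!g.imageIsSet.load(std::memory_order_acquire)) {   // fast path
            std::lock_guard<std::mutex> lock(scalerMutex);
            if (!g.imageIsSet.load(std::memory_order_relaxed)) {
                g.image = nullptr;  // rasterize exactly once here
                g.imageIsSet.store(true, std::memory_order_release);
            }
        }
        return g.image;
    }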

View File

@@ -8,7 +8,6 @@
#include "SkGlyphCache.h"
#include "SkGlyphCache_Globals.h"
#include "SkGraphics.h"
#include "SkOnce.h"
#include "SkOncePtr.h"
#include "SkPath.h"
#include "SkTemplates.h"
@@ -43,90 +42,42 @@ static SkGlyphCache_Globals& get_globals() {
#define kMinAllocAmount ((sizeof(SkGlyph) + kMinGlyphImageSize) * kMinGlyphCount)
SkGlyphCache::SkGlyphCache(SkTypeface* typeface, const SkDescriptor* desc, SkScalerContext* ctx)
: fNext(nullptr)
, fPrev(nullptr)
, fDesc(desc->copy())
, fRefCount(0)
, fGlyphAlloc(kMinAllocAmount)
, fMemoryUsed(sizeof(*this))
: fDesc(desc->copy())
, fScalerContext(ctx)
, fAuxProcList(nullptr) {
, fGlyphAlloc(kMinAllocAmount) {
SkASSERT(typeface);
SkASSERT(desc);
SkASSERT(ctx);
fPrev = fNext = nullptr;
fScalerContext->getFontMetrics(&fFontMetrics);
fMemoryUsed = sizeof(*this);
fAuxProcList = nullptr;
}
SkGlyphCache::~SkGlyphCache() {
fGlyphMap.foreach ([](SkGlyph* g) { delete g->fPath; });
SkDescriptor::Free(fDesc);
delete fScalerContext;
AuxProcRec* rec = fAuxProcList;
while (rec) {
rec->fProc(rec->fData);
AuxProcRec* next = rec->fNext;
delete rec;
rec = next;
}
}
void SkGlyphCache::increaseMemoryUsed(size_t used) {
fMemoryUsed += used;
get_globals().increaseTotalMemoryUsed(used);
}
SkGlyphCache::CharGlyphRec
SkGlyphCache::PackedUnicharIDtoCharGlyphRec(PackedUnicharID packedUnicharID) {
SkFixed x = SkGlyph::SubToFixed(SkGlyph::ID2SubX(packedUnicharID));
SkFixed y = SkGlyph::SubToFixed(SkGlyph::ID2SubY(packedUnicharID));
SkUnichar unichar = SkGlyph::ID2Code(packedUnicharID);
SkAutoMutexAcquire lock(fScalerMutex);
PackedGlyphID packedGlyphID = SkGlyph::MakeID(fScalerContext->charToGlyphID(unichar), x, y);
return {packedUnicharID, packedGlyphID};
this->invokeAndRemoveAuxProcs();
}
SkGlyphCache::CharGlyphRec* SkGlyphCache::getCharGlyphRec(PackedUnicharID packedUnicharID) {
if (nullptr == fPackedUnicharIDToPackedGlyphID.get()) {
fMapMutex.releaseShared();
// Add the map only if there is a call for char -> glyph mapping.
{
SkAutoTAcquire<SkSharedMutex> lock(fMapMutex);
// Now that the cache is locked exclusively, make sure no one added this array
// while unlocked.
if (nullptr == fPackedUnicharIDToPackedGlyphID.get()) {
// Allocate the array.
fPackedUnicharIDToPackedGlyphID.reset(new PackedUnicharIDToPackedGlyphIDMap);
}
fPackedUnicharIDToPackedGlyphID->set(PackedUnicharIDtoCharGlyphRec(packedUnicharID));
// Allocate the array.
fPackedUnicharIDToPackedGlyphID.reset(kHashCount);
// Initialize array to map character and position with the impossible glyph ID. This
// represents no mapping.
for (int i = 0; i < kHashCount; ++i) {
fPackedUnicharIDToPackedGlyphID[i].fPackedUnicharID = SkGlyph::kImpossibleID;
fPackedUnicharIDToPackedGlyphID[i].fPackedGlyphID = 0;
}
fMapMutex.acquireShared();
return fPackedUnicharIDToPackedGlyphID->find(packedUnicharID);
}
CharGlyphRec* answer = fPackedUnicharIDToPackedGlyphID->find(packedUnicharID);
if (nullptr == answer) {
fMapMutex.releaseShared();
// Add a new char -> glyph mapping.
{
SkAutoTAcquire<SkSharedMutex> lock(fMapMutex);
answer = fPackedUnicharIDToPackedGlyphID->find(packedUnicharID);
if (nullptr == answer) {
fPackedUnicharIDToPackedGlyphID->set(
PackedUnicharIDtoCharGlyphRec(packedUnicharID));
}
}
fMapMutex.acquireShared();
return fPackedUnicharIDToPackedGlyphID->find(packedUnicharID);
}
return answer;
return &fPackedUnicharIDToPackedGlyphID[SkChecksum::CheapMix(packedUnicharID) & kHashMask];
}
///////////////////////////////////////////////////////////////////////////////
@@ -141,11 +92,15 @@ uint16_t SkGlyphCache::unicharToGlyph(SkUnichar charCode) {
VALIDATE();
PackedUnicharID packedUnicharID = SkGlyph::MakeID(charCode);
const CharGlyphRec& rec = *this->getCharGlyphRec(packedUnicharID);
return SkGlyph::ID2Code(rec.fPackedGlyphID);
if (rec.fPackedUnicharID == packedUnicharID) {
return SkGlyph::ID2Code(rec.fPackedGlyphID);
} else {
return fScalerContext->charToGlyphID(charCode);
}
}
SkUnichar SkGlyphCache::glyphToUnichar(uint16_t glyphID) {
SkAutoMutexAcquire lock(fScalerMutex);
return fScalerContext->glyphIDToChar(glyphID);
}
@@ -159,19 +114,19 @@ int SkGlyphCache::countCachedGlyphs() const {
///////////////////////////////////////////////////////////////////////////////
SkGlyph* SkGlyphCache::lookupByChar(SkUnichar charCode, SkFixed x, SkFixed y) {
PackedUnicharID targetUnicharID = SkGlyph::MakeID(charCode, x, y);
CharGlyphRec* rec = this->getCharGlyphRec(targetUnicharID);
PackedGlyphID packedGlyphID = rec->fPackedGlyphID;
return this->lookupByPackedGlyphID(packedGlyphID);
}
const SkGlyph& SkGlyphCache::getUnicharAdvance(SkUnichar charCode) {
VALIDATE();
return *this->lookupByChar(charCode);
}
const SkGlyph& SkGlyphCache::getGlyphIDAdvance(uint16_t glyphID) {
VALIDATE();
PackedGlyphID packedGlyphID = SkGlyph::MakeID(glyphID);
return *this->lookupByPackedGlyphID(packedGlyphID);
}
///////////////////////////////////////////////////////////////////////////////
const SkGlyph& SkGlyphCache::getUnicharMetrics(SkUnichar charCode) {
VALIDATE();
return *this->lookupByChar(charCode);
@@ -182,49 +137,6 @@ const SkGlyph& SkGlyphCache::getUnicharMetrics(SkUnichar charCode, SkFixed x, Sk
return *this->lookupByChar(charCode, x, y);
}
///////////////////////////////////////////////////////////////////////////////
SkGlyph* SkGlyphCache::allocateNewGlyph(PackedGlyphID packedGlyphID) {
SkGlyph* glyphPtr;
{
fMapMutex.releaseShared();
{
SkAutoTAcquire<SkSharedMutex> mapLock(fMapMutex);
glyphPtr = fGlyphMap.find(packedGlyphID);
if (nullptr == glyphPtr) {
SkGlyph glyph;
glyph.initGlyphFromCombinedID(packedGlyphID);
{
SkAutoMutexAcquire lock(fScalerMutex);
fScalerContext->getMetrics(&glyph);
this->increaseMemoryUsed(sizeof(SkGlyph));
glyphPtr = fGlyphMap.set(glyph);
} // drop scaler lock
}
} // drop map lock
fMapMutex.acquireShared();
glyphPtr = fGlyphMap.find(packedGlyphID);
}
SkASSERT(glyphPtr->fID != SkGlyph::kImpossibleID);
return glyphPtr;
}
SkGlyph* SkGlyphCache::lookupByPackedGlyphID(PackedGlyphID packedGlyphID) {
SkGlyph* glyph = fGlyphMap.find(packedGlyphID);
if (nullptr == glyph) {
glyph = this->allocateNewGlyph(packedGlyphID);
}
return glyph;
}
const SkGlyph& SkGlyphCache::getGlyphIDAdvance(uint16_t glyphID) {
VALIDATE();
PackedGlyphID packedGlyphID = SkGlyph::MakeID(glyphID);
return *this->lookupByPackedGlyphID(packedGlyphID);
}
const SkGlyph& SkGlyphCache::getGlyphIDMetrics(uint16_t glyphID) {
VALIDATE();
PackedGlyphID packedGlyphID = SkGlyph::MakeID(glyphID);
@@ -237,46 +149,74 @@ const SkGlyph& SkGlyphCache::getGlyphIDMetrics(uint16_t glyphID, SkFixed x, SkFi
return *this->lookupByPackedGlyphID(packedGlyphID);
}
///////////////////////////////////////////////////////////////////////////////
void SkGlyphCache::OnceFillInImage(GlyphAndCache gc) {
SkGlyphCache* cache = gc.cache;
const SkGlyph* glyph = gc.glyph;
cache->fScalerMutex.assertHeld();
if (glyph->fWidth > 0 && glyph->fWidth < kMaxGlyphWidth) {
size_t size = glyph->computeImageSize();
sk_atomic_store(&const_cast<SkGlyph*>(glyph)->fImage,
cache->fGlyphAlloc.alloc(size, SkChunkAlloc::kReturnNil_AllocFailType),
sk_memory_order_relaxed);
if (glyph->fImage != nullptr) {
cache->fScalerContext->getImage(*glyph);
cache->increaseMemoryUsed(size);
}
SkGlyph* SkGlyphCache::lookupByChar(SkUnichar charCode, SkFixed x, SkFixed y) {
PackedUnicharID id = SkGlyph::MakeID(charCode, x, y);
CharGlyphRec* rec = this->getCharGlyphRec(id);
if (rec->fPackedUnicharID != id) {
// this ID is based on the UniChar
rec->fPackedUnicharID = id;
// this ID is based on the glyph index
PackedGlyphID combinedID = SkGlyph::MakeID(fScalerContext->charToGlyphID(charCode), x, y);
rec->fPackedGlyphID = combinedID;
return this->lookupByPackedGlyphID(combinedID);
} else {
return this->lookupByPackedGlyphID(rec->fPackedGlyphID);
}
}
SkGlyph* SkGlyphCache::lookupByPackedGlyphID(PackedGlyphID packedGlyphID) {
SkGlyph* glyph = fGlyphMap.find(packedGlyphID);
if (nullptr == glyph) {
glyph = this->allocateNewGlyph(packedGlyphID);
}
return glyph;
}
SkGlyph* SkGlyphCache::allocateNewGlyph(PackedGlyphID packedGlyphID) {
fMemoryUsed += sizeof(SkGlyph);
SkGlyph* glyphPtr;
{
SkGlyph glyph;
glyph.initGlyphFromCombinedID(packedGlyphID);
glyphPtr = fGlyphMap.set(glyph);
}
fScalerContext->getMetrics(glyphPtr);
SkASSERT(glyphPtr->fID != SkGlyph::kImpossibleID);
return glyphPtr;
}
const void* SkGlyphCache::findImage(const SkGlyph& glyph) {
SkOnce<SkMutex, GlyphAndCache>(
&glyph.fImageIsSet, &fScalerMutex, &SkGlyphCache::OnceFillInImage, {this, &glyph});
return sk_atomic_load(&glyph.fImage, sk_memory_order_seq_cst);
}
void SkGlyphCache::OnceFillInPath(GlyphAndCache gc) {
SkGlyphCache* cache = gc.cache;
const SkGlyph* glyph = gc.glyph;
cache->fScalerMutex.assertHeld();
if (glyph->fWidth > 0) {
sk_atomic_store(&const_cast<SkGlyph*>(glyph)->fPath, new SkPath, sk_memory_order_relaxed);
cache->fScalerContext->getPath(*glyph, glyph->fPath);
size_t size = sizeof(SkPath) + glyph->fPath->countPoints() * sizeof(SkPoint);
cache->increaseMemoryUsed(size);
if (glyph.fWidth > 0 && glyph.fWidth < kMaxGlyphWidth) {
if (nullptr == glyph.fImage) {
size_t size = glyph.computeImageSize();
const_cast<SkGlyph&>(glyph).fImage = fGlyphAlloc.alloc(size,
SkChunkAlloc::kReturnNil_AllocFailType);
// check that alloc() actually succeeded
if (glyph.fImage) {
fScalerContext->getImage(glyph);
// TODO: the scaler may have changed the maskformat during
// getImage (e.g. from AA or LCD to BW) which means we may have
// overallocated the buffer. Check if the new computedImageSize
is smaller, and if so, shrink the alloc size in fImageAlloc.
fMemoryUsed += size;
}
}
}
return glyph.fImage;
}
const SkPath* SkGlyphCache::findPath(const SkGlyph& glyph) {
SkOnce<SkMutex, GlyphAndCache>(
&glyph.fPathIsSet, &fScalerMutex, &SkGlyphCache::OnceFillInPath, {this, &glyph});
return sk_atomic_load(&glyph.fPath, sk_memory_order_seq_cst);
if (glyph.fWidth) {
if (glyph.fPath == nullptr) {
const_cast<SkGlyph&>(glyph).fPath = new SkPath;
fScalerContext->getPath(glyph, glyph.fPath);
fMemoryUsed += sizeof(SkPath) +
glyph.fPath->countPoints() * sizeof(SkPoint);
}
}
return glyph.fPath;
}
void SkGlyphCache::dump() const {
@@ -289,22 +229,18 @@ void SkGlyphCache::dump() const {
face->getFamilyName(&name);
SkString msg;
msg.printf(
"cache typeface:%x %25s:%d size:%2g [%g %g %g %g] "
"lum:%02X devG:%d pntG:%d cntr:%d glyphs:%3d",
face->uniqueID(), name.c_str(), face->style(), rec.fTextSize,
matrix[SkMatrix::kMScaleX], matrix[SkMatrix::kMSkewX],
matrix[SkMatrix::kMSkewY], matrix[SkMatrix::kMScaleY],
rec.fLumBits & 0xFF, rec.fDeviceGamma, rec.fPaintGamma, rec.fContrast,
fGlyphMap.count());
msg.printf("cache typeface:%x %25s:%d size:%2g [%g %g %g %g] lum:%02X devG:%d pntG:%d cntr:%d glyphs:%3d",
face->uniqueID(), name.c_str(), face->style(), rec.fTextSize,
matrix[SkMatrix::kMScaleX], matrix[SkMatrix::kMSkewX],
matrix[SkMatrix::kMSkewY], matrix[SkMatrix::kMScaleY],
rec.fLumBits & 0xFF, rec.fDeviceGamma, rec.fPaintGamma, rec.fContrast,
fGlyphMap.count());
SkDebugf("%s\n", msg.c_str());
}
///////////////////////////////////////////////////////////////////////////////
bool SkGlyphCache::getAuxProcData(void (*proc)(void*), void** dataPtr) const {
// Borrow the fScalerMutex to protect the AuxProc list.
SkAutoMutexAcquire lock(fScalerMutex);
const AuxProcRec* rec = fAuxProcList;
while (rec) {
if (rec->fProc == proc) {
@@ -323,8 +259,6 @@ void SkGlyphCache::setAuxProc(void (*proc)(void*), void* data) {
return;
}
// Borrow the fScalerMutex to protect the AuxProc linked list.
SkAutoMutexAcquire lock(fScalerMutex);
AuxProcRec* rec = fAuxProcList;
while (rec) {
if (rec->fProc == proc) {
@@ -341,9 +275,27 @@ void SkGlyphCache::setAuxProc(void (*proc)(void*), void* data) {
fAuxProcList = rec;
}
void SkGlyphCache::invokeAndRemoveAuxProcs() {
AuxProcRec* rec = fAuxProcList;
while (rec) {
rec->fProc(rec->fData);
AuxProcRec* next = rec->fNext;
delete rec;
rec = next;
}
}
///////////////////////////////////////////////////////////////////////////////
///////////////////////////////////////////////////////////////////////////////
typedef SkAutoTAcquire<SkSpinlock> AutoAcquire;
class AutoAcquire {
public:
AutoAcquire(SkSpinlock& lock) : fLock(lock) { fLock.acquire(); }
~AutoAcquire() { fLock.release(); }
private:
SkSpinlock& fLock;
};
size_t SkGlyphCache_Globals::setCacheSizeLimit(size_t newLimit) {
static const size_t minLimit = 256 * 1024;
@@ -374,7 +326,7 @@ int SkGlyphCache_Globals::setCacheCountLimit(int newCount) {
void SkGlyphCache_Globals::purgeAll() {
AutoAcquire ac(fLock);
this->internalPurge(fTotalMemoryUsed.load());
this->internalPurge(fTotalMemoryUsed);
}
/* This guy calls the visitor from within the mutex lock, so the visitor
@@ -383,8 +335,10 @@ void SkGlyphCache_Globals::purgeAll() {
- try to acquire the mutex again
- call a fontscaler (which might call into the cache)
*/
SkGlyphCache* SkGlyphCache::VisitCache(
SkTypeface* typeface, const SkDescriptor* desc, VisitProc proc, void* context) {
SkGlyphCache* SkGlyphCache::VisitCache(SkTypeface* typeface,
const SkDescriptor* desc,
bool (*proc)(const SkGlyphCache*, void*),
void* context) {
if (!typeface) {
typeface = SkTypeface::GetDefaultTypeface();
}
@@ -400,15 +354,11 @@ SkGlyphCache* SkGlyphCache::VisitCache(
for (cache = globals.internalGetHead(); cache != nullptr; cache = cache->fNext) {
if (cache->fDesc->equals(*desc)) {
globals.internalMoveToHead(cache);
cache->fMapMutex.acquireShared();
globals.internalDetachCache(cache);
if (!proc(cache, context)) {
cache->fMapMutex.releaseShared();
return nullptr;
globals.internalAttachCacheToHead(cache);
cache = nullptr;
}
// The caller will take reference on this SkGlyphCache, and the corresponding
// Attach call will decrement the reference.
cache->fRefCount += 1;
return cache;
}
}
@@ -421,43 +371,28 @@ SkGlyphCache* SkGlyphCache::VisitCache(
// pass true the first time, to notice if the scalercontext failed,
// so we can try the purge.
SkScalerContext* ctx = typeface->createScalerContext(desc, true);
if (nullptr == ctx) {
if (!ctx) {
get_globals().purgeAll();
ctx = typeface->createScalerContext(desc, false);
SkASSERT(ctx);
}
cache = new SkGlyphCache(typeface, desc, ctx);
}
AutoAcquire ac(globals.fLock);
globals.internalAttachCacheToHead(cache);
AutoValidate av(cache);
cache->fMapMutex.acquireShared();
if (!proc(cache, context)) { // need to reattach
cache->fMapMutex.releaseShared();
return nullptr;
globals.attachCacheToHead(cache);
cache = nullptr;
}
// The caller will take reference on this SkGlyphCache, and the corresponding
// Attach call will decrement the reference.
cache->fRefCount += 1;
return cache;
}
void SkGlyphCache::AttachCache(SkGlyphCache* cache) {
SkASSERT(cache);
cache->fMapMutex.releaseShared();
SkGlyphCache_Globals& globals = get_globals();
AutoAcquire ac(globals.fLock);
globals.validate();
cache->validate();
SkASSERT(cache->fNext == nullptr);
// Unref and delete if no longer in the LRU list.
cache->fRefCount -= 1;
if (cache->fRefCount == 0) {
delete cache;
}
globals.internalPurge();
get_globals().attachCacheToHead(cache);
}
static void dump_visitor(const SkGlyphCache& cache, void* context) {
@@ -532,17 +467,14 @@ void SkGlyphCache::VisitAll(Visitor visitor, void* context) {
///////////////////////////////////////////////////////////////////////////////
void SkGlyphCache_Globals::internalAttachCacheToHead(SkGlyphCache* cache) {
this->internalPurge();
fCacheCount += 1;
cache->fRefCount += 1;
// Access to cache->fMemoryUsed is single threaded until internalMoveToHead.
fTotalMemoryUsed.fetch_add(cache->fMemoryUsed);
this->internalMoveToHead(cache);
void SkGlyphCache_Globals::attachCacheToHead(SkGlyphCache* cache) {
AutoAcquire ac(fLock);
this->validate();
cache->validate();
this->internalAttachCacheToHead(cache);
this->internalPurge();
}
SkGlyphCache* SkGlyphCache_Globals::internalGetTail() const {
@@ -559,13 +491,13 @@ size_t SkGlyphCache_Globals::internalPurge(size_t minBytesNeeded) {
this->validate();
size_t bytesNeeded = 0;
if (fTotalMemoryUsed.load() > fCacheSizeLimit) {
bytesNeeded = fTotalMemoryUsed.load() - fCacheSizeLimit;
if (fTotalMemoryUsed > fCacheSizeLimit) {
bytesNeeded = fTotalMemoryUsed - fCacheSizeLimit;
}
bytesNeeded = SkTMax(bytesNeeded, minBytesNeeded);
if (bytesNeeded) {
// no small purges!
bytesNeeded = SkTMax(bytesNeeded, fTotalMemoryUsed.load() >> 2);
bytesNeeded = SkTMax(bytesNeeded, fTotalMemoryUsed >> 2);
}
int countNeeded = 0;
@@ -591,10 +523,9 @@ size_t SkGlyphCache_Globals::internalPurge(size_t minBytesNeeded) {
SkGlyphCache* prev = cache->fPrev;
bytesFreed += cache->fMemoryUsed;
countFreed += 1;
this->internalDetachCache(cache);
if (0 == cache->fRefCount) {
delete cache;
}
delete cache;
cache = prev;
}
@@ -610,50 +541,34 @@ size_t SkGlyphCache_Globals::internalPurge(size_t minBytesNeeded) {
return bytesFreed;
}
void SkGlyphCache_Globals::internalMoveToHead(SkGlyphCache *cache) {
if (cache != fHead) {
if (cache->fPrev) {
cache->fPrev->fNext = cache->fNext;
}
if (cache->fNext) {
cache->fNext->fPrev = cache->fPrev;
}
cache->fNext = nullptr;
cache->fPrev = nullptr;
if (fHead) {
fHead->fPrev = cache;
cache->fNext = fHead;
}
fHead = cache;
void SkGlyphCache_Globals::internalAttachCacheToHead(SkGlyphCache* cache) {
SkASSERT(nullptr == cache->fPrev && nullptr == cache->fNext);
if (fHead) {
fHead->fPrev = cache;
cache->fNext = fHead;
}
fHead = cache;
fCacheCount += 1;
fTotalMemoryUsed += cache->fMemoryUsed;
}
void SkGlyphCache_Globals::internalDetachCache(SkGlyphCache* cache) {
SkASSERT(fCacheCount > 0);
fCacheCount -= 1;
fTotalMemoryUsed.fetch_sub(cache->fMemoryUsed);
fTotalMemoryUsed -= cache->fMemoryUsed;
if (cache->fPrev) {
cache->fPrev->fNext = cache->fNext;
} else {
// If cache->fPrev == nullptr then this is the head node.
fHead = cache->fNext;
if (fHead != nullptr) {
fHead->fPrev = nullptr;
}
}
if (cache->fNext) {
cache->fNext->fPrev = cache->fPrev;
} else {
// If cache->fNext == nullptr then this is the last node.
if (cache->fPrev != nullptr) {
cache->fPrev->fNext = nullptr;
}
}
cache->fPrev = cache->fNext = nullptr;
cache->fRefCount -= 1;
}
///////////////////////////////////////////////////////////////////////////////
#ifdef SK_DEBUG
@@ -672,16 +587,20 @@ void SkGlyphCache::validate() const {
}
void SkGlyphCache_Globals::validate() const {
size_t computedBytes = 0;
int computedCount = 0;
SkGlyphCache* head = fHead;
const SkGlyphCache* head = fHead;
while (head != nullptr) {
computedBytes += head->fMemoryUsed;
computedCount += 1;
head = head->fNext;
}
SkASSERTF(fCacheCount == computedCount, "fCacheCount: %d, computedCount: %d", fCacheCount,
computedCount);
SkASSERTF(fTotalMemoryUsed == computedBytes, "fTotalMemoryUsed: %d, computedBytes: %d",
fTotalMemoryUsed, computedBytes);
}
#endif
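
The comment above VisitCache spells out the visitor contract: the proc runs inside the cache lock, so it must not re-acquire that lock or call into a font scaler. A hypothetical caller that detaches a strike and later hands it back (take_strike mirrors the DetachProc helper declared in SkGlyphCache.h; typeface and desc are assumed inputs):

    static bool take_strike(const SkGlyphCache*, void*) { return true; }

    void use_strike(SkTypeface* typeface, const SkDescriptor* desc) {
        SkGlyphCache* cache =
            SkGlyphCache::VisitCache(typeface, desc, take_strike, nullptr);
        if (cache) {
            // ... read-only use of the detached strike ...
            SkGlyphCache::AttachCache(cache);  // return it to the global list
        }
    }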

View File

@@ -11,11 +11,8 @@
#include "SkChunkAlloc.h"
#include "SkDescriptor.h"
#include "SkGlyph.h"
#include "SkMutex.h"
#include "SkTHash.h"
#include "SkScalerContext.h"
#include "SkSharedMutex.h"
#include "SkSpinlock.h"
#include "SkTemplates.h"
#include "SkTDArray.h"
@@ -122,23 +119,12 @@ public:
SkScalerContext* getScalerContext() const { return fScalerContext; }
struct GlyphAndCache {
SkGlyphCache* cache;
const SkGlyph* glyph;
};
static void OnceFillInImage(GlyphAndCache gc);
static void OnceFillInPath(GlyphAndCache gc);
typedef bool (*VisitProc)(const SkGlyphCache*, void*);
/** Find a matching cache entry, and call proc() with it. If none is found create a new one.
If the proc() returns true, detach the cache and return it, otherwise leave it and return
nullptr.
*/
static SkGlyphCache* VisitCache(SkTypeface*, const SkDescriptor* desc,
VisitProc proc,
bool (*proc)(const SkGlyphCache*, void*),
void* context);
/** Given a strike that was returned by either VisitCache() or DetachCache() add it back into
@@ -195,21 +181,18 @@ public:
private:
friend class SkGlyphCache_Globals;
enum {
kHashBits = 8,
kHashCount = 1 << kHashBits,
kHashMask = kHashCount - 1
};
typedef uint32_t PackedGlyphID; // glyph-index + subpixel-pos
typedef uint32_t PackedUnicharID; // unichar + subpixel-pos
struct CharGlyphRec {
class HashTraits {
public:
static PackedUnicharID GetKey(const CharGlyphRec& rec) {
return rec.fPackedUnicharID;
}
static uint32_t Hash(PackedUnicharID unicharID) {
return SkChecksum::CheapMix(unicharID);
}
};
PackedUnicharID fPackedUnicharID;
PackedGlyphID fPackedGlyphID;
PackedUnicharID fPackedUnicharID;
PackedGlyphID fPackedGlyphID;
};
struct AuxProcRec {
@@ -222,9 +205,6 @@ private:
SkGlyphCache(SkTypeface*, const SkDescriptor*, SkScalerContext*);
~SkGlyphCache();
// Increase the memory used keeping the cache and the global size in sync.
void increaseMemoryUsed(size_t used);
// Return the SkGlyph* associated with MakeID. The id parameter is the
// combined glyph/x/y id generated by MakeID. If it is just a glyph id
// then x and y are assumed to be zero.
@@ -236,45 +216,32 @@ private:
// Return a new SkGlyph for the glyph ID and subpixel position id.
SkGlyph* allocateNewGlyph(PackedGlyphID packedGlyphID);
// Add the full metrics to an existing glyph.
void addFullMetrics(SkGlyph* glyph);
static bool DetachProc(const SkGlyphCache*, void*) { return true; }
CharGlyphRec PackedUnicharIDtoCharGlyphRec(PackedUnicharID packedUnicharID);
// The id arg is a combined id generated by MakeID.
CharGlyphRec* getCharGlyphRec(PackedUnicharID id);
void invokeAndRemoveAuxProcs();
inline static SkGlyphCache* FindTail(SkGlyphCache* head);
// The following are protected by the SkGlyphCache_Globals fLock mutex.
// Note: the following fields are protected by a mutex in a different class.
SkGlyphCache* fNext;
SkGlyphCache* fPrev;
SkDescriptor* const fDesc;
SkScalerContext* const fScalerContext;
SkPaint::FontMetrics fFontMetrics;
int fRefCount;
// The following fields are protected by fMapMutex.
mutable SkSharedMutex fMapMutex;
// Map from a combined GlyphID and sub-pixel position to a SkGlyph.
SkTHashTable<SkGlyph, PackedGlyphID, SkGlyph::HashTraits> fGlyphMap;
SkChunkAlloc fGlyphAlloc;
typedef SkTHashTable<CharGlyphRec, PackedUnicharID, CharGlyphRec::HashTraits>
PackedUnicharIDToPackedGlyphIDMap;
SkAutoTDelete<PackedUnicharIDToPackedGlyphIDMap> fPackedUnicharIDToPackedGlyphID;
SkAutoTArray<CharGlyphRec> fPackedUnicharIDToPackedGlyphID;
// used to track (approx) how much ram is tied-up in this cache
size_t fMemoryUsed;
// The fScalerMutex protects the following fields. It is mainly used to ensure single-threaded
// access to the font scaler, but it also protects the fAuxProcList.
mutable SkMutex fScalerMutex;
SkScalerContext* const fScalerContext;
AuxProcRec* fAuxProcList;
// BEWARE: Mutex ordering
// If you need to hold both fMapMutex and fScalerMutex then fMapMutex must be held first.
};
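
The deleted BEWARE note above encodes the standard deadlock-avoidance rule: any thread that needs both locks must take them in one global order. A minimal illustration with stand-in mutexes (not Skia source):

    #include <mutex>

    std::mutex mapMutex, scalerMutex;  // stand-ins for fMapMutex/fScalerMutex

    void safe_path() {
        std::lock_guard<std::mutex> m(mapMutex);     // 1st: map lock
        std::lock_guard<std::mutex> s(scalerMutex);  // 2nd: scaler lock
        // ... work under both locks ...
    }
    // Any path that took scalerMutex first could deadlock against safe_path,
    // which is exactly what the ordering rule forbids.
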
class SkAutoGlyphCacheBase {

View File

@@ -26,9 +26,8 @@
class SkGlyphCache_Globals {
public:
SkGlyphCache_Globals() {
fHead = nullptr;
fTotalMemoryUsed.store(0);
fTotalMemoryUsed = 0;
fCacheSizeLimit = SK_DEFAULT_FONT_CACHE_LIMIT;
fCacheCount = 0;
fCacheCountLimit = SK_DEFAULT_FONT_CACHE_COUNT_LIMIT;
@@ -48,8 +47,7 @@ public:
SkGlyphCache* internalGetHead() const { return fHead; }
SkGlyphCache* internalGetTail() const;
size_t getTotalMemoryUsed() const { return fTotalMemoryUsed.load(); }
void increaseTotalMemoryUsed(size_t increase) { fTotalMemoryUsed.fetch_add(increase);}
size_t getTotalMemoryUsed() const { return fTotalMemoryUsed; }
int getCacheCountUsed() const { return fCacheCount; }
#ifdef SK_DEBUG
@@ -68,30 +66,29 @@ public:
// or count limit.
bool isOverBudget() const {
return fCacheCount > fCacheCountLimit ||
fTotalMemoryUsed.load() > fCacheSizeLimit;
fTotalMemoryUsed > fCacheSizeLimit;
}
void purgeAll(); // does not change budget
// call when a glyphcache is available for caching (i.e. not in use)
void internalAttachCacheToHead(SkGlyphCache*);
void attachCacheToHead(SkGlyphCache*);
// can only be called when the mutex is already held
void internalMoveToHead(SkGlyphCache *);
void internalDetachCache(SkGlyphCache*);
void internalAttachCacheToHead(SkGlyphCache*);
private:
SkGlyphCache* fHead;
size_t fTotalMemoryUsed;
size_t fCacheSizeLimit;
int32_t fCacheCountLimit;
int32_t fCacheCount;
// Checkout budgets, modulated by the specified min-bytes-needed-to-purge,
// and attempt to purge caches to match.
// Returns number of bytes freed.
void internalDetachCache(SkGlyphCache* cache);
size_t internalPurge(size_t minBytesNeeded = 0);
private:
SkGlyphCache* fHead;
SkAtomic<size_t> fTotalMemoryUsed;
size_t fCacheSizeLimit;
int32_t fCacheCountLimit;
int32_t fCacheCount;
};
#endif
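
To make internalPurge's sizing rule concrete, a worked example with invented numbers:

    // fTotalMemoryUsed = 3 MB, fCacheSizeLimit = 2 MB, minBytesNeeded = 0
    // bytesNeeded = 3 MB - 2 MB          = 1 MB  (amount over budget)
    // bytesNeeded = max(1 MB, 0)         = 1 MB  (caller's minimum)
    // bytesNeeded = max(1 MB, 3 MB >> 2) = 1 MB  ("no small purges" floor)
    // Strikes are then freed from the LRU tail until at least 1 MB is reclaimed.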