diff --git a/include/core/SkAtomics.h b/include/core/SkAtomics.h
index d31d9c64fb..7c5294b76d 100644
--- a/include/core/SkAtomics.h
+++ b/include/core/SkAtomics.h
@@ -29,6 +29,9 @@ void sk_atomic_store(T*, T, sk_memory_order = sk_memory_order_seq_cst);
 template <typename T>
 T sk_atomic_fetch_add(T*, T, sk_memory_order = sk_memory_order_seq_cst);
 
+template <typename T>
+T sk_atomic_fetch_sub(T*, T, sk_memory_order = sk_memory_order_seq_cst);
+
 template <typename T>
 bool sk_atomic_compare_exchange(T*, T* expected, T desired,
                                 sk_memory_order success = sk_memory_order_seq_cst,
@@ -58,6 +61,10 @@ public:
         return sk_atomic_fetch_add(&fVal, val, mo);
     }
 
+    T fetch_sub(const T& val, sk_memory_order mo = sk_memory_order_seq_cst) {
+        return sk_atomic_fetch_sub(&fVal, val, mo);
+    }
+
     bool compare_exchange(T* expected, const T& desired,
                           sk_memory_order success = sk_memory_order_seq_cst,
                           sk_memory_order failure = sk_memory_order_seq_cst) {
diff --git a/include/ports/SkAtomics_atomic.h b/include/ports/SkAtomics_atomic.h
index ddbf7c3f37..64ee823f90 100644
--- a/include/ports/SkAtomics_atomic.h
+++ b/include/ports/SkAtomics_atomic.h
@@ -31,6 +31,12 @@ T sk_atomic_fetch_add(T* ptr, T val, sk_memory_order mo) {
     return __atomic_fetch_add(ptr, val, mo);
 }
 
+template <typename T>
+T sk_atomic_fetch_sub(T* ptr, T val, sk_memory_order mo) {
+    // All values of mo are valid.
+    return __atomic_fetch_sub(ptr, val, mo);
+}
+
 template <typename T>
 bool sk_atomic_compare_exchange(T* ptr, T* expected, T desired,
                                 sk_memory_order success,
diff --git a/include/ports/SkAtomics_std.h b/include/ports/SkAtomics_std.h
index 4c26858dfd..163efb78c0 100644
--- a/include/ports/SkAtomics_std.h
+++ b/include/ports/SkAtomics_std.h
@@ -38,6 +38,13 @@ T sk_atomic_fetch_add(T* ptr, T val, sk_memory_order mo) {
     return std::atomic_fetch_add_explicit(ap, val, (std::memory_order)mo);
 }
 
+template <typename T>
+T sk_atomic_fetch_sub(T* ptr, T val, sk_memory_order mo) {
+    // All values of mo are valid.
+    std::atomic<T>* ap = reinterpret_cast<std::atomic<T>*>(ptr);
+    return std::atomic_fetch_sub_explicit(ap, val, (std::memory_order)mo);
+}
+
 template <typename T>
 bool sk_atomic_compare_exchange(T* ptr, T* expected, T desired,
                                 sk_memory_order success,
diff --git a/include/ports/SkAtomics_sync.h b/include/ports/SkAtomics_sync.h
index 7ca0b46a94..02b1e58072 100644
--- a/include/ports/SkAtomics_sync.h
+++ b/include/ports/SkAtomics_sync.h
@@ -45,6 +45,11 @@ T sk_atomic_fetch_add(T* ptr, T val, sk_memory_order) {
     return __sync_fetch_and_add(ptr, val);
 }
 
+template <typename T>
+T sk_atomic_fetch_sub(T* ptr, T val, sk_memory_order) {
+    return __sync_fetch_and_sub(ptr, val);
+}
+
 template <typename T>
 bool sk_atomic_compare_exchange(T* ptr, T* expected, T desired, sk_memory_order, sk_memory_order) {
     T prev = __sync_val_compare_and_swap(ptr, *expected, desired);
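Reviewer note, not part of the patch: the new fetch_sub entry points mirror fetch_add and return the value held before the subtraction. A minimal usage sketch against the SkAtomic wrapper declared above; the counter name and the chosen memory orders are illustrative assumptions only.

    static SkAtomic<int32_t> gOutstanding(0);   // hypothetical counter

    void acquire_slot() {
        // fetch_add returns the pre-increment value.
        gOutstanding.fetch_add(1, sk_memory_order_relaxed);
    }

    void release_slot() {
        // fetch_sub is the symmetric operation this patch adds; the returned
        // pre-decrement value shows the counter was still positive.
        int32_t before = gOutstanding.fetch_sub(1, sk_memory_order_acq_rel);
        SkASSERT(before >= 1);
    }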
diff --git a/src/core/SkDraw.cpp b/src/core/SkDraw.cpp
index 8e1a132b4d..f4674b7798 100644
--- a/src/core/SkDraw.cpp
+++ b/src/core/SkDraw.cpp
@@ -1476,14 +1476,12 @@ static void D1G_RectClip(const SkDraw1Glyph& state, Sk48Dot16 fx, Sk48Dot16 fy,
         bounds = &storage;
     }
 
-    uint8_t* aa = (uint8_t*)glyph.fImage;
+    uint8_t* aa = (uint8_t*)state.fCache->findImage(glyph);
     if (nullptr == aa) {
-        aa = (uint8_t*)state.fCache->findImage(glyph);
-        if (nullptr == aa) {
-            return; // can't rasterize glyph
-        }
+        return; // can't rasterize glyph
     }
+
     mask.fRowBytes = glyph.rowBytes();
     mask.fFormat = static_cast<SkMask::Format>(glyph.fMaskFormat);
     mask.fImage = aa;
diff --git a/src/core/SkGlyph.h b/src/core/SkGlyph.h
index c747995ed0..102088224d 100644
--- a/src/core/SkGlyph.h
+++ b/src/core/SkGlyph.h
@@ -47,6 +47,8 @@ class SkGlyph {
     uint8_t fMaskFormat;
     int8_t fRsbDelta, fLsbDelta;  // used by auto-kerning
     int8_t fForceBW;
+    mutable bool fImageIsSet;
+    mutable bool fPathIsSet;
 
     void initWithGlyphID(uint32_t glyph_id) {
         this->initCommon(MakeID(glyph_id));
@@ -135,6 +137,8 @@ class SkGlyph {
         fPath = nullptr;
         fMaskFormat = MASK_FORMAT_UNKNOWN;
         fForceBW = 0;
+        fImageIsSet = false;
+        fPathIsSet = false;
     }
 
     static unsigned ID2Code(uint32_t id) {
diff --git a/src/core/SkGlyphCache.cpp b/src/core/SkGlyphCache.cpp
index 309707c66f..a337eb0a87 100644
--- a/src/core/SkGlyphCache.cpp
+++ b/src/core/SkGlyphCache.cpp
@@ -9,6 +9,7 @@
 #include "SkGlyphCache_Globals.h"
 #include "SkGraphics.h"
 #include "SkLazyPtr.h"
+#include "SkOnce.h"
 #include "SkPath.h"
 #include "SkTemplates.h"
 #include "SkTraceMemoryDump.h"
@@ -45,42 +46,90 @@ static SkGlyphCache_Globals& get_globals() {
 #define kMinAllocAmount ((sizeof(SkGlyph) + kMinGlyphImageSize) * kMinGlyphCount)
 
 SkGlyphCache::SkGlyphCache(SkTypeface* typeface, const SkDescriptor* desc, SkScalerContext* ctx)
-    : fDesc(desc->copy())
+    : fNext(nullptr)
+    , fPrev(nullptr)
+    , fDesc(desc->copy())
+    , fRefCount(0)
+    , fGlyphAlloc(kMinAllocAmount)
+    , fMemoryUsed(sizeof(*this))
     , fScalerContext(ctx)
-    , fGlyphAlloc(kMinAllocAmount) {
+    , fAuxProcList(nullptr) {
     SkASSERT(typeface);
     SkASSERT(desc);
    SkASSERT(ctx);
 
-    fPrev = fNext = nullptr;
-
     fScalerContext->getFontMetrics(&fFontMetrics);
-
-    fMemoryUsed = sizeof(*this);
-
-    fAuxProcList = nullptr;
 }
 
 SkGlyphCache::~SkGlyphCache() {
     fGlyphMap.foreach ([](SkGlyph* g) { delete g->fPath; });
     SkDescriptor::Free(fDesc);
     delete fScalerContext;
-    this->invokeAndRemoveAuxProcs();
+    AuxProcRec* rec = fAuxProcList;
+    while (rec) {
+        rec->fProc(rec->fData);
+        AuxProcRec* next = rec->fNext;
+        delete rec;
+        rec = next;
+    }
+}
+
+void SkGlyphCache::increaseMemoryUsed(size_t used) {
+    fMemoryUsed += used;
+    get_globals().increaseTotalMemoryUsed(used);
+}
+
+SkGlyphCache::CharGlyphRec
+SkGlyphCache::PackedUnicharIDtoCharGlyphRec(PackedUnicharID packedUnicharID) {
+    SkFixed x = SkGlyph::SubToFixed(SkGlyph::ID2SubX(packedUnicharID));
+    SkFixed y = SkGlyph::SubToFixed(SkGlyph::ID2SubY(packedUnicharID));
+    SkUnichar unichar = SkGlyph::ID2Code(packedUnicharID);
+
+    SkAutoMutexAcquire lock(fScalerMutex);
+    PackedGlyphID packedGlyphID = SkGlyph::MakeID(fScalerContext->charToGlyphID(unichar), x, y);
+
+    return {packedUnicharID, packedGlyphID};
 }
 
 SkGlyphCache::CharGlyphRec* SkGlyphCache::getCharGlyphRec(PackedUnicharID packedUnicharID) {
     if (nullptr == fPackedUnicharIDToPackedGlyphID.get()) {
-        // Allocate the array.
-        fPackedUnicharIDToPackedGlyphID.reset(kHashCount);
-        // Initialize array to map character and position with the impossible glyph ID. This
-        // represents no mapping.
-        for (int i = 0; i < kHashCount; ++i) {
-            fPackedUnicharIDToPackedGlyphID[i].fPackedUnicharID = SkGlyph::kImpossibleID;
-            fPackedUnicharIDToPackedGlyphID[i].fPackedGlyphID = 0;
+        fMapMutex.releaseShared();
+        // Add the map only if there is a call for char -> glyph mapping.
+        {
+            SkAutoTAcquire<SkSharedMutex> lock(fMapMutex);
+
+            // Now that the cache is locked exclusively, make sure no one added this array
+            // while unlocked.
+            if (nullptr == fPackedUnicharIDToPackedGlyphID.get()) {
+                // Allocate the array.
+                fPackedUnicharIDToPackedGlyphID.reset(new PackedUnicharIDToPackedGlyphIDMap);
+            }
+
+            fPackedUnicharIDToPackedGlyphID->set(PackedUnicharIDtoCharGlyphRec(packedUnicharID));
         }
+        fMapMutex.acquireShared();
+
+        return fPackedUnicharIDToPackedGlyphID->find(packedUnicharID);
     }
 
-    return &fPackedUnicharIDToPackedGlyphID[SkChecksum::CheapMix(packedUnicharID) & kHashMask];
+    CharGlyphRec* answer = fPackedUnicharIDToPackedGlyphID->find(packedUnicharID);
+    if (nullptr == answer) {
+        fMapMutex.releaseShared();
+        // Add a new char -> glyph mapping.
+        {
+            SkAutoTAcquire<SkSharedMutex> lock(fMapMutex);
+            answer = fPackedUnicharIDToPackedGlyphID->find(packedUnicharID);
+            if (nullptr == answer) {
+                fPackedUnicharIDToPackedGlyphID->set(
+                    PackedUnicharIDtoCharGlyphRec(packedUnicharID));
+            }
+        }
+        fMapMutex.acquireShared();
+        return fPackedUnicharIDToPackedGlyphID->find(packedUnicharID);
+    }
+
+    return answer;
 }
 
 ///////////////////////////////////////////////////////////////////////////////
@@ -95,15 +144,11 @@ uint16_t SkGlyphCache::unicharToGlyph(SkUnichar charCode) {
     VALIDATE();
     PackedUnicharID packedUnicharID = SkGlyph::MakeID(charCode);
     const CharGlyphRec& rec = *this->getCharGlyphRec(packedUnicharID);
-
-    if (rec.fPackedUnicharID == packedUnicharID) {
-        return SkGlyph::ID2Code(rec.fPackedGlyphID);
-    } else {
-        return fScalerContext->charToGlyphID(charCode);
-    }
+    return SkGlyph::ID2Code(rec.fPackedGlyphID);
 }
 
 SkUnichar SkGlyphCache::glyphToUnichar(uint16_t glyphID) {
+    SkAutoMutexAcquire lock(fScalerMutex);
     return fScalerContext->glyphIDToChar(glyphID);
 }
 
@@ -117,19 +162,19 @@ int SkGlyphCache::countCachedGlyphs() const {
 
 ///////////////////////////////////////////////////////////////////////////////
 
+SkGlyph* SkGlyphCache::lookupByChar(SkUnichar charCode, SkFixed x, SkFixed y) {
+    PackedUnicharID targetUnicharID = SkGlyph::MakeID(charCode, x, y);
+    CharGlyphRec* rec = this->getCharGlyphRec(targetUnicharID);
+    PackedGlyphID packedGlyphID = rec->fPackedGlyphID;
+
+    return this->lookupByPackedGlyphID(packedGlyphID);
+}
+
 const SkGlyph& SkGlyphCache::getUnicharAdvance(SkUnichar charCode) {
     VALIDATE();
     return *this->lookupByChar(charCode);
 }
 
-const SkGlyph& SkGlyphCache::getGlyphIDAdvance(uint16_t glyphID) {
-    VALIDATE();
-    PackedGlyphID packedGlyphID = SkGlyph::MakeID(glyphID);
-    return *this->lookupByPackedGlyphID(packedGlyphID);
-}
-
-///////////////////////////////////////////////////////////////////////////////
-
 const SkGlyph& SkGlyphCache::getUnicharMetrics(SkUnichar charCode) {
     VALIDATE();
     return *this->lookupByChar(charCode);
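Reviewer note, not part of the patch: getCharGlyphRec above does a release-shared / take-exclusive / re-check / re-take-shared dance because the reader/writer lock has no atomic upgrade. The same pattern, distilled into a self-contained sketch using std types (the patch uses SkSharedMutex and SkAutoTAcquire instead; all names here are hypothetical):

    #include <shared_mutex>
    #include <unordered_map>

    // Precondition: the caller holds mu in shared (reader) mode.
    // Returns a pointer that is valid while the shared lock is held.
    template <typename K, typename V, typename Make>
    V* find_or_add(std::shared_mutex& mu, std::unordered_map<K, V>& map,
                   const K& key, Make make) {
        if (auto it = map.find(key); it != map.end()) {
            return &it->second;
        }
        mu.unlock_shared();
        {
            std::unique_lock<std::shared_mutex> exclusive(mu);
            // Re-check: another writer may have inserted in the unlocked gap.
            if (map.find(key) == map.end()) {
                map.emplace(key, make(key));
            }
        }  // drop the exclusive lock
        mu.lock_shared();
        // Re-find under the shared lock; results from before are not trusted.
        return &map.find(key)->second;
    }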
     VALIDATE();
     return *this->lookupByChar(charCode);
@@ -140,6 +185,49 @@ const SkGlyph& SkGlyphCache::getUnicharMetrics(SkUnichar charCode, SkFixed x, Sk
     return *this->lookupByChar(charCode, x, y);
 }
 
+///////////////////////////////////////////////////////////////////////////////
+SkGlyph* SkGlyphCache::allocateNewGlyph(PackedGlyphID packedGlyphID) {
+    SkGlyph* glyphPtr;
+    {
+        fMapMutex.releaseShared();
+        {
+            SkAutoTAcquire<SkSharedMutex> mapLock(fMapMutex);
+            glyphPtr = fGlyphMap.find(packedGlyphID);
+            if (nullptr == glyphPtr) {
+                SkGlyph glyph;
+                glyph.initGlyphFromCombinedID(packedGlyphID);
+                {
+                    SkAutoMutexAcquire lock(fScalerMutex);
+                    fScalerContext->getMetrics(&glyph);
+                    this->increaseMemoryUsed(sizeof(SkGlyph));
+                    glyphPtr = fGlyphMap.set(glyph);
+                }  // drop scaler lock
+            }
+        }  // drop map lock
+        fMapMutex.acquireShared();
+        glyphPtr = fGlyphMap.find(packedGlyphID);
+    }
+
+    SkASSERT(glyphPtr->fID != SkGlyph::kImpossibleID);
+    return glyphPtr;
+}
+
+SkGlyph* SkGlyphCache::lookupByPackedGlyphID(PackedGlyphID packedGlyphID) {
+    SkGlyph* glyph = fGlyphMap.find(packedGlyphID);
+
+    if (nullptr == glyph) {
+        glyph = this->allocateNewGlyph(packedGlyphID);
+    }
+    return glyph;
+}
+
+const SkGlyph& SkGlyphCache::getGlyphIDAdvance(uint16_t glyphID) {
+    VALIDATE();
+    PackedGlyphID packedGlyphID = SkGlyph::MakeID(glyphID);
+    return *this->lookupByPackedGlyphID(packedGlyphID);
+}
+
 const SkGlyph& SkGlyphCache::getGlyphIDMetrics(uint16_t glyphID) {
     VALIDATE();
     PackedGlyphID packedGlyphID = SkGlyph::MakeID(glyphID);
@@ -152,74 +240,46 @@ const SkGlyph& SkGlyphCache::getGlyphIDMetrics(uint16_t glyphID, SkFixed x, SkFi
     return *this->lookupByPackedGlyphID(packedGlyphID);
 }
 
-SkGlyph* SkGlyphCache::lookupByChar(SkUnichar charCode, SkFixed x, SkFixed y) {
-    PackedUnicharID id = SkGlyph::MakeID(charCode, x, y);
-    CharGlyphRec* rec = this->getCharGlyphRec(id);
-    if (rec->fPackedUnicharID != id) {
-        // this ID is based on the UniChar
-        rec->fPackedUnicharID = id;
-        // this ID is based on the glyph index
-        PackedGlyphID combinedID = SkGlyph::MakeID(fScalerContext->charToGlyphID(charCode), x, y);
-        rec->fPackedGlyphID = combinedID;
-        return this->lookupByPackedGlyphID(combinedID);
-    } else {
-        return this->lookupByPackedGlyphID(rec->fPackedGlyphID);
+///////////////////////////////////////////////////////////////////////////////
+
+void SkGlyphCache::OnceFillInImage(GlyphAndCache gc) {
+    SkGlyphCache* cache = gc.cache;
+    const SkGlyph* glyph = gc.glyph;
+    cache->fScalerMutex.assertHeld();
+    if (glyph->fWidth > 0 && glyph->fWidth < kMaxGlyphWidth) {
+        size_t size = glyph->computeImageSize();
+        sk_atomic_store(&const_cast<SkGlyph*>(glyph)->fImage,
+                        cache->fGlyphAlloc.alloc(size, SkChunkAlloc::kReturnNil_AllocFailType),
+                        sk_memory_order_relaxed);
+        if (glyph->fImage != nullptr) {
+            cache->fScalerContext->getImage(*glyph);
+            cache->increaseMemoryUsed(size);
+        }
     }
 }
 
-SkGlyph* SkGlyphCache::lookupByPackedGlyphID(PackedGlyphID packedGlyphID) {
-    SkGlyph* glyph = fGlyphMap.find(packedGlyphID);
-    if (nullptr == glyph) {
-        glyph = this->allocateNewGlyph(packedGlyphID);
-    }
-    return glyph;
-}
-
-SkGlyph* SkGlyphCache::allocateNewGlyph(PackedGlyphID packedGlyphID) {
-    fMemoryUsed += sizeof(SkGlyph);
-
-    SkGlyph* glyphPtr;
-    {
-        SkGlyph glyph;
-        glyph.initGlyphFromCombinedID(packedGlyphID);
-        glyphPtr = fGlyphMap.set(glyph);
-    }
-    fScalerContext->getMetrics(glyphPtr);
-
-    SkASSERT(glyphPtr->fID != SkGlyph::kImpossibleID);
-    return glyphPtr;
-}
-
 const void* SkGlyphCache::findImage(const SkGlyph& glyph) {
-    if (glyph.fWidth > 0 && glyph.fWidth < kMaxGlyphWidth) {
-        if (nullptr == glyph.fImage) {
-            size_t size = glyph.computeImageSize();
-            const_cast<SkGlyph&>(glyph).fImage = fGlyphAlloc.alloc(size,
-                                        SkChunkAlloc::kReturnNil_AllocFailType);
-            // check that alloc() actually succeeded
-            if (glyph.fImage) {
-                fScalerContext->getImage(glyph);
-                // TODO: the scaler may have changed the maskformat during
-                // getImage (e.g. from AA or LCD to BW) which means we may have
-                // overallocated the buffer. Check if the new computedImageSize
-                // is smaller, and if so, strink the alloc size in fImageAlloc.
-                fMemoryUsed += size;
-            }
-        }
-    }
-    return glyph.fImage;
+    SkOnce<SkMutex, GlyphAndCache>(
+        &glyph.fImageIsSet, &fScalerMutex, &SkGlyphCache::OnceFillInImage, {this, &glyph});
+    return sk_atomic_load(&glyph.fImage, sk_memory_order_seq_cst);
+}
+
+void SkGlyphCache::OnceFillInPath(GlyphAndCache gc) {
+    SkGlyphCache* cache = gc.cache;
+    const SkGlyph* glyph = gc.glyph;
+    cache->fScalerMutex.assertHeld();
+    if (glyph->fWidth > 0) {
+        sk_atomic_store(&const_cast<SkGlyph*>(glyph)->fPath, new SkPath, sk_memory_order_relaxed);
+        cache->fScalerContext->getPath(*glyph, glyph->fPath);
+        size_t size = sizeof(SkPath) + glyph->fPath->countPoints() * sizeof(SkPoint);
+        cache->increaseMemoryUsed(size);
    }
 }
 
 const SkPath* SkGlyphCache::findPath(const SkGlyph& glyph) {
-    if (glyph.fWidth) {
-        if (glyph.fPath == nullptr) {
-            const_cast<SkGlyph&>(glyph).fPath = new SkPath;
-            fScalerContext->getPath(glyph, glyph.fPath);
-            fMemoryUsed += sizeof(SkPath) +
-                    glyph.fPath->countPoints() * sizeof(SkPoint);
-        }
-    }
-    return glyph.fPath;
+    SkOnce<SkMutex, GlyphAndCache>(
+        &glyph.fPathIsSet, &fScalerMutex, &SkGlyphCache::OnceFillInPath, {this, &glyph});
+    return sk_atomic_load(&glyph.fPath, sk_memory_order_seq_cst);
 }
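Reviewer note, not part of the patch: findImage/findPath now pair a once-flag with an atomically published pointer, so the scaler runs at most once per glyph while steady-state readers pay only an atomic load. A self-contained model of the double-checked shape that SkOnce implements, using std::atomic (the patch keeps fImageIsSet a plain bool and lets SkOnce do the fencing; all names here are stand-ins):

    #include <atomic>
    #include <mutex>

    struct Glyph {                                // stand-in for SkGlyph
        std::atomic<const void*> image{nullptr};  // stand-in for fImage
        std::atomic<bool> imageIsSet{false};      // stand-in for fImageIsSet
    };

    const void* find_image(Glyph& glyph, std::mutex& scalerMutex) {
        // Fast path: once the flag is published, no lock is taken.
        if (!glyph.imageIsSet.load(std::memory_order_acquire)) {
            std::lock_guard<std::mutex> lock(scalerMutex);
            // Re-check under the lock; another thread may have won the race.
            if (!glyph.imageIsSet.load(std::memory_order_relaxed)) {
                static const char kPixels[4] = {};  // pretend rasterization
                glyph.image.store(kPixels, std::memory_order_relaxed);
                // The release store makes the image visible before the flag.
                glyph.imageIsSet.store(true, std::memory_order_release);
            }
        }
        return glyph.image.load(std::memory_order_acquire);
    }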
 
 void SkGlyphCache::dump() const {
@@ -232,18 +292,22 @@ void SkGlyphCache::dump() const {
     face->getFamilyName(&name);
 
     SkString msg;
-    msg.printf("cache typeface:%x %25s:%d size:%2g [%g %g %g %g] lum:%02X devG:%d pntG:%d cntr:%d glyphs:%3d",
-               face->uniqueID(), name.c_str(), face->style(), rec.fTextSize,
-               matrix[SkMatrix::kMScaleX], matrix[SkMatrix::kMSkewX],
-               matrix[SkMatrix::kMSkewY], matrix[SkMatrix::kMScaleY],
-               rec.fLumBits & 0xFF, rec.fDeviceGamma, rec.fPaintGamma, rec.fContrast,
-               fGlyphMap.count());
+    msg.printf(
+        "cache typeface:%x %25s:%d size:%2g [%g %g %g %g] "
+        "lum:%02X devG:%d pntG:%d cntr:%d glyphs:%3d",
+        face->uniqueID(), name.c_str(), face->style(), rec.fTextSize,
+        matrix[SkMatrix::kMScaleX], matrix[SkMatrix::kMSkewX],
+        matrix[SkMatrix::kMSkewY], matrix[SkMatrix::kMScaleY],
+        rec.fLumBits & 0xFF, rec.fDeviceGamma, rec.fPaintGamma, rec.fContrast,
+        fGlyphMap.count());
     SkDebugf("%s\n", msg.c_str());
 }
 
 ///////////////////////////////////////////////////////////////////////////////
 
 bool SkGlyphCache::getAuxProcData(void (*proc)(void*), void** dataPtr) const {
+    // Borrow the fScalerMutex to protect the AuxProc list.
+    SkAutoMutexAcquire lock(fScalerMutex);
     const AuxProcRec* rec = fAuxProcList;
     while (rec) {
         if (rec->fProc == proc) {
@@ -262,6 +326,8 @@ void SkGlyphCache::setAuxProc(void (*proc)(void*), void* data) {
         return;
     }
 
+    // Borrow the fScalerMutex to protect the AuxProc linked list.
+    SkAutoMutexAcquire lock(fScalerMutex);
     AuxProcRec* rec = fAuxProcList;
     while (rec) {
         if (rec->fProc == proc) {
@@ -278,27 +344,9 @@ void SkGlyphCache::setAuxProc(void (*proc)(void*), void* data) {
     fAuxProcList = rec;
 }
 
-void SkGlyphCache::invokeAndRemoveAuxProcs() {
-    AuxProcRec* rec = fAuxProcList;
-    while (rec) {
-        rec->fProc(rec->fData);
-        AuxProcRec* next = rec->fNext;
-        delete rec;
-        rec = next;
-    }
-}
-
-///////////////////////////////////////////////////////////////////////////////
 ///////////////////////////////////////////////////////////////////////////////
-
-class AutoAcquire {
-public:
-    AutoAcquire(SkSpinlock& lock) : fLock(lock) { fLock.acquire(); }
-    ~AutoAcquire() { fLock.release(); }
-private:
-    SkSpinlock& fLock;
-};
+typedef SkAutoTAcquire<SkSpinlock> AutoAcquire;
 
 size_t SkGlyphCache_Globals::setCacheSizeLimit(size_t newLimit) {
     static const size_t minLimit = 256 * 1024;
@@ -329,7 +377,7 @@ int SkGlyphCache_Globals::setCacheCountLimit(int newCount) {
 
 void SkGlyphCache_Globals::purgeAll() {
     AutoAcquire ac(fLock);
-    this->internalPurge(fTotalMemoryUsed);
+    this->internalPurge(fTotalMemoryUsed.load());
 }
 
 /*  This guy calls the visitor from within the mutext lock, so the visitor
     cannot:
     - take too much time
     - try to acquire the mutext again
     - call a fontscaler (which might call into the cache)
 */
-SkGlyphCache* SkGlyphCache::VisitCache(SkTypeface* typeface,
-                                       const SkDescriptor* desc,
-                                       bool (*proc)(const SkGlyphCache*, void*),
-                                       void* context) {
+SkGlyphCache* SkGlyphCache::VisitCache(
+    SkTypeface* typeface, const SkDescriptor* desc, VisitProc proc, void* context) {
     if (!typeface) {
         typeface = SkTypeface::GetDefaultTypeface();
     }
@@ -357,11 +403,15 @@ SkGlyphCache* SkGlyphCache::VisitCache(SkTypeface* typeface,
 
     for (cache = globals.internalGetHead(); cache != nullptr; cache = cache->fNext) {
         if (cache->fDesc->equals(*desc)) {
-            globals.internalDetachCache(cache);
+            globals.internalMoveToHead(cache);
+            cache->fMapMutex.acquireShared();
             if (!proc(cache, context)) {
-                globals.internalAttachCacheToHead(cache);
-                cache = nullptr;
+                cache->fMapMutex.releaseShared();
+                return nullptr;
             }
+            // The caller will take a reference on this SkGlyphCache, and the corresponding
+            // AttachCache call will decrement the reference.
+            cache->fRefCount += 1;
             return cache;
         }
     }
@@ -374,28 +424,45 @@ SkGlyphCache* SkGlyphCache::VisitCache(SkTypeface* typeface,
         // pass true the first time, to notice if the scalercontext failed,
         // so we can try the purge.
         SkScalerContext* ctx = typeface->createScalerContext(desc, true);
-        if (!ctx) {
+        if (nullptr == ctx) {
             get_globals().purgeAll();
             ctx = typeface->createScalerContext(desc, false);
             SkASSERT(ctx);
         }
+
         cache = new SkGlyphCache(typeface, desc, ctx);
+
+        globals.attachCacheToHead(cache);
     }
 
     AutoValidate av(cache);
 
+    AutoAcquire ac(globals.fLock);
+    cache->fMapMutex.acquireShared();
     if (!proc(cache, context)) {   // need to reattach
-        globals.attachCacheToHead(cache);
-        cache = nullptr;
+        cache->fMapMutex.releaseShared();
+        return nullptr;
     }
+    // The caller will take a reference on this SkGlyphCache, and the corresponding
+    // AttachCache call will decrement the reference.
+    cache->fRefCount += 1;
     return cache;
 }
 
 void SkGlyphCache::AttachCache(SkGlyphCache* cache) {
     SkASSERT(cache);
-    SkASSERT(cache->fNext == nullptr);
+    cache->fMapMutex.releaseShared();
+    SkGlyphCache_Globals& globals = get_globals();
+    AutoAcquire ac(globals.fLock);
+    globals.validate();
+    cache->validate();
 
-    get_globals().attachCacheToHead(cache);
+    // Unref and delete if no longer in the LRU list.
+    cache->fRefCount -= 1;
+    if (cache->fRefCount == 0) {
+        delete cache;
+    }
+
+    globals.internalPurge();
 }
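Reviewer note, not part of the patch: a strike now stays alive while any caller has it checked out, even if the purge drops it from the LRU list in the meantime. The ref-count handoff among VisitCache, internalDetachCache/internalPurge, and AttachCache, as a single-threaded toy (the real code does all of this under globals.fLock; names are stand-ins):

    struct Strike {              // stand-in for SkGlyphCache
        int refCount;            // one ref held by the LRU list, one per user
    };

    Strike* visit(Strike* s) {   // VisitCache: the caller gains a ref
        s->refCount += 1;
        return s;
    }

    void purge(Strike* s) {      // internalDetachCache + internalPurge:
        s->refCount -= 1;        // the list drops its ref...
        if (0 == s->refCount) delete s;  // ...and deletes only if unreferenced
    }

    void attach(Strike* s) {     // AttachCache: the caller drops its ref
        s->refCount -= 1;
        if (s->refCount == 0) delete s;
    }

    // A strike purged while checked out survives until attach():
    //   Strike* s = new Strike{1};  // created holding the list's ref
    //   visit(s);                   // refCount == 2
    //   purge(s);                   // off the list, refCount == 1, still alive
    //   attach(s);                  // refCount == 0, deleted here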
 
 static void dump_visitor(const SkGlyphCache& cache, void* context) {
@@ -473,10 +540,16 @@ void SkGlyphCache::VisitAll(Visitor visitor, void* context) {
 void SkGlyphCache_Globals::attachCacheToHead(SkGlyphCache* cache) {
     AutoAcquire ac(fLock);
 
+    fCacheCount += 1;
+    cache->fRefCount += 1;
+    // Access to cache->fMemoryUsed is single threaded until internalMoveToHead.
+    fTotalMemoryUsed.fetch_add(cache->fMemoryUsed);
+
+    this->internalMoveToHead(cache);
+
     this->validate();
     cache->validate();
 
-    this->internalAttachCacheToHead(cache);
     this->internalPurge();
 }
@@ -494,13 +567,13 @@ size_t SkGlyphCache_Globals::internalPurge(size_t minBytesNeeded) {
     this->validate();
 
     size_t bytesNeeded = 0;
-    if (fTotalMemoryUsed > fCacheSizeLimit) {
-        bytesNeeded = fTotalMemoryUsed - fCacheSizeLimit;
+    if (fTotalMemoryUsed.load() > fCacheSizeLimit) {
+        bytesNeeded = fTotalMemoryUsed.load() - fCacheSizeLimit;
     }
     bytesNeeded = SkTMax(bytesNeeded, minBytesNeeded);
     if (bytesNeeded) {
         // no small purges!
-        bytesNeeded = SkTMax(bytesNeeded, fTotalMemoryUsed >> 2);
+        bytesNeeded = SkTMax(bytesNeeded, fTotalMemoryUsed.load() >> 2);
     }
 
     int countNeeded = 0;
@@ -526,9 +599,10 @@ size_t SkGlyphCache_Globals::internalPurge(size_t minBytesNeeded) {
             SkGlyphCache* prev = cache->fPrev;
             bytesFreed += cache->fMemoryUsed;
             countFreed += 1;
-
             this->internalDetachCache(cache);
-            delete cache;
+            if (0 == cache->fRefCount) {
+                delete cache;
+            }
             cache = prev;
         }
@@ -544,34 +618,50 @@ size_t SkGlyphCache_Globals::internalPurge(size_t minBytesNeeded) {
     return bytesFreed;
 }
 
-void SkGlyphCache_Globals::internalAttachCacheToHead(SkGlyphCache* cache) {
-    SkASSERT(nullptr == cache->fPrev && nullptr == cache->fNext);
-    if (fHead) {
-        fHead->fPrev = cache;
-        cache->fNext = fHead;
+void SkGlyphCache_Globals::internalMoveToHead(SkGlyphCache* cache) {
+    if (cache != fHead) {
+        if (cache->fPrev) {
+            cache->fPrev->fNext = cache->fNext;
+        }
+        if (cache->fNext) {
+            cache->fNext->fPrev = cache->fPrev;
+        }
+        cache->fNext = nullptr;
+        cache->fPrev = nullptr;
+        if (fHead) {
+            fHead->fPrev = cache;
+            cache->fNext = fHead;
+        }
+        fHead = cache;
     }
-    fHead = cache;
-
-    fCacheCount += 1;
-    fTotalMemoryUsed += cache->fMemoryUsed;
 }
 
 void SkGlyphCache_Globals::internalDetachCache(SkGlyphCache* cache) {
-    SkASSERT(fCacheCount > 0);
     fCacheCount -= 1;
-    fTotalMemoryUsed -= cache->fMemoryUsed;
+    fTotalMemoryUsed.fetch_sub(cache->fMemoryUsed);
 
     if (cache->fPrev) {
         cache->fPrev->fNext = cache->fNext;
     } else {
+        // If cache->fPrev == nullptr then this is the head node.
         fHead = cache->fNext;
+        if (fHead != nullptr) {
+            fHead->fPrev = nullptr;
+        }
     }
     if (cache->fNext) {
         cache->fNext->fPrev = cache->fPrev;
+    } else {
+        // If cache->fNext == nullptr then this is the last node.
+        if (cache->fPrev != nullptr) {
+            cache->fPrev->fNext = nullptr;
+        }
     }
     cache->fPrev = cache->fNext = nullptr;
+    cache->fRefCount -= 1;
 }
+
 ///////////////////////////////////////////////////////////////////////////////
 
 #ifdef SK_DEBUG
@@ -590,20 +680,16 @@ void SkGlyphCache::validate() const {
 }
 
 void SkGlyphCache_Globals::validate() const {
-    size_t computedBytes = 0;
     int computedCount = 0;
-
     const SkGlyphCache* head = fHead;
     while (head != nullptr) {
-        computedBytes += head->fMemoryUsed;
         computedCount += 1;
         head = head->fNext;
     }
 
     SkASSERTF(fCacheCount == computedCount, "fCacheCount: %d, computedCount: %d", fCacheCount,
               computedCount);
-    SkASSERTF(fTotalMemoryUsed == computedBytes, "fTotalMemoryUsed: %d, computedBytes: %d",
-              fTotalMemoryUsed, computedBytes);
 }
 
 #endif
diff --git a/src/core/SkGlyphCache.h b/src/core/SkGlyphCache.h
index 4eb6b5bc60..cf64527b49 100644
--- a/src/core/SkGlyphCache.h
+++ b/src/core/SkGlyphCache.h
@@ -11,8 +11,11 @@
 #include "SkChunkAlloc.h"
 #include "SkDescriptor.h"
 #include "SkGlyph.h"
+#include "SkMutex.h"
 #include "SkTHash.h"
 #include "SkScalerContext.h"
+#include "SkSharedMutex.h"
+#include "SkSpinlock.h"
 #include "SkTemplates.h"
 #include "SkTDArray.h"
 
@@ -119,12 +122,23 @@ public:
 
     SkScalerContext* getScalerContext() const { return fScalerContext; }
 
+    struct GlyphAndCache {
+        SkGlyphCache* cache;
+        const SkGlyph* glyph;
+    };
+
+    static void OnceFillInImage(GlyphAndCache gc);
+
+    static void OnceFillInPath(GlyphAndCache gc);
+
+    typedef bool (*VisitProc)(const SkGlyphCache*, void*);
+
     /** Find a matching cache entry, and call proc() with it. If none is found create a new one.
         If the proc() returns true, detach the cache and return it, otherwise leave it and return
         nullptr.
     */
     static SkGlyphCache* VisitCache(SkTypeface*, const SkDescriptor* desc,
-                                    bool (*proc)(const SkGlyphCache*, void*),
+                                    VisitProc proc,
                                     void* context);
 
     /** Given a strike that was returned by either VisitCache() or DetachCache() add it back into
@@ -181,18 +195,21 @@ public:
 private:
     friend class SkGlyphCache_Globals;
 
-    enum {
-        kHashBits = 8,
-        kHashCount = 1 << kHashBits,
-        kHashMask = kHashCount - 1
-    };
-
     typedef uint32_t PackedGlyphID;    // glyph-index + subpixel-pos
     typedef uint32_t PackedUnicharID;  // unichar + subpixel-pos
 
     struct CharGlyphRec {
-        PackedUnicharID fPackedUnicharID;
-        PackedGlyphID fPackedGlyphID;
+        class HashTraits {
+        public:
+            static PackedUnicharID GetKey(const CharGlyphRec& rec) {
+                return rec.fPackedUnicharID;
+            }
+            static uint32_t Hash(PackedUnicharID unicharID) {
+                return SkChecksum::CheapMix(unicharID);
+            }
+        };
+        PackedUnicharID fPackedUnicharID;
+        PackedGlyphID   fPackedGlyphID;
     };
 
     struct AuxProcRec {
@@ -205,6 +222,9 @@ private:
     SkGlyphCache(SkTypeface*, const SkDescriptor*, SkScalerContext*);
     ~SkGlyphCache();
 
+    // Increase the memory used keeping the cache and the global size in sync.
+    void increaseMemoryUsed(size_t used);
+
     // Return the SkGlyph* associated with MakeID. The id parameter is the
     // combined glyph/x/y id generated by MakeID. If it is just a glyph id
     // then x and y are assumed to be zero.
@@ -216,32 +236,45 @@ private:
     // Return a new SkGlyph for the glyph ID and subpixel position id.
     SkGlyph* allocateNewGlyph(PackedGlyphID packedGlyphID);
 
+    // Add the full metrics to an existing glyph.
+    void addFullMetrics(SkGlyph* glyph);
+
     static bool DetachProc(const SkGlyphCache*, void*) { return true; }
 
+    CharGlyphRec PackedUnicharIDtoCharGlyphRec(PackedUnicharID packedUnicharID);
+
     // The id arg is a combined id generated by MakeID.
     CharGlyphRec* getCharGlyphRec(PackedUnicharID id);
 
-    void invokeAndRemoveAuxProcs();
-
     inline static SkGlyphCache* FindTail(SkGlyphCache* head);
 
+    // The following are protected by the SkGlyphCache_Globals fLock mutex.
+    // Note: the following fields are protected by a mutex in a different class.
     SkGlyphCache* fNext;
     SkGlyphCache* fPrev;
     SkDescriptor* const fDesc;
-    SkScalerContext* const fScalerContext;
     SkPaint::FontMetrics fFontMetrics;
+    int fRefCount;
 
+    // The following fields are protected by fMapMutex.
+    mutable SkSharedMutex fMapMutex;
     // Map from a combined GlyphID and sub-pixel position to a SkGlyph.
     SkTHashTable<SkGlyph, PackedGlyphID, SkGlyph::HashTraits> fGlyphMap;
-
     SkChunkAlloc fGlyphAlloc;
-
-    SkAutoTArray<CharGlyphRec> fPackedUnicharIDToPackedGlyphID;
-
+    typedef SkTHashTable<CharGlyphRec, PackedUnicharID, CharGlyphRec::HashTraits>
+        PackedUnicharIDToPackedGlyphIDMap;
+    SkAutoTDelete<PackedUnicharIDToPackedGlyphIDMap> fPackedUnicharIDToPackedGlyphID;
     // used to track (approx) how much ram is tied-up in this cache
     size_t fMemoryUsed;
 
+    // The fScalerMutex protects the following fields. It is mainly used to ensure single-threaded
+    // access to the font scaler, but it also protects the fAuxProcList.
+    mutable SkMutex fScalerMutex;
     SkScalerContext* const fScalerContext;
     AuxProcRec* fAuxProcList;
+
+    // BEWARE: Mutex ordering
+    // If you need to hold both fMapMutex and fScalerMutex then fMapMutex must be held first.
 };
 
 class SkAutoGlyphCacheBase {
diff --git a/src/core/SkGlyphCache_Globals.h b/src/core/SkGlyphCache_Globals.h
index e1825a2f0f..a0069e1433 100644
--- a/src/core/SkGlyphCache_Globals.h
+++ b/src/core/SkGlyphCache_Globals.h
@@ -26,8 +26,9 @@ class SkGlyphCache_Globals {
 public:
     SkGlyphCache_Globals() {
+        fHead = nullptr;
-        fTotalMemoryUsed = 0;
+        fTotalMemoryUsed.store(0);
         fCacheSizeLimit = SK_DEFAULT_FONT_CACHE_LIMIT;
         fCacheCount = 0;
         fCacheCountLimit = SK_DEFAULT_FONT_CACHE_COUNT_LIMIT;
@@ -47,7 +48,8 @@ public:
     SkGlyphCache* internalGetHead() const { return fHead; }
     SkGlyphCache* internalGetTail() const;
 
-    size_t getTotalMemoryUsed() const { return fTotalMemoryUsed; }
+    size_t getTotalMemoryUsed() const { return fTotalMemoryUsed.load(); }
+    void increaseTotalMemoryUsed(size_t increase) { fTotalMemoryUsed.fetch_add(increase); }
     int getCacheCountUsed() const { return fCacheCount; }
 
 #ifdef SK_DEBUG
@@ -66,7 +68,7 @@ public:
     // or count limit.
     bool isOverBudget() const {
         return fCacheCount > fCacheCountLimit ||
-               fTotalMemoryUsed > fCacheSizeLimit;
+               fTotalMemoryUsed.load() > fCacheSizeLimit;
     }
 
     void purgeAll(); // does not change budget
@@ -75,20 +77,21 @@ public:
     void attachCacheToHead(SkGlyphCache*);
 
     // can only be called when the mutex is already held
-    void internalDetachCache(SkGlyphCache*);
-    void internalAttachCacheToHead(SkGlyphCache*);
-
-private:
-    SkGlyphCache* fHead;
-    size_t fTotalMemoryUsed;
-    size_t fCacheSizeLimit;
-    int32_t fCacheCountLimit;
-    int32_t fCacheCount;
+    void internalMoveToHead(SkGlyphCache*);
 
     // Checkout budgets, modulated by the specified min-bytes-needed-to-purge,
     // and attempt to purge caches to match.
     // Returns number of bytes freed.
+    void internalDetachCache(SkGlyphCache* cache);
     size_t internalPurge(size_t minBytesNeeded = 0);
+
+private:
+    SkGlyphCache* fHead;
+    SkAtomic<size_t> fTotalMemoryUsed;
+    size_t fCacheSizeLimit;
+    int32_t fCacheCountLimit;
+    int32_t fCacheCount;
+
 };
 
 #endif
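Reviewer note, not part of the patch: the BEWARE comment pins a single global lock order, fMapMutex before fScalerMutex, which is what lets allocateNewGlyph nest the scaler lock inside the exclusive map lock without deadlocking against getCharGlyphRec. The only legal nesting, sketched with std types and hypothetical names:

    #include <mutex>
    #include <shared_mutex>

    std::shared_mutex gMapMutex;  // plays the role of fMapMutex
    std::mutex gScalerMutex;      // plays the role of fScalerMutex

    void fill_in_glyph() {
        std::unique_lock<std::shared_mutex> mapLock(gMapMutex);  // 1st: map
        std::lock_guard<std::mutex> scalerLock(gScalerMutex);    // 2nd: scaler
        // ... call the scaler, publish the result into the map ...
    }

    // Taking gScalerMutex first and then gMapMutex in another thread could
    // deadlock against fill_in_glyph(), which is why the header forbids it.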