retool the image cache into a generic cache, allowing clients to subclass "Rec" so they can provide a custom Key and an arbitrary Value.
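
For illustration, a minimal sketch of the new pattern, using the API introduced in this CL (MyKey, MyValue, and MyRec are hypothetical client names):

    typedef intptr_t MyValue;  // stand-in for any client value type

    struct MyKey : public SkScaledImageCache::Key {
        uint32_t fID;
        MyKey(uint32_t id) : fID(id) {
            this->init(sizeof(fID));  // hashes the fields declared after the base Key
        }
    };

    struct MyRec : public SkScaledImageCache::Rec {
        MyKey   fKey;
        MyValue fValue;
        MyRec(const MyKey& key, MyValue value) : fKey(key), fValue(value) {}
        virtual const Key& getKey() const SK_OVERRIDE { return fKey; }
        virtual size_t bytesUsed() const SK_OVERRIDE { return sizeof(fKey) + sizeof(fValue); }
    };

    // add: the cache takes ownership of the Rec
    SkScaledImageCache::Add(SkNEW_ARGS(MyRec, (MyKey(id), value)));

    // find: downcast to the subclass, copy the value out, then unlock
    const MyRec* rec = (const MyRec*)SkScaledImageCache::FindAndLock(MyKey(id));
    if (rec) {
        MyValue copy = rec->fValue;
        SkScaledImageCache::Unlock(rec);
    }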

Follow-on CLs

- rename SkScaledImageCache to something like GeneralCache
- explore whether we can use callbacks or some other mechanism to completely hide "lock/unlock", by requiring all clients to support "copying" their value out of the cache as the result of a Find (one possible shape is sketched below)
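
A possible shape for that callback idea, purely speculative and not part of this CL: Find would invoke a client-supplied visitor while the cache still holds the entry, so the value is copied out under the cache's lock and no Rec pointer ever escapes.

    // hypothetical follow-on API, sketched for discussion only
    typedef bool (*FindVisitor)(const SkScaledImageCache::Rec&, void* context);

    // Returns true iff a Rec matching key was found and the visitor accepted it.
    // The cache locks the Rec around the call and unlocks it on return, so
    // clients never see lock/unlock at all.
    static bool Find(const SkScaledImageCache::Key& key, FindVisitor visitor, void* context);

    // e.g. SkBitmapCache's visitor could copy its bitmap out like this:
    static bool copy_bitmap_out(const SkScaledImageCache::Rec& baseRec, void* context) {
        const BitmapRec& rec = static_cast<const BitmapRec&>(baseRec);
        *static_cast<SkBitmap*>(context) = rec.fBitmap;  // copy while still locked
        return true;
    }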

R=mtklein@google.com, senorblanco@google.com, bsalomon@google.com, qiankun.miao@intel.com, senorblanco@chromium.org

Author: reed@google.com

Review URL: https://codereview.chromium.org/507483002
reed 2014-08-26 09:08:04 -07:00 committed by Commit bot
parent 97b49478cf
commit 680fb9e8f1
11 changed files with 262 additions and 389 deletions


@@ -19,6 +19,15 @@ public:
this->init(sizeof(fPtr) + sizeof(fValue));
}
};
struct TestRec : public SkScaledImageCache::Rec {
TestKey fKey;
intptr_t fValue;
TestRec(const TestKey& key, intptr_t value) : fKey(key), fValue(value) {}
virtual const Key& getKey() const SK_OVERRIDE { return fKey; }
virtual size_t bytesUsed() const SK_OVERRIDE { return sizeof(fKey) + sizeof(fValue); }
};
}
class ImageCacheBench : public Benchmark {
@@ -36,10 +45,7 @@ public:
void populateCache() {
for (int i = 0; i < CACHE_COUNT; ++i) {
TestKey key(i);
SkBitmap tmp;
tmp.allocN32Pixels(1, 1);
fCache.unlock(fCache.addAndLock(key, tmp));
fCache.unlock(fCache.addAndLock(SkNEW_ARGS(TestRec, (TestKey(i), i))));
}
}
@@ -54,10 +60,9 @@ protected:
}
TestKey key(-1);
SkBitmap tmp;
// search for a miss (-1 scale)
// search for a miss (-1)
for (int i = 0; i < loops; ++i) {
SkDEBUGCODE(SkScaledImageCache::ID* id =) fCache.findAndLock(key, &tmp);
SkDEBUGCODE(SkScaledImageCache::ID id =) fCache.findAndLock(key);
SkASSERT(NULL == id);
}
}


@@ -6,6 +6,8 @@
*/
#include "SkBitmapCache.h"
#include "SkScaledImageCache.h"
#include "SkMipMap.h"
#include "SkRect.h"
/**
@@ -41,51 +43,103 @@ public:
//////////////////////////////////////////////////////////////////////////////////////////
SkScaledImageCache::ID* SkBitmapCache::FindAndLock(const SkBitmap& src,
SkScalar invScaleX, SkScalar invScaleY,
SkBitmap* result) {
struct BitmapRec : public SkScaledImageCache::Rec {
BitmapRec(uint32_t genID, SkScalar scaleX, SkScalar scaleY, const SkIRect& bounds,
const SkBitmap& result)
: fKey(genID, scaleX, scaleY, bounds)
, fBitmap(result)
{}
BitmapKey fKey;
SkBitmap fBitmap;
virtual const Key& getKey() const SK_OVERRIDE { return fKey; }
virtual size_t bytesUsed() const SK_OVERRIDE { return sizeof(fKey) + fBitmap.getSize(); }
};
static bool find_and_return(const BitmapKey& key, SkBitmap* result) {
const BitmapRec* rec = (BitmapRec*)SkScaledImageCache::FindAndLock(key);
if (rec) {
*result = rec->fBitmap;
SkScaledImageCache::Unlock(rec);
result->lockPixels();
if (result->getPixels()) {
return true;
}
// todo: we should explicitly purge rec from the cache at this point, since
// it is effectively purged already (has no memory behind it)
result->reset();
// fall-through to false
}
return false;
}
bool SkBitmapCache::Find(const SkBitmap& src, SkScalar invScaleX, SkScalar invScaleY,
SkBitmap* result) {
if (0 == invScaleX || 0 == invScaleY) {
// degenerate, and the key we use for mipmaps
return NULL;
return false;
}
BitmapKey key(src.getGenerationID(), invScaleX, invScaleY, get_bounds_from_bitmap(src));
return SkScaledImageCache::FindAndLock(key, result);
return find_and_return(key, result);
}
SkScaledImageCache::ID* SkBitmapCache::AddAndLock(const SkBitmap& src,
SkScalar invScaleX, SkScalar invScaleY,
const SkBitmap& result) {
void SkBitmapCache::Add(const SkBitmap& src, SkScalar invScaleX, SkScalar invScaleY,
const SkBitmap& result) {
if (0 == invScaleX || 0 == invScaleY) {
// degenerate, and the key we use for mipmaps
return NULL;
return;
}
BitmapKey key(src.getGenerationID(), invScaleX, invScaleY, get_bounds_from_bitmap(src));
return SkScaledImageCache::AddAndLock(key, result);
SkScaledImageCache::Add(SkNEW_ARGS(BitmapRec,
(src.getGenerationID(), invScaleX, invScaleY,
get_bounds_from_bitmap(src), result)));
}
////
SkScaledImageCache::ID* SkBitmapCache::FindAndLock(uint32_t genID, int width, int height,
SkBitmap* result) {
bool SkBitmapCache::Find(uint32_t genID, int width, int height, SkBitmap* result) {
BitmapKey key(genID, SK_Scalar1, SK_Scalar1, SkIRect::MakeWH(width, height));
return SkScaledImageCache::FindAndLock(key, result);
return find_and_return(key, result);
}
SkScaledImageCache::ID* SkBitmapCache::AddAndLock(uint32_t genID, int width, int height,
const SkBitmap& result) {
BitmapKey key(genID, SK_Scalar1, SK_Scalar1, SkIRect::MakeWH(width, height));
return SkScaledImageCache::AddAndLock(key, result);
void SkBitmapCache::Add(uint32_t genID, int width, int height, const SkBitmap& result) {
SkScaledImageCache::Add(SkNEW_ARGS(BitmapRec, (genID, SK_Scalar1, SK_Scalar1,
SkIRect::MakeWH(width, height), result)));
}
////
//////////////////////////////////////////////////////////////////////////////////////////
SkScaledImageCache::ID* SkMipMapCache::FindAndLock(const SkBitmap& src, const SkMipMap** result) {
BitmapKey key(src.getGenerationID(), SK_Scalar1, SK_Scalar1, get_bounds_from_bitmap(src));
return SkScaledImageCache::FindAndLock(key, result);
struct MipMapRec : public SkScaledImageCache::Rec {
MipMapRec(const SkBitmap& src, const SkMipMap* result)
: fKey(src.getGenerationID(), 0, 0, get_bounds_from_bitmap(src))
, fMipMap(SkRef(result))
{}
virtual ~MipMapRec() {
fMipMap->unref();
}
BitmapKey fKey;
const SkMipMap* fMipMap;
virtual const Key& getKey() const SK_OVERRIDE { return fKey; }
virtual size_t bytesUsed() const SK_OVERRIDE { return sizeof(fKey) + fMipMap->getSize(); }
};
const SkMipMap* SkMipMapCache::FindAndRef(const SkBitmap& src) {
BitmapKey key(src.getGenerationID(), 0, 0, get_bounds_from_bitmap(src));
const MipMapRec* rec = (MipMapRec*)SkScaledImageCache::FindAndLock(key);
const SkMipMap* result = NULL;
if (rec) {
result = SkRef(rec->fMipMap);
SkScaledImageCache::Unlock(rec);
}
return result;
}
SkScaledImageCache::ID* SkMipMapCache::AddAndLock(const SkBitmap& src, const SkMipMap* result) {
BitmapKey key(src.getGenerationID(), SK_Scalar1, SK_Scalar1, get_bounds_from_bitmap(src));
return SkScaledImageCache::AddAndLock(key, result);
void SkMipMapCache::Add(const SkBitmap& src, const SkMipMap* result) {
if (result) {
SkScaledImageCache::Add(SkNEW_ARGS(MipMapRec, (src, result)));
}
}


@@ -8,38 +8,33 @@
#ifndef SkBitmapCache_DEFINED
#define SkBitmapCache_DEFINED
#include "SkScaledImageCache.h"
#include "SkScalar.h"
class SkBitmap;
class SkMipMap;
class SkBitmapCache {
public:
typedef SkScaledImageCache::ID ID;
/**
* Search based on the src bitmap and inverse scales in X and Y. If found, returns true and
* result will be set to the matching bitmap with its pixels already locked.
*/
static bool Find(const SkBitmap& src, SkScalar invScaleX, SkScalar invScaleY, SkBitmap* result);
static void Add(const SkBitmap& src, SkScalar invScaleX, SkScalar invScaleY,
const SkBitmap& result);
static void Unlock(ID* id) {
SkScaledImageCache::Unlock(id);
}
/* Input: bitmap+inverse_scale */
static ID* FindAndLock(const SkBitmap& src, SkScalar invScaleX, SkScalar invScaleY,
SkBitmap* result);
static ID* AddAndLock(const SkBitmap& src, SkScalar invScaleX, SkScalar invScaleY,
const SkBitmap& result);
/* Input: bitmap_genID+width+height */
static ID* FindAndLock(uint32_t genID, int width, int height, SkBitmap* result);
static ID* AddAndLock(uint32_t genID, int width, int height, const SkBitmap& result);
/**
* Search based on the bitmap's genID, width, height. If found, returns true and
* result will be set to the matching bitmap with its pixels already locked.
*/
static bool Find(uint32_t genID, int width, int height, SkBitmap* result);
static void Add(uint32_t genID, int width, int height, const SkBitmap& result);
};
class SkMipMapCache {
public:
typedef SkScaledImageCache::ID ID;
static void Unlock(ID* id) {
SkScaledImageCache::Unlock(id);
}
static ID* FindAndLock(const SkBitmap& src, const SkMipMap** result);
static ID* AddAndLock(const SkBitmap& src, const SkMipMap* result);
static const SkMipMap* FindAndRef(const SkBitmap& src);
static void Add(const SkBitmap& src, const SkMipMap* result);
};
#endif
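
Taken together, the new header reduces a typical caller to a find-or-build pattern; a sketch based on the call sites updated later in this CL (buildScaled is a hypothetical helper):

    SkBitmap scaled;
    if (!SkBitmapCache::Find(orig, invScaleX, invScaleY, &scaled)) {
        buildScaled(orig, invScaleX, invScaleY, &scaled);  // cache miss: build it ourselves
        SkBitmapCache::Add(orig, invScaleX, invScaleY, scaled);
    }
    // on a hit, scaled already has its pixels locked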


@@ -109,7 +109,7 @@ static SkScalar effective_matrix_scale_sqrd(const SkMatrix& mat) {
class AutoScaledCacheUnlocker {
public:
AutoScaledCacheUnlocker(SkScaledImageCache::ID** idPtr) : fIDPtr(idPtr) {}
AutoScaledCacheUnlocker(SkScaledImageCache::ID* idPtr) : fIDPtr(idPtr) {}
~AutoScaledCacheUnlocker() {
if (fIDPtr && *fIDPtr) {
SkScaledImageCache::Unlock(*fIDPtr);
@@ -123,7 +123,7 @@ public:
}
private:
SkScaledImageCache::ID** fIDPtr;
SkScaledImageCache::ID* fIDPtr;
};
#define AutoScaledCacheUnlocker(...) SK_REQUIRE_LOCAL_VAR(AutoScaledCacheUnlocker)
@@ -147,10 +147,7 @@ static inline bool cache_size_okay(const SkBitmap& bm, const SkMatrix& invMat) {
// the interface to the cache, but might be well worth it.
bool SkBitmapProcState::possiblyScaleImage() {
AutoScaledCacheUnlocker unlocker(&fScaledCacheID);
SkASSERT(NULL == fBitmap);
SkASSERT(NULL == fScaledCacheID);
if (fFilterLevel <= SkPaint::kLow_FilterLevel) {
return false;
@@ -183,20 +180,7 @@ bool SkBitmapProcState::possiblyScaleImage() {
return false;
}
fScaledCacheID = SkBitmapCache::FindAndLock(fOrigBitmap, invScaleX, invScaleY,
&fScaledBitmap);
if (fScaledCacheID) {
fScaledBitmap.lockPixels();
if (!fScaledBitmap.getPixels()) {
fScaledBitmap.unlockPixels();
// found a purged entry (discardablememory?), release it
SkScaledImageCache::Unlock(fScaledCacheID);
fScaledCacheID = NULL;
// fall through to rebuild
}
}
if (NULL == fScaledCacheID) {
if (!SkBitmapCache::Find(fOrigBitmap, invScaleX, invScaleY, &fScaledBitmap)) {
float dest_width = fOrigBitmap.width() / invScaleX;
float dest_height = fOrigBitmap.height() / invScaleY;
@@ -215,13 +199,7 @@ bool SkBitmapProcState::possiblyScaleImage() {
}
SkASSERT(NULL != fScaledBitmap.getPixels());
fScaledCacheID = SkBitmapCache::AddAndLock(fOrigBitmap, invScaleX, invScaleY,
fScaledBitmap);
if (!fScaledCacheID) {
fScaledBitmap.reset();
return false;
}
SkASSERT(NULL != fScaledBitmap.getPixels());
SkBitmapCache::Add(fOrigBitmap, invScaleX, invScaleY, fScaledBitmap);
}
SkASSERT(NULL != fScaledBitmap.getPixels());
@@ -235,7 +213,6 @@ bool SkBitmapProcState::possiblyScaleImage() {
// image might require is some interpolation if the translation
// is fractional.
fFilterLevel = SkPaint::kLow_FilterLevel;
unlocker.release();
return true;
}
@@ -280,39 +257,30 @@ bool SkBitmapProcState::possiblyScaleImage() {
* a scale > 1 to indicate down scaling by the CTM.
*/
if (scaleSqd > SK_Scalar1) {
const SkMipMap* mip = NULL;
SkASSERT(NULL == fScaledCacheID);
fScaledCacheID = SkMipMapCache::FindAndLock(fOrigBitmap, &mip);
if (!fScaledCacheID) {
SkASSERT(NULL == mip);
mip = SkMipMap::Build(fOrigBitmap);
if (mip) {
fScaledCacheID = SkMipMapCache::AddAndLock(fOrigBitmap, mip);
SkASSERT(mip->getRefCnt() > 1);
mip->unref(); // the cache took a ref
SkASSERT(fScaledCacheID);
fCurrMip.reset(SkMipMapCache::FindAndRef(fOrigBitmap));
if (NULL == fCurrMip.get()) {
fCurrMip.reset(SkMipMap::Build(fOrigBitmap));
if (NULL == fCurrMip.get()) {
return false;
}
} else {
SkASSERT(mip);
SkMipMapCache::Add(fOrigBitmap, fCurrMip);
}
if (mip) {
SkScalar levelScale = SkScalarInvert(SkScalarSqrt(scaleSqd));
SkMipMap::Level level;
if (mip->extractLevel(levelScale, &level)) {
SkScalar invScaleFixup = level.fScale;
fInvMatrix.postScale(invScaleFixup, invScaleFixup);
SkScalar levelScale = SkScalarInvert(SkScalarSqrt(scaleSqd));
SkMipMap::Level level;
if (fCurrMip->extractLevel(levelScale, &level)) {
SkScalar invScaleFixup = level.fScale;
fInvMatrix.postScale(invScaleFixup, invScaleFixup);
SkImageInfo info = fOrigBitmap.info();
info.fWidth = level.fWidth;
info.fHeight = level.fHeight;
fScaledBitmap.installPixels(info, level.fPixels, level.fRowBytes);
fBitmap = &fScaledBitmap;
fFilterLevel = SkPaint::kLow_FilterLevel;
unlocker.release();
return true;
}
SkImageInfo info = fOrigBitmap.info();
info.fWidth = level.fWidth;
info.fHeight = level.fHeight;
// todo: if we could wrap the fCurrMip in a pixelref, then we could just install
// that here, and not need to explicitly track it ourselves.
fScaledBitmap.installPixels(info, level.fPixels, level.fRowBytes);
fBitmap = &fScaledBitmap;
fFilterLevel = SkPaint::kLow_FilterLevel;
return true;
}
}
@@ -336,12 +304,8 @@ static bool get_locked_pixels(const SkBitmap& src, int pow2, SkBitmap* dst) {
}
bool SkBitmapProcState::lockBaseBitmap() {
AutoScaledCacheUnlocker unlocker(&fScaledCacheID);
SkPixelRef* pr = fOrigBitmap.pixelRef();
SkASSERT(NULL == fScaledCacheID);
if (pr->isLocked() || !pr->implementsDecodeInto()) {
// fast-case, no need to look in our cache
fScaledBitmap = fOrigBitmap;
@@ -350,19 +314,7 @@ bool SkBitmapProcState::lockBaseBitmap() {
return false;
}
} else {
fScaledCacheID = SkBitmapCache::FindAndLock(fOrigBitmap, 1, 1, &fScaledBitmap);
if (fScaledCacheID) {
fScaledBitmap.lockPixels();
if (!fScaledBitmap.getPixels()) {
fScaledBitmap.unlockPixels();
// found a purged entry (discardablememory?), release it
SkScaledImageCache::Unlock(fScaledCacheID);
fScaledCacheID = NULL;
// fall through to rebuild
}
}
if (NULL == fScaledCacheID) {
if (!SkBitmapCache::Find(fOrigBitmap, 1, 1, &fScaledBitmap)) {
if (!get_locked_pixels(fOrigBitmap, 0, &fScaledBitmap)) {
return false;
}
@@ -370,22 +322,14 @@ bool SkBitmapProcState::lockBaseBitmap() {
// TODO: if fScaled comes back at a different width/height than fOrig,
// we need to update the matrix we are using to sample from this guy.
fScaledCacheID = SkBitmapCache::AddAndLock(fOrigBitmap, 1, 1, fScaledBitmap);
if (!fScaledCacheID) {
fScaledBitmap.reset();
return false;
}
SkBitmapCache::Add(fOrigBitmap, 1, 1, fScaledBitmap);
}
}
fBitmap = &fScaledBitmap;
unlocker.release();
return true;
}
SkBitmapProcState::~SkBitmapProcState() {
if (fScaledCacheID) {
SkScaledImageCache::Unlock(fScaledCacheID);
}
SkDELETE(fBitmapFilter);
}
@@ -396,8 +340,6 @@ bool SkBitmapProcState::chooseProcs(const SkMatrix& inv, const SkPaint& paint) {
fInvMatrix = inv;
fFilterLevel = paint.getFilterLevel();
SkASSERT(NULL == fScaledCacheID);
// possiblyScaleImage will look to see if it can rescale the image as a
// preprocess; either by scaling up to the target size, or by selecting
// a nearby mipmap level. If it does, it will adjust the working


@@ -13,6 +13,7 @@
#include "SkBitmap.h"
#include "SkBitmapFilter.h"
#include "SkMatrix.h"
#include "SkMipMap.h"
#include "SkPaint.h"
#include "SkScaledImageCache.h"
@@ -36,7 +37,7 @@ class SkPaint;
struct SkBitmapProcState {
SkBitmapProcState(): fScaledCacheID(NULL), fBitmapFilter(NULL) {}
SkBitmapProcState() : fBitmapFilter(NULL) {}
~SkBitmapProcState();
typedef void (*ShaderProc32)(const SkBitmapProcState&, int x, int y,
@@ -142,7 +143,8 @@ private:
SkBitmap fOrigBitmap; // CONSTRUCTOR
SkBitmap fScaledBitmap; // chooseProcs
SkScaledImageCache::ID* fScaledCacheID;
SkAutoTUnref<const SkMipMap> fCurrMip;
// SkScaledImageCache::ID fScaledCacheID;
MatrixProc chooseMatrixProc(bool trivial_matrix);
bool chooseProcs(const SkMatrix& inv, const SkPaint&);


@@ -21,14 +21,6 @@
#define SK_DEFAULT_IMAGE_CACHE_LIMIT (2 * 1024 * 1024)
#endif
static inline SkScaledImageCache::ID* rec_to_id(SkScaledImageCache::Rec* rec) {
return reinterpret_cast<SkScaledImageCache::ID*>(rec);
}
static inline SkScaledImageCache::Rec* id_to_rec(SkScaledImageCache::ID* id) {
return reinterpret_cast<SkScaledImageCache::Rec*>(id);
}
void SkScaledImageCache::Key::init(size_t length) {
SkASSERT(SkAlign4(length) == length);
// 2 is fCount32 and fHash
@@ -37,50 +29,6 @@ void SkScaledImageCache::Key::init(size_t length) {
fHash = SkChecksum::Murmur3(this->as32() + 2, (fCount32 - 2) << 2);
}
SkScaledImageCache::Key* SkScaledImageCache::Key::clone() const {
size_t size = fCount32 << 2;
void* copy = sk_malloc_throw(size);
memcpy(copy, this, size);
return (Key*)copy;
}
struct SkScaledImageCache::Rec {
Rec(const Key& key, const SkBitmap& bm) : fKey(key.clone()), fBitmap(bm) {
fLockCount = 1;
fMip = NULL;
}
Rec(const Key& key, const SkMipMap* mip) : fKey(key.clone()) {
fLockCount = 1;
fMip = mip;
mip->ref();
}
~Rec() {
SkSafeUnref(fMip);
sk_free(fKey);
}
static const Key& GetKey(const Rec& rec) { return *rec.fKey; }
static uint32_t Hash(const Key& key) { return key.hash(); }
size_t bytesUsed() const {
return fMip ? fMip->getSize() : fBitmap.getSize();
}
Rec* fNext;
Rec* fPrev;
// this guy wants to be 64bit aligned
Key* fKey;
int32_t fLockCount;
// we use either fBitmap or fMip, but not both
SkBitmap fBitmap;
const SkMipMap* fMip;
};
#include "SkTDynamicHash.h"
class SkScaledImageCache::Hash :
@@ -259,11 +207,7 @@ SkScaledImageCache::~SkScaledImageCache() {
////////////////////////////////////////////////////////////////////////////////
/**
This private method is the fully general record finder. All other
record finders should call this function or the one above.
*/
SkScaledImageCache::Rec* SkScaledImageCache::findAndLock(const SkScaledImageCache::Key& key) {
const SkScaledImageCache::Rec* SkScaledImageCache::findAndLock(const Key& key) {
#ifdef USE_HASH
Rec* rec = fHash->find(key);
#else
@@ -276,41 +220,13 @@ SkScaledImageCache::Rec* SkScaledImageCach
return rec;
}
SkScaledImageCache::ID* SkScaledImageCache::findAndLock(const Key& key, SkBitmap* result) {
Rec* rec = this->findAndLock(key);
if (rec) {
SkASSERT(NULL == rec->fMip);
SkASSERT(rec->fBitmap.pixelRef());
*result = rec->fBitmap;
}
return rec_to_id(rec);
}
SkScaledImageCache::ID* SkScaledImageCache::findAndLock(const Key& key, const SkMipMap** mip) {
Rec* rec = this->findAndLock(key);
if (rec) {
SkASSERT(rec->fMip);
SkASSERT(NULL == rec->fBitmap.pixelRef());
*mip = rec->fMip;
}
return rec_to_id(rec);
}
////////////////////////////////////////////////////////////////////////////////
/**
This private method is the fully general record adder. All other
record adders should call this function. */
SkScaledImageCache::ID* SkScaledImageCache::addAndLock(SkScaledImageCache::Rec* rec) {
const SkScaledImageCache::Rec* SkScaledImageCache::addAndLock(Rec* rec) {
SkASSERT(rec);
// See if we already have this key (racy inserts, etc.)
Rec* existing = this->findAndLock(*rec->fKey);
const Rec* existing = this->findAndLock(rec->getKey());
if (NULL != existing) {
// Since we already have a matching entry, just delete the new one and return.
// Call sites cannot assume the passed in object will live past this call.
existing->fBitmap = rec->fBitmap;
SkDELETE(rec);
return rec_to_id(existing);
return existing;
}
this->addToHead(rec);
@@ -321,20 +237,29 @@ SkScaledImageCache::ID* SkScaledImageCache::addAndLock(SkScaledImageCache::Rec*
#endif
// We may (now) be overbudget, so see if we need to purge something.
this->purgeAsNeeded();
return rec_to_id(rec);
return rec;
}
SkScaledImageCache::ID* SkScaledImageCache::addAndLock(const Key& key, const SkBitmap& scaled) {
Rec* rec = SkNEW_ARGS(Rec, (key, scaled));
return this->addAndLock(rec);
void SkScaledImageCache::add(Rec* rec) {
SkASSERT(rec);
// See if we already have this key (racy inserts, etc.)
const Rec* existing = this->findAndLock(rec->getKey());
if (NULL != existing) {
SkDELETE(rec);
this->unlock(existing);
return;
}
this->addToHead(rec);
SkASSERT(1 == rec->fLockCount);
#ifdef USE_HASH
SkASSERT(fHash);
fHash->add(rec);
#endif
this->unlock(rec);
}
SkScaledImageCache::ID* SkScaledImageCache::addAndLock(const Key& key, const SkMipMap* mip) {
Rec* rec = SkNEW_ARGS(Rec, (key, mip));
return this->addAndLock(rec);
}
void SkScaledImageCache::unlock(SkScaledImageCache::ID* id) {
void SkScaledImageCache::unlock(SkScaledImageCache::ID id) {
SkASSERT(id);
#ifdef SK_DEBUG
@@ -342,7 +267,7 @@ void SkScaledImageCache::unlock(SkScaledImageCache::ID* id) {
bool found = false;
Rec* rec = fHead;
while (rec != NULL) {
if (rec == id_to_rec(id)) {
if (rec == id) {
found = true;
break;
}
@@ -351,9 +276,10 @@ void SkScaledImageCache::unlock(SkScaledImageCache::ID* id) {
SkASSERT(found);
}
#endif
Rec* rec = id_to_rec(id);
const Rec* rec = id;
SkASSERT(rec->fLockCount > 0);
rec->fLockCount -= 1;
// We're under our lock, and we're the only possible mutator, so unconsting is fine.
const_cast<Rec*>(rec)->fLockCount -= 1;
// we may have been over-budget, but now have released something, so check
// if we should purge.
@@ -389,7 +315,7 @@ void SkScaledImageCache::purgeAsNeeded() {
SkASSERT(used <= bytesUsed);
this->detach(rec);
#ifdef USE_HASH
fHash->remove(*rec->fKey);
fHash->remove(rec->getKey());
#endif
SkDELETE(rec);
@@ -575,27 +501,7 @@ static SkScaledImageCache* get_cache() {
return gScaledImageCache;
}
SkScaledImageCache::ID* SkScaledImageCache::FindAndLock(const Key& key, SkBitmap* result) {
SkAutoMutexAcquire am(gMutex);
return get_cache()->findAndLock(key, result);
}
SkScaledImageCache::ID* SkScaledImageCache::FindAndLock(const Key& key, SkMipMap const ** mip) {
SkAutoMutexAcquire am(gMutex);
return get_cache()->findAndLock(key, mip);
}
SkScaledImageCache::ID* SkScaledImageCache::AddAndLock(const Key& key, const SkBitmap& scaled) {
SkAutoMutexAcquire am(gMutex);
return get_cache()->addAndLock(key, scaled);
}
SkScaledImageCache::ID* SkScaledImageCache::AddAndLock(const Key& key, const SkMipMap* mip) {
SkAutoMutexAcquire am(gMutex);
return get_cache()->addAndLock(key, mip);
}
void SkScaledImageCache::Unlock(SkScaledImageCache::ID* id) {
void SkScaledImageCache::Unlock(SkScaledImageCache::ID id) {
SkAutoMutexAcquire am(gMutex);
get_cache()->unlock(id);
@@ -637,6 +543,21 @@ size_t SkScaledImageCache::GetSingleAllocationByteLimit() {
return get_cache()->getSingleAllocationByteLimit();
}
const SkScaledImageCache::Rec* SkScaledImageCache::FindAndLock(const Key& key) {
SkAutoMutexAcquire am(gMutex);
return get_cache()->findAndLock(key);
}
const SkScaledImageCache::Rec* SkScaledImageCache::AddAndLock(Rec* rec) {
SkAutoMutexAcquire am(gMutex);
return get_cache()->addAndLock(rec);
}
void SkScaledImageCache::Add(Rec* rec) {
SkAutoMutexAcquire am(gMutex);
get_cache()->add(rec);
}
///////////////////////////////////////////////////////////////////////////////
#include "SkGraphics.h"


@@ -25,8 +25,6 @@ class SkMipMap;
*/
class SkScaledImageCache {
public:
struct ID;
struct Key {
// Call this to access your private contents. Must not use the address after calling init()
void* writableContents() { return this + 1; }
@@ -49,9 +47,6 @@ public:
return true;
}
// delete using sk_free
Key* clone() const;
private:
// store fCount32 first, so we don't consider it in operator<
int32_t fCount32; // 2 + user contents count32
@@ -62,6 +57,32 @@ public:
const uint32_t* as32SkipCount() const { return this->as32() + 1; }
};
struct Rec {
typedef SkScaledImageCache::Key Key;
Rec() : fLockCount(1) {}
virtual ~Rec() {}
uint32_t getHash() const { return this->getKey().hash(); }
virtual const Key& getKey() const = 0;
virtual size_t bytesUsed() const = 0;
// for SkTDynamicHash::Traits
static uint32_t Hash(const Key& key) { return key.hash(); }
static const Key& GetKey(const Rec& rec) { return rec.getKey(); }
private:
Rec* fNext;
Rec* fPrev;
int32_t fLockCount;
int32_t fPad;
friend class SkScaledImageCache;
};
typedef const Rec* ID;
/**
* Returns a locked/pinned SkDiscardableMemory instance for the specified
* number of bytes, or NULL on failure.
@@ -73,13 +94,10 @@ public:
* instance of this cache.
*/
static ID* FindAndLock(const Key&, SkBitmap* result);
static ID* AddAndLock(const Key&, const SkBitmap& result);
static ID* FindAndLock(const Key&, const SkMipMap** result);
static ID* AddAndLock(const Key&, const SkMipMap* result);
static void Unlock(ID*);
static const Rec* FindAndLock(const Key& key);
static const Rec* AddAndLock(Rec*);
static void Add(Rec*);
static void Unlock(ID);
static size_t GetTotalBytesUsed();
static size_t GetTotalByteLimit();
@@ -115,22 +133,9 @@ public:
explicit SkScaledImageCache(size_t byteLimit);
~SkScaledImageCache();
/**
* Search the cache for a matching key. If found, return its bitmap and return its ID pointer.
* Use the returned ID to unlock the cache when you are done using outBitmap.
*
* If a match is not found, outBitmap will be unmodifed, and NULL will be returned.
*/
ID* findAndLock(const Key& key, SkBitmap* outBitmap);
ID* findAndLock(const Key& key, const SkMipMap** returnedMipMap);
/**
* To add a new bitmap (or mipMap) to the cache, call
* AddAndLock. Use the returned ptr to unlock the cache when you
* are done using scaled.
*/
ID* addAndLock(const Key&, const SkBitmap& bitmap);
ID* addAndLock(const Key&, const SkMipMap* mipMap);
const Rec* findAndLock(const Key& key);
const Rec* addAndLock(Rec*);
void add(Rec*);
/**
* Given a non-null ID ptr returned by either findAndLock or addAndLock,
@@ -138,7 +143,7 @@ public:
* if needed. After this, the cached bitmap should no longer be
* referenced by the caller.
*/
void unlock(ID*);
void unlock(ID);
size_t getTotalBytesUsed() const { return fTotalBytesUsed; }
size_t getTotalByteLimit() const { return fTotalByteLimit; }
@@ -164,8 +169,6 @@ public:
*/
void dump() const;
public:
struct Rec;
private:
Rec* fHead;
Rec* fTail;
@@ -182,9 +185,6 @@ private:
size_t fSingleAllocationByteLimit;
int fCount;
Rec* findAndLock(const Key& key);
ID* addAndLock(Rec* rec);
void purgeRec(Rec*);
void purgeAsNeeded();


@@ -30,13 +30,11 @@ SkCachingPixelRef::SkCachingPixelRef(const SkImageInfo& info,
: INHERITED(info)
, fImageGenerator(generator)
, fErrorInDecoding(false)
, fScaledCacheId(NULL)
, fRowBytes(rowBytes) {
SkASSERT(fImageGenerator != NULL);
}
SkCachingPixelRef::~SkCachingPixelRef() {
SkDELETE(fImageGenerator);
SkASSERT(NULL == fScaledCacheId);
// Assert always unlock before unref.
}
@@ -46,48 +44,28 @@ bool SkCachingPixelRef::onNewLockPixels(LockRec* rec) {
}
const SkImageInfo& info = this->info();
SkBitmap bitmap;
SkASSERT(NULL == fScaledCacheId);
fScaledCacheId = SkBitmapCache::FindAndLock(this->getGenerationID(), info.fWidth, info.fHeight,
&bitmap);
if (NULL == fScaledCacheId) {
if (!SkBitmapCache::Find(this->getGenerationID(), info.fWidth, info.fHeight, &fLockedBitmap)) {
// Cache has been purged, must re-decode.
if (!bitmap.allocPixels(info, fRowBytes)) {
if (!fLockedBitmap.allocPixels(info, fRowBytes)) {
fErrorInDecoding = true;
return false;
}
SkAutoLockPixels autoLockPixels(bitmap);
if (!fImageGenerator->getPixels(info, bitmap.getPixels(), fRowBytes)) {
if (!fImageGenerator->getPixels(info, fLockedBitmap.getPixels(), fRowBytes)) {
fErrorInDecoding = true;
return false;
}
fScaledCacheId = SkBitmapCache::AddAndLock(this->getGenerationID(),
info.fWidth, info.fHeight, bitmap);
SkASSERT(fScaledCacheId != NULL);
SkBitmapCache::Add(this->getGenerationID(), info.fWidth, info.fHeight, fLockedBitmap);
}
// Now bitmap should contain a concrete PixelRef of the decoded
// image.
SkAutoLockPixels autoLockPixels(bitmap);
void* pixels = bitmap.getPixels();
// Now bitmap should contain a concrete PixelRef of the decoded image.
void* pixels = fLockedBitmap.getPixels();
SkASSERT(pixels != NULL);
// At this point, the autoLockPixels will unlockPixels()
// to remove bitmap's lock on the pixels. We will then
// destroy bitmap. The *only* guarantee that this pointer
// remains valid is the guarantee made by
// SkScaledImageCache that it will not destroy the *other*
// bitmap (SkScaledImageCache::Rec.fBitmap) that holds a
// reference to the concrete PixelRef while this record is
// locked.
rec->fPixels = pixels;
rec->fColorTable = NULL;
rec->fRowBytes = bitmap.rowBytes();
rec->fRowBytes = fLockedBitmap.rowBytes();
return true;
}
void SkCachingPixelRef::onUnlockPixels() {
SkASSERT(fScaledCacheId != NULL);
SkScaledImageCache::Unlock( static_cast<SkScaledImageCache::ID*>(fScaledCacheId));
fScaledCacheId = NULL;
fLockedBitmap.reset();
}


@@ -8,6 +8,7 @@
#ifndef SkCachingPixelRef_DEFINED
#define SkCachingPixelRef_DEFINED
#include "SkBitmapCache.h"
#include "SkImageInfo.h"
#include "SkImageGenerator.h"
#include "SkPixelRef.h"
@@ -52,9 +53,10 @@ protected:
private:
SkImageGenerator* const fImageGenerator;
bool fErrorInDecoding;
void* fScaledCacheId;
const size_t fRowBytes;
SkBitmap fLockedBitmap;
SkCachingPixelRef(const SkImageInfo&, SkImageGenerator*, size_t rowBytes);
typedef SkPixelRef INHERITED;


@@ -19,10 +19,15 @@ struct TestingKey : public SkScaledImageCache::Key {
this->init(sizeof(fPtr) + sizeof(fValue));
}
};
}
struct TestingRec : public SkScaledImageCache::Rec {
TestingRec(const TestingKey& key, uint32_t value) : fKey(key), fValue(value) {}
static void make_bm(SkBitmap* bm, int w, int h) {
bm->allocN32Pixels(w, h);
TestingKey fKey;
intptr_t fValue;
virtual const Key& getKey() const SK_OVERRIDE { return fKey; }
virtual size_t bytesUsed() const SK_OVERRIDE { return sizeof(fKey) + sizeof(fValue); }
};
}
static const int COUNT = 10;
@@ -30,44 +35,30 @@ static void test_cache(skiatest::Reporter* reporter, SkScaledImageCache& cache,
static void test_cache(skiatest::Reporter* reporter, SkScaledImageCache& cache,
bool testPurge) {
SkScaledImageCache::ID* id;
SkBitmap bm[COUNT];
SkScaledImageCache::ID id;
for (int i = 0; i < COUNT; ++i) {
make_bm(&bm[i], DIM, DIM);
}
TestingKey key(i);
for (int i = 0; i < COUNT; ++i) {
TestingKey key(bm[i].getGenerationID());
SkBitmap tmp;
const TestingRec* rec = (const TestingRec*)cache.findAndLock(key);
REPORTER_ASSERT(reporter, NULL == rec);
SkScaledImageCache::ID* id = cache.findAndLock(key, &tmp);
REPORTER_ASSERT(reporter, NULL == id);
TestingRec* newRec = SkNEW_ARGS(TestingRec, (key, i));
const TestingRec* addedRec = (const TestingRec*)cache.addAndLock(newRec);
REPORTER_ASSERT(reporter, NULL != addedRec);
make_bm(&tmp, DIM, DIM);
id = cache.addAndLock(key, tmp);
REPORTER_ASSERT(reporter, NULL != id);
SkBitmap tmp2;
SkScaledImageCache::ID* id2 = cache.findAndLock(key, &tmp2);
REPORTER_ASSERT(reporter, id == id2);
REPORTER_ASSERT(reporter, tmp.pixelRef() == tmp2.pixelRef());
REPORTER_ASSERT(reporter, tmp.width() == tmp2.width());
REPORTER_ASSERT(reporter, tmp.height() == tmp2.height());
cache.unlock(id2);
cache.unlock(id);
const TestingRec* foundRec = (const TestingRec*)cache.findAndLock(key);
REPORTER_ASSERT(reporter, foundRec == addedRec);
REPORTER_ASSERT(reporter, foundRec->fValue == i);
cache.unlock(foundRec);
cache.unlock(addedRec);
}
if (testPurge) {
// stress test, should trigger purges
for (size_t i = 0; i < COUNT * 100; ++i) {
TestingKey key(i);
SkBitmap tmp;
make_bm(&tmp, DIM, DIM);
SkScaledImageCache::ID* id = cache.addAndLock(key, tmp);
SkScaledImageCache::ID id = cache.addAndLock(SkNEW_ARGS(TestingRec, (key, i)));
REPORTER_ASSERT(reporter, NULL != id);
cache.unlock(id);
}
@@ -75,9 +66,7 @@ static void test_cache(skiatest::Reporter* reporter, SkScaledImageCache& cache,
// test the originals after all that purging
for (int i = 0; i < COUNT; ++i) {
TestingKey key(bm[i].getGenerationID());
SkBitmap tmp;
id = cache.findAndLock(key, &tmp);
id = cache.findAndLock(TestingKey(i));
if (id) {
cache.unlock(id);
}
@@ -118,27 +107,17 @@ DEF_TEST(ImageCache_doubleAdd, r) {
// Adding the same key twice should be safe.
SkScaledImageCache cache(4096);
SkBitmap original;
original.allocN32Pixels(40, 40);
TestingKey key(1);
SkBitmap scaled1;
scaled1.allocN32Pixels(20, 20);
SkBitmap scaled2;
scaled2.allocN32Pixels(20, 20);
TestingKey key(original.getGenerationID());
SkScaledImageCache::ID* id1 = cache.addAndLock(key, scaled1);
SkScaledImageCache::ID* id2 = cache.addAndLock(key, scaled2);
SkScaledImageCache::ID id1 = cache.addAndLock(SkNEW_ARGS(TestingRec, (key, 2)));
SkScaledImageCache::ID id2 = cache.addAndLock(SkNEW_ARGS(TestingRec, (key, 3)));
// We don't really care if id1 == id2 as long as unlocking both works.
cache.unlock(id1);
cache.unlock(id2);
SkBitmap tmp;
// Lookup should return the value that was added last.
SkScaledImageCache::ID* id = cache.findAndLock(key, &tmp);
REPORTER_ASSERT(r, NULL != id);
REPORTER_ASSERT(r, tmp.getGenerationID() == scaled2.getGenerationID());
cache.unlock(id);
// Lookup can return either value.
const TestingRec* rec = (const TestingRec*)cache.findAndLock(key);
REPORTER_ASSERT(r, NULL != rec);
REPORTER_ASSERT(r, 2 == rec->fValue || 3 == rec->fValue);
cache.unlock(rec);
}


@@ -17,12 +17,7 @@ static bool is_in_scaled_image_cache(const SkBitmap& orig,
SkScalar xScale,
SkScalar yScale) {
SkBitmap scaled;
SkScaledImageCache::ID* id = SkBitmapCache::FindAndLock(
orig, SkScalarInvert(xScale), SkScalarInvert(yScale), &scaled);
if (id) {
SkScaledImageCache::Unlock(id);
}
return id != NULL;
return SkBitmapCache::Find(orig, SkScalarInvert(xScale), SkScalarInvert(yScale), &scaled);
}
// Draw a scaled bitmap, then return true iff it has been cached.