Speculative revert to diagnose a crash in Chrome. Revert "Add SkCachedData and use it for SkMipMap"

This reverts commit 92561a0b99.

crasher in question:
https://code.google.com/p/chromium/issues/detail?id=420178

Review URL: https://codereview.chromium.org/617613003
reed 2014-10-03 13:23:30 -07:00 committed by Commit bot
parent 9e96aa07db
commit 37c5a815d8
14 changed files with 46 additions and 522 deletions

View File

@@ -42,7 +42,6 @@
'<(skia_src_path)/core/SkBlitter_RGB16.cpp',
'<(skia_src_path)/core/SkBlitter_Sprite.cpp',
'<(skia_src_path)/core/SkBuffer.cpp',
'<(skia_src_path)/core/SkCachedData.cpp',
'<(skia_src_path)/core/SkCanvas.cpp',
'<(skia_src_path)/core/SkChunkAlloc.cpp',
'<(skia_src_path)/core/SkClipStack.cpp',

View File

@@ -58,7 +58,6 @@
'../tests/BlendTest.cpp',
'../tests/BlitRowTest.cpp',
'../tests/BlurTest.cpp',
'../tests/CachedDataTest.cpp',
'../tests/CachedDecodingPixelRefTest.cpp',
'../tests/CanvasStateHelpers.cpp',
'../tests/CanvasStateTest.cpp',

View File

@@ -71,7 +71,7 @@ struct BitmapRec : public SkResourceCache::Rec {
};
#define CHECK_LOCAL(localCache, localName, globalName, ...) \
((localCache) ? localCache->localName(__VA_ARGS__) : SkResourceCache::globalName(__VA_ARGS__))
(localCache) ? localCache->localName(__VA_ARGS__) : SkResourceCache::globalName(__VA_ARGS__)
bool SkBitmapCache::Find(const SkBitmap& src, SkScalar invScaleX, SkScalar invScaleY, SkBitmap* result,
SkResourceCache* localCache) {
@@ -125,17 +125,18 @@ bool SkBitmapCache::Add(uint32_t genID, const SkIRect& subset, const SkBitmap& r
struct MipMapRec : public SkResourceCache::Rec {
MipMapRec(const SkBitmap& src, const SkMipMap* result)
: fKey(src.getGenerationID(), 0, 0, get_bounds_from_bitmap(src))
, fMipMap(result)
{
fMipMap->attachToCacheAndRef();
}
, fMipMap(SkRef(result))
{}
virtual ~MipMapRec() {
fMipMap->detachFromCacheAndUnref();
fMipMap->unref();
}
BitmapKey fKey;
const SkMipMap* fMipMap;
virtual const Key& getKey() const SK_OVERRIDE { return fKey; }
virtual size_t bytesUsed() const SK_OVERRIDE { return sizeof(fKey) + fMipMap->size(); }
virtual size_t bytesUsed() const SK_OVERRIDE { return sizeof(fKey) + fMipMap->getSize(); }
static bool Visitor(const SkResourceCache::Rec& baseRec, void* contextMip) {
const MipMapRec& rec = static_cast<const MipMapRec&>(baseRec);
@@ -145,33 +146,20 @@ struct MipMapRec : public SkResourceCache::Rec {
// mipmaps don't use the custom allocator yet, so we don't need to check pixels
return true;
}
private:
BitmapKey fKey;
const SkMipMap* fMipMap;
};
const SkMipMap* SkMipMapCache::FindAndRef(const SkBitmap& src, SkResourceCache* localCache) {
const SkMipMap* SkMipMapCache::FindAndRef(const SkBitmap& src) {
BitmapKey key(src.getGenerationID(), 0, 0, get_bounds_from_bitmap(src));
const SkMipMap* result;
if (!CHECK_LOCAL(localCache, find, Find, key, MipMapRec::Visitor, &result)) {
if (!SkResourceCache::Find(key, MipMapRec::Visitor, &result)) {
result = NULL;
}
return result;
}
static SkResourceCache::DiscardableFactory get_fact(SkResourceCache* localCache) {
return localCache ? localCache->GetDiscardableFactory()
: SkResourceCache::GetDiscardableFactory();
}
const SkMipMap* SkMipMapCache::AddAndRef(const SkBitmap& src, SkResourceCache* localCache) {
SkMipMap* mipmap = SkMipMap::Build(src, get_fact(localCache));
if (mipmap) {
MipMapRec* rec = SkNEW_ARGS(MipMapRec, (src, mipmap));
CHECK_LOCAL(localCache, add, Add, rec);
void SkMipMapCache::Add(const SkBitmap& src, const SkMipMap* result) {
if (result) {
SkResourceCache::Add(SkNEW_ARGS(MipMapRec, (src, result)));
}
return mipmap;
}
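A side note on the CHECK_LOCAL hunk at the top of this file: the two macro definitions differ only in the outer parentheses, and those parentheses matter whenever the macro is used inside a larger expression, as in the !CHECK_LOCAL(...) call further down. A minimal, self-contained illustration (macro names are hypothetical, not from this commit):

#define PICK_PAREN(cond, a, b)  ((cond) ? (a) : (b))
#define PICK_BARE(cond, a, b)   (cond) ? (a) : (b)

int paren = !PICK_PAREN(1, 5, 7);  // expands to !((1) ? (5) : (7)) -> !5 -> 0
int bare  = !PICK_BARE(1, 5, 7);   // expands to !(1) ? (5) : (7), parsed as (!(1)) ? (5) : (7) -> 7
                                   // the ! grabs only the condition, silently flipping the ternary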

View File

@@ -52,8 +52,8 @@ public:
class SkMipMapCache {
public:
static const SkMipMap* FindAndRef(const SkBitmap& src, SkResourceCache* localCache = NULL);
static const SkMipMap* AddAndRef(const SkBitmap& src, SkResourceCache* localCache = NULL);
static const SkMipMap* FindAndRef(const SkBitmap& src);
static void Add(const SkBitmap& src, const SkMipMap* result);
};
#endif

View File

@@ -260,10 +260,11 @@ bool SkBitmapProcState::possiblyScaleImage() {
if (scaleSqd > SK_Scalar1) {
fCurrMip.reset(SkMipMapCache::FindAndRef(fOrigBitmap));
if (NULL == fCurrMip.get()) {
fCurrMip.reset(SkMipMapCache::AddAndRef(fOrigBitmap));
fCurrMip.reset(SkMipMap::Build(fOrigBitmap));
if (NULL == fCurrMip.get()) {
return false;
}
SkMipMapCache::Add(fOrigBitmap, fCurrMip);
}
SkScalar levelScale = SkScalarInvert(SkScalarSqrt(scaleSqd));
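For reference, the hunk above restores the find-or-build-then-add flow against the global mipmap cache. A minimal sketch of that flow in one place (not Skia source; the helper name find_or_build_mip is invented for illustration, and SkBitmapCache.h / SkMipMap.h are assumed to be included):

const SkMipMap* find_or_build_mip(const SkBitmap& bitmap) {
    const SkMipMap* mip = SkMipMapCache::FindAndRef(bitmap);  // NULL on a cache miss
    if (NULL == mip) {
        mip = SkMipMap::Build(bitmap);        // may return NULL (e.g. unsupported color type)
        if (mip) {
            SkMipMapCache::Add(bitmap, mip);  // the cache takes its own ref via SkRef(result)
        }
    }
    return mip;  // caller holds a ref either way and must unref() when done
}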

View File

@@ -1,175 +0,0 @@
/*
* Copyright 2014 Google Inc.
*
* Use of this source code is governed by a BSD-style license that can be
* found in the LICENSE file.
*/
#include "SkCachedData.h"
#include "SkRefCnt.h"
#include "SkDiscardableMemory.h"
SkCachedData::SkCachedData(void* data, size_t size)
: fData(data)
, fSize(size)
, fRefCnt(1)
, fStorageType(kMalloc_StorageType)
, fInCache(false)
, fIsLocked(true)
{
fStorage.fMalloc = data;
}
SkCachedData::SkCachedData(size_t size, SkDiscardableMemory* dm)
: fData(dm->data())
, fSize(size)
, fRefCnt(1)
, fStorageType(kDiscardableMemory_StorageType)
, fInCache(false)
, fIsLocked(true)
{
fStorage.fDM = dm;
}
SkCachedData::~SkCachedData() {
switch (fStorageType) {
case kMalloc_StorageType:
sk_free(fStorage.fMalloc);
break;
case kDiscardableMemory_StorageType:
SkDELETE(fStorage.fDM);
break;
}
}
class SkCachedData::AutoMutexWritable {
public:
AutoMutexWritable(const SkCachedData* cd) : fCD(const_cast<SkCachedData*>(cd)) {
fCD->fMutex.acquire();
fCD->validate();
}
~AutoMutexWritable() {
fCD->validate();
fCD->fMutex.release();
}
SkCachedData* get() { return fCD; }
SkCachedData* operator->() { return fCD; }
private:
SkCachedData* fCD;
};
void SkCachedData::internalRef(bool fromCache) const {
AutoMutexWritable(this)->inMutexRef(fromCache);
}
void SkCachedData::internalUnref(bool fromCache) const {
if (AutoMutexWritable(this)->inMutexUnref(fromCache)) {
// can't delete inside inMutexUnref, since it runs while holding a mutex (which this object owns)
SkDELETE(this);
}
}
///////////////////////////////////////////////////////////////////////////////////////////////////
void SkCachedData::inMutexRef(bool fromCache) {
if ((1 == fRefCnt) && fInCache) {
this->inMutexLock();
}
fRefCnt += 1;
if (fromCache) {
SkASSERT(!fInCache);
fInCache = true;
}
}
bool SkCachedData::inMutexUnref(bool fromCache) {
switch (--fRefCnt) {
case 0:
// we're going to be deleted, so we need to be unlocked (for DiscardableMemory)
if (fIsLocked) {
this->inMutexUnlock();
}
break;
case 1:
if (fInCache && !fromCache) {
// If we're down to 1 owner, and that owner is the cache, then it is safe
// to unlock (and mutate fData) even if the cache is in a different thread,
// as the cache is NOT allowed to inspect or use fData.
this->inMutexUnlock();
}
break;
default:
break;
}
if (fromCache) {
SkASSERT(fInCache);
fInCache = false;
}
// return true when we need to be deleted
return 0 == fRefCnt;
}
void SkCachedData::inMutexLock() {
fMutex.assertHeld();
SkASSERT(!fIsLocked);
fIsLocked = true;
switch (fStorageType) {
case kMalloc_StorageType:
this->setData(fStorage.fMalloc);
break;
case kDiscardableMemory_StorageType:
if (fStorage.fDM->lock()) {
this->setData(fStorage.fDM->data());
} else {
this->setData(NULL); // signal failure to lock, contents are gone
}
break;
}
}
void SkCachedData::inMutexUnlock() {
fMutex.assertHeld();
SkASSERT(fIsLocked);
fIsLocked = false;
switch (fStorageType) {
case kMalloc_StorageType:
// nothing to do/check
break;
case kDiscardableMemory_StorageType:
if (fData) { // did the previous lock succeed?
fStorage.fDM->unlock();
}
break;
}
this->setData(NULL); // signal that we're in an unlocked state
}
///////////////////////////////////////////////////////////////////////////////////////////////////
#ifdef SK_DEBUG
void SkCachedData::validate() const {
if (fIsLocked) {
SkASSERT((fInCache && fRefCnt > 1) || !fInCache);
switch (fStorageType) {
case kMalloc_StorageType:
SkASSERT(fData == fStorage.fMalloc);
break;
case kDiscardableMemory_StorageType:
// fData can be null or the actual value, depending on whether the DM's lock succeeded
break;
}
} else {
SkASSERT((fInCache && 1 == fRefCnt) || (0 == fRefCnt));
SkASSERT(NULL == fData);
}
}
#endif

View File

@@ -1,107 +0,0 @@
/*
* Copyright 2014 Google Inc.
*
* Use of this source code is governed by a BSD-style license that can be
* found in the LICENSE file.
*/
#ifndef SkCachedData_DEFINED
#define SkCachedData_DEFINED
#include "SkThread.h"
class SkDiscardableMemory;
class SkCachedData : ::SkNoncopyable {
public:
SkCachedData(void* mallocData, size_t size);
SkCachedData(size_t size, SkDiscardableMemory*);
virtual ~SkCachedData();
size_t size() const { return fSize; }
const void* data() const { return fData; }
void* writable_data() { return fData; }
void ref() const { this->internalRef(false); }
void unref() const { this->internalUnref(false); }
int testing_only_getRefCnt() const { return fRefCnt; }
bool testing_only_isLocked() const { return fIsLocked; }
bool testing_only_isInCache() const { return fInCache; }
protected:
// called when fData changes. could be NULL.
virtual void onDataChange(void* oldData, void* newData) {}
private:
SkMutex fMutex; // could use a pool of these...
enum StorageType {
kDiscardableMemory_StorageType,
kMalloc_StorageType
};
union {
SkDiscardableMemory* fDM;
void* fMalloc;
} fStorage;
void* fData;
size_t fSize;
int fRefCnt; // low-bit means we're owned by the cache
StorageType fStorageType;
bool fInCache;
bool fIsLocked;
void internalRef(bool fromCache) const;
void internalUnref(bool fromCache) const;
void inMutexRef(bool fromCache);
bool inMutexUnref(bool fromCache); // returns true if we should delete "this"
void inMutexLock();
void inMutexUnlock();
// called whenever our fData might change (lock or unlock)
void setData(void* newData) {
if (newData != fData) {
// notify our subclasses of the change
this->onDataChange(fData, newData);
fData = newData;
}
}
class AutoMutexWritable;
public:
#ifdef SK_DEBUG
void validate() const;
#else
void validate() const {}
#endif
/*
* Attaching data to an SkResourceCache (only one at a time) enables the data to be
* unlocked when the cache is the only owner, thus freeing it to be purged (assuming the
* data is backed by a SkDiscardableMemory).
*
* When attached, it also automatically attempts to "lock" the data when the first client
* refs the data (typically from a find(key, visitor) call).
*
* Thus the data will always be "locked" when a non-cache has a ref on it (whether or not
* the lock succeeded in recovering the memory -- check data() to see if it is NULL).
*/
/*
* Call when adding this instance to a SkResourceCache::Rec subclass
* (typically in the Rec's constructor).
*/
void attachToCacheAndRef() const { this->internalRef(true); }
/*
* Call when removing this instance from a SkResourceCache::Rec subclass
* (typically in the Rec's destructor).
*/
void detachFromCacheAndUnref() const { this->internalUnref(true); }
};
#endif
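The comment block above spells out the attach/lock contract that this (deleted) class implemented. A minimal sketch of the state transitions it promises, using only calls declared in this header and assuming a malloc-backed instance, mirroring what CachedDataTest exercises further down:

static void cached_data_lifecycle() {
    // one client owner, not yet in a cache: always locked
    SkCachedData* data = SkNEW_ARGS(SkCachedData, (sk_malloc_throw(100), 100));

    data->attachToCacheAndRef();      // what a Rec constructor would call
    // refcnt 2 (client + cache), in cache, still locked by the client ref

    data->unref();                    // client drops its ref
    // refcnt 1, in cache, now unlocked: a discardable-backed instance could be purged here

    data->ref();                      // e.g. handed back out by a cache find()
    // refcnt 2, locked again (for discardable backing, data() may be NULL if a purge won)

    data->unref();                    // back to cache-only ownership, unlocked
    data->detachFromCacheAndUnref();  // what a Rec destructor would call; deletes the object
}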

View File

@@ -109,18 +109,18 @@ static void downsampleby2_proc4444(SkBitmap* dst, int x, int y,
*dst->getAddr16(x >> 1, y >> 1) = (uint16_t)collaps4444(c >> 2);
}
size_t SkMipMap::AllocLevelsSize(int levelCount, size_t pixelSize) {
SkMipMap::Level* SkMipMap::AllocLevels(int levelCount, size_t pixelSize) {
if (levelCount < 0) {
return 0;
return NULL;
}
int64_t size = sk_64_mul(levelCount + 1, sizeof(Level)) + pixelSize;
if (!sk_64_isS32(size)) {
return 0;
return NULL;
}
return sk_64_asS32(size);
return (Level*)sk_malloc_throw(sk_64_asS32(size));
}
SkMipMap* SkMipMap::Build(const SkBitmap& src, SkDiscardableFactoryProc fact) {
SkMipMap* SkMipMap::Build(const SkBitmap& src) {
void (*proc)(SkBitmap* dst, int x, int y, const SkBitmap& src);
const SkColorType ct = src.colorType();
@@ -165,27 +165,11 @@ SkMipMap* SkMipMap::Build(const SkBitmap& src, SkDiscardableFactoryProc fact) {
return NULL;
}
size_t storageSize = SkMipMap::AllocLevelsSize(countLevels, size);
if (0 == storageSize) {
Level* levels = SkMipMap::AllocLevels(countLevels, size);
if (NULL == levels) {
return NULL;
}
SkMipMap* mipmap;
if (fact) {
SkDiscardableMemory* dm = fact(storageSize);
if (NULL == dm) {
return NULL;
}
mipmap = SkNEW_ARGS(SkMipMap, (storageSize, dm));
} else {
mipmap = SkNEW_ARGS(SkMipMap, (sk_malloc_throw(storageSize), storageSize));
}
// init
mipmap->fCount = countLevels;
mipmap->fLevels = (Level*)mipmap->writable_data();
Level* levels = mipmap->fLevels;
uint8_t* baseAddr = (uint8_t*)&levels[countLevels];
uint8_t* addr = baseAddr;
int width = src.width();
@@ -220,13 +204,25 @@ SkMipMap* SkMipMap::Build(const SkBitmap& src, SkDiscardableFactoryProc fact) {
}
SkASSERT(addr == baseAddr + size);
return mipmap;
return SkNEW_ARGS(SkMipMap, (levels, countLevels, size));
}
///////////////////////////////////////////////////////////////////////////////
//static int gCounter;
SkMipMap::SkMipMap(Level* levels, int count, size_t size)
: fSize(size), fLevels(levels), fCount(count) {
SkASSERT(levels);
SkASSERT(count > 0);
// SkDebugf("mips %d\n", ++gCounter);
}
SkMipMap::~SkMipMap() {
sk_free(fLevels);
// SkDebugf("mips %d\n", --gCounter);
}
static SkFixed compute_level(SkScalar scale) {
SkFixed s = SkAbs32(SkScalarToFixed(SkScalarInvert(scale)));
@@ -239,10 +235,6 @@ static SkFixed compute_level(SkScalar scale) {
}
bool SkMipMap::extractLevel(SkScalar scale, Level* levelPtr) const {
if (NULL == fLevels) {
return false;
}
if (scale >= SK_Scalar1) {
return false;
}
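Both variants of Build shown above pack the Level headers and all of the downsampled pixel data into a single allocation, with the pixel bytes starting right after the last header; that is what the (uint8_t*)&levels[countLevels] line computes. A standalone sketch of just that layout (generic C++; the real code additionally guards the size computation against overflow with sk_64_mul / sk_64_isS32):

#include <cstdlib>
#include <cstdint>

struct Level { void* fPixels; /* rowBytes, width, height elided, as in the header below */ };

static Level* alloc_levels(int levelCount, size_t pixelSize) {
    if (levelCount < 0) return NULL;
    // (levelCount + 1) headers' worth of space plus the pixel bytes, in one block
    size_t bytes = sizeof(Level) * (size_t)(levelCount + 1) + pixelSize;
    return static_cast<Level*>(std::malloc(bytes));
}

// Usage: pixel storage begins immediately after the header array, which is exactly where
// the build loop points each Level's fPixels before handing the block to SkMipMap:
//   Level*   levels = alloc_levels(count, pixelBytes);
//   uint8_t* addr   = reinterpret_cast<uint8_t*>(&levels[count]);
// The whole thing is later released with a single free(), as ~SkMipMap does via sk_free(fLevels).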

View File

@@ -8,17 +8,14 @@
#ifndef SkMipMap_DEFINED
#define SkMipMap_DEFINED
#include "SkCachedData.h"
#include "SkRefCnt.h"
#include "SkScalar.h"
class SkBitmap;
class SkDiscardableMemory;
typedef SkDiscardableMemory* (*SkDiscardableFactoryProc)(size_t bytes);
class SkMipMap : public SkCachedData {
class SkMipMap : public SkRefCnt {
public:
static SkMipMap* Build(const SkBitmap& src, SkDiscardableFactoryProc);
static SkMipMap* Build(const SkBitmap& src);
struct Level {
void* fPixels;
@@ -29,22 +26,18 @@ public:
bool extractLevel(SkScalar scale, Level*) const;
protected:
virtual void onDataChange(void* oldData, void* newData) SK_OVERRIDE {
fLevels = (Level*)newData; // could be NULL
}
size_t getSize() const { return fSize; }
private:
size_t fSize;
Level* fLevels;
int fCount;
// we take ownership of levels and will free them with sk_free()
SkMipMap(void* malloc, size_t size) : INHERITED(malloc, size) {}
SkMipMap(size_t size, SkDiscardableMemory* dm) : INHERITED(size, dm) {}
SkMipMap(Level* levels, int count, size_t size);
virtual ~SkMipMap();
static size_t AllocLevelsSize(int levelCount, size_t pixelSize);
typedef SkCachedData INHERITED;
static Level* AllocLevels(int levelCount, size_t pixelSize);
};
#endif
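As restored, SkMipMap is a plain ref-counted object: build it from a bitmap, ask for the level closest to a given (sub-1.0) scale, and unref it when done. A short usage sketch (variable names are illustrative; SkAutoTUnref handles the final unref, as in MipMapTest below):

SkBitmap bitmap;  // assume this already holds pixels
SkAutoTUnref<SkMipMap> mip(SkMipMap::Build(bitmap));  // NULL if the color type is unsupported
if (mip.get()) {
    SkMipMap::Level level;
    if (mip->extractLevel(SK_Scalar1 / 4, &level)) {  // drawing at roughly 25% of original size
        // level.fPixels points at the prebuilt downsample closest to that scale
    }
}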

View File

@@ -5,7 +5,6 @@
* found in the LICENSE file.
*/
#include "SkCachedData.h"
#include "SkChecksum.h"
#include "SkResourceCache.h"
#include "SkMipMap.h"
@@ -264,15 +263,6 @@ size_t SkResourceCache::setTotalByteLimit(size_t newLimit) {
return prevLimit;
}
SkCachedData* SkResourceCache::newCachedData(size_t bytes) {
if (fDiscardableFactory) {
SkDiscardableMemory* dm = fDiscardableFactory(bytes);
return dm ? SkNEW_ARGS(SkCachedData, (bytes, dm)) : NULL;
} else {
return SkNEW_ARGS(SkCachedData, (sk_malloc_throw(bytes), bytes));
}
}
///////////////////////////////////////////////////////////////////////////////
void SkResourceCache::detach(Rec* rec) {
@@ -452,11 +442,6 @@ SkBitmap::Allocator* SkResourceCache::GetAllocator() {
return get_cache()->allocator();
}
SkCachedData* SkResourceCache::NewCachedData(size_t bytes) {
SkAutoMutexAcquire am(gMutex);
return get_cache()->newCachedData(bytes);
}
void SkResourceCache::Dump() {
SkAutoMutexAcquire am(gMutex);
get_cache()->dump();

View File

@@ -10,7 +10,6 @@
#include "SkBitmap.h"
class SkCachedData;
class SkDiscardableMemory;
class SkMipMap;
@@ -137,8 +136,6 @@ public:
*/
static SkBitmap::Allocator* GetAllocator();
static SkCachedData* NewCachedData(size_t bytes);
/**
* Call SkDebugf() with diagnostic information about the state of the cache
*/
@@ -200,8 +197,6 @@ public:
DiscardableFactory discardableFactory() const { return fDiscardableFactory; }
SkBitmap::Allocator* allocator() const { return fAllocator; };
SkCachedData* newCachedData(size_t bytes);
/**
* Call SkDebugf() with diagnostic information about the state of the cache
*/

View File

@@ -1,95 +0,0 @@
/*
* Copyright 2014 Google Inc.
*
* Use of this source code is governed by a BSD-style license that can be
* found in the LICENSE file.
*/
#include "SkCachedData.h"
#include "SkDiscardableMemoryPool.h"
#include "Test.h"
enum LockedState {
kUnlocked,
kLocked,
};
enum CachedState {
kNotInCache,
kInCache,
};
static void check_data(skiatest::Reporter* reporter, SkCachedData* data,
int refcnt, CachedState cacheState, LockedState lockedState) {
REPORTER_ASSERT(reporter, data->testing_only_getRefCnt() == refcnt);
REPORTER_ASSERT(reporter, data->testing_only_isInCache() == (kInCache == cacheState));
REPORTER_ASSERT(reporter, data->testing_only_isLocked() == (lockedState == kLocked));
}
static SkCachedData* make_data(size_t size, SkDiscardableMemoryPool* pool) {
if (pool) {
SkDiscardableMemory* dm = pool->create(size);
// the pool "can" return null, but it shouldn't in these controlled conditions
SK_ALWAYSBREAK(dm);
return SkNEW_ARGS(SkCachedData, (size, dm));
} else {
return SkNEW_ARGS(SkCachedData, (sk_malloc_throw(size), size));
}
}
// returns with the data locked by client and cache
static SkCachedData* test_locking(skiatest::Reporter* reporter,
size_t size, SkDiscardableMemoryPool* pool) {
SkCachedData* data = make_data(size, pool);
memset(data->writable_data(), 0x80, size); // just to use writable_data()
check_data(reporter, data, 1, kNotInCache, kLocked);
data->ref();
check_data(reporter, data, 2, kNotInCache, kLocked);
data->unref();
check_data(reporter, data, 1, kNotInCache, kLocked);
data->attachToCacheAndRef();
check_data(reporter, data, 2, kInCache, kLocked);
data->unref();
check_data(reporter, data, 1, kInCache, kUnlocked);
data->ref();
check_data(reporter, data, 2, kInCache, kLocked);
return data;
}
/*
* SkCachedData behaves differently (regarding its locked/unlocked state) depending on
* whether it is in the cache or not. Being in the cache is signaled by calling attachToCacheAndRef()
* instead of ref(), and balanced by detachFromCacheAndUnref().
*
* Thus, among other things, we test the end-of-life behavior when the client is the last owner
* and when the cache is.
*/
DEF_TEST(CachedData, reporter) {
SkAutoTUnref<SkDiscardableMemoryPool> pool(SkDiscardableMemoryPool::Create(1000));
for (int useDiscardable = 0; useDiscardable <= 1; ++useDiscardable) {
const size_t size = 100;
// test with client as last owner
SkCachedData* data = test_locking(reporter, size, useDiscardable ? pool.get() : NULL);
check_data(reporter, data, 2, kInCache, kLocked);
data->detachFromCacheAndUnref();
check_data(reporter, data, 1, kNotInCache, kLocked);
data->unref();
// test with cache as last owner
data = test_locking(reporter, size, useDiscardable ? pool.get() : NULL);
check_data(reporter, data, 2, kInCache, kLocked);
data->unref();
check_data(reporter, data, 1, kInCache, kUnlocked);
data->detachFromCacheAndUnref();
}
}

View File

@@ -25,7 +25,7 @@ DEF_TEST(MipMap, reporter) {
for (int i = 0; i < 500; ++i) {
make_bitmap(&bm, rand);
SkAutoTUnref<SkMipMap> mm(SkMipMap::Build(bm, NULL));
SkAutoTUnref<SkMipMap> mm(SkMipMap::Build(bm));
REPORTER_ASSERT(reporter, !mm->extractLevel(SK_Scalar1, NULL));
REPORTER_ASSERT(reporter, !mm->extractLevel(SK_Scalar1 * 2, NULL));

View File

@@ -121,55 +121,6 @@ DEF_TEST(BitmapCache_add_rect, reporter) {
REPORTER_ASSERT(reporter, SkBitmapCache::Find(cachedBitmap.getGenerationID(), rect, &bm, cache));
}
#include "SkMipMap.h"
enum LockedState {
kNotLocked,
kLocked,
};
enum CachedState {
kNotInCache,
kInCache,
};
static void check_data(skiatest::Reporter* reporter, const SkCachedData* data,
int refcnt, CachedState cacheState, LockedState lockedState) {
REPORTER_ASSERT(reporter, data->testing_only_getRefCnt() == refcnt);
REPORTER_ASSERT(reporter, data->testing_only_isInCache() == (kInCache == cacheState));
bool isLocked = (data->data() != NULL);
REPORTER_ASSERT(reporter, isLocked == (lockedState == kLocked));
}
static void test_mipmapcache(skiatest::Reporter* reporter, SkResourceCache* cache) {
cache->purgeAll();
SkBitmap src;
src.allocN32Pixels(5, 5);
src.setImmutable();
const SkMipMap* mipmap = SkMipMapCache::FindAndRef(src, cache);
REPORTER_ASSERT(reporter, NULL == mipmap);
mipmap = SkMipMapCache::AddAndRef(src, cache);
REPORTER_ASSERT(reporter, mipmap);
check_data(reporter, mipmap, 2, kInCache, kLocked);
mipmap->unref();
// tricky, since technically after this I'm no longer an owner, but since the cache is
// local, I know it won't get purged behind my back
check_data(reporter, mipmap, 1, kInCache, kNotLocked);
// find us again
mipmap = SkMipMapCache::FindAndRef(src, cache);
check_data(reporter, mipmap, 2, kInCache, kLocked);
cache->purgeAll();
check_data(reporter, mipmap, 1, kNotInCache, kLocked);
mipmap->unref();
}
DEF_TEST(BitmapCache_discarded_bitmap, reporter) {
SkResourceCache::DiscardableFactory factory = SkResourceCache::GetDiscardableFactory();
SkBitmap::Allocator* allocator = SkBitmapCache::GetAllocator();
@@ -214,6 +165,4 @@ DEF_TEST(BitmapCache_discarded_bitmap, reporter) {
// We can add the bitmap back to the cache and find it again.
REPORTER_ASSERT(reporter, SkBitmapCache::Add(cachedBitmap.getGenerationID(), rect, cachedBitmap, cache));
REPORTER_ASSERT(reporter, SkBitmapCache::Find(cachedBitmap.getGenerationID(), rect, &bm, cache));
test_mipmapcache(reporter, cache);
}