Simplify cache IDs and keys.

R=robertphillips@google.com
Review URL: https://codereview.appspot.com/6954047

git-svn-id: http://skia.googlecode.com/svn/trunk@6914 2bbb7eff-a529-9590-31e7-b0007b416f81
bsalomon@google.com 2012-12-20 14:18:10 +00:00
parent 9532953aa1
commit 4b86e3428b
22 changed files with 374 additions and 501 deletions

View File

@ -10,7 +10,6 @@
'gr_sources': [
'<(skia_include_path)/gpu/GrAARectRenderer.h',
'<(skia_include_path)/gpu/GrBackendEffectFactory.h',
'<(skia_include_path)/gpu/GrCacheID.h',
'<(skia_include_path)/gpu/GrClipData.h',
'<(skia_include_path)/gpu/GrColor.h',
'<(skia_include_path)/gpu/GrConfig.h',

View File

@ -1,99 +0,0 @@
/*
* Copyright 2012 Google Inc.
*
* Use of this source code is governed by a BSD-style license that can be
* found in the LICENSE file.
*/
#ifndef GrCacheID_DEFINED
#define GrCacheID_DEFINED
#include "GrTypes.h"
///////////////////////////////////////////////////////////////////////////////
#define GR_DECLARE_RESOURCE_CACHE_TYPE() \
static int8_t GetResourceType();
#define GR_DEFINE_RESOURCE_CACHE_TYPE(ClassName) \
int8_t ClassName::GetResourceType() { \
static int8_t kResourceTypeID = 0; \
if (0 == kResourceTypeID) { \
kResourceTypeID = GrCacheID::GetNextResourceType(); \
} \
return kResourceTypeID; \
}
///////////////////////////////////////////////////////////////////////////////
#define GR_DECLARE_RESOURCE_CACHE_DOMAIN(AccessorName) \
static int8_t AccessorName();
#define GR_DEFINE_RESOURCE_CACHE_DOMAIN(ClassName, AccessorName) \
int8_t ClassName::AccessorName() { \
static int8_t kDomainID = 0; \
if (0 == kDomainID) { \
kDomainID = GrCacheID::GetNextDomain(); \
} \
return kDomainID; \
}
/**
* The cache ID adds structure to the IDs used for caching GPU resources. It
* is broken into three portions:
* the public portion - which is filled in by Skia clients
* the private portion - which is used by the cache (domain & type)
* the resource-specific portion - which is filled in by each GrResource-
* derived class.
*
* For the public portion each client of the cache makes up its own
* unique-per-resource identifier (e.g., bitmap genID). A public ID of
* 'kScratch_CacheID' indicates that the resource is a "scratch" resource.
* When used to acquire a resource it indicates the cache user is
* looking for a resource that matches a resource-subclass-specific set of
* dimensions such as width, height, buffer size, or pixel config, but not
* for particular resource contents (e.g., texel or vertex values). The public
* IDs are unique within a private ID value but not necessarily across
* private IDs.
*
* The domain portion identifies the cache client while the type field
* indicates the resource type. When the public portion indicates that the
* resource is a scratch resource, the domain field should be kUnrestricted
* so that scratch resources can be recycled across domains.
*/
class GrCacheID {
public:
uint64_t fPublicID;
uint32_t fResourceSpecific32;
uint8_t fDomain;
private:
uint8_t fResourceType;
public:
uint16_t fResourceSpecific16;
GrCacheID(uint8_t resourceType)
: fPublicID(kDefaultPublicCacheID)
, fDomain(GrCacheData::kScratch_ResourceDomain)
, fResourceType(resourceType) {
}
void toRaw(uint32_t v[4]);
uint8_t getResourceType() const { return fResourceType; }
/*
* Default value for public portion of GrCacheID
*/
static const uint64_t kDefaultPublicCacheID = 0;
static const uint8_t kInvalid_ResourceType = 0;
static uint8_t GetNextDomain();
static uint8_t GetNextResourceType();
};
#endif // GrCacheID_DEFINED
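For context, the deleted macro pair expanded to a lazily initialized accessor that handed out domain IDs on first use. A sketch of how a client wired it up, modeled on the GrClipMaskManager usage removed later in this commit:

// In the header (expands to: static int8_t GetAlphaMaskDomain();)
class GrClipMaskManager : public GrNoncopyable {
public:
    GR_DECLARE_RESOURCE_CACHE_DOMAIN(GetAlphaMaskDomain)
};

// In the .cpp: the first call fetches a fresh ID from GrCacheID::GetNextDomain()
// and caches it in a function-local static; later calls return the same value.
GR_DEFINE_RESOURCE_CACHE_DOMAIN(GrClipMaskManager, GetAlphaMaskDomain)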

View File

@ -118,22 +118,21 @@ public:
// Textures
/**
* Create a new entry, based on the specified key and texture, and return
* a "locked" texture. Must call be balanced with an unlockTexture() call.
* Create a new entry, based on the specified key and texture and return it.
*
* @param params The texture params used to draw a texture may help determine
* the cache entry used. (e.g. different versions may exist
* for different wrap modes on GPUs with limited NPOT
* texture support). NULL implies clamp wrap modes.
* @param desc Description of the texture properties.
* @param cacheData Cache-specific properties (e.g., texture gen ID)
* @param cacheID Cache-specific properties (e.g., texture gen ID)
* @param srcData Pointer to the pixel values.
* @param rowBytes The number of bytes between rows of the texture. Zero
* implies tightly packed rows.
*/
GrTexture* createTexture(const GrTextureParams* params,
const GrTextureDesc& desc,
const GrCacheData& cacheData,
const GrCacheID& cacheID,
void* srcData, size_t rowBytes);
/**
@ -141,14 +140,14 @@ public:
* return it. The return value will be NULL if not found.
*
* @param desc Description of the texture properties.
* @param cacheData Cache-specific properties (e.g., texture gen ID)
* @param cacheID Cache-specific properties (e.g., texture gen ID)
* @param params The texture params used to draw a texture may help determine
* the cache entry used. (e.g. different versions may exist
* for different wrap modes on GPUs with limited NPOT
* texture support). NULL implies clamp wrap modes.
*/
GrTexture* findTexture(const GrTextureDesc& desc,
const GrCacheData& cacheData,
const GrCacheID& cacheID,
const GrTextureParams* params);
/**
* Determines whether a texture is in the cache. If the texture is found it
@ -156,7 +155,7 @@ public:
* the texture for deletion.
*/
bool isTextureInCache(const GrTextureDesc& desc,
const GrCacheData& cacheData,
const GrCacheID& cacheID,
const GrTextureParams* params) const;
/**
@ -191,11 +190,10 @@ public:
* such an API will create gaps in the tiling pattern. This includes clamp
* mode. (This may be addressed in a future update.)
*/
GrTexture* lockScratchTexture(const GrTextureDesc& desc,
ScratchTexMatch match);
GrTexture* lockScratchTexture(const GrTextureDesc&, ScratchTexMatch match);
/**
* When done with an entry, call unlockTexture(entry) on it, which returns
* When done with an entry, call unlockScratchTexture(entry) on it, which returns
* it to the cache, where it may be purged.
*/
void unlockScratchTexture(GrTexture* texture);
@ -913,7 +911,7 @@ private:
void internalDrawPath(const GrPaint& paint, const SkPath& path, const SkStrokeRec& stroke);
GrTexture* createResizedTexture(const GrTextureDesc& desc,
const GrCacheData& cacheData,
const GrCacheID& cacheID,
void* srcData,
size_t rowBytes,
bool needsFiltering);
@ -951,8 +949,7 @@ public:
GrAutoScratchTexture(GrContext* context,
const GrTextureDesc& desc,
GrContext::ScratchTexMatch match =
GrContext::kApprox_ScratchTexMatch)
GrContext::ScratchTexMatch match = GrContext::kApprox_ScratchTexMatch)
: fContext(NULL)
, fTexture(NULL) {
this->set(context, desc, match);
@ -996,8 +993,7 @@ public:
GrTexture* set(GrContext* context,
const GrTextureDesc& desc,
GrContext::ScratchTexMatch match =
GrContext::kApprox_ScratchTexMatch) {
GrContext::ScratchTexMatch match = GrContext::kApprox_ScratchTexMatch) {
this->reset();
fContext = context;

View File

@ -10,7 +10,6 @@
#define GrTexture_DEFINED
#include "GrSurface.h"
#include "GrCacheID.h"
class GrRenderTarget;
class GrResourceKey;
@ -20,8 +19,6 @@ class GrTexture : public GrSurface {
public:
SK_DECLARE_INST_COUNT(GrTexture)
GR_DECLARE_RESOURCE_CACHE_TYPE()
// from GrResource
/**
* Informational texture flags
@ -130,15 +127,12 @@ public:
#else
void validate() const {}
#endif
static GrResourceKey ComputeKey(const GrGpu* gpu,
const GrTextureParams* sampler,
const GrTextureParams* params,
const GrTextureDesc& desc,
const GrCacheData& cacheData,
bool scratch);
const GrCacheID& cacheID);
static GrResourceKey ComputeScratchKey(const GrTextureDesc& desc);
static bool NeedsResizing(const GrResourceKey& key);
static bool IsScratchTexture(const GrResourceKey& key);
static bool NeedsFiltering(const GrResourceKey& key);
protected:

View File

@ -459,44 +459,55 @@ struct GrTextureDesc {
};
/**
* GrCacheData holds user-provided cache-specific data. It is used in
* combination with the GrTextureDesc to construct a cache key for texture
* resources.
* GrCacheID is used to create and find cached GrResources (e.g. GrTextures). The ID has two parts:
* the domain and the key. Domains simply allow multiple clients to use 0-based indices as their
* cache key without colliding. The key uniquely identifies a GrResource within the domain.
* Users of the cache must obtain a domain via GenerateDomain().
*/
struct GrCacheData {
/*
* Scratch textures should all have this value as their fClientCacheID
*/
static const uint64_t kScratch_CacheID = 0xBBBBBBBB;
struct GrCacheID {
public:
typedef uint8_t Domain;
struct Key {
union {
uint8_t fData8[16];
uint32_t fData32[4];
uint64_t fData64[2];
};
};
/**
* Resources in the "scratch" domain can be used by any domain. All
* scratch textures will have this as their domain.
*/
static const uint8_t kScratch_ResourceDomain = 0;
* A default cache ID is invalid; a set method must be called before the object is used.
*/
GrCacheID() { fDomain = kInvalid_Domain; }
// No default constructor is provided since, if you are creating one
// of these, you should definitely have a key (or be using the scratch
// key).
GrCacheData(uint64_t key)
: fClientCacheID(key)
, fResourceDomain(kScratch_ResourceDomain) {
/**
* Initialize the cache ID to a domain and key.
*/
GrCacheID(Domain domain, const Key& key) {
GrAssert(kInvalid_Domain != domain);
this->reset(domain, key);
}
/**
* A user-provided texture ID. It should be unique to the texture data and
* does not need to take into account the width or height. Two textures
* with the same ID but different dimensions will not collide. This field
* is only relevant for textures that will be cached.
*/
uint64_t fClientCacheID;
void reset(Domain domain, const Key& key) {
fDomain = domain;
memcpy(&fKey, &key, sizeof(Key));
}
/**
* Allows cache clients to cluster their textures inside domains (e.g.,
* alpha clip masks). Only relevant for cached textures.
*/
uint8_t fResourceDomain;
/** Has this been initialized to a valid domain */
bool isValid() const { return kInvalid_Domain != fDomain; }
const Key& getKey() const { GrAssert(this->isValid()); return fKey; }
Domain getDomain() const { GrAssert(this->isValid()); return fDomain; }
/** Creates a new unique ID domain. */
static Domain GenerateDomain();
private:
Key fKey;
Domain fDomain;
static const Domain kInvalid_Domain = 0;
};
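A minimal sketch of the new flow, mirroring the GrStencilBuffer changes later in this diff (the domain name and key contents here are illustrative):

// One-time, per-client domain.
static const GrCacheID::Domain gMyDomain = GrCacheID::GenerateDomain();

GrCacheID::Key key;
key.fData32[0] = width;
key.fData32[1] = height;
key.fData32[2] = sampleCnt;
key.fData32[3] = 0;   // unused words should be zeroed; all 16 bytes are compared

GrCacheID id(gMyDomain, key);
GrAssert(id.isValid());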
/**

View File

@ -110,9 +110,6 @@ public:
class SkAutoCachedTexture; // used internally
protected:
bool isBitmapInTextureCache(const SkBitmap& bitmap,
const GrTextureParams& params) const;
// overrides from SkDevice
virtual bool onReadPixels(const SkBitmap& bitmap,
int x, int y,

View File

@ -75,9 +75,9 @@ static inline GrColor SkColor2GrColor(SkColor c) {
////////////////////////////////////////////////////////////////////////////////
GrTexture* GrLockCachedBitmapTexture(GrContext*,
const SkBitmap&,
const GrTextureParams*);
bool GrIsBitmapInCache(const GrContext*, const SkBitmap&, const GrTextureParams*);
GrTexture* GrLockCachedBitmapTexture(GrContext*, const SkBitmap&, const GrTextureParams*);
void GrUnlockCachedBitmapTexture(GrTexture*);

View File

@ -16,25 +16,27 @@
* Hash function class that can take a data chunk of any predetermined length. The hash function
* used is the One-at-a-Time Hash (http://burtleburtle.net/bob/hash/doobs.html).
*
* Keys are computed from Entry objects. Entry must be fully ordered by a member:
* int compare(const GrTBinHashKey<Entry, ..>& k);
* which returns negative if the Entry < k, 0 if it equals k, and positive if k < the Entry.
* Additionally, Entry must be flattenable into the key using setKeyData.
* Keys are computed from ENTRY objects. ENTRY must be fully ordered by a member:
* int compare(const GrTBinHashKey<ENTRY, ..>& k);
* which returns negative if the ENTRY < k, 0 if it equals k, and positive if k < the ENTRY.
* Additionally, ENTRY must be flattenable into the key using setKeyData.
*
* This class satisfies the requirements to be a key for a GrTHashTable.
*/
template<typename Entry, size_t KeySize>
template<typename ENTRY, size_t KEY_SIZE>
class GrTBinHashKey {
public:
enum { kKeySize = KEY_SIZE };
GrTBinHashKey() {
this->reset();
}
GrTBinHashKey(const GrTBinHashKey<Entry, KeySize>& other) {
GrTBinHashKey(const GrTBinHashKey<ENTRY, KEY_SIZE>& other) {
*this = other;
}
GrTBinHashKey<Entry, KeySize>& operator=(const GrTBinHashKey<Entry, KeySize>& other) {
GrTBinHashKey<ENTRY, KEY_SIZE>& operator=(const GrTBinHashKey<ENTRY, KEY_SIZE>& other) {
memcpy(this, &other, sizeof(*this));
return *this;
}
@ -50,11 +52,11 @@ public:
}
void setKeyData(const uint32_t* SK_RESTRICT data) {
GrAssert(GrIsALIGN4(KeySize));
memcpy(&fData, data, KeySize);
GrAssert(GrIsALIGN4(KEY_SIZE));
memcpy(&fData, data, KEY_SIZE);
uint32_t hash = 0;
size_t len = KeySize;
size_t len = KEY_SIZE;
while (len >= 4) {
hash += *data++;
hash += (hash << 10);
@ -70,17 +72,17 @@ public:
fHash = hash;
}
int compare(const GrTBinHashKey<Entry, KeySize>& key) const {
int compare(const GrTBinHashKey<ENTRY, KEY_SIZE>& key) const {
GrAssert(fIsValid && key.fIsValid);
return memcmp(fData, key.fData, KeySize);
return memcmp(fData, key.fData, KEY_SIZE);
}
static bool EQ(const Entry& entry, const GrTBinHashKey<Entry, KeySize>& key) {
static bool EQ(const ENTRY& entry, const GrTBinHashKey<ENTRY, KEY_SIZE>& key) {
GrAssert(key.fIsValid);
return 0 == entry.compare(key);
}
static bool LT(const Entry& entry, const GrTBinHashKey<Entry, KeySize>& key) {
static bool LT(const ENTRY& entry, const GrTBinHashKey<ENTRY, KEY_SIZE>& key) {
GrAssert(key.fIsValid);
return entry.compare(key) < 0;
}
@ -90,9 +92,14 @@ public:
return fHash;
}
const uint8_t* getData() const {
GrAssert(fIsValid);
return fData;
}
private:
uint32_t fHash;
uint8_t fData[KeySize]; // Buffer for key storage
uint8_t fData[KEY_SIZE]; // Buffer for key storage
#if GR_DEBUG
public:

View File

@ -5,40 +5,21 @@
* found in the LICENSE file.
*/
#include "GrCacheID.h"
#include "GrTypes.h"
#include "SkThread.h" // for sk_atomic_inc
uint8_t GrCacheID::GetNextDomain() {
// 0 reserved for kUnrestricted_ResourceDomain
static int32_t gNextDomain = 1;
static const GrCacheID::Key kAssertKey;
GR_STATIC_ASSERT(sizeof(kAssertKey.fData8) == sizeof(kAssertKey.fData32));
GR_STATIC_ASSERT(sizeof(kAssertKey.fData8) == sizeof(kAssertKey.fData64));
GR_STATIC_ASSERT(sizeof(kAssertKey.fData8) == sizeof(kAssertKey));
GrCacheID::Domain GrCacheID::GenerateDomain() {
static int32_t gNextDomain = kInvalid_Domain + 1;
int32_t domain = sk_atomic_inc(&gNextDomain);
if (domain >= 256) {
if (domain >= 1 << (8 * sizeof(Domain))) {
GrCrash("Too many Cache Domains");
}
return (uint8_t) domain;
}
uint8_t GrCacheID::GetNextResourceType() {
// 0 reserved for kInvalid_ResourceType
static int32_t gNextResourceType = 1;
int32_t type = sk_atomic_inc(&gNextResourceType);
if (type >= 256) {
GrCrash("Too many Cache Resource Types");
}
return (uint8_t) type;
}
void GrCacheID::toRaw(uint32_t v[4]) {
GrAssert(4*sizeof(uint32_t) == sizeof(GrCacheID));
v[0] = (uint32_t) (fPublicID & 0xffffffffUL);
v[1] = (uint32_t) ((fPublicID >> 32) & 0xffffffffUL);
v[2] = fResourceSpecific32;
v[3] = fDomain << 24 |
fResourceType << 16 |
fResourceSpecific16;
return static_cast<Domain>(domain);
}
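A quick check of the overflow guard above, given the types in this commit:

// Domain is uint8_t, so 1 << (8 * sizeof(Domain)) == 1 << 8 == 256.
// gNextDomain starts at kInvalid_Domain + 1 == 1, so domains 1..255 can be
// handed out before the guard trips and GrCrash("Too many Cache Domains") fires.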

View File

@ -18,12 +18,9 @@
#include "GrAAConvexPathRenderer.h"
#include "GrAAHairLinePathRenderer.h"
#include "GrSWMaskHelper.h"
#include "GrCacheID.h"
#include "SkTLazy.h"
GR_DEFINE_RESOURCE_CACHE_DOMAIN(GrClipMaskManager, GetAlphaMaskDomain)
#define GR_AA_CLIP 1
typedef SkClipStack::Element Element;

View File

@ -41,8 +41,6 @@ class GrDrawState;
*/
class GrClipMaskManager : public GrNoncopyable {
public:
GR_DECLARE_RESOURCE_CACHE_DOMAIN(GetAlphaMaskDomain)
GrClipMaskManager()
: fGpu(NULL)
, fCurrClipMaskType(kNone_ClipMaskType) {

View File

@ -209,18 +209,20 @@ void convolve_gaussian(GrDrawTarget* target,
}
////////////////////////////////////////////////////////////////////////////////
GrTexture* GrContext::findTexture(const GrTextureDesc& desc,
const GrCacheData& cacheData,
const GrCacheID& cacheID,
const GrTextureParams* params) {
GrResourceKey resourceKey = GrTexture::ComputeKey(fGpu, params, desc, cacheData, false);
GrResourceKey resourceKey = GrTexture::ComputeKey(fGpu, params, desc, cacheID);
GrResource* resource = fTextureCache->find(resourceKey);
return static_cast<GrTexture*>(resource);
}
bool GrContext::isTextureInCache(const GrTextureDesc& desc,
const GrCacheData& cacheData,
const GrCacheID& cacheID,
const GrTextureParams* params) const {
GrResourceKey resourceKey = GrTexture::ComputeKey(fGpu, params, desc, cacheData, false);
GrResourceKey resourceKey = GrTexture::ComputeKey(fGpu, params, desc, cacheID);
return fTextureCache->hasKey(resourceKey);
}
@ -272,13 +274,13 @@ static void stretchImage(void* dst,
// The desired texture is NPOT and tiled but that isn't supported by
// the current hardware. Resize the texture to be a POT
GrTexture* GrContext::createResizedTexture(const GrTextureDesc& desc,
const GrCacheData& cacheData,
const GrCacheID& cacheID,
void* srcData,
size_t rowBytes,
bool needsFiltering) {
GrTexture* clampedTexture = this->findTexture(desc, cacheData, NULL);
GrTexture* clampedTexture = this->findTexture(desc, cacheID, NULL);
if (NULL == clampedTexture) {
clampedTexture = this->createTexture(NULL, desc, cacheData, srcData, rowBytes);
clampedTexture = this->createTexture(NULL, desc, cacheID, srcData, rowBytes);
if (NULL == clampedTexture) {
return NULL;
@ -350,22 +352,22 @@ GrTexture* GrContext::createResizedTexture(const GrTextureDesc& desc,
GrTexture* GrContext::createTexture(
const GrTextureParams* params,
const GrTextureDesc& desc,
const GrCacheData& cacheData,
const GrCacheID& cacheID,
void* srcData,
size_t rowBytes) {
SK_TRACE_EVENT0("GrContext::createAndLockTexture");
SK_TRACE_EVENT0("GrContext::createTexture");
#if GR_DUMP_TEXTURE_UPLOAD
GrPrintf("GrContext::createAndLockTexture [%d %d]\n", desc.fWidth, desc.fHeight);
GrPrintf("GrContext::createTexture[%d %d]\n", desc.fWidth, desc.fHeight);
#endif
GrResourceKey resourceKey = GrTexture::ComputeKey(fGpu, params, desc, cacheData, false);
GrResourceKey resourceKey = GrTexture::ComputeKey(fGpu, params, desc, cacheID);
SkAutoTUnref<GrTexture> texture;
if (GrTexture::NeedsResizing(resourceKey)) {
texture.reset(this->createResizedTexture(desc, cacheData,
srcData, rowBytes,
GrTexture::NeedsFiltering(resourceKey)));
texture.reset(this->createResizedTexture(desc, cacheID,
srcData, rowBytes,
GrTexture::NeedsFiltering(resourceKey)));
} else {
texture.reset(fGpu->createTexture(desc, srcData, rowBytes));
}
@ -377,15 +379,13 @@ GrTexture* GrContext::createTexture(
return texture;
}
GrTexture* GrContext::lockScratchTexture(const GrTextureDesc& inDesc,
ScratchTexMatch match) {
GrTexture* GrContext::lockScratchTexture(const GrTextureDesc& inDesc, ScratchTexMatch match) {
GrTextureDesc desc = inDesc;
GrCacheData cacheData(GrCacheData::kScratch_CacheID);
GrAssert((desc.fFlags & kRenderTarget_GrTextureFlagBit) ||
!(desc.fFlags & kNoStencil_GrTextureFlagBit));
if (kExact_ScratchTexMatch != match) {
if (kApprox_ScratchTexMatch == match) {
// bin by pow2 with a reasonable min
static const int MIN_SIZE = 256;
desc.fWidth = GrMax(MIN_SIZE, GrNextPow2(desc.fWidth));
@ -399,7 +399,7 @@ GrTexture* GrContext::lockScratchTexture(const GrTextureDesc& inDesc,
bool doubledH = false;
do {
GrResourceKey key = GrTexture::ComputeKey(fGpu, NULL, desc, cacheData, true);
GrResourceKey key = GrTexture::ComputeScratchKey(desc);
// Ensure we have exclusive access to the texture so future 'find' calls don't return it
resource = fTextureCache->find(key, GrResourceCache::kHide_OwnershipFlag);
// if we miss, relax the fit of the flags...
@ -433,10 +433,7 @@ GrTexture* GrContext::lockScratchTexture(const GrTextureDesc& inDesc,
desc.fHeight = origHeight;
SkAutoTUnref<GrTexture> texture(fGpu->createTexture(desc, NULL, 0));
if (NULL != texture) {
GrResourceKey key = GrTexture::ComputeKey(fGpu, NULL,
texture->desc(),
cacheData,
true);
GrResourceKey key = GrTexture::ComputeScratchKey(texture->desc());
// Make the resource exclusive so future 'find' calls don't return it
fTextureCache->addResource(key, texture, GrResourceCache::kHide_OwnershipFlag);
resource = texture;
@ -475,7 +472,7 @@ void GrContext::unlockScratchTexture(GrTexture* texture) {
// If this is a scratch texture we detached it from the cache
// while it was locked (to avoid two callers simultaneously getting
// the same texture).
if (GrTexture::IsScratchTexture(texture->getCacheEntry()->key())) {
if (texture->getCacheEntry()->key().isScratch()) {
fTextureCache->makeNonExclusive(texture->getCacheEntry());
}

View File

@ -11,6 +11,20 @@
#include "GrResourceCache.h"
#include "GrResource.h"
GrResourceKey::ResourceType GrResourceKey::GenerateResourceType() {
static int32_t gNextType = 0;
int32_t type = sk_atomic_inc(&gNextType);
if (type >= (1 << 8 * sizeof(ResourceType))) {
GrCrash("Too many Resource Types");
}
return static_cast<ResourceType>(type);
}
///////////////////////////////////////////////////////////////////////////////
GrResourceEntry::GrResourceEntry(const GrResourceKey& key, GrResource* resource)
: fKey(key), fResource(resource) {
// we assume ownership of the resource, and will unref it when we die
@ -33,36 +47,6 @@ void GrResourceEntry::validate() const {
///////////////////////////////////////////////////////////////////////////////
class GrResourceCache::Key {
typedef GrResourceEntry T;
const GrResourceKey& fKey;
public:
Key(const GrResourceKey& key) : fKey(key) {}
uint32_t getHash() const { return fKey.hashIndex(); }
static bool LT(const T& entry, const Key& key) {
return entry.key() < key.fKey;
}
static bool EQ(const T& entry, const Key& key) {
return entry.key() == key.fKey;
}
#if GR_DEBUG
static uint32_t GetHash(const T& entry) {
return entry.key().hashIndex();
}
static bool LT(const T& a, const T& b) {
return a.key() < b.key();
}
static bool EQ(const T& a, const T& b) {
return a.key() == b.key();
}
#endif
};
///////////////////////////////////////////////////////////////////////////////
GrResourceCache::GrResourceCache(int maxCount, size_t maxBytes) :
fMaxCount(maxCount),
fMaxBytes(maxBytes) {

View File

@ -14,27 +14,12 @@
#include "GrConfig.h"
#include "GrTypes.h"
#include "GrTHashCache.h"
#include "GrBinHashKey.h"
#include "SkTInternalLList.h"
class GrResource;
class GrResourceEntry;
// return true if a<b, or false if b<a
//
#define RET_IF_LT_OR_GT(a, b) \
do { \
if ((a) < (b)) { \
return true; \
} \
if ((b) < (a)) { \
return false; \
} \
} while (0)
/**
* Helper class for GrResourceCache, the Key is used to identify src data for
* a resource. It is identified by 2 32bit data fields which can hold any
* data (uninterpreted by the cache) and a width/height.
*/
class GrResourceKey {
public:
enum {
@ -43,82 +28,118 @@ public:
kHashMask = kHashCount - 1
};
GrResourceKey(uint32_t p0, uint32_t p1, uint32_t p2, uint32_t p3) {
fP[0] = p0;
fP[1] = p1;
fP[2] = p2;
fP[3] = p3;
this->computeHashIndex();
static GrCacheID::Domain ScratchDomain() {
static const GrCacheID::Domain gDomain = GrCacheID::GenerateDomain();
return gDomain;
}
GrResourceKey(uint32_t v[4]) {
memcpy(fP, v, 4 * sizeof(uint32_t));
this->computeHashIndex();
}
/** Uniquely identifies the GrResource subclass in the key to avoid collisions
across resource types. */
typedef uint8_t ResourceType;
/** Flags set by the GrResource subclass. */
typedef uint8_t ResourceFlags;
/** Generate a unique ResourceType */
static ResourceType GenerateResourceType();
/** Creates a key for resource */
GrResourceKey(const GrCacheID& id, ResourceType type, ResourceFlags flags) {
this->init(id.getDomain(), id.getKey(), type, flags);
};
GrResourceKey(const GrResourceKey& src) {
memcpy(fP, src.fP, 4 * sizeof(uint32_t));
#if GR_DEBUG
this->computeHashIndex();
GrAssert(fHashIndex == src.fHashIndex);
#endif
fHashIndex = src.fHashIndex;
fKey = src.fKey;
}
GrResourceKey() {
fKey.fHashedKey.reset();
}
void reset(const GrCacheID& id, ResourceType type, ResourceFlags flags) {
this->init(id.getDomain(), id.getKey(), type, flags);
}
//!< returns hash value [0..kHashMask] for the key
int hashIndex() const { return fHashIndex; }
friend bool operator==(const GrResourceKey& a, const GrResourceKey& b) {
GR_DEBUGASSERT(-1 != a.fHashIndex && -1 != b.fHashIndex);
return 0 == memcmp(a.fP, b.fP, 4 * sizeof(uint32_t));
int getHash() const {
return fKey.fHashedKey.getHash() & kHashMask;
}
friend bool operator!=(const GrResourceKey& a, const GrResourceKey& b) {
GR_DEBUGASSERT(-1 != a.fHashIndex && -1 != b.fHashIndex);
return !(a == b);
bool isScratch() const {
return ScratchDomain() ==
*reinterpret_cast<const GrCacheID::Domain*>(fKey.fHashedKey.getData() +
kCacheIDDomainOffset);
}
friend bool operator<(const GrResourceKey& a, const GrResourceKey& b) {
RET_IF_LT_OR_GT(a.fP[0], b.fP[0]);
RET_IF_LT_OR_GT(a.fP[1], b.fP[1]);
RET_IF_LT_OR_GT(a.fP[2], b.fP[2]);
return a.fP[3] < b.fP[3];
ResourceType getResourceType() const {
return *reinterpret_cast<const ResourceType*>(fKey.fHashedKey.getData() +
kResourceTypeOffset);
}
uint32_t getValue32(int i) const {
GrAssert(i >=0 && i < 4);
return fP[i];
ResourceFlags getResourceFlags() const {
return *reinterpret_cast<const ResourceFlags*>(fKey.fHashedKey.getData() +
kResourceFlagsOffset);
}
int compare(const GrResourceKey& other) const {
return fKey.fHashedKey.compare(other.fKey.fHashedKey);
}
static bool LT(const GrResourceKey& a, const GrResourceKey& b) {
return a.compare(b) < 0;
}
static bool EQ(const GrResourceKey& a, const GrResourceKey& b) {
return 0 == a.compare(b);
}
inline static bool LT(const GrResourceEntry& entry, const GrResourceKey& key);
inline static bool EQ(const GrResourceEntry& entry, const GrResourceKey& key);
inline static bool LT(const GrResourceEntry& a, const GrResourceEntry& b);
inline static bool EQ(const GrResourceEntry& a, const GrResourceEntry& b);
private:
enum {
kCacheIDKeyOffset = 0,
kCacheIDDomainOffset = kCacheIDKeyOffset + sizeof(GrCacheID::Key),
kResourceTypeOffset = kCacheIDDomainOffset + sizeof(GrCacheID::Domain),
kResourceFlagsOffset = kResourceTypeOffset + sizeof(ResourceType),
kPadOffset = kResourceFlagsOffset + sizeof(ResourceFlags),
kKeySize = SkAlign4(kPadOffset),
kPadSize = kKeySize - kPadOffset
};
static uint32_t rol(uint32_t x) {
return (x >> 24) | (x << 8);
}
static uint32_t ror(uint32_t x) {
return (x >> 8) | (x << 24);
}
static uint32_t rohalf(uint32_t x) {
return (x >> 16) | (x << 16);
void init(const GrCacheID::Domain domain,
const GrCacheID::Key& key,
ResourceType type,
ResourceFlags flags) {
union {
uint8_t fKey8[kKeySize];
uint32_t fKey32[kKeySize / 4];
} keyData;
uint8_t* k = keyData.fKey8;
memcpy(k + kCacheIDKeyOffset, key.fData8, sizeof(GrCacheID::Key));
memcpy(k + kCacheIDDomainOffset, &domain, sizeof(GrCacheID::Domain));
memcpy(k + kResourceTypeOffset, &type, sizeof(ResourceType));
memcpy(k + kResourceFlagsOffset, &flags, sizeof(ResourceFlags));
memset(k + kPadOffset, 0, kPadSize);
fKey.fHashedKey.setKeyData(keyData.fKey32);
}
void computeHashIndex() {
uint32_t hash = fP[0] ^ rol(fP[1]) ^ ror(fP[2]) ^ rohalf(fP[3]);
// this way to mix and reduce hash to its index may have to change
// depending on how many bits we allocate to the index
hash ^= hash >> 16;
hash ^= hash >> 8;
fHashIndex = hash & kHashMask;
}
struct Key;
typedef GrTBinHashKey<Key, kKeySize> HashedKey;
uint32_t fP[4];
struct Key {
int compare(const HashedKey& hashedKey) const {
return fHashedKey.compare(hashedKey);
}
HashedKey fHashedKey;
};
// this is computed from the fP... fields
int fHashIndex;
friend class GrContext;
Key fKey;
};
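With the types in this commit, the offset arithmetic in init() resolves to the following layout (a worked example, assuming the uint8_t typedefs above):

// bytes  0..15 : GrCacheID::Key      (kCacheIDKeyOffset == 0)
// byte      16 : GrCacheID::Domain   (kCacheIDDomainOffset == 16)
// byte      17 : ResourceType        (kResourceTypeOffset == 17)
// byte      18 : ResourceFlags       (kResourceFlagsOffset == 18)
// byte      19 : zero padding        (kPadOffset == 19, kPadSize == 1)
// kKeySize == SkAlign4(19) == 20, which setKeyData() hashes as five uint32_t words.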
///////////////////////////////////////////////////////////////////////////////
class GrResourceEntry {
@ -146,6 +167,22 @@ private:
friend class GrDLinkedList;
};
bool GrResourceKey::LT(const GrResourceEntry& entry, const GrResourceKey& key) {
return LT(entry.key(), key);
}
bool GrResourceKey::EQ(const GrResourceEntry& entry, const GrResourceKey& key) {
return EQ(entry.key(), key);
}
bool GrResourceKey::LT(const GrResourceEntry& a, const GrResourceEntry& b) {
return LT(a.key(), b.key());
}
bool GrResourceKey::EQ(const GrResourceEntry& a, const GrResourceEntry& b) {
return EQ(a.key(), b.key());
}
///////////////////////////////////////////////////////////////////////////////
#include "GrTHashCache.h"
@ -289,8 +326,7 @@ private:
void removeInvalidResource(GrResourceEntry* entry);
class Key;
GrTHashTable<GrResourceEntry, Key, 8> fCache;
GrTHashTable<GrResourceEntry, GrResourceKey, 8> fCache;
// We're an internal doubly linked list
typedef SkTInternalLList<GrResourceEntry> EntryList;

View File

@ -13,7 +13,6 @@
#include "GrResourceCache.h"
SK_DEFINE_INST_COUNT(GrStencilBuffer)
GR_DEFINE_RESOURCE_CACHE_TYPE(GrStencilBuffer)
void GrStencilBuffer::transferToCache() {
GrAssert(NULL == this->getCacheEntry());
@ -22,30 +21,28 @@ void GrStencilBuffer::transferToCache() {
}
namespace {
// we should never have more than one stencil buffer with same combo of
// (width,height,samplecount)
void gen_stencil_key_values(int width,
int height,
int sampleCnt,
GrCacheID* cacheID) {
cacheID->fPublicID = GrCacheID::kDefaultPublicCacheID;
cacheID->fResourceSpecific32 = width | (height << 16);
cacheID->fDomain = GrCacheData::kScratch_ResourceDomain;
GrAssert(sampleCnt >= 0 && sampleCnt < 256);
cacheID->fResourceSpecific16 = sampleCnt << 8;
// last 8 bits of 'fResourceSpecific16' is free for flags
// we should never have more than one stencil buffer with the same combo of (width, height, sampleCnt)
void gen_cache_id(int width, int height, int sampleCnt, GrCacheID* cacheID) {
static const GrCacheID::Domain gStencilBufferDomain = GrCacheID::GenerateDomain();
GrCacheID::Key key;
uint32_t* keyData = key.fData32;
keyData[0] = width;
keyData[1] = height;
keyData[2] = sampleCnt;
GR_STATIC_ASSERT(sizeof(key) >= 3 * sizeof(uint32_t));
cacheID->reset(gStencilBufferDomain, key);
}
}
GrResourceKey GrStencilBuffer::ComputeKey(int width,
int height,
int sampleCnt) {
GrCacheID id(GrStencilBuffer::GetResourceType());
gen_stencil_key_values(width, height, sampleCnt, &id);
// All SBs are created internally to attach to RTs so they all use the same domain.
static const GrResourceKey::ResourceType gStencilBufferResourceType =
GrResourceKey::GenerateResourceType();
GrCacheID id;
gen_cache_id(width, height, sampleCnt, &id);
uint32_t v[4];
id.toRaw(v);
return GrResourceKey(v);
// we don't use any flags for SBs currently.
return GrResourceKey(id, gStencilBufferResourceType, 0);
}

View File

@ -12,7 +12,6 @@
#include "GrClipData.h"
#include "GrResource.h"
#include "GrCacheID.h"
class GrRenderTarget;
class GrResourceEntry;
@ -21,7 +20,6 @@ class GrResourceKey;
class GrStencilBuffer : public GrResource {
public:
SK_DECLARE_INST_COUNT(GrStencilBuffer);
GR_DECLARE_RESOURCE_CACHE_TYPE()
virtual ~GrStencilBuffer() {
// TODO: allow SB to be purged and detach itself from rts

View File

@ -226,13 +226,6 @@ void GrTHashTable<T, Key, kHashBits>::unrefAll() {
#if GR_DEBUG
template <typename T, typename Key, size_t kHashBits>
void GrTHashTable<T, Key, kHashBits>::validate() const {
for (size_t i = 0; i < GR_ARRAY_COUNT(fHash); i++) {
if (fHash[i]) {
unsigned hashIndex = hash2Index(Key::GetHash(*fHash[i]));
GrAssert(hashIndex == i);
}
}
int count = fSorted.count();
for (int i = 1; i < count; i++) {
GrAssert(Key::LT(*fSorted[i - 1], *fSorted[i]) ||

View File

@ -15,7 +15,6 @@
#include "GrResourceCache.h"
SK_DEFINE_INST_COUNT(GrTexture)
GR_DEFINE_RESOURCE_CACHE_TYPE(GrTexture)
/**
* This method allows us to interrupt the normal deletion process and place
@ -116,97 +115,73 @@ void GrTexture::validateDesc() const {
}
}
// These flags need to fit in <= 8 bits so they can be folded into the texture
// These flags need to fit in a GrResourceKey::ResourceFlags so they can be folded into the texture
// key
enum TextureBits {
/*
* The kNPOT bit is set when the texture is NPOT and is being repeated
* but the hardware doesn't support that feature.
enum TextureFlags {
/**
* The kStretchToPOT bit is set when the texture is NPOT and is being repeated but the
* hardware doesn't support that feature.
*/
kNPOT_TextureBit = 0x1,
/*
* The kFilter bit can only be set when the kNPOT flag is set and indicates
* whether the resizing of the texture should use filtering. This is
* to handle cases where the original texture is indexed to disable
* filtering.
kStretchToPOT_TextureFlag = 0x1,
/**
* The kFilter bit can only be set when the kStretchToPOT flag is set and indicates whether the
* stretched texture should be bilerp filtered or point sampled.
*/
kFilter_TextureBit = 0x2,
/*
* The kScratch bit is set if the texture is being used as a scratch
* texture.
*/
kScratch_TextureBit = 0x4,
kFilter_TextureFlag = 0x2,
};
namespace {
void gen_texture_key_values(const GrGpu* gpu,
const GrTextureParams* params,
const GrTextureDesc& desc,
const GrCacheData& cacheData,
bool scratch,
GrCacheID* cacheID) {
uint64_t clientKey = cacheData.fClientCacheID;
if (scratch) {
// Instead of a client-provided key of the texture contents
// we create a key from the descriptor.
GrAssert(GrCacheData::kScratch_CacheID == clientKey);
clientKey = (desc.fFlags << 8) | ((uint64_t) desc.fConfig << 32);
}
cacheID->fPublicID = clientKey;
cacheID->fDomain = cacheData.fResourceDomain;
// we assume we only need 16 bits of width and height
// assert that texture creation will fail anyway if this assumption
// would cause key collisions.
GrAssert(gpu->getCaps().maxTextureSize() <= SK_MaxU16);
cacheID->fResourceSpecific32 = desc.fWidth | (desc.fHeight << 16);
GrAssert(desc.fSampleCnt >= 0 && desc.fSampleCnt < 256);
cacheID->fResourceSpecific16 = desc.fSampleCnt << 8;
if (!gpu->getCaps().npotTextureTileSupport()) {
bool isPow2 = GrIsPow2(desc.fWidth) && GrIsPow2(desc.fHeight);
bool tiled = NULL != params && params->isTiled();
if (tiled && !isPow2) {
cacheID->fResourceSpecific16 |= kNPOT_TextureBit;
GrResourceKey::ResourceFlags get_texture_flags(const GrGpu* gpu,
const GrTextureParams* params,
const GrTextureDesc& desc) {
GrResourceKey::ResourceFlags flags = 0;
bool tiled = NULL != params && params->isTiled();
if (tiled && !gpu->getCaps().npotTextureTileSupport()) {
if (!GrIsPow2(desc.fWidth) || !GrIsPow2(desc.fHeight)) {
flags |= kStretchToPOT_TextureFlag;
if (params->isBilerp()) {
cacheID->fResourceSpecific16 |= kFilter_TextureBit;
flags |= kFilter_TextureFlag;
}
}
}
return flags;
}
if (scratch) {
cacheID->fResourceSpecific16 |= kScratch_TextureBit;
}
GrResourceKey::ResourceType texture_resource_type() {
static const GrResourceKey::ResourceType gType = GrResourceKey::GenerateResourceType();
return gType;
}
}
GrResourceKey GrTexture::ComputeKey(const GrGpu* gpu,
const GrTextureParams* params,
const GrTextureDesc& desc,
const GrCacheData& cacheData,
bool scratch) {
GrCacheID id(GrTexture::GetResourceType());
gen_texture_key_values(gpu, params, desc, cacheData, scratch, &id);
const GrCacheID& cacheID) {
GrResourceKey::ResourceFlags flags = get_texture_flags(gpu, params, desc);
return GrResourceKey(cacheID, texture_resource_type(), flags);
}
uint32_t v[4];
id.toRaw(v);
return GrResourceKey(v);
GrResourceKey GrTexture::ComputeScratchKey(const GrTextureDesc& desc) {
GrCacheID::Key idKey;
// Instead of a client-provided key of the texture contents we create a key from the
// descriptor.
GR_STATIC_ASSERT(sizeof(idKey) >= 12);
GrAssert(desc.fHeight < (1 << 16));
GrAssert(desc.fWidth < (1 << 16));
idKey.fData32[0] = (desc.fWidth) | (desc.fHeight << 16);
idKey.fData32[1] = desc.fConfig | desc.fSampleCnt << 16;
idKey.fData32[2] = desc.fFlags;
static const int kPadSize = sizeof(idKey) - 12;
memset(idKey.fData8 + 12, 0, kPadSize);
GrCacheID cacheID(GrResourceKey::ScratchDomain(), idKey);
return GrResourceKey(cacheID, texture_resource_type(), 0);
}
bool GrTexture::NeedsResizing(const GrResourceKey& key) {
return 0 != (key.getValue32(3) & kNPOT_TextureBit);
}
bool GrTexture::IsScratchTexture(const GrResourceKey& key) {
return 0 != (key.getValue32(3) & kScratch_TextureBit);
return SkToBool(key.getResourceFlags() & kStretchToPOT_TextureFlag);
}
bool GrTexture::NeedsFiltering(const GrResourceKey& key) {
return 0 != (key.getValue32(3) & kFilter_TextureBit);
return SkToBool(key.getResourceFlags() & kFilter_TextureFlag);
}
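A worked view of the scratch key packed in ComputeScratchKey() above (valid given the 16-bit width/height bounds the GrAsserts enforce):

// fData32[0]   : desc.fWidth | (desc.fHeight << 16)
// fData32[1]   : desc.fConfig | (desc.fSampleCnt << 16)
// fData32[2]   : desc.fFlags
// bytes 12..15 : zeroed padding
// The GrCacheID is placed in GrResourceKey::ScratchDomain(), which is exactly
// what GrResourceKey::isScratch() tests for.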

View File

@ -1104,7 +1104,7 @@ bool SkGpuDevice::shouldTileBitmap(const SkBitmap& bitmap,
return false;
}
// if the entire texture is already in our cache then no reason to tile it
if (this->isBitmapInTextureCache(bitmap, params)) {
if (GrIsBitmapInCache(fContext, bitmap, &params)) {
return false;
}
@ -1848,22 +1848,6 @@ void SkGpuDevice::flush() {
///////////////////////////////////////////////////////////////////////////////
bool SkGpuDevice::isBitmapInTextureCache(const SkBitmap& bitmap,
const GrTextureParams& params) const {
uint64_t key = bitmap.getGenerationID();
key |= ((uint64_t) bitmap.pixelRefOffset()) << 32;
GrTextureDesc desc;
desc.fWidth = bitmap.width();
desc.fHeight = bitmap.height();
desc.fConfig = SkBitmapConfig2GrPixelConfig(bitmap.config());
GrCacheData cacheData(key);
return this->context()->isTextureInCache(desc, cacheData, &params);
}
SkDevice* SkGpuDevice::onCreateCompatibleDevice(SkBitmap::Config config,
int width, int height,
bool isOpaque,
@ -1883,10 +1867,10 @@ SkDevice* SkGpuDevice::onCreateCompatibleDevice(SkBitmap::Config config,
#if CACHE_COMPATIBLE_DEVICE_TEXTURES
// layers are never draw in repeat modes, so we can request an approx
// match and ignore any padding.
GrContext::ScratchTexMatch matchType = (kSaveLayer_Usage == usage) ?
GrContext::kApprox_ScratchTexMatch :
GrContext::kExact_ScratchTexMatch;
texture = fContext->lockScratchTexture(desc, matchType);
const GrContext::ScratchTexMatch match = (kSaveLayer_Usage == usage) ?
GrContext::kApprox_ScratchTexMatch :
GrContext::kExact_ScratchTexMatch;
texture = fContext->lockScratchTexture(desc, match);
#else
tunref.reset(fContext->createUncachedTexture(desc, NULL, 0));
texture = tunref.get();

View File

@ -56,8 +56,34 @@ static void build_compressed_data(void* buffer, const SkBitmap& bitmap) {
////////////////////////////////////////////////////////////////////////////////
void generate_bitmap_cache_id(const SkBitmap& bitmap, GrCacheID* id) {
// Our id includes the offset, width, and height so that bitmaps created by extractSubset()
// are unique.
uint32_t genID = bitmap.getGenerationID();
size_t offset = bitmap.pixelRefOffset();
int16_t width = static_cast<int16_t>(bitmap.width());
int16_t height = static_cast<int16_t>(bitmap.height());
GrCacheID::Key key;
memcpy(key.fData8, &genID, 4);
memcpy(key.fData8 + 4, &width, 2);
memcpy(key.fData8 + 6, &height, 2);
memcpy(key.fData8 + 8, &offset, sizeof(size_t));
GR_STATIC_ASSERT(sizeof(key) >= 8 + sizeof(size_t));
static const GrCacheID::Domain gBitmapTextureDomain = GrCacheID::GenerateDomain();
id->reset(gBitmapTextureDomain, key);
}
void generate_bitmap_texture_desc(const SkBitmap& bitmap, GrTextureDesc* desc) {
desc->fFlags = kNone_GrTextureFlags;
desc->fWidth = bitmap.width();
desc->fHeight = bitmap.height();
desc->fConfig = SkBitmapConfig2GrPixelConfig(bitmap.config());
desc->fSampleCnt = 0;
}
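The key built by generate_bitmap_cache_id() above lays out as follows (a worked reading of the memcpy calls, not new behavior):

// bytes 0..3 : bitmap generation ID (uint32_t)
// bytes 4..5 : width  (int16_t)
// bytes 6..7 : height (int16_t)
// bytes 8..  : pixelRefOffset (size_t: 8 bytes on 64-bit builds, 4 on 32-bit;
//              the GR_STATIC_ASSERT only guarantees the 16-byte Key is big
//              enough, so on 32-bit builds the last 4 bytes are left unwritten)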
static GrTexture* sk_gr_create_bitmap_texture(GrContext* ctx,
uint64_t key,
bool cache,
const GrTextureParams* params,
const SkBitmap& origBitmap) {
SkAutoLockPixels alp(origBitmap);
@ -71,11 +97,7 @@ static GrTexture* sk_gr_create_bitmap_texture(GrContext* ctx,
const SkBitmap* bitmap = &origBitmap;
GrTextureDesc desc;
desc.fWidth = bitmap->width();
desc.fHeight = bitmap->height();
desc.fConfig = SkBitmapConfig2GrPixelConfig(bitmap->config());
GrCacheData cacheData(key);
generate_bitmap_texture_desc(*bitmap, &desc);
if (SkBitmap::kIndex8_Config == bitmap->config()) {
// build_compressed_data doesn't do npot->pot expansion
@ -91,31 +113,33 @@ static GrTexture* sk_gr_create_bitmap_texture(GrContext* ctx,
// our compressed data will be trimmed, so pass width() for its
// "rowBytes", since they are the same now.
if (GrCacheData::kScratch_CacheID != key) {
return ctx->createTexture(params, desc, cacheData,
if (cache) {
GrCacheID cacheID;
generate_bitmap_cache_id(origBitmap, &cacheID);
return ctx->createTexture(params, desc, cacheID,
storage.get(),
bitmap->width());
} else {
GrTexture* result = ctx->lockScratchTexture(desc,
GrContext::kExact_ScratchTexMatch);
GrContext::kExact_ScratchTexMatch);
result->writePixels(0, 0, bitmap->width(),
bitmap->height(), desc.fConfig,
storage.get());
return result;
}
} else {
origBitmap.copyTo(&tmpBitmap, SkBitmap::kARGB_8888_Config);
// now bitmap points to our temp, which has been promoted to 32bits
bitmap = &tmpBitmap;
desc.fConfig = SkBitmapConfig2GrPixelConfig(bitmap->config());
}
}
desc.fConfig = SkBitmapConfig2GrPixelConfig(bitmap->config());
if (GrCacheData::kScratch_CacheID != key) {
if (cache) {
// This texture is likely to be used again so leave it in the cache
// but locked.
return ctx->createTexture(params, desc, cacheData,
GrCacheID cacheID;
generate_bitmap_cache_id(origBitmap, &cacheID);
return ctx->createTexture(params, desc, cacheID,
bitmap->getPixels(),
bitmap->rowBytes());
} else {
@ -124,8 +148,7 @@ static GrTexture* sk_gr_create_bitmap_texture(GrContext* ctx,
// cache so no one else can find it. Additionally, once unlocked, the
// scratch texture will go to the end of the list for purging so will
// likely be available for this volatile bitmap the next time around.
GrTexture* result = ctx->lockScratchTexture(desc,
GrContext::kExact_ScratchTexMatch);
GrTexture* result = ctx->lockScratchTexture(desc, GrContext::kExact_ScratchTexMatch);
result->writePixels(0, 0,
bitmap->width(), bitmap->height(),
desc.fConfig,
@ -135,32 +158,37 @@ static GrTexture* sk_gr_create_bitmap_texture(GrContext* ctx,
}
}
///////////////////////////////////////////////////////////////////////////////
bool GrIsBitmapInCache(const GrContext* ctx,
const SkBitmap& bitmap,
const GrTextureParams* params) {
GrCacheID cacheID;
generate_bitmap_cache_id(bitmap, &cacheID);
GrTextureDesc desc;
generate_bitmap_texture_desc(bitmap, &desc);
return ctx->isTextureInCache(desc, cacheID, params);
}
GrTexture* GrLockCachedBitmapTexture(GrContext* ctx,
const SkBitmap& bitmap,
const GrTextureParams* params) {
GrTexture* result = NULL;
if (!bitmap.isVolatile()) {
// If the bitmap isn't changing try to find a cached copy first
uint64_t key = bitmap.getGenerationID();
key |= ((uint64_t) bitmap.pixelRefOffset()) << 32;
bool cache = !bitmap.isVolatile();
if (cache) {
// If the bitmap isn't changing try to find a cached copy first.
GrCacheID cacheID;
generate_bitmap_cache_id(bitmap, &cacheID);
GrTextureDesc desc;
desc.fWidth = bitmap.width();
desc.fHeight = bitmap.height();
desc.fConfig = SkBitmapConfig2GrPixelConfig(bitmap.config());
generate_bitmap_texture_desc(bitmap, &desc);
GrCacheData cacheData(key);
result = ctx->findTexture(desc, cacheData, params);
if (NULL == result) {
// didn't find a cached copy so create one
result = sk_gr_create_bitmap_texture(ctx, key, params, bitmap);
}
} else {
result = sk_gr_create_bitmap_texture(ctx, GrCacheData::kScratch_CacheID, params, bitmap);
result = ctx->findTexture(desc, cacheID, params);
}
if (NULL == result) {
result = sk_gr_create_bitmap_texture(ctx, cache, params, bitmap);
}
if (NULL == result) {
GrPrintf("---- failed to create texture for cache [%d %d]\n",

View File

@ -17,9 +17,6 @@
#define VALIDATE
#endif
GR_DEFINE_RESOURCE_CACHE_DOMAIN(GrTextureStripAtlas, GetTextureStripAtlasDomain)
int32_t GrTextureStripAtlas::gCacheCount = 0;
GrTHashTable<GrTextureStripAtlas::AtlasEntry,
@ -73,7 +70,7 @@ GrTextureStripAtlas* GrTextureStripAtlas::GetAtlas(const GrTextureStripAtlas::De
}
GrTextureStripAtlas::GrTextureStripAtlas(GrTextureStripAtlas::Desc desc)
: fCacheID(sk_atomic_inc(&gCacheCount))
: fCacheKey(sk_atomic_inc(&gCacheCount))
, fLockedRows(0)
, fDesc(desc)
, fNumRows(desc.fHeight / desc.fRowHeight)
@ -198,11 +195,16 @@ void GrTextureStripAtlas::lockTexture() {
texDesc.fWidth = fDesc.fWidth;
texDesc.fHeight = fDesc.fHeight;
texDesc.fConfig = fDesc.fConfig;
GrCacheData cacheData(fCacheID);
cacheData.fResourceDomain = GetTextureStripAtlasDomain();
fTexture = fDesc.fContext->findTexture(texDesc, cacheData, &params);
static const GrCacheID::Domain gTextureStripAtlasDomain = GrCacheID::GenerateDomain();
GrCacheID::Key key;
*key.fData32 = fCacheKey;
memset(key.fData32 + 1, 0, sizeof(key) - sizeof(uint32_t));
GrCacheID cacheID(gTextureStripAtlasDomain, key);
fTexture = fDesc.fContext->findTexture(texDesc, cacheID, &params);
if (NULL == fTexture) {
fTexture = fDesc.fContext->createTexture(&params, texDesc, cacheData, NULL, 0);
fTexture = fDesc.fContext->createTexture(&params, texDesc, cacheID, NULL, 0);
// This is a new texture, so all of our cache info is now invalid
this->initLRU();
fKeyTable.rewind();

View File

@ -21,8 +21,6 @@
*/
class GrTextureStripAtlas {
public:
GR_DECLARE_RESOURCE_CACHE_DOMAIN(GetTextureStripAtlasDomain)
/**
* Descriptor struct which we'll use as a hash table key
**/
@ -157,7 +155,7 @@ private:
// A unique ID for this texture (formed with: gCacheCount++), so we can be sure that if we
// get a texture back from the texture cache, that it's the same one we last used.
const uint64_t fCacheID;
const int32_t fCacheKey;
// Total locks on all rows (when this reaches zero, we can unlock our texture)
int32_t fLockedRows;