Revert r4287. Large numbers of textures created by random gradients can cause poor driver performance (at least on Mac)

BUG=670



git-svn-id: http://skia.googlecode.com/svn/trunk@4306 2bbb7eff-a529-9590-31e7-b0007b416f81
bsalomon@google.com 2012-06-22 15:15:59 +00:00
parent 89e615d260
commit 07fc0d178e
5 changed files with 64 additions and 37 deletions

View File

@@ -83,6 +83,11 @@ public:
      */
     void freeGpuResources();
 
+    /**
+     * Returns the number of bytes of GPU memory hosted by the texture cache.
+     */
+    size_t getGpuTextureCacheBytes() const;
+
     ///////////////////////////////////////////////////////////////////////////
     // Textures
@@ -212,28 +217,25 @@ public:
                        int height) const;
 
     /**
-     * Return the current texture cache budget in bytes.
+     * Return the current texture cache limits.
+     *
+     * @param maxTextures If non-null, returns maximum number of textures that
+     *                    can be held in the cache.
+     * @param maxTextureBytes If non-null, returns maximum number of bytes of
+     *                        texture memory that can be held in the cache.
      */
-    size_t getTextureCacheBudget() const;
+    void getTextureCacheLimits(int* maxTextures, size_t* maxTextureBytes) const;
 
     /**
-     * Specify the texture cache budget. If the current cache size exceeds the
-     * budget it will immediately be purged to be within the budget.
+     * Specify the texture cache limits. If the current cache exceeds either
+     * of these, it will be purged (LRU) to keep the cache within these limits.
      *
+     * @param maxTextures The maximum number of textures that can be held in
+     *                    the cache.
      * @param maxTextureBytes The maximum number of bytes of texture memory
      *                        that can be held in the cache.
      */
-    void setTextureCacheBudget(size_t maxTextureBytes);
-
-    // DEPRECATED, this will be deleted soon.
-    void setTextureCacheLimits(int ignored, size_t maxTextureBytes) {
-        this->setTextureCacheBudget(maxTextureBytes);
-    }
-
-    /**
-     * Returns the current number of bytes of GPU memory hosted by the texture
-     * cache.
-     */
-    size_t getGpuTextureCacheBytes() const;
+    void setTextureCacheLimits(int maxTextures, size_t maxTextureBytes);
 
     /**
      * Return the max width or height of a texture supported by the current gpu
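
A minimal usage sketch of the restored limits API declared above (not part of this commit; the helper name and call site are hypothetical, and a valid GrContext* is assumed):

```cpp
// Hypothetical call site exercising the restored GrContext cache-limit API.
void tuneTextureCache(GrContext* context) {
    int maxTextures = 0;
    size_t maxTextureBytes = 0;
    context->getTextureCacheLimits(&maxTextures, &maxTextureBytes);

    // Halve the byte budget while keeping the texture-count limit unchanged.
    // If the cache already exceeds the new limit it is purged (LRU) right away.
    context->setTextureCacheLimits(maxTextures, maxTextureBytes / 2);

    // Query how much GPU memory the texture cache currently holds.
    size_t usedBytes = context->getGpuTextureCacheBytes();
    (void)usedBytes;
}
```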

View File

@@ -46,7 +46,8 @@
 #define GR_DEBUG_PARTIAL_COVERAGE_CHECK 0
 #endif
 
-static const size_t kDefaultTextureCacheBudget = 16 * 1024 * 1024;
+static const size_t MAX_TEXTURE_CACHE_COUNT = 256;
+static const size_t MAX_TEXTURE_CACHE_BYTES = 16 * 1024 * 1024;
 
 static const size_t DRAW_BUFFER_VBPOOL_BUFFER_SIZE = 1 << 15;
 static const int DRAW_BUFFER_VBPOOL_PREALLOC_BUFFERS = 4;
@@ -558,12 +559,13 @@ GrTexture* GrContext::createUncachedTexture(const GrTextureDesc& descIn,
     return fGpu->createTexture(descCopy, srcData, rowBytes);
 }
 
-size_t GrContext::getTextureCacheBudget() const {
-    return fTextureCache->getBudget();
+void GrContext::getTextureCacheLimits(int* maxTextures,
+                                      size_t* maxTextureBytes) const {
+    fTextureCache->getLimits(maxTextures, maxTextureBytes);
 }
 
-void GrContext::setTextureCacheBudget(size_t maxTextureBytes) {
-    fTextureCache->setBudget(maxTextureBytes);
+void GrContext::setTextureCacheLimits(int maxTextures, size_t maxTextureBytes) {
+    fTextureCache->setLimits(maxTextures, maxTextureBytes);
 }
 
 int GrContext::getMaxTextureSize() const {
@@ -1752,7 +1754,8 @@ GrContext::GrContext(GrGpu* gpu) {
     fPathRendererChain = NULL;
     fSoftwarePathRenderer = NULL;
-    fTextureCache = new GrResourceCache(kDefaultTextureCacheBudget);
+    fTextureCache = new GrResourceCache(MAX_TEXTURE_CACHE_COUNT,
+                                        MAX_TEXTURE_CACHE_BYTES);
     fFontCache = new GrFontCache(fGpu);
     fLastDrawCategory = kUnbuffered_DrawCategory;
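
The defaults restored here cap the cache at 256 textures and 16 MB. Per the commit message, the problem is the number of gradient textures rather than their total size; a back-of-the-envelope sketch shows why the byte budget alone barely constrains them (the 256x1 RGBA ramp size is an assumption about typical gradient textures, not something stated in this commit):

```cpp
#include <cstddef>
#include <cstdio>

int main() {
    const size_t kBytesPerRamp = 256 * 1 * 4;        // assumed 256x1 RGBA gradient ramp, 1 KB
    const size_t kByteLimit    = 16 * 1024 * 1024;   // MAX_TEXTURE_CACHE_BYTES
    const int    kCountLimit   = 256;                // MAX_TEXTURE_CACHE_COUNT

    // A pure byte budget would tolerate roughly 16K live gradient ramps before
    // purging anything; the restored count limit forces eviction after 256.
    std::printf("ramps allowed by byte limit:  %zu\n", kByteLimit / kBytesPerRamp);
    std::printf("ramps allowed by count limit: %d\n", kCountLimit);
    return 0;
}
```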

View File

@@ -34,8 +34,9 @@ void GrResourceEntry::validate() const {
 
 ///////////////////////////////////////////////////////////////////////////////
 
-GrResourceCache::GrResourceCache(size_t maxBytes) {
-    fMaxBytes = maxBytes;
+GrResourceCache::GrResourceCache(int maxCount, size_t maxBytes) :
+        fMaxCount(maxCount),
+        fMaxBytes(maxBytes) {
     fEntryCount = 0;
     fUnlockedEntryCount = 0;
     fEntryBytes = 0;
@@ -52,9 +53,19 @@ GrResourceCache::~GrResourceCache() {
     this->removeAll();
 }
 
-void GrResourceCache::setBudget(size_t maxResourceBytes) {
-    bool smaller = maxResourceBytes < fMaxBytes;
+void GrResourceCache::getLimits(int* maxResources, size_t* maxResourceBytes) const{
+    if (maxResources) {
+        *maxResources = fMaxCount;
+    }
+    if (maxResourceBytes) {
+        *maxResourceBytes = fMaxBytes;
+    }
+}
+
+void GrResourceCache::setLimits(int maxResources, size_t maxResourceBytes) {
+    bool smaller = (maxResources < fMaxCount) || (maxResourceBytes < fMaxBytes);
+    fMaxCount = maxResources;
     fMaxBytes = maxResourceBytes;
 
     if (smaller) {
@@ -233,7 +244,6 @@ void GrResourceCache::reattachAndUnlock(GrResourceEntry* entry) {
         fClientDetachedCount -= 1;
         fEntryCount -= 1;
         size_t size = entry->resource()->sizeInBytes();
-        GrAssert(size > 0);
         fClientDetachedBytes -= size;
         fEntryBytes -= size;
     }
@@ -272,7 +282,7 @@ void GrResourceCache::purgeAsNeeded() {
     GrResourceEntry* entry = fTail;
     while (entry && fUnlockedEntryCount) {
         GrAutoResourceCacheValidate atcv(this);
-        if (fEntryBytes <= fMaxBytes) {
+        if (fEntryCount <= fMaxCount && fEntryBytes <= fMaxBytes) {
             withinBudget = true;
             break;
         }
@@ -308,7 +318,9 @@ void GrResourceCache::removeAll() {
     // entry out. Instead change the budget and purge.
 
     int savedMaxBytes = fMaxBytes;
-    fMaxBytes = 0;
+    int savedMaxCount = fMaxCount;
+    fMaxBytes = (size_t) -1;
+    fMaxCount = 0;
     this->purgeAsNeeded();
 
 #if GR_DEBUG
@@ -325,6 +337,7 @@ void GrResourceCache::removeAll() {
 #endif
 
     fMaxBytes = savedMaxBytes;
+    fMaxCount = savedMaxCount;
 }
 
 ///////////////////////////////////////////////////////////////////////////////
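
With this change, purgeAsNeeded() evicts from the LRU tail until both the count and byte limits are satisfied. A self-contained sketch of that policy (simplified and illustrative only; the std::list-based structure and names are assumptions, not Skia's actual entry list):

```cpp
#include <cstddef>
#include <list>

struct Entry {
    size_t bytes;
};

class SimpleLruCache {
public:
    SimpleLruCache(int maxCount, size_t maxBytes)
        : fMaxCount(maxCount), fMaxBytes(maxBytes) {}

    void add(const Entry& e) {
        fEntries.push_front(e);   // most recently used entries live at the head
        fBytes += e.bytes;
        this->purgeAsNeeded();
    }

private:
    void purgeAsNeeded() {
        // Evict from the least recently used tail until BOTH limits hold,
        // mirroring the new (fEntryCount <= fMaxCount && fEntryBytes <= fMaxBytes) check.
        while (!fEntries.empty() &&
               (static_cast<int>(fEntries.size()) > fMaxCount || fBytes > fMaxBytes)) {
            fBytes -= fEntries.back().bytes;
            fEntries.pop_back();
        }
    }

    std::list<Entry> fEntries;
    size_t fBytes = 0;
    int fMaxCount;
    size_t fMaxBytes;
};
```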

View File

@@ -194,22 +194,29 @@ private:
  */
 class GrResourceCache {
 public:
-    GrResourceCache(size_t maxResourceBytes);
+    GrResourceCache(int maxCount, size_t maxBytes);
     ~GrResourceCache();
 
     /**
-     * Returns the cache budget in bytes.
+     * Return the current resource cache limits.
+     *
+     * @param maxResource If non-null, returns maximum number of resources
+     *                    that can be held in the cache.
+     * @param maxBytes If non-null, returns maximum number of bytes of
+     *                 gpu memory that can be held in the cache.
      */
-    size_t getBudget() const { return fMaxBytes; }
+    void getLimits(int* maxResources, size_t* maxBytes) const;
 
     /**
-     * Specify the resource cache budget in bytes of GPU memory. If the current
-     * cache exceeds the budget it will be purged to be within the budget.
+     * Specify the resource cache limits. If the current cache exceeds either
+     * of these, it will be purged (LRU) to keep the cache within these limits.
      *
-     * @param maxResourceBytes The maximum number of bytes of GPU memory that
-     *                         can be held in the cache.
+     * @param maxResources The maximum number of resources that can be held in
+     *                     the cache.
+     * @param maxBytes The maximum number of bytes of resource memory that
+     *                 can be held in the cache.
      */
-    void setBudget(size_t maxResourceBytes);
+    void setLimits(int maxResource, size_t maxResourceBytes);
 
     /**
      * Returns the number of bytes consumed by cached resources.
@@ -300,6 +307,7 @@ private:
     GrResourceEntry* fTail;
 
     // our budget, used in purgeAsNeeded()
+    int fMaxCount;
     size_t fMaxBytes;
 
     // our current stats, related to our budget
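
The getLimits() declaration above treats each out-parameter as optional. A hypothetical call site (not from this commit) illustrating that pattern with the defaults this commit restores:

```cpp
// Construct with the restored defaults (256 entries, 16 MB); either pointer
// passed to getLimits() may independently be NULL.
GrResourceCache cache(256, 16 * 1024 * 1024);

int maxResources = 0;
cache.getLimits(&maxResources, NULL);   // only the count limit is wanted

size_t maxBytes = 0;
cache.getLimits(NULL, &maxBytes);       // only the byte limit is wanted
```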

View File

@@ -1154,7 +1154,8 @@ bool SkGpuDevice::shouldTileBitmap(const SkBitmap& bitmap,
     // assumption here is that sw bitmap size is a good proxy for its size as
     // a texture
     size_t bmpSize = bitmap.getSize();
-    size_t cacheSize = fContext->getTextureCacheBudget();
+    size_t cacheSize;
+    fContext->getTextureCacheLimits(NULL, &cacheSize);
     if (bmpSize < cacheSize / 2) {
         return false;
     }
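
The change above swaps the single byte-budget query for getTextureCacheLimits(), passing NULL for the count since only the byte limit matters here. A hypothetical standalone helper (not Skia code; the name and signature are illustrative) expressing the same heuristic:

```cpp
// Returns true when the bitmap would occupy at least half of the texture
// cache's byte limit, i.e. when tiling is worth considering. Assumes a
// valid GrContext*.
static bool bitmapLargeRelativeToCache(GrContext* context, size_t bmpSize) {
    size_t cacheSize = 0;
    context->getTextureCacheLimits(NULL, &cacheSize);  // count limit not needed
    return bmpSize >= cacheSize / 2;
}
```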