Make GrTextureCache into a generic GrResource cache. Also some GrContext texture interface cleanup.

http://codereview.appspot.com/4815055/


git-svn-id: http://skia.googlecode.com/svn/trunk@1965 2bbb7eff-a529-9590-31e7-b0007b416f81
bsalomon@google.com 2011-07-26 20:45:30 +00:00
parent f131694617
commit 50398bf7f1
10 changed files with 472 additions and 407 deletions

View File

@ -18,16 +18,18 @@
#define GrContext_DEFINED
#include "GrClip.h"
#include "GrTextureCache.h"
#include "GrPaint.h"
#include "GrPathRenderer.h"
class GrFontCache;
class GrGpu;
struct GrGpuStats;
class GrVertexBufferAllocPool;
class GrIndexBufferAllocPool;
class GrInOrderDrawBuffer;
class GrResourceEntry;
class GrResourceCache;
class GrVertexBufferAllocPool;
class GR_API GrContext : public GrRefCnt {
public:
@ -82,39 +84,62 @@ public:
// Textures
/**
* Search for an entry with the same Key. If found, "lock" it and return it.
* If not found, return null.
* Token that refers to an entry in the texture cache. Returned by
* functions that lock textures. Passed to unlockTexture.
*/
GrTextureEntry* findAndLockTexture(GrTextureKey*,
const GrSamplerState&);
class TextureCacheEntry {
public:
TextureCacheEntry() : fEntry(NULL) {}
TextureCacheEntry(const TextureCacheEntry& e) : fEntry(e.fEntry) {}
TextureCacheEntry& operator= (const TextureCacheEntry& e) {
fEntry = e.fEntry;
return *this;
}
GrTexture* texture() const;
void reset() { fEntry = NULL; }
private:
explicit TextureCacheEntry(GrResourceEntry* entry) { fEntry = entry; }
void set(GrResourceEntry* entry) { fEntry = entry; }
GrResourceEntry* cacheEntry() { return fEntry; }
GrResourceEntry* fEntry;
friend class GrContext;
};
/**
* Key generated by client. Should be a unique key on the texture data.
* Does not need to consider the width and height of the texture. Two
* textures with the same TextureKey but different bounds will not collide.
*/
typedef uint64_t TextureKey;
/**
* Search for an entry based on key and dimensions. If found, "lock" it and
* return it. The entry's texture() function will return NULL if not found.
* Must be balanced with an unlockTexture() call.
*/
TextureCacheEntry findAndLockTexture(TextureKey key,
int width,
int height,
const GrSamplerState&);
/**
* Create a new entry, based on the specified key and texture, and return
* its "locked" entry.
*
* Ownership of the texture is transferred to the Entry, which will unref()
* it when we are purged or deleted.
* its "locked" entry. Must call be balanced with an unlockTexture() call.
*/
GrTextureEntry* createAndLockTexture(GrTextureKey* key,
TextureCacheEntry createAndLockTexture(TextureKey key,
const GrSamplerState&,
const GrTextureDesc&,
void* srcData, size_t rowBytes);
/**
* Returns a texture matching the desc. Its contents are unknown. Subsequent
* requests with the same descriptor are not guaranteed to return the same
* texture. The same texture is guaranteed not to be returned again until it is
* unlocked.
*
* Textures created by createAndLockTexture() hide the complications of
* tiling non-power-of-two textures on APIs that don't support this (e.g.
* unextended GLES2). Tiling a npot texture created by lockKeylessTexture on
* such an API will create gaps in the tiling pattern. This includes clamp
* mode. (This may be addressed in a future update.)
* Enum that determines how closely a returned scratch texture must match
* a provided GrTextureDesc.
*/
GrTextureEntry* lockKeylessTexture(const GrTextureDesc& desc);
enum ScratchTexMatch {
/**
* Finds a texture that exactly matches the descriptor.
*/
kExact_ScratchTexMatch,
/**
* Finds a texture that approximately matches the descriptor. Will be
* at least as large in width and height as desc specifies. If desc
@ -123,13 +148,28 @@ public:
* no stencil flag then result will have a stencil. Format and aa level
* will always match.
*/
GrTextureEntry* findApproximateKeylessTexture(const GrTextureDesc& desc);
kApprox_ScratchTexMatch
};
/**
* Returns a texture matching the desc. Its contents are unknown. Subsequent
* requests with the same descriptor are not guaranteed to return the same
* texture. The same texture is guaranteed not to be returned again until it is
* unlocked. Must be balanced with an unlockTexture() call.
*
* Textures created by createAndLockTexture() hide the complications of
* tiling non-power-of-two textures on APIs that don't support this (e.g.
* unextended GLES2). Tiling a npot texture created by lockScratchTexture on
* such an API will create gaps in the tiling pattern. This includes clamp
* mode. (This may be addressed in a future update.)
*/
TextureCacheEntry lockScratchTexture(const GrTextureDesc& desc, ScratchTexMatch match);
/**
* When done with an entry, call unlockTexture(entry) on it, which returns
* it to the cache, where it may be purged.
*/
void unlockTexture(GrTextureEntry* entry);
void unlockTexture(TextureCacheEntry entry);
/**
* Creates a texture that is outside the cache. Does not count against
@ -195,9 +235,6 @@ public:
///////////////////////////////////////////////////////////////////////////
// Platform Surfaces
// GrContext provides an interface for wrapping externally created textures
// and rendertargets in their Gr-equivalents.
/**
* Wraps an existing 3D API surface in a GrObject. desc.fFlags determines
* the type of object returned. If kIsTexture is set the returned object
@ -213,6 +250,7 @@ public:
* on failure.
*/
GrResource* createPlatformSurface(const GrPlatformSurfaceDesc& desc);
/**
* Reads the current target object (e.g. FBO or IDirect3DSurface9*) and
* viewport state from the underlying 3D API and wraps it in a
@ -560,7 +598,7 @@ private:
DrawCategory fLastDrawCategory;
GrGpu* fGpu;
GrTextureCache* fTextureCache;
GrResourceCache* fTextureCache;
GrFontCache* fFontCache;
GrPathRenderer* fCustomPathRenderer;
@ -597,10 +635,6 @@ private:
static void SetPaint(const GrPaint& paint, GrDrawTarget* target);
bool finalizeTextureKey(GrTextureKey*,
const GrSamplerState&,
bool keyless) const;
GrDrawTarget* prepareToDraw(const GrPaint& paint, DrawCategory drawType);
void drawClipIntoStencil();
@ -680,24 +714,54 @@ private:
};
/**
* Unlocks a texture entry when this goes out of scope. Entry may be NULL.
* Gets and locks a scratch texture from a descriptor using
* either exact or approximate criteria. Unlocks texture in
* the destructor.
*/
class GrAutoUnlockTextureEntry : ::GrNoncopyable {
class GrAutoScratchTexture : ::GrNoncopyable {
public:
GrAutoUnlockTextureEntry(GrContext* context, GrTextureEntry* entry)
: fContext(context)
, fEntry(entry) {
GrAutoScratchTexture()
: fContext(NULL) {
}
~GrAutoUnlockTextureEntry() {
if (fContext && fEntry) {
GrAutoScratchTexture(GrContext* context,
const GrTextureDesc& desc,
GrContext::ScratchTexMatch match =
GrContext::kApprox_ScratchTexMatch)
: fContext(NULL) {
this->set(context, desc, match);
}
~GrAutoScratchTexture() {
if (NULL != fContext) {
fContext->unlockTexture(fEntry);
}
}
GrTexture* texture() { return fEntry->texture(); }
GrTexture* set(GrContext* context,
const GrTextureDesc& desc,
GrContext::ScratchTexMatch match =
GrContext::kApprox_ScratchTexMatch) {
if (NULL != fContext) {
fContext->unlockTexture(fEntry);
}
fContext = context;
if (NULL != fContext) {
fEntry = fContext->lockScratchTexture(desc, match);
GrTexture* ret = fEntry.texture();
if (NULL == ret) {
fContext = NULL;
}
return ret;
} else {
return NULL;
}
}
GrTexture* texture() { return fEntry.texture(); }
private:
GrContext* fContext;
GrTextureEntry* fEntry;
GrContext::TextureCacheEntry fEntry;
};
#endif
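
The header changes above replace the old GrTextureEntry*/GrTextureKey* interface with the value-type TextureCacheEntry, a 64-bit client TextureKey, and the GrAutoScratchTexture helper. A minimal caller-side sketch of the new contract follows; the function name and the way the key and pixel data are obtained are illustrative only, not part of this change.

// Hypothetical caller: every lock must be balanced by unlockTexture(),
// and scratch textures are easiest to manage through GrAutoScratchTexture.
void drawWithCachedTexture(GrContext* context,
                           GrContext::TextureKey key,   // client-generated, unique per source data
                           const GrSamplerState& sampler,
                           const GrTextureDesc& desc,
                           void* pixels, size_t rowBytes) {
    // Keyed path: try the cache, create on a miss. texture() is NULL on failure.
    GrContext::TextureCacheEntry entry =
        context->findAndLockTexture(key, desc.fWidth, desc.fHeight, sampler);
    if (NULL == entry.texture()) {
        entry = context->createAndLockTexture(key, sampler, desc, pixels, rowBytes);
    }
    if (NULL != entry.texture()) {
        // ... draw using entry.texture() ...
        context->unlockTexture(entry);          // balance the lock
    }

    // Scratch path: contents are undefined; the destructor unlocks for us.
    GrAutoScratchTexture scratch(context, desc, GrContext::kApprox_ScratchTexMatch);
    if (NULL != scratch.texture()) {
        // ... render into scratch.texture()->asRenderTarget() ...
    }
}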

View File

@ -22,7 +22,7 @@
#include "GrInOrderDrawBuffer.h"
#include "GrPathRenderer.h"
#include "GrPathUtils.h"
#include "GrTextureCache.h"
#include "GrResourceCache.h"
#include "GrTextStrike.h"
#include "SkTrace.h"
@ -135,41 +135,66 @@ int GrContext::PaintStageVertexLayoutBits(
enum {
kNPOTBit = 0x1,
kFilterBit = 0x2,
kKeylessBit = 0x4,
kScratchBit = 0x4,
};
bool GrContext::finalizeTextureKey(GrTextureKey* key,
const GrSamplerState& sampler,
bool keyless) const {
uint32_t bits = 0;
uint16_t width = key->width();
uint16_t height = key->height();
GrTexture* GrContext::TextureCacheEntry::texture() const {
if (NULL == fEntry) {
return NULL;
} else {
return (GrTexture*) fEntry->resource();
}
}
if (!fGpu->npotTextureTileSupport()) {
namespace {
// returns true if this is a "special" texture because of gpu NPOT limitations
bool gen_texture_key_values(const GrGpu* gpu,
const GrSamplerState& sampler,
GrContext::TextureKey clientKey,
int width,
int height,
bool scratch,
uint32_t v[4]) {
GR_STATIC_ASSERT(sizeof(GrContext::TextureKey) == sizeof(uint64_t));
// we assume we only need 16 bits of width and height
// assert that texture creation will fail anyway if this assumption
// would cause key collisions.
GrAssert(gpu->maxTextureSize() <= SK_MaxU16);
v[0] = clientKey & 0xffffffffUL;
v[1] = (clientKey >> 32) & 0xffffffffUL;
v[2] = width | (height << 16);
v[3] = 0;
if (!gpu->npotTextureTileSupport()) {
bool isPow2 = GrIsPow2(width) && GrIsPow2(height);
bool tiled = (sampler.getWrapX() != GrSamplerState::kClamp_WrapMode) ||
(sampler.getWrapY() != GrSamplerState::kClamp_WrapMode);
if (tiled && !isPow2) {
bits |= kNPOTBit;
v[3] |= kNPOTBit;
if (GrSamplerState::kNearest_Filter != sampler.getFilter()) {
bits |= kFilterBit;
v[3] |= kFilterBit;
}
}
}
if (keyless) {
bits |= kKeylessBit;
if (scratch) {
v[3] |= kScratchBit;
}
key->finalize(bits);
return 0 != bits;
return v[3] & kNPOTBit;
}
}
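
For concreteness, the packing performed by gen_texture_key_values above lays the 64-bit client key across v[0]/v[1], the 16-bit width and height in v[2], and the NPOT/filter/scratch bits in v[3]. A standalone sketch (not part of the change) with worked values:

#include <stdint.h>
#include <stdio.h>

// Same layout as gen_texture_key_values: 64-bit client key in v[0]/v[1],
// width/height packed into v[2], flag bits (NPOT/filter/scratch) in v[3].
static void pack_key(uint64_t clientKey, int width, int height,
                     uint32_t flags, uint32_t v[4]) {
    v[0] = (uint32_t)(clientKey & 0xffffffffULL);
    v[1] = (uint32_t)((clientKey >> 32) & 0xffffffffULL);
    v[2] = (uint32_t)(width | (height << 16));
    v[3] = flags;
}

int main() {
    uint32_t v[4];
    pack_key(0x1234567890ABCDEFULL, 640, 480, 0x4 /* scratch bit */, v);
    // prints: 90abcdef 12345678 01e00280 00000004
    printf("%08x %08x %08x %08x\n", v[0], v[1], v[2], v[3]);
    return 0;
}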
GrTextureEntry* GrContext::findAndLockTexture(GrTextureKey* key,
GrContext::TextureCacheEntry GrContext::findAndLockTexture(TextureKey key,
int width,
int height,
const GrSamplerState& sampler) {
finalizeTextureKey(key, sampler, false);
return fTextureCache->findAndLock(*key);
uint32_t v[4];
gen_texture_key_values(fGpu, sampler, key, width, height, false, v);
GrResourceKey resourceKey(v);
return TextureCacheEntry(fTextureCache->findAndLock(resourceKey));
}
static void stretchImage(void* dst,
@ -199,32 +224,34 @@ static void stretchImage(void* dst,
}
}
GrTextureEntry* GrContext::createAndLockTexture(GrTextureKey* key,
GrContext::TextureCacheEntry GrContext::createAndLockTexture(TextureKey key,
const GrSamplerState& sampler,
const GrTextureDesc& desc,
void* srcData, size_t rowBytes) {
SK_TRACE_EVENT0("GrContext::createAndLockTexture");
GrAssert(key->width() == desc.fWidth);
GrAssert(key->height() == desc.fHeight);
#if GR_DUMP_TEXTURE_UPLOAD
GrPrintf("GrContext::createAndLockTexture [%d %d]\n", desc.fWidth, desc.fHeight);
#endif
GrTextureEntry* entry = NULL;
bool special = finalizeTextureKey(key, sampler, false);
if (special) {
GrTextureEntry* clampEntry;
GrTextureKey clampKey(*key);
clampEntry = findAndLockTexture(&clampKey, GrSamplerState::ClampNoFilter());
TextureCacheEntry entry;
uint32_t v[4];
bool special = gen_texture_key_values(fGpu, sampler, key,
desc.fWidth, desc.fHeight, false, v);
GrResourceKey resourceKey(v);
if (NULL == clampEntry) {
clampEntry = createAndLockTexture(&clampKey,
if (special) {
TextureCacheEntry clampEntry =
findAndLockTexture(key, desc.fWidth, desc.fHeight,
GrSamplerState::ClampNoFilter());
if (NULL == clampEntry.texture()) {
clampEntry = createAndLockTexture(key,
GrSamplerState::ClampNoFilter(),
desc, srcData, rowBytes);
GrAssert(NULL != clampEntry);
if (NULL == clampEntry) {
return NULL;
GrAssert(NULL != clampEntry.texture());
if (NULL == clampEntry.texture()) {
return entry;
}
}
GrTextureDesc rtDesc = desc;
@ -241,7 +268,7 @@ GrTextureEntry* GrContext::createAndLockTexture(GrTextureKey* key,
if (NULL != texture) {
GrDrawTarget::AutoStateRestore asr(fGpu);
fGpu->setRenderTarget(texture->asRenderTarget());
fGpu->setTexture(0, clampEntry->texture());
fGpu->setTexture(0, clampEntry.texture());
fGpu->disableStencil();
fGpu->setViewMatrix(GrMatrix::I());
fGpu->setAlpha(0xff);
@ -276,7 +303,7 @@ GrTextureEntry* GrContext::createAndLockTexture(GrTextureKey* key,
verts[1].setIRectFan(0, 0, 1, 1, 2*sizeof(GrPoint));
fGpu->drawNonIndexed(kTriangleFan_PrimitiveType,
0, 4);
entry = fTextureCache->createAndLock(*key, texture);
entry.set(fTextureCache->createAndLock(resourceKey, texture));
}
texture->releaseRenderTarget();
} else {
@ -302,69 +329,64 @@ GrTextureEntry* GrContext::createAndLockTexture(GrTextureKey* key,
stretchedPixels.get(),
stretchedRowBytes);
GrAssert(NULL != texture);
entry = fTextureCache->createAndLock(*key, texture);
entry.set(fTextureCache->createAndLock(resourceKey, texture));
}
fTextureCache->unlock(clampEntry);
fTextureCache->unlock(clampEntry.cacheEntry());
} else {
GrTexture* texture = fGpu->createTexture(desc, srcData, rowBytes);
if (NULL != texture) {
entry = fTextureCache->createAndLock(*key, texture);
} else {
entry = NULL;
entry.set(fTextureCache->createAndLock(resourceKey, texture));
}
}
return entry;
}
GrTextureEntry* GrContext::lockKeylessTexture(const GrTextureDesc& desc) {
uint32_t p0 = desc.fFormat;
uint32_t p1 = (desc.fAALevel << 16) | desc.fFlags;
GrTextureKey key(p0, p1, desc.fWidth, desc.fHeight);
this->finalizeTextureKey(&key, GrSamplerState::ClampNoFilter(), true);
GrTextureEntry* entry = fTextureCache->findAndLock(key);
if (NULL == entry) {
GrTexture* texture = fGpu->createTexture(desc, NULL, 0);
if (NULL != texture) {
entry = fTextureCache->createAndLock(key, texture);
}
}
// If the caller gives us the same desc/sampler twice we don't want
// to return the same texture the second time (unless it was previously
// released). So we detach the entry from the cache and reattach at release.
if (NULL != entry) {
fTextureCache->detach(entry);
}
return entry;
namespace {
inline void gen_scratch_tex_key_values(const GrGpu* gpu,
const GrTextureDesc& desc,
uint32_t v[4]) {
// Instead of a client-provided key of the texture contents
// we create a key from the descriptor.
GrContext::TextureKey descKey = desc.fAALevel |
(desc.fFlags << 8) |
((uint64_t) desc.fFormat << 32);
// this code path isn't friendly to tiling with NPOT restrictions
// We just pass ClampNoFilter()
gen_texture_key_values(gpu, GrSamplerState::ClampNoFilter(), descKey,
desc.fWidth, desc.fHeight, true, v);
}
}
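
Since scratch textures carry no client data, the synthesized descriptor key above packs the AA level, flags, and pixel format into the 64-bit client-key slot. A standalone restatement of that layout (the helper name is illustrative, not part of the change):

#include <stdint.h>

// Mirrors gen_scratch_tex_key_values' descKey: AA level in the low byte,
// flags in bits 8-31 of the low word, pixel format in the high 32 bits.
uint64_t scratch_desc_key(uint32_t aaLevel, uint32_t flags, uint32_t format) {
    return (uint64_t)(aaLevel | (flags << 8)) | ((uint64_t)format << 32);
}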
GrTextureEntry* GrContext::findApproximateKeylessTexture(
const GrTextureDesc& inDesc) {
GrContext::TextureCacheEntry GrContext::lockScratchTexture(
const GrTextureDesc& inDesc,
ScratchTexMatch match) {
GrTextureDesc desc = inDesc;
if (kExact_ScratchTexMatch != match) {
// bin by pow2 with a reasonable min
static const int MIN_SIZE = 256;
desc.fWidth = GrMax(MIN_SIZE, GrNextPow2(desc.fWidth));
desc.fHeight = GrMax(MIN_SIZE, GrNextPow2(desc.fHeight));
}
uint32_t p0 = desc.fFormat;
uint32_t p1 = (desc.fAALevel << 16) | desc.fFlags;
GrTextureEntry* entry;
bool keepTrying = true;
GrResourceEntry* entry;
int origWidth = desc.fWidth;
int origHeight = desc.fHeight;
bool doubledW = false;
bool doubledH = false;
do {
GrTextureKey key(p0, p1, desc.fWidth, desc.fHeight);
this->finalizeTextureKey(&key, GrSamplerState::ClampNoFilter(), true);
uint32_t v[4];
gen_scratch_tex_key_values(fGpu, desc, v);
GrResourceKey key(v);
entry = fTextureCache->findAndLock(key);
// if we miss, relax the fit of the flags...
// then try doubling width... then height.
if (NULL != entry) {
if (NULL != entry || kExact_ScratchTexMatch == match) {
break;
}
if (!(desc.fFlags & kRenderTarget_GrTextureFlagBit)) {
@ -392,9 +414,9 @@ GrTextureEntry* GrContext::findApproximateKeylessTexture(
desc.fHeight = origHeight;
GrTexture* texture = fGpu->createTexture(desc, NULL, 0);
if (NULL != texture) {
GrTextureKey key(p0, p1, desc.fWidth, desc.fHeight);
this->finalizeTextureKey(&key, GrSamplerState::ClampNoFilter(),
true);
uint32_t v[4];
gen_scratch_tex_key_values(fGpu, desc, v);
GrResourceKey key(v);
entry = fTextureCache->createAndLock(key, texture);
}
}
@ -405,14 +427,17 @@ GrTextureEntry* GrContext::findApproximateKeylessTexture(
if (NULL != entry) {
fTextureCache->detach(entry);
}
return entry;
return TextureCacheEntry(entry);
}
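
The kApprox path above bins the requested dimensions to powers of two with a 256-pixel floor, so many nearby sizes map onto a single cached scratch texture, before falling back to relaxing the render-target flag and doubling each dimension. A runnable sketch of just the binning step (next_pow2 stands in for GrNextPow2; not part of the change):

#include <stdio.h>

// Round up to the next power of two (equivalent in spirit to GrNextPow2).
static int next_pow2(int n) {
    int p = 1;
    while (p < n) {
        p <<= 1;
    }
    return p;
}

// Binning used by kApprox_ScratchTexMatch: pow2 dimensions with a 256 minimum.
static void bin_scratch_size(int w, int h, int* binW, int* binH) {
    const int kMinSize = 256;
    *binW = next_pow2(w) < kMinSize ? kMinSize : next_pow2(w);
    *binH = next_pow2(h) < kMinSize ? kMinSize : next_pow2(h);
}

int main() {
    int w, h;
    bin_scratch_size(300, 170, &w, &h);
    printf("%d x %d\n", w, h);   // 512 x 256
    return 0;
}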
void GrContext::unlockTexture(GrTextureEntry* entry) {
if (kKeylessBit & entry->key().getPrivateBits()) {
fTextureCache->reattachAndUnlock(entry);
void GrContext::unlockTexture(TextureCacheEntry entry) {
// If this is a scratch texture we detached it from the cache
// while it was locked (to avoid two callers simultaneously getting
// the same texture).
if (kScratchBit & entry.cacheEntry()->key().getValue32(3)) {
fTextureCache->reattachAndUnlock(entry.cacheEntry());
} else {
fTextureCache->unlock(entry);
fTextureCache->unlock(entry.cacheEntry());
}
}
@ -529,9 +554,6 @@ void GrContext::drawPaint(const GrPaint& paint) {
////////////////////////////////////////////////////////////////////////////////
struct GrContext::OffscreenRecord {
OffscreenRecord() { fEntry0 = NULL; fEntry1 = NULL; }
~OffscreenRecord() { GrAssert(NULL == fEntry0 && NULL == fEntry1); }
enum Downsample {
k4x4TwoPass_Downsample,
k4x4SinglePass_Downsample,
@ -542,8 +564,8 @@ struct GrContext::OffscreenRecord {
int fTileCountX;
int fTileCountY;
int fScale;
GrTextureEntry* fEntry0;
GrTextureEntry* fEntry1;
GrAutoScratchTexture fOffscreen0;
GrAutoScratchTexture fOffscreen1;
GrDrawTarget::SavedDrawState fSavedState;
GrClip fClip;
};
@ -585,8 +607,8 @@ bool GrContext::prepareForOffscreenAA(GrDrawTarget* target,
GrAssert(GR_USE_OFFSCREEN_AA);
GrAssert(NULL == record->fEntry0);
GrAssert(NULL == record->fEntry1);
GrAssert(NULL == record->fOffscreen0.texture());
GrAssert(NULL == record->fOffscreen1.texture());
GrAssert(!boundRect.isEmpty());
int boundW = boundRect.width();
@ -627,31 +649,28 @@ bool GrContext::prepareForOffscreenAA(GrDrawTarget* target,
desc.fWidth *= record->fScale;
desc.fHeight *= record->fScale;
record->fEntry0 = this->findApproximateKeylessTexture(desc);
if (NULL == record->fEntry0) {
record->fOffscreen0.set(this, desc);
if (NULL == record->fOffscreen0.texture()) {
return false;
}
// the approximate lookup might have given us some slop space, might as well
// use it when computing the tile size.
// these are scale values, will adjust after considering
// the possible second offscreen.
record->fTileSizeX = record->fEntry0->texture()->width();
record->fTileSizeY = record->fEntry0->texture()->height();
record->fTileSizeX = record->fOffscreen0.texture()->width();
record->fTileSizeY = record->fOffscreen0.texture()->height();
if (OffscreenRecord::k4x4TwoPass_Downsample == record->fDownsample) {
desc.fWidth /= 2;
desc.fHeight /= 2;
record->fEntry1 = this->findApproximateKeylessTexture(desc);
if (NULL == record->fEntry1) {
this->unlockTexture(record->fEntry0);
record->fEntry0 = NULL;
record->fOffscreen1.set(this, desc);
if (NULL == record->fOffscreen1.texture()) {
return false;
}
record->fTileSizeX = GrMin(record->fTileSizeX,
2 * record->fEntry0->texture()->width());
2 * record->fOffscreen0.texture()->width());
record->fTileSizeY = GrMin(record->fTileSizeY,
2 * record->fEntry0->texture()->height());
2 * record->fOffscreen0.texture()->height());
}
record->fTileSizeX /= record->fScale;
record->fTileSizeY /= record->fScale;
@ -670,7 +689,7 @@ void GrContext::setupOffscreenAAPass1(GrDrawTarget* target,
int tileX, int tileY,
OffscreenRecord* record) {
GrRenderTarget* offRT0 = record->fEntry0->texture()->asRenderTarget();
GrRenderTarget* offRT0 = record->fOffscreen0.texture()->asRenderTarget();
GrAssert(NULL != offRT0);
GrPaint tempPaint;
@ -715,7 +734,7 @@ void GrContext::doOffscreenAAPass2(GrDrawTarget* target,
int tileX, int tileY,
OffscreenRecord* record) {
SK_TRACE_EVENT0("GrContext::doOffscreenAAPass2");
GrAssert(NULL != record->fEntry0);
GrAssert(NULL != record->fOffscreen0.texture());
GrDrawTarget::AutoGeometryPush agp(target);
GrIRect tileRect;
tileRect.fLeft = boundRect.fLeft + tileX * record->fTileSizeX;
@ -738,7 +757,7 @@ void GrContext::doOffscreenAAPass2(GrDrawTarget* target,
GrSamplerState sampler(GrSamplerState::kClamp_WrapMode,
GrSamplerState::kClamp_WrapMode, filter);
GrTexture* src = record->fEntry0->texture();
GrTexture* src = record->fOffscreen0.texture();
int scale;
enum {
@ -746,9 +765,9 @@ void GrContext::doOffscreenAAPass2(GrDrawTarget* target,
};
if (OffscreenRecord::k4x4TwoPass_Downsample == record->fDownsample) {
GrAssert(NULL != record->fEntry1);
GrAssert(NULL != record->fOffscreen1.texture());
scale = 2;
GrRenderTarget* dst = record->fEntry1->texture()->asRenderTarget();
GrRenderTarget* dst = record->fOffscreen1.texture()->asRenderTarget();
// Do 2x2 downsample from first to second
target->setTexture(kOffscreenStage, src);
@ -762,7 +781,7 @@ void GrContext::doOffscreenAAPass2(GrDrawTarget* target,
scale * tileRect.height());
target->drawSimpleRect(rect, NULL, 1 << kOffscreenStage);
src = record->fEntry1->texture();
src = record->fOffscreen1.texture();
} else if (OffscreenRecord::kFSAA_Downsample == record->fDownsample) {
scale = 1;
GrIRect rect = SkIRect::MakeWH(tileRect.width(), tileRect.height());
@ -811,16 +830,10 @@ void GrContext::doOffscreenAAPass2(GrDrawTarget* target,
void GrContext::cleanupOffscreenAA(GrDrawTarget* target,
GrPathRenderer* pr,
OffscreenRecord* record) {
this->unlockTexture(record->fEntry0);
record->fEntry0 = NULL;
if (pr) {
// Counterpart of scale() in prepareForOffscreenAA()
//pr->scaleCurveTolerance(SkScalarInvert(SkIntToScalar(record->fScale)));
}
if (NULL != record->fEntry1) {
this->unlockTexture(record->fEntry1);
record->fEntry1 = NULL;
}
target->restoreDrawState(record->fSavedState);
}
@ -1471,9 +1484,8 @@ void GrContext::writePixels(int left, int top, int width, int height,
const GrTextureDesc desc = {
kNone_GrTextureFlags, kNone_GrAALevel, width, height, config
};
GrAutoUnlockTextureEntry aute(this,
this->findApproximateKeylessTexture(desc));
GrTexture* texture = aute.texture();
GrAutoScratchTexture ast(this, desc);
GrTexture* texture = ast.texture();
if (NULL == texture) {
return;
}
@ -1630,7 +1642,7 @@ GrContext::GrContext(GrGpu* gpu) :
fCustomPathRenderer = GrPathRenderer::CreatePathRenderer();
fGpu->setClipPathRenderer(fCustomPathRenderer);
fTextureCache = new GrTextureCache(MAX_TEXTURE_CACHE_COUNT,
fTextureCache = new GrResourceCache(MAX_TEXTURE_CACHE_COUNT,
MAX_TEXTURE_CACHE_BYTES);
fFontCache = new GrFontCache(fGpu);

View File

@ -16,7 +16,6 @@
#include "GrGpu.h"
#include "GrTextStrike.h"
#include "GrTextureCache.h"
#include "GrClipIterator.h"
#include "GrIndexBuffer.h"
#include "GrVertexBuffer.h"

View File

@ -15,33 +15,33 @@
*/
#include "GrTextureCache.h"
#include "GrTexture.h"
#include "GrResourceCache.h"
#include "GrResource.h"
GrTextureEntry::GrTextureEntry(const GrTextureKey& key, GrTexture* texture)
: fKey(key), fTexture(texture) {
GrResourceEntry::GrResourceEntry(const GrResourceKey& key, GrResource* resource)
: fKey(key), fResource(resource) {
fLockCount = 0;
fPrev = fNext = NULL;
// we assume ownership of the texture, and will unref it when we die
GrAssert(texture);
// we assume ownership of the resource, and will unref it when we die
GrAssert(resource);
}
GrTextureEntry::~GrTextureEntry() {
fTexture->unref();
GrResourceEntry::~GrResourceEntry() {
fResource->unref();
}
#if GR_DEBUG
void GrTextureEntry::validate() const {
void GrResourceEntry::validate() const {
GrAssert(fLockCount >= 0);
GrAssert(fTexture);
fTexture->validate();
GrAssert(fResource);
fResource->validate();
}
#endif
///////////////////////////////////////////////////////////////////////////////
GrTextureCache::GrTextureCache(int maxCount, size_t maxBytes) :
GrResourceCache::GrResourceCache(int maxCount, size_t maxBytes) :
fMaxCount(maxCount),
fMaxBytes(maxBytes) {
fEntryCount = 0;
@ -52,36 +52,36 @@ GrTextureCache::GrTextureCache(int maxCount, size_t maxBytes) :
fHead = fTail = NULL;
}
GrTextureCache::~GrTextureCache() {
GrAutoTextureCacheValidate atcv(this);
GrResourceCache::~GrResourceCache() {
GrAutoResourceCacheValidate atcv(this);
this->removeAll();
}
void GrTextureCache::getLimits(int* maxTextures, size_t* maxTextureBytes) const{
if (maxTextures) {
*maxTextures = fMaxCount;
void GrResourceCache::getLimits(int* maxResources, size_t* maxResourceBytes) const{
if (maxResources) {
*maxResources = fMaxCount;
}
if (maxTextureBytes) {
*maxTextureBytes = fMaxBytes;
if (maxResourceBytes) {
*maxResourceBytes = fMaxBytes;
}
}
void GrTextureCache::setLimits(int maxTextures, size_t maxTextureBytes) {
bool smaller = (maxTextures < fMaxCount) || (maxTextureBytes < fMaxBytes);
void GrResourceCache::setLimits(int maxResources, size_t maxResourceBytes) {
bool smaller = (maxResources < fMaxCount) || (maxResourceBytes < fMaxBytes);
fMaxCount = maxTextures;
fMaxBytes = maxTextureBytes;
fMaxCount = maxResources;
fMaxBytes = maxResourceBytes;
if (smaller) {
this->purgeAsNeeded();
}
}
void GrTextureCache::internalDetach(GrTextureEntry* entry,
void GrResourceCache::internalDetach(GrResourceEntry* entry,
bool clientDetach) {
GrTextureEntry* prev = entry->fPrev;
GrTextureEntry* next = entry->fNext;
GrResourceEntry* prev = entry->fPrev;
GrResourceEntry* next = entry->fNext;
if (prev) {
prev->fNext = next;
@ -97,14 +97,14 @@ void GrTextureCache::internalDetach(GrTextureEntry* entry,
// update our stats
if (clientDetach) {
fClientDetachedCount += 1;
fClientDetachedBytes += entry->texture()->sizeInBytes();
fClientDetachedBytes += entry->resource()->sizeInBytes();
} else {
fEntryCount -= 1;
fEntryBytes -= entry->texture()->sizeInBytes();
fEntryBytes -= entry->resource()->sizeInBytes();
}
}
void GrTextureCache::attachToHead(GrTextureEntry* entry,
void GrResourceCache::attachToHead(GrResourceEntry* entry,
bool clientReattach) {
entry->fPrev = NULL;
entry->fNext = fHead;
@ -119,19 +119,19 @@ void GrTextureCache::attachToHead(GrTextureEntry* entry,
// update our stats
if (clientReattach) {
fClientDetachedCount -= 1;
fClientDetachedBytes -= entry->texture()->sizeInBytes();
fClientDetachedBytes -= entry->resource()->sizeInBytes();
} else {
fEntryCount += 1;
fEntryBytes += entry->texture()->sizeInBytes();
fEntryBytes += entry->resource()->sizeInBytes();
}
}
class GrTextureCache::Key {
typedef GrTextureEntry T;
class GrResourceCache::Key {
typedef GrResourceEntry T;
const GrTextureKey& fKey;
const GrResourceKey& fKey;
public:
Key(const GrTextureKey& key) : fKey(key) {}
Key(const GrResourceKey& key) : fKey(key) {}
uint32_t getHash() const { return fKey.hashIndex(); }
@ -154,10 +154,10 @@ public:
#endif
};
GrTextureEntry* GrTextureCache::findAndLock(const GrTextureKey& key) {
GrAutoTextureCacheValidate atcv(this);
GrResourceEntry* GrResourceCache::findAndLock(const GrResourceKey& key) {
GrAutoResourceCacheValidate atcv(this);
GrTextureEntry* entry = fCache.find(key);
GrResourceEntry* entry = fCache.find(key);
if (entry) {
this->internalDetach(entry, false);
this->attachToHead(entry, false);
@ -167,18 +167,18 @@ GrTextureEntry* GrTextureCache::findAndLock(const GrTextureKey& key) {
return entry;
}
GrTextureEntry* GrTextureCache::createAndLock(const GrTextureKey& key,
GrTexture* texture) {
GrAutoTextureCacheValidate atcv(this);
GrResourceEntry* GrResourceCache::createAndLock(const GrResourceKey& key,
GrResource* resource) {
GrAutoResourceCacheValidate atcv(this);
GrTextureEntry* entry = new GrTextureEntry(key, texture);
GrResourceEntry* entry = new GrResourceEntry(key, resource);
this->attachToHead(entry, false);
fCache.insert(key, entry);
#if GR_DUMP_TEXTURE_UPLOAD
GrPrintf("--- add texture to cache %p, count=%d bytes= %d %d\n",
entry, fEntryCount, texture->sizeInBytes(), fEntryBytes);
GrPrintf("--- add resource to cache %p, count=%d bytes= %d %d\n",
entry, fEntryCount, resource->sizeInBytes(), fEntryBytes);
#endif
// mark the entry as "busy" so it doesn't get purged
@ -187,19 +187,19 @@ GrTextureEntry* GrTextureCache::createAndLock(const GrTextureKey& key,
return entry;
}
void GrTextureCache::detach(GrTextureEntry* entry) {
void GrResourceCache::detach(GrResourceEntry* entry) {
internalDetach(entry, true);
fCache.remove(entry->fKey, entry);
}
void GrTextureCache::reattachAndUnlock(GrTextureEntry* entry) {
void GrResourceCache::reattachAndUnlock(GrResourceEntry* entry) {
attachToHead(entry, true);
fCache.insert(entry->key(), entry);
unlock(entry);
}
void GrTextureCache::unlock(GrTextureEntry* entry) {
GrAutoTextureCacheValidate atcv(this);
void GrResourceCache::unlock(GrResourceEntry* entry) {
GrAutoResourceCacheValidate atcv(this);
GrAssert(entry);
GrAssert(entry->isLocked());
@ -209,16 +209,16 @@ void GrTextureCache::unlock(GrTextureEntry* entry) {
this->purgeAsNeeded();
}
void GrTextureCache::purgeAsNeeded() {
GrAutoTextureCacheValidate atcv(this);
void GrResourceCache::purgeAsNeeded() {
GrAutoResourceCacheValidate atcv(this);
GrTextureEntry* entry = fTail;
GrResourceEntry* entry = fTail;
while (entry) {
if (fEntryCount <= fMaxCount && fEntryBytes <= fMaxBytes) {
break;
}
GrTextureEntry* prev = entry->fPrev;
GrResourceEntry* prev = entry->fPrev;
if (!entry->isLocked()) {
// remove from our cache
fCache.remove(entry->fKey, entry);
@ -227,9 +227,9 @@ void GrTextureCache::purgeAsNeeded() {
this->internalDetach(entry, false);
#if GR_DUMP_TEXTURE_UPLOAD
GrPrintf("--- ~texture from cache %p [%d %d]\n", entry->texture(),
entry->texture()->width(),
entry->texture()->height());
GrPrintf("--- ~resource from cache %p [%d %d]\n", entry->resource(),
entry->resource()->width(),
entry->resource()->height());
#endif
delete entry;
}
@ -237,15 +237,15 @@ void GrTextureCache::purgeAsNeeded() {
}
}
void GrTextureCache::removeAll() {
void GrResourceCache::removeAll() {
GrAssert(!fClientDetachedCount);
GrAssert(!fClientDetachedBytes);
GrTextureEntry* entry = fHead;
GrResourceEntry* entry = fHead;
while (entry) {
GrAssert(!entry->isLocked());
GrTextureEntry* next = entry->fNext;
GrResourceEntry* next = entry->fNext;
delete entry;
entry = next;
}
@ -259,8 +259,8 @@ void GrTextureCache::removeAll() {
///////////////////////////////////////////////////////////////////////////////
#if GR_DEBUG
static int countMatches(const GrTextureEntry* head, const GrTextureEntry* target) {
const GrTextureEntry* entry = head;
static int countMatches(const GrResourceEntry* head, const GrResourceEntry* target) {
const GrResourceEntry* entry = head;
int count = 0;
while (entry) {
if (target == entry) {
@ -277,7 +277,7 @@ static bool both_zero_or_nonzero(int count, size_t bytes) {
}
#endif
void GrTextureCache::validate() const {
void GrResourceCache::validate() const {
GrAssert(!fHead == !fTail);
GrAssert(both_zero_or_nonzero(fEntryCount, fEntryBytes));
GrAssert(both_zero_or_nonzero(fClientDetachedCount, fClientDetachedBytes));
@ -287,14 +287,14 @@ void GrTextureCache::validate() const {
fCache.validate();
GrTextureEntry* entry = fHead;
GrResourceEntry* entry = fHead;
int count = 0;
size_t bytes = 0;
while (entry) {
entry->validate();
GrAssert(fCache.find(entry->key()));
count += 1;
bytes += entry->texture()->sizeInBytes();
bytes += entry->resource()->sizeInBytes();
entry = entry->fNext;
}
GrAssert(count == fEntryCount - fClientDetachedCount);

View File

@ -1,5 +1,5 @@
/*
Copyright 2010 Google Inc.
Copyright 2011 Google Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
@ -15,13 +15,13 @@
*/
#ifndef GrTextureCache_DEFINED
#define GrTextureCache_DEFINED
#ifndef GrResourceCache_DEFINED
#define GrResourceCache_DEFINED
#include "GrTypes.h"
#include "GrTHashCache.h"
class GrTexture;
class GrResource;
// return true if a<b, or false if b<a
//
@ -36,11 +36,11 @@ class GrTexture;
} while (0)
/**
* Helper class for GrTextureCache, the Key is used to identify src data for
* a texture. It is identified by 2 32bit data fields which can hold any
* Helper class for GrResourceCache. The Key is used to identify src data for
* a resource. It consists of four 32-bit data fields which can hold any
* data (uninterpreted by the cache).
*/
class GrTextureKey {
class GrResourceKey {
public:
enum {
kHashBits = 7,
@ -48,51 +48,53 @@ public:
kHashMask = kHashCount - 1
};
GrTextureKey(uint32_t p0, uint32_t p1, uint16_t width, uint16_t height) {
fP0 = p0;
fP1 = p1;
fP2 = width | (height << 16);
GR_DEBUGCODE(fHashIndex = -1);
GrResourceKey(uint32_t p0, uint32_t p1, uint32_t p2, uint32_t p3) {
fP[0] = p0;
fP[1] = p1;
fP[2] = p2;
fP[3] = p3;
this->computeHashIndex();
}
GrTextureKey(const GrTextureKey& src) {
fP0 = src.fP0;
fP1 = src.fP1;
fP2 = src.fP2;
finalize(src.fPrivateBits);
GrResourceKey(uint32_t v[4]) {
memcpy(fP, v, 4 * sizeof(uint32_t));
this->computeHashIndex();
}
GrResourceKey(const GrResourceKey& src) {
memcpy(fP, src.fP, 4 * sizeof(uint32_t));
#if GR_DEBUG
this->computeHashIndex();
GrAssert(fHashIndex == src.fHashIndex);
#endif
fHashIndex = src.fHashIndex;
}
//!< returns hash value [0..kHashMask] for the key
int hashIndex() const { return fHashIndex; }
friend bool operator==(const GrTextureKey& a, const GrTextureKey& b) {
friend bool operator==(const GrResourceKey& a, const GrResourceKey& b) {
GR_DEBUGASSERT(-1 != a.fHashIndex && -1 != b.fHashIndex);
return a.fP0 == b.fP0 && a.fP1 == b.fP1 && a.fP2 == b.fP2 &&
a.fPrivateBits == b.fPrivateBits;
return 0 == memcmp(a.fP, b.fP, 4 * sizeof(uint32_t));
}
friend bool operator!=(const GrTextureKey& a, const GrTextureKey& b) {
friend bool operator!=(const GrResourceKey& a, const GrResourceKey& b) {
GR_DEBUGASSERT(-1 != a.fHashIndex && -1 != b.fHashIndex);
return !(a == b);
}
friend bool operator<(const GrTextureKey& a, const GrTextureKey& b) {
RET_IF_LT_OR_GT(a.fP0, b.fP0);
RET_IF_LT_OR_GT(a.fP1, b.fP1);
RET_IF_LT_OR_GT(a.fP2, b.fP2);
return a.fPrivateBits < b.fPrivateBits;
friend bool operator<(const GrResourceKey& a, const GrResourceKey& b) {
RET_IF_LT_OR_GT(a.fP[0], b.fP[0]);
RET_IF_LT_OR_GT(a.fP[1], b.fP[1]);
RET_IF_LT_OR_GT(a.fP[2], b.fP[2]);
return a.fP[3] < b.fP[3];
}
uint32_t getValue32(int i) const {
GrAssert(i >=0 && i < 4);
return fP[i];
}
private:
void finalize(uint32_t privateBits) {
fPrivateBits = privateBits;
this->computeHashIndex();
}
uint16_t width() const { return fP2 & 0xffff; }
uint16_t height() const { return (fP2 >> 16); }
uint32_t getPrivateBits() const { return fPrivateBits; }
static uint32_t rol(uint32_t x) {
return (x >> 24) | (x << 8);
@ -105,7 +107,7 @@ private:
}
void computeHashIndex() {
uint32_t hash = fP0 ^ rol(fP1) ^ ror(fP2) ^ rohalf(fPrivateBits);
uint32_t hash = fP[0] ^ rol(fP[1]) ^ ror(fP[2]) ^ rohalf(fP[3]);
// this way to mix and reduce hash to its index may have to change
// depending on how many bits we allocate to the index
hash ^= hash >> 16;
@ -113,10 +115,7 @@ private:
fHashIndex = hash & kHashMask;
}
uint32_t fP0;
uint32_t fP1;
uint32_t fP2;
uint32_t fPrivateBits;
uint32_t fP[4];
// this is computed from the fP... fields
int fHashIndex;
@ -126,14 +125,14 @@ private:
///////////////////////////////////////////////////////////////////////////////
class GrTextureEntry {
class GrResourceEntry {
public:
GrTexture* texture() const { return fTexture; }
const GrTextureKey& key() const { return fKey; }
GrResource* resource() const { return fResource; }
const GrResourceKey& key() const { return fKey; }
#if GR_DEBUG
GrTextureEntry* next() const { return fNext; }
GrTextureEntry* prev() const { return fPrev; }
GrResourceEntry* next() const { return fNext; }
GrResourceEntry* prev() const { return fPrev; }
#endif
#if GR_DEBUG
@ -143,8 +142,8 @@ public:
#endif
private:
GrTextureEntry(const GrTextureKey& key, GrTexture* texture);
~GrTextureEntry();
GrResourceEntry(const GrResourceKey& key, GrResource* resource);
~GrResourceEntry();
bool isLocked() const { return fLockCount != 0; }
void lock() { ++fLockCount; }
@ -153,18 +152,18 @@ private:
--fLockCount;
}
GrTextureKey fKey;
GrTexture* fTexture;
GrResourceKey fKey;
GrResource* fResource;
// track if we're in use, used when we need to purge
// we only purge unlocked entries
int fLockCount;
// we're a dlinklist
GrTextureEntry* fPrev;
GrTextureEntry* fNext;
GrResourceEntry* fPrev;
GrResourceEntry* fNext;
friend class GrTextureCache;
friend class GrResourceCache;
};
///////////////////////////////////////////////////////////////////////////////
@ -172,17 +171,17 @@ private:
#include "GrTHashCache.h"
/**
* Cache of GrTexture objects.
* Cache of GrResource objects.
*
* These have a corresponding GrTextureKey, built from 96bits identifying the
* texture/bitmap.
* These have a corresponding GrResourceKey, built from 128bits identifying the
* resource.
*
* The cache stores the entries in a double-linked list, which is its LRU.
* When an entry is "locked" (i.e. given to the caller), it is moved to the
* head of the list. If/when we must purge some of the entries, we walk the
* list backwards from the tail, since those are the least recently used.
*
* For fast searches, we maintain a sorted array (based on the GrTextureKey)
* For fast searches, we maintain a sorted array (based on the GrResourceKey)
* which we can bsearch. When a new entry is added, it is inserted into this
* array.
*
@ -190,46 +189,46 @@ private:
* a collision between two keys with the same hash, we fall back on the
* bsearch, and update the hash to reflect the most recent Key requested.
*/
class GrTextureCache {
class GrResourceCache {
public:
GrTextureCache(int maxCount, size_t maxBytes);
~GrTextureCache();
GrResourceCache(int maxCount, size_t maxBytes);
~GrResourceCache();
/**
* Return the current texture cache limits.
* Return the current resource cache limits.
*
* @param maxTextures If non-null, returns maximum number of textures that
* can be held in the cache.
* @param maxTextureBytes If non-null, returns maximum number of bytes of
* texture memory that can be held in the cache.
* @param maxResources If non-null, returns maximum number of resources
* that can be held in the cache.
* @param maxBytes If non-null, returns maximum number of bytes of
* gpu memory that can be held in the cache.
*/
void getLimits(int* maxTextures, size_t* maxTextureBytes) const;
void getLimits(int* maxResources, size_t* maxBytes) const;
/**
* Specify the texture cache limits. If the current cache exceeds either
* Specify the resource cache limits. If the current cache exceeds either
* of these, it will be purged (LRU) to keep the cache within these limits.
*
* @param maxTextures The maximum number of textures that can be held in
* @param maxResources The maximum number of resources that can be held in
* the cache.
* @param maxTextureBytes The maximum number of bytes of texture memory
* that can be held in the cache.
* @param maxBytes The maximum number of bytes of resource memory that
* can be held in the cache.
*/
void setLimits(int maxTextures, size_t maxTextureBytes);
void setLimits(int maxResources, size_t maxResourceBytes);
/**
* Search for an entry with the same Key. If found, "lock" it and return it.
* If not found, return null.
*/
GrTextureEntry* findAndLock(const GrTextureKey&);
GrResourceEntry* findAndLock(const GrResourceKey&);
/**
* Create a new entry, based on the specified key and texture, and return
* Create a new entry, based on the specified key and resource, and return
* its "locked" entry.
*
* Ownership of the texture is transferred to the Entry, which will unref()
* Ownership of the resource is transferred to the Entry, which will unref()
* it when we are purged or deleted.
*/
GrTextureEntry* createAndLock(const GrTextureKey&, GrTexture*);
GrResourceEntry* createAndLock(const GrResourceKey&, GrResource*);
/**
* Detach removes an entry from the cache. This prevents the entry from
@ -237,20 +236,20 @@ public:
* entry still counts against the cache's budget and should be reattached
* when exclusive access is no longer needed.
*/
void detach(GrTextureEntry*);
void detach(GrResourceEntry*);
/**
* Reattaches a texture to the cache and unlocks it. Allows it to be found
* Reattaches a resource to the cache and unlocks it. Allows it to be found
* by a subsequent findAndLock or be purged (provided its lock count is
* now 0.)
*/
void reattachAndUnlock(GrTextureEntry*);
void reattachAndUnlock(GrResourceEntry*);
/**
* When done with an entry, call unlock(entry) on it, which returns it to
* a purgable state.
*/
void unlock(GrTextureEntry*);
void unlock(GrResourceEntry*);
void removeAll();
@ -261,16 +260,16 @@ public:
#endif
private:
void internalDetach(GrTextureEntry*, bool);
void attachToHead(GrTextureEntry*, bool);
void purgeAsNeeded(); // uses kFreeTexture_DeleteMode
void internalDetach(GrResourceEntry*, bool);
void attachToHead(GrResourceEntry*, bool);
void purgeAsNeeded(); // uses kFreeResource_DeleteMode
class Key;
GrTHashTable<GrTextureEntry, Key, 8> fCache;
GrTHashTable<GrResourceEntry, Key, 8> fCache;
// manage the dlink list
GrTextureEntry* fHead;
GrTextureEntry* fTail;
GrResourceEntry* fHead;
GrResourceEntry* fTail;
// our budget, used in purgeAsNeeded()
int fMaxCount;
@ -286,21 +285,21 @@ private:
///////////////////////////////////////////////////////////////////////////////
#if GR_DEBUG
class GrAutoTextureCacheValidate {
class GrAutoResourceCacheValidate {
public:
GrAutoTextureCacheValidate(GrTextureCache* cache) : fCache(cache) {
GrAutoResourceCacheValidate(GrResourceCache* cache) : fCache(cache) {
cache->validate();
}
~GrAutoTextureCacheValidate() {
~GrAutoResourceCacheValidate() {
fCache->validate();
}
private:
GrTextureCache* fCache;
GrResourceCache* fCache;
};
#else
class GrAutoTextureCacheValidate {
class GrAutoResourceCacheValidate {
public:
GrAutoTextureCacheValidate(GrTextureCache*) {}
GrAutoResourceCacheValidate(GrResourceCache*) {}
};
#endif
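
A sketch of the locking contract GrResourceCache documents above, assuming a fully specified GrResourceKey and some GrResource-derived object; the function name is illustrative, and the detach/reattachAndUnlock pair shows the exclusive-access pattern the scratch-texture code in GrContext relies on.

// Illustrative only; 'res' is any GrResource subclass instance.
void exercise_cache(GrResourceCache* cache,
                    const GrResourceKey& key,
                    GrResource* res) {
    // createAndLock takes ownership of 'res' and returns a locked entry.
    GrResourceEntry* entry = cache->createAndLock(key, res);
    cache->unlock(entry);                    // now purgeable, sits at the LRU head

    entry = cache->findAndLock(key);         // NULL if it has since been purged
    if (NULL != entry) {
        cache->detach(entry);                // exclusive: a second findAndLock misses
        // ... use entry->resource() ...
        cache->reattachAndUnlock(entry);     // findable and purgeable again
    }
}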

View File

@ -132,7 +132,6 @@
'../gpu/include/GrTextContext.h',
'../gpu/include/GrTextStrike.h',
'../gpu/include/GrTexture.h',
'../gpu/include/GrTextureCache.h',
'../gpu/include/GrTHashCache.h',
'../gpu/include/GrTLList.h',
'../gpu/include/GrTypes.h',
@ -173,13 +172,14 @@
'../gpu/src/GrRectanizer.cpp',
'../gpu/src/GrRedBlackTree.h',
'../gpu/src/GrResource.cpp',
'../gpu/src/GrResourceCache.cpp',
'../gpu/src/GrResourceCache.h',
'../gpu/src/GrStencil.cpp',
'../gpu/src/GrTesselatedPathRenderer.cpp',
'../gpu/src/GrTextContext.cpp',
'../gpu/src/GrTextStrike.cpp',
'../gpu/src/GrTextStrike_impl.h',
'../gpu/src/GrTexture.cpp',
'../gpu/src/GrTextureCache.cpp',
'../gpu/src/gr_unittests.cpp',
'../gpu/src/mac/GrGLDefaultInterface_mac.cpp',

View File

@ -22,6 +22,7 @@
#include "SkBitmap.h"
#include "SkDevice.h"
#include "SkRegion.h"
#include "GrContext.h"
struct SkDrawProcs;
struct GrSkDrawProcs;
@ -126,18 +127,16 @@ public:
virtual void makeRenderTargetCurrent();
protected:
class TexCache;
typedef GrContext::TextureCacheEntry TexCache;
enum TexType {
kBitmap_TexType,
kDeviceRenderTarget_TexType,
kSaveLayerDeviceRenderTarget_TexType
};
TexCache* lockCachedTexture(const SkBitmap& bitmap,
TexCache lockCachedTexture(const SkBitmap& bitmap,
const GrSamplerState& sampler,
GrTexture** texture,
TexType type = kBitmap_TexType);
void unlockCachedTexture(TexCache*);
void unlockCachedTexture(TexCache);
class SkAutoCachedTexture {
public:
@ -152,7 +151,7 @@ protected:
private:
SkGpuDevice* fDevice;
TexCache* fTex;
TexCache fTex;
};
friend class SkAutoTexCache;
@ -162,7 +161,7 @@ private:
GrSkDrawProcs* fDrawProcs;
// state for our offscreen render-target
TexCache* fCache;
TexCache fCache;
GrTexture* fTexture;
GrRenderTarget* fRenderTarget;
bool fNeedClear;

View File

@ -231,8 +231,9 @@ private:
////////////////////////////////////////////////////////////////////////////////
// Helper functions
GrTextureEntry* sk_gr_create_bitmap_texture(GrContext* ctx,
GrTextureKey* key,
static const GrContext::TextureKey gUNCACHED_KEY = ~0;
GrContext::TextureCacheEntry sk_gr_create_bitmap_texture(GrContext* ctx,
GrContext::TextureKey key,
const GrSamplerState& sampler,
const SkBitmap& bitmap);

View File

@ -67,34 +67,33 @@ SkGpuDevice::SkAutoCachedTexture::
const GrSamplerState& sampler,
GrTexture** texture) {
GrAssert(texture);
fTex = NULL;
*texture = this->set(device, bitmap, sampler);
}
SkGpuDevice::SkAutoCachedTexture::SkAutoCachedTexture() {
fTex = NULL;
}
GrTexture* SkGpuDevice::SkAutoCachedTexture::set(SkGpuDevice* device,
const SkBitmap& bitmap,
const GrSamplerState& sampler) {
if (fTex) {
if (fTex.texture()) {
fDevice->unlockCachedTexture(fTex);
}
fDevice = device;
GrTexture* texture = (GrTexture*)bitmap.getTexture();
if (texture) {
// return the native texture
fTex = NULL;
fTex.reset();
} else {
// look it up in our cache
fTex = device->lockCachedTexture(bitmap, sampler, &texture);
fTex = device->lockCachedTexture(bitmap, sampler);
texture = fTex.texture();
}
return texture;
}
SkGpuDevice::SkAutoCachedTexture::~SkAutoCachedTexture() {
if (fTex) {
if (fTex.texture()) {
fDevice->unlockCachedTexture(fTex);
}
}
@ -170,7 +169,6 @@ void SkGpuDevice::initFromRenderTarget(GrContext* context,
fContext = context;
fContext->ref();
fCache = NULL;
fTexture = NULL;
fRenderTarget = NULL;
fNeedClear = false;
@ -199,7 +197,6 @@ SkGpuDevice::SkGpuDevice(GrContext* context, SkBitmap::Config config, int width,
fContext = context;
fContext->ref();
fCache = NULL;
fTexture = NULL;
fRenderTarget = NULL;
fNeedClear = false;
@ -214,10 +211,9 @@ SkGpuDevice::SkGpuDevice(GrContext* context, SkBitmap::Config config, int width,
TexType type = (kSaveLayer_Usage == usage) ?
kSaveLayerDeviceRenderTarget_TexType :
kDeviceRenderTarget_TexType;
fCache = this->lockCachedTexture(bm, GrSamplerState::ClampNoFilter(),
&fTexture, type);
if (fCache) {
SkASSERT(NULL != fTexture);
fCache = this->lockCachedTexture(bm, GrSamplerState::ClampNoFilter(), type);
fTexture = fCache.texture();
if (fTexture) {
SkASSERT(NULL != fTexture->asRenderTarget());
// hold a ref directly on fTexture (even though fCache has one) to match
// other constructor paths. Simplifies cleanup.
@ -260,10 +256,10 @@ SkGpuDevice::~SkGpuDevice() {
SkSafeUnref(fTexture);
SkSafeUnref(fRenderTarget);
if (fCache) {
if (fCache.texture()) {
GrAssert(NULL != fTexture);
GrAssert(fRenderTarget == fTexture->asRenderTarget());
fContext->unlockTexture((GrTextureEntry*)fCache);
fContext->unlockTexture(fCache);
}
fContext->unref();
}
@ -895,15 +891,13 @@ static bool drawWithGPUMaskFilter(GrContext* context, const SkPath& path,
kRGBA_8888_GrPixelConfig
};
GrTextureEntry* srcEntry = context->findApproximateKeylessTexture(desc);
GrTextureEntry* dstEntry = context->findApproximateKeylessTexture(desc);
GrAutoUnlockTextureEntry srcLock(context, srcEntry),
dstLock(context, dstEntry);
if (NULL == srcEntry || NULL == dstEntry) {
GrAutoScratchTexture srcEntry(context, desc);
GrAutoScratchTexture dstEntry(context, desc);
if (NULL == srcEntry.texture() || NULL == dstEntry.texture()) {
return false;
}
GrTexture* srcTexture = srcEntry->texture();
GrTexture* dstTexture = dstEntry->texture();
GrTexture* srcTexture = srcEntry.texture();
GrTexture* dstTexture = dstEntry.texture();
if (NULL == srcTexture || NULL == dstTexture) {
return false;
}
@ -939,18 +933,18 @@ static bool drawWithGPUMaskFilter(GrContext* context, const SkPath& path,
paint.reset();
paint.getTextureSampler(0)->setFilter(GrSamplerState::kBilinear_Filter);
paint.getTextureSampler(0)->setMatrix(sampleM);
GrTextureEntry* origEntry = NULL;
GrAutoScratchTexture origEntry;
if (blurType != SkMaskFilter::kNormal_BlurType) {
// Stash away a copy of the unblurred image.
origEntry = context->findApproximateKeylessTexture(desc);
if (NULL == origEntry) {
origEntry.set(context, desc);
if (NULL == origEntry.texture()) {
return false;
}
context->setRenderTarget(origEntry->texture()->asRenderTarget());
context->setRenderTarget(origEntry.texture()->asRenderTarget());
paint.setTexture(0, srcTexture);
context->drawRect(paint, srcRect);
}
GrAutoUnlockTextureEntry origLock(context, origEntry);
for (int i = 1; i < scaleFactor; i *= 2) {
sampleM.setIDiv(srcTexture->width(), srcTexture->height());
paint.getTextureSampler(0)->setMatrix(sampleM);
@ -1011,7 +1005,7 @@ static bool drawWithGPUMaskFilter(GrContext* context, const SkPath& path,
}
if (blurType != SkMaskFilter::kNormal_BlurType) {
GrTexture* origTexture = origEntry->texture();
GrTexture* origTexture = origEntry.texture();
paint.getTextureSampler(0)->setFilter(GrSamplerState::kNearest_Filter);
sampleM.setIDiv(origTexture->width(), origTexture->height());
paint.getTextureSampler(0)->setMatrix(sampleM);
@ -1102,9 +1096,8 @@ static bool drawWithMaskFilter(GrContext* context, const SkPath& path,
kAlpha_8_GrPixelConfig
};
GrAutoUnlockTextureEntry aute(context,
context->findApproximateKeylessTexture(desc));
GrTexture* texture = aute.texture();
GrAutoScratchTexture ast(context, desc);
GrTexture* texture = ast.texture();
if (NULL == texture) {
return false;
@ -1723,12 +1716,10 @@ bool SkGpuDevice::filterTextFlags(const SkPaint& paint, TextFlags* flags) {
///////////////////////////////////////////////////////////////////////////////
SkGpuDevice::TexCache* SkGpuDevice::lockCachedTexture(const SkBitmap& bitmap,
SkGpuDevice::TexCache SkGpuDevice::lockCachedTexture(const SkBitmap& bitmap,
const GrSamplerState& sampler,
GrTexture** texture,
TexType type) {
GrTexture* newTexture = NULL;
GrTextureEntry* entry = NULL;
GrContext::TextureCacheEntry entry;
GrContext* ctx = this->context();
if (kBitmap_TexType != type) {
@ -1739,46 +1730,41 @@ SkGpuDevice::TexCache* SkGpuDevice::lockCachedTexture(const SkBitmap& bitmap,
bitmap.height(),
SkGr::Bitmap2PixelConfig(bitmap)
};
GrContext::ScratchTexMatch match;
if (kSaveLayerDeviceRenderTarget_TexType == type) {
// we know layers will only be drawn through drawDevice.
// drawDevice has been made to work with content embedded in a
// larger texture so it's okay to use the approximate version.
entry = ctx->findApproximateKeylessTexture(desc);
match = GrContext::kApprox_ScratchTexMatch;
} else {
SkASSERT(kDeviceRenderTarget_TexType == type);
entry = ctx->lockKeylessTexture(desc);
match = GrContext::kExact_ScratchTexMatch;
}
entry = ctx->lockScratchTexture(desc, match);
} else {
if (!bitmap.isVolatile()) {
uint32_t p0, p1;
p0 = bitmap.getGenerationID();
p1 = bitmap.pixelRefOffset();
GrTextureKey key(p0, p1, bitmap.width(), bitmap.height());
GrContext::TextureKey key = bitmap.getGenerationID();
key |= ((uint64_t) bitmap.pixelRefOffset()) << 32;
entry = ctx->findAndLockTexture(&key, sampler);
if (NULL == entry)
entry = sk_gr_create_bitmap_texture(ctx, &key, sampler,
entry = ctx->findAndLockTexture(key, bitmap.width(),
bitmap.height(), sampler);
if (NULL == entry.texture()) {
entry = sk_gr_create_bitmap_texture(ctx, key, sampler,
bitmap);
} else {
entry = sk_gr_create_bitmap_texture(ctx, NULL, sampler, bitmap);
}
if (NULL == entry) {
} else {
entry = sk_gr_create_bitmap_texture(ctx, gUNCACHED_KEY, sampler, bitmap);
}
if (NULL == entry.texture()) {
GrPrintf("---- failed to create texture for cache [%d %d]\n",
bitmap.width(), bitmap.height());
}
}
if (NULL != entry) {
newTexture = entry->texture();
if (texture) {
*texture = newTexture;
}
}
return (TexCache*)entry;
return entry;
}
void SkGpuDevice::unlockCachedTexture(TexCache* cache) {
this->context()->unlockTexture((GrTextureEntry*)cache);
void SkGpuDevice::unlockCachedTexture(TexCache cache) {
this->context()->unlockTexture(cache);
}
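
For reference, the 64-bit bitmap key built in lockCachedTexture above is simply the bitmap's generation ID in the low word and its pixelRefOffset in the high word, with gUNCACHED_KEY (~0) reserved for volatile bitmaps that take the scratch path. A standalone restatement (the helper name is illustrative, not part of the change):

#include <stdint.h>
#include <stddef.h>

// Same packing as lockCachedTexture: generation ID low, pixelRefOffset high.
uint64_t bitmap_texture_key(uint32_t generationID, size_t pixelRefOffset) {
    uint64_t key = generationID;
    key |= ((uint64_t)pixelRefOffset) << 32;
    return key;
}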
SkDevice* SkGpuDevice::onCreateCompatibleDevice(SkBitmap::Config config,

View File

@ -63,13 +63,15 @@ static void build_compressed_data(void* buffer, const SkBitmap& bitmap) {
////////////////////////////////////////////////////////////////////////////////
GrTextureEntry* sk_gr_create_bitmap_texture(GrContext* ctx,
GrTextureKey* key,
GrContext::TextureCacheEntry sk_gr_create_bitmap_texture(GrContext* ctx,
GrContext::TextureKey key,
const GrSamplerState& sampler,
const SkBitmap& origBitmap) {
SkAutoLockPixels alp(origBitmap);
GrContext::TextureCacheEntry entry;
if (!origBitmap.readyToDraw()) {
return NULL;
return entry;
}
SkBitmap tmpBitmap;
@ -98,12 +100,13 @@ GrTextureEntry* sk_gr_create_bitmap_texture(GrContext* ctx,
// our compressed data will be trimmed, so pass width() for its
// "rowBytes", since they are the same now.
if (NULL != key) {
if (gUNCACHED_KEY != key) {
return ctx->createAndLockTexture(key, sampler, desc, storage.get(),
bitmap->width());
} else {
GrTextureEntry* entry = ctx->lockKeylessTexture(desc);
entry->texture()->uploadTextureData(0, 0, bitmap->width(),
entry = ctx->lockScratchTexture(desc,
GrContext::kExact_ScratchTexMatch);
entry.texture()->uploadTextureData(0, 0, bitmap->width(),
bitmap->height(), storage.get(), 0);
return entry;
}
@ -116,12 +119,14 @@ GrTextureEntry* sk_gr_create_bitmap_texture(GrContext* ctx,
}
desc.fFormat = SkGr::Bitmap2PixelConfig(*bitmap);
if (NULL != key) {
if (gUNCACHED_KEY != key) {
return ctx->createAndLockTexture(key, sampler, desc,
bitmap->getPixels(), bitmap->rowBytes());
bitmap->getPixels(),
bitmap->rowBytes());
} else {
GrTextureEntry* entry = ctx->lockKeylessTexture(desc);
entry->texture()->uploadTextureData(0, 0, bitmap->width(),
entry = ctx->lockScratchTexture(desc,
GrContext::kExact_ScratchTexMatch);
entry.texture()->uploadTextureData(0, 0, bitmap->width(),
bitmap->height(), bitmap->getPixels(), bitmap->rowBytes());
return entry;
}