Revert "GrResourceCache2 manages scratch texture."
This reverts commit d14e1a2764.
parent 65be97d1a1
commit 9323b8b8e1
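
Note: the hunks below restore the explicit lock/unlock convention for cached bitmap textures. The ref-only helper GrRefCachedBitmapTexture (paired with SkAutoTUnref) goes away, and callers return to pairing GrLockAndRefCachedBitmapTexture with GrUnlockAndUnrefCachedBitmapTexture. A minimal sketch of the restored calling pattern, using a hypothetical draw_with_texture() caller for illustration (the Gr helpers themselves are declared in the SkGr.h hunk below):

// Post-revert pattern: lock + ref the cached texture, use it, then unlock + unref.
// draw_with_texture is a hypothetical caller, not part of this commit.
static void draw_with_texture(GrContext* context, const SkBitmap& bmp) {
    GrTexture* texture = GrLockAndRefCachedBitmapTexture(context, bmp, NULL);
    if (NULL == texture) {
        return;  // cache lookup/creation failed
    }
    // ... draw using texture ...
    GrUnlockAndUnrefCachedBitmapTexture(texture);  // returns it to the scratch pool
}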
@@ -92,8 +92,8 @@ protected:
GrDrawState* drawState = tt.target()->drawState();
SkAutoTUnref<GrTexture> texture(GrRefCachedBitmapTexture(context, fBmp, NULL));
if (!texture) {
GrTexture* texture = GrLockAndRefCachedBitmapTexture(context, fBmp, NULL);
if (NULL == texture) {
return;
}

@@ -144,6 +144,7 @@ protected:
y += renderRect.height() + kTestPad;
}
}
GrUnlockAndUnrefCachedBitmapTexture(texture);
}
private:
@@ -83,12 +83,11 @@ protected:
GrDrawState* drawState = tt.target()->drawState();
SkAutoTUnref<GrTexture> texture[3];
texture[0].reset(GrRefCachedBitmapTexture(context, fBmp[0], NULL));
texture[1].reset(GrRefCachedBitmapTexture(context, fBmp[1], NULL));
texture[2].reset(GrRefCachedBitmapTexture(context, fBmp[2], NULL));
if (!texture[0] || !texture[1] || !texture[2]) {
GrTexture* texture[3];
texture[0] = GrLockAndRefCachedBitmapTexture(context, fBmp[0], NULL);
texture[1] = GrLockAndRefCachedBitmapTexture(context, fBmp[1], NULL);
texture[2] = GrLockAndRefCachedBitmapTexture(context, fBmp[2], NULL);
if ((NULL == texture[0]) || (NULL == texture[1]) || (NULL == texture[2])) {
return;
}

@@ -98,35 +97,38 @@ protected:
for (int space = kJPEG_SkYUVColorSpace; space <= kLastEnum_SkYUVColorSpace;
++space) {
SkRect renderRect = SkRect::MakeWH(SkIntToScalar(fBmp[0].width()),
SkIntToScalar(fBmp[0].height()));
renderRect.outset(kDrawPad, kDrawPad);
SkRect renderRect = SkRect::MakeWH(SkIntToScalar(fBmp[0].width()),
SkIntToScalar(fBmp[0].height()));
renderRect.outset(kDrawPad, kDrawPad);
SkScalar y = kDrawPad + kTestPad + space * kColorSpaceOffset;
SkScalar x = kDrawPad + kTestPad;
SkScalar y = kDrawPad + kTestPad + space * kColorSpaceOffset;
SkScalar x = kDrawPad + kTestPad;
const int indices[6][3] = {{0, 1, 2}, {0, 2, 1}, {1, 0, 2},
{1, 2, 0}, {2, 0, 1}, {2, 1, 0}};
const int indices[6][3] = {{0, 1, 2}, {0, 2, 1}, {1, 0, 2}, {1, 2, 0}, {2, 0, 1}, {2, 1, 0}};
for (int i = 0; i < 6; ++i) {
SkAutoTUnref<GrFragmentProcessor> fp(
GrYUVtoRGBEffect::Create(texture[indices[i][0]],
texture[indices[i][1]],
texture[indices[i][2]],
static_cast<SkYUVColorSpace>(space)));
if (fp) {
SkMatrix viewMatrix;
viewMatrix.setTranslate(x, y);
drawState->reset(viewMatrix);
drawState->setRenderTarget(rt);
drawState->setColor(0xffffffff);
drawState->addColorProcessor(fp);
tt.target()->drawSimpleRect(renderRect);
}
x += renderRect.width() + kTestPad;
}
for (int i = 0; i < 6; ++i) {
SkAutoTUnref<GrFragmentProcessor> fp(
GrYUVtoRGBEffect::Create(texture[indices[i][0]],
texture[indices[i][1]],
texture[indices[i][2]],
static_cast<SkYUVColorSpace>(space)));
if (fp) {
SkMatrix viewMatrix;
viewMatrix.setTranslate(x, y);
drawState->reset(viewMatrix);
drawState->setRenderTarget(rt);
drawState->setColor(0xffffffff);
drawState->addColorProcessor(fp);
tt.target()->drawSimpleRect(renderRect);
}
x += renderRect.width() + kTestPad;
}
}
}
GrUnlockAndUnrefCachedBitmapTexture(texture[0]);
GrUnlockAndUnrefCachedBitmapTexture(texture[1]);
GrUnlockAndUnrefCachedBitmapTexture(texture[2]);
}
private:
SkBitmap fBmp[3];
@@ -290,13 +290,15 @@ public:
* tiling non-power-of-two textures on APIs that don't support this (e.g.
* unextended GLES2). Tiling a NPOT texture created by lockScratchTexture on
* such an API will create gaps in the tiling pattern. This includes clamp
* mode. (This may be addressed in a future update.)
*
* internalFlag is a temporary workaround until changes in the internal
* architecture are complete. Use the default value.
* mode. (This may be addressed in a future update.)
*/
GrTexture* lockAndRefScratchTexture(const GrTextureDesc&, ScratchTexMatch match,
bool internalFlag = false);
GrTexture* lockAndRefScratchTexture(const GrTextureDesc&, ScratchTexMatch match);
/**
* When done with an entry, call unlockScratchTexture(entry) on it, which returns
* it to the cache, where it may be purged. This does not unref the texture.
*/
void unlockScratchTexture(GrTexture* texture);
/**
* Creates a texture that is outside the cache. Does not count against
@@ -1073,7 +1075,15 @@ private:
size_t rowBytes,
bool filter);
bool createNewScratchTexture(const GrTextureDesc& desc);
// Needed so GrTexture's returnToCache helper function can call
// addExistingTextureToCache
friend class GrTexture;
friend class GrStencilAndCoverPathRenderer;
friend class GrStencilAndCoverTextContext;
// Add an existing texture to the texture cache. This is intended solely
// for use with textures released from an GrAutoScratchTexture.
void addExistingTextureToCache(GrTexture* texture);
/**
* These functions create premul <-> unpremul effects if it is possible to generate a pair

@@ -1093,7 +1103,8 @@ private:
};
/**
* This is deprecated. Don't use it.
* Gets and locks a scratch texture from a descriptor using either exact or approximate criteria.
* Unlocks texture in the destructor.
*/
class SK_API GrAutoScratchTexture : public ::SkNoncopyable {
public:
@@ -1104,11 +1115,10 @@ public:
GrAutoScratchTexture(GrContext* context,
const GrTextureDesc& desc,
GrContext::ScratchTexMatch match = GrContext::kApprox_ScratchTexMatch,
bool internalFlag = false)
GrContext::ScratchTexMatch match = GrContext::kApprox_ScratchTexMatch)
: fContext(NULL)
, fTexture(NULL) {
this->set(context, desc, match, internalFlag);
this->set(context, desc, match);
}
~GrAutoScratchTexture() {

@@ -1117,26 +1127,34 @@ public:
void reset() {
if (fContext && fTexture) {
fContext->unlockScratchTexture(fTexture);
fTexture->unref();
fTexture = NULL;
}
}
GrTexture* detach() {
GrTexture* texture = fTexture;
fTexture = NULL;
return texture;
}
/*
* When detaching a texture we do not unlock it in the texture cache but
* we do set the returnToCache flag. In this way the texture remains
* "locked" in the texture cache until it is freed and recycled in
* GrTexture::internal_dispose. In reality, the texture has been removed
* from the cache (because this is in AutoScratchTexture) and by not
* calling unlockScratchTexture we simply don't re-add it. It will be
* reattached in GrTexture::internal_dispose.
*
* Note that the caller is assumed to accept and manage the ref to the
* returned texture.
*/
GrTexture* detach();
GrTexture* set(GrContext* context,
const GrTextureDesc& desc,
GrContext::ScratchTexMatch match = GrContext::kApprox_ScratchTexMatch,
bool internalFlag = 0) {
GrContext::ScratchTexMatch match = GrContext::kApprox_ScratchTexMatch) {
this->reset();
fContext = context;
if (fContext) {
fTexture = fContext->lockAndRefScratchTexture(desc, match, internalFlag);
fTexture = fContext->lockAndRefScratchTexture(desc, match);
if (NULL == fTexture) {
fContext = NULL;
}
@@ -49,21 +49,27 @@ public:
// templated helper classes (e.g. SkAutoTUnref). However, we have different categories of
// refs (e.g. pending reads). We also don't require thread safety as GrCacheable objects are
// not intended to cross thread boundaries.
// internal_dispose() exists because of GrTexture's reliance on it. It will be removed
// soon.
void ref() const {
this->validate();
++fRefCnt;
// pre-validate once internal_dispose is removed (and therefore 0 ref cnt is not allowed).
this->validate();
}
void unref() const {
this->validate();
--fRefCnt;
if (0 == fRefCnt && 0 == fPendingReads && 0 == fPendingWrites) {
SkDELETE(this);
this->internal_dispose();
}
}
bool isPurgable() const { return this->reffedOnlyByCache() && !this->internalHasPendingIO(); }
bool reffedOnlyByCache() const { return 1 == fRefCnt; }
virtual void internal_dispose() const { SkDELETE(this); }
/** This is exists to service the old mechanism for recycling scratch textures. It will
be removed soon. */
bool unique() const { return 1 == (fRefCnt + fPendingReads + fPendingWrites); }
void validate() const {
#ifdef SK_DEBUG

@@ -74,8 +80,9 @@ public:
#endif
}
protected:
GrIORef() : fRefCnt(1), fPendingReads(0), fPendingWrites(0), fIsScratch(kNo_IsScratch) { }
GrIORef() : fRefCnt(1), fPendingReads(0), fPendingWrites(0) {}
bool internalHasPendingRead() const { return SkToBool(fPendingReads); }
bool internalHasPendingWrite() const { return SkToBool(fPendingWrites); }

@@ -91,7 +98,7 @@ private:
this->validate();
--fPendingReads;
if (0 == fRefCnt && 0 == fPendingReads && 0 == fPendingWrites) {
SkDELETE(this);
this->internal_dispose();
}
}

@@ -104,7 +111,7 @@ private:
this->validate();
--fPendingWrites;
if (0 == fRefCnt && 0 == fPendingReads && 0 == fPendingWrites) {
SkDELETE(this);
this->internal_dispose();
}
}

@@ -115,17 +122,6 @@ private:
// This class is used to manage conversion of refs to pending reads/writes.
friend class GrGpuResourceRef;
// This is temporary until GrResourceCache is fully replaced by GrResourceCache2.
enum IsScratch {
kNo_IsScratch,
kYes_IsScratch
} fIsScratch;
friend class GrContext; // to set the above field.
friend class GrResourceCache; // to check the above field.
friend class GrResourceCache2; // to check the above field.
template <typename, IOType> friend class GrPendingIOResource;
};
@@ -99,6 +99,8 @@ protected:
void validateDesc() const;
private:
void abandonReleaseCommon();
virtual void internal_dispose() const SK_OVERRIDE;
void dirtyMipMaps(bool mipMapsDirty);
enum MipMapsStatus {
@@ -70,7 +70,9 @@ static inline GrColor SkColor2GrColorJustAlpha(SkColor c) {
bool GrIsBitmapInCache(const GrContext*, const SkBitmap&, const GrTextureParams*);
GrTexture* GrRefCachedBitmapTexture(GrContext*, const SkBitmap&, const GrTextureParams*);
GrTexture* GrLockAndRefCachedBitmapTexture(GrContext*, const SkBitmap&, const GrTextureParams*);
void GrUnlockAndUnrefCachedBitmapTexture(GrTexture*);
////////////////////////////////////////////////////////////////////////////////
@@ -41,9 +41,11 @@ class SK_API SkGrPixelRef : public SkROLockPixelsPixelRef {
public:
SK_DECLARE_INST_COUNT(SkGrPixelRef)
/**
* Constructs a pixel ref around a GrSurface.
* Constructs a pixel ref around a GrSurface. If the caller has locked the GrSurface in the
* cache and would like the pixel ref to unlock it in its destructor then transferCacheLock
* should be set to true.
*/
SkGrPixelRef(const SkImageInfo&, GrSurface*);
SkGrPixelRef(const SkImageInfo&, GrSurface*, bool transferCacheLock = false);
virtual ~SkGrPixelRef();
// override from SkPixelRef

@@ -56,6 +58,8 @@ protected:
private:
GrSurface* fSurface;
bool fUnlock; // if true the pixel ref owns a texture cache lock on fSurface
typedef SkROLockPixelsPixelRef INHERITED;
};
@@ -459,9 +459,9 @@ bool SkBitmapProcShader::asFragmentProcessor(GrContext* context, const SkPaint&
}
GrTextureParams params(tm, textureFilterMode);
SkAutoTUnref<GrTexture> texture(GrRefCachedBitmapTexture(context, fRawBitmap, &params));
GrTexture* texture = GrLockAndRefCachedBitmapTexture(context, fRawBitmap, &params);
if (!texture) {
if (NULL == texture) {
SkErrorInternals::SetError( kInternalError_SkError,
"Couldn't convert bitmap to texture.");
return false;

@@ -476,6 +476,7 @@ bool SkBitmapProcShader::asFragmentProcessor(GrContext* context, const SkPaint&
} else {
*fp = GrSimpleTextureEffect::Create(texture, matrix, params);
}
GrUnlockAndUnrefCachedBitmapTexture(texture);
return true;
}
@@ -399,8 +399,9 @@ bool SkImageFilter::getInputResultGPU(SkImageFilter::Proxy* proxy,
if (kUnknown_SkColorType == info.colorType()) {
return false;
}
SkAutoTUnref<GrTexture> resultTex(GrRefCachedBitmapTexture(context, *result, NULL));
result->setPixelRef(SkNEW_ARGS(SkGrPixelRef, (info, resultTex)))->unref();
GrTexture* resultTex = GrLockAndRefCachedBitmapTexture(context, *result, NULL);
result->setPixelRef(new SkGrPixelRef(info, resultTex))->unref();
GrUnlockAndUnrefCachedBitmapTexture(resultTex);
}
return true;
} else {
@@ -997,10 +997,10 @@ bool SkPerlinNoiseShader::asFragmentProcessor(GrContext* context, const SkPaint&
SkPerlinNoiseShader::PaintingData* paintingData =
SkNEW_ARGS(PaintingData, (fTileSize, fSeed, fBaseFrequencyX, fBaseFrequencyY, matrix));
SkAutoTUnref<GrTexture> permutationsTexture(
GrRefCachedBitmapTexture(context, paintingData->getPermutationsBitmap(), NULL));
SkAutoTUnref<GrTexture> noiseTexture(
GrRefCachedBitmapTexture(context, paintingData->getNoiseBitmap(), NULL));
GrTexture* permutationsTexture = GrLockAndRefCachedBitmapTexture(
context, paintingData->getPermutationsBitmap(), NULL);
GrTexture* noiseTexture = GrLockAndRefCachedBitmapTexture(
context, paintingData->getNoiseBitmap(), NULL);
SkMatrix m = context->getMatrix();
m.setTranslateX(-localMatrix.getTranslateX() + SK_Scalar1);

@@ -1016,6 +1016,17 @@ bool SkPerlinNoiseShader::asFragmentProcessor(GrContext* context, const SkPaint&
SkDELETE(paintingData);
*fp = NULL;
}
// Unlock immediately, this is not great, but we don't have a way of
// knowing when else to unlock it currently. TODO: Remove this when
// unref becomes the unlock replacement for all types of textures.
if (permutationsTexture) {
GrUnlockAndUnrefCachedBitmapTexture(permutationsTexture);
}
if (noiseTexture) {
GrUnlockAndUnrefCachedBitmapTexture(noiseTexture);
}
return true;
}
@@ -419,6 +419,7 @@ void ColorTableEffect::onComputeInvariantOutput(InvariantOutput* inout) const {
inout->fIsSingleComponent = false;
}
///////////////////////////////////////////////////////////////////////////////
GR_DEFINE_FRAGMENT_PROCESSOR_TEST(ColorTableEffect);

@@ -434,10 +435,19 @@ GrFragmentProcessor* ColorTableEffect::TestCreate(SkRandom* random,
GrFragmentProcessor* SkTable_ColorFilter::asFragmentProcessor(GrContext* context) const {
SkBitmap bitmap;
GrFragmentProcessor* fp = NULL;
this->asComponentTable(&bitmap);
// passing NULL because this effect does no tiling or filtering.
SkAutoTUnref<GrTexture> texture(GrRefCachedBitmapTexture(context, bitmap, NULL));
return texture ? ColorTableEffect::Create(texture, fFlags) : NULL;
GrTexture* texture = GrLockAndRefCachedBitmapTexture(context, bitmap, NULL);
if (texture) {
fp = ColorTableEffect::Create(texture, fFlags);
// Unlock immediately, this is not great, but we don't have a way of
// knowing when else to unlock it currently. TODO: Remove this when
// unref becomes the unlock replacement for all types of textures.
GrUnlockAndUnrefCachedBitmapTexture(texture);
}
return fp;
}
#endif // SK_SUPPORT_GPU
@@ -1164,10 +1164,15 @@ GrGradientEffect::GrGradientEffect(GrContext* ctx,
fCoordTransform.reset(kCoordSet, matrix, fAtlas->getTexture());
fTextureAccess.reset(fAtlas->getTexture(), params);
} else {
SkAutoTUnref<GrTexture> texture(GrRefCachedBitmapTexture(ctx, bitmap, &params));
GrTexture* texture = GrLockAndRefCachedBitmapTexture(ctx, bitmap, &params);
fCoordTransform.reset(kCoordSet, matrix, texture);
fTextureAccess.reset(texture, params);
fYCoord = SK_ScalarHalf;
// Unlock immediately, this is not great, but we don't have a way of
// knowing when else to unlock it currently, so it may get purged from
// the cache, but it'll still be ref'd until it's no longer being used.
GrUnlockAndUnrefCachedBitmapTexture(texture);
}
this->addTextureAccess(&fTextureAccess);
}
@@ -204,9 +204,7 @@ private:
fLastClipGenID = clipGenID;
// HACK: set the last param to true to indicate that this request is at
// flush time and therefore we require a scratch texture with no pending IO operations.
fLastMask.set(context, desc, GrContext::kApprox_ScratchTexMatch, /*flushing=*/true);
fLastMask.set(context, desc);
fLastBound = bound;
}
@@ -70,6 +70,25 @@ static const int DRAW_BUFFER_IBPOOL_PREALLOC_BUFFERS = 4;
#define ASSERT_OWNED_RESOURCE(R) SkASSERT(!(R) || (R)->getContext() == this)
GrTexture* GrAutoScratchTexture::detach() {
if (NULL == fTexture) {
return NULL;
}
GrTexture* texture = fTexture;
fTexture = NULL;
// This GrAutoScratchTexture has a ref from lockAndRefScratchTexture, which we give up now.
// The cache also has a ref which we are lending to the caller of detach(). When the caller
// lets go of the ref and the ref count goes to 0 internal_dispose will see this flag is
// set and re-ref the texture, thereby restoring the cache's ref.
SkASSERT(!texture->unique());
texture->texturePriv().setFlag((GrTextureFlags) GrTexture::kReturnToCache_FlagBit);
texture->unref();
SkASSERT(texture->getCacheEntry());
return texture;
}
// Glorified typedef to avoid including GrDrawState.h in GrContext.h
class GrContext::AutoRestoreEffects : public GrDrawState::AutoRestoreEffects {};
@@ -440,81 +459,159 @@ GrTexture* GrContext::createTexture(const GrTextureParams* params,
return texture;
}
bool GrContext::createNewScratchTexture(const GrTextureDesc& desc) {
SkAutoTUnref<GrTexture> texture(fGpu->createTexture(desc, NULL, 0));
if (!texture) {
return false;
static GrTexture* create_scratch_texture(GrGpu* gpu,
GrResourceCache* resourceCache,
const GrTextureDesc& desc) {
GrTexture* texture = gpu->createTexture(desc, NULL, 0);
if (texture) {
GrResourceKey key = GrTexturePriv::ComputeScratchKey(texture->desc());
// Adding a resource could put us overbudget. Try to free up the
// necessary space before adding it.
resourceCache->purgeAsNeeded(1, texture->gpuMemorySize());
// Make the resource exclusive so future 'find' calls don't return it
resourceCache->addResource(key, texture, GrResourceCache::kHide_OwnershipFlag);
}
fResourceCache->addResource(texture->getScratchKey(), texture);
texture->fIsScratch = GrIORef::kYes_IsScratch;
return true;
return texture;
}
GrTexture* GrContext::lockAndRefScratchTexture(const GrTextureDesc& inDesc, ScratchTexMatch match,
bool calledDuringFlush) {
GrTexture* GrContext::lockAndRefScratchTexture(const GrTextureDesc& inDesc, ScratchTexMatch match) {
// kNoStencil has no meaning if kRT isn't set.
SkASSERT((inDesc.fFlags & kRenderTarget_GrTextureFlagBit) ||
!(inDesc.fFlags & kNoStencil_GrTextureFlagBit));
// Make sure caller has checked for renderability if kRT is set.
SkASSERT(!(inDesc.fFlags & kRenderTarget_GrTextureFlagBit) ||
this->isConfigRenderable(inDesc.fConfig, inDesc.fSampleCnt > 0));
// Renderable A8 targets are not universally supported (e.g., not on ANGLE)
SkASSERT(this->isConfigRenderable(kAlpha_8_GrPixelConfig, inDesc.fSampleCnt > 0) ||
!(inDesc.fFlags & kRenderTarget_GrTextureFlagBit) ||
(inDesc.fConfig != kAlpha_8_GrPixelConfig));
SkTCopyOnFirstWrite<GrTextureDesc> desc(inDesc);
if (!fGpu->caps()->reuseScratchTextures() &&
!(inDesc.fFlags & kRenderTarget_GrTextureFlagBit)) {
// If we're never recycling this texture we can always make it the right size
return create_scratch_texture(fGpu, fResourceCache, inDesc);
}
// There is a regression here in that when reuseScratchTextures is false, the texture won't be
// freed when its ref and io counts reach zero. TODO: Make GrResourceCache2 free scratch
// resources immediately after it is the sole owner and reuseScratchTextures is false.
if (fGpu->caps()->reuseScratchTextures() || (desc->fFlags & kRenderTarget_GrTextureFlagBit)) {
GrTextureFlags origFlags = desc->fFlags;
if (kApprox_ScratchTexMatch == match) {
// bin by pow2 with a reasonable min
static const int MIN_SIZE = 16;
GrTextureDesc* wdesc = desc.writable();
wdesc->fWidth = SkTMax(MIN_SIZE, GrNextPow2(desc->fWidth));
wdesc->fHeight = SkTMax(MIN_SIZE, GrNextPow2(desc->fHeight));
GrTextureDesc desc = inDesc;
if (kApprox_ScratchTexMatch == match) {
// bin by pow2 with a reasonable min
static const int MIN_SIZE = 16;
desc.fWidth = SkTMax(MIN_SIZE, GrNextPow2(desc.fWidth));
desc.fHeight = SkTMax(MIN_SIZE, GrNextPow2(desc.fHeight));
}
GrGpuResource* resource = NULL;
int origWidth = desc.fWidth;
int origHeight = desc.fHeight;
do {
GrResourceKey key = GrTexturePriv::ComputeScratchKey(desc);
// Ensure we have exclusive access to the texture so future 'find' calls don't return it
resource = fResourceCache->find(key, GrResourceCache::kHide_OwnershipFlag);
if (resource) {
resource->ref();
break;
}
if (kExact_ScratchTexMatch == match) {
break;
}
// We had a cache miss and we are in approx mode, relax the fit of the flags.
// We no longer try to reuse textures that were previously used as render targets in
// situations where no RT is needed; doing otherwise can confuse the video driver and
// cause significant performance problems in some cases.
if (desc.fFlags & kNoStencil_GrTextureFlagBit) {
desc.fFlags = desc.fFlags & ~kNoStencil_GrTextureFlagBit;
} else {
break;
}
do {
GrResourceKey key = GrTexturePriv::ComputeScratchKey(*desc);
GrGpuResource* resource = fResourceCache2->findAndRefScratchResource(key,
calledDuringFlush);
if (resource) {
fResourceCache->makeResourceMRU(resource);
return static_cast<GrTexture*>(resource);
}
} while (true);
if (kExact_ScratchTexMatch == match) {
break;
}
// We had a cache miss and we are in approx mode, relax the fit of the flags.
// We no longer try to reuse textures that were previously used as render targets in
// situations where no RT is needed; doing otherwise can confuse the video driver and
// cause significant performance problems in some cases.
if (desc->fFlags & kNoStencil_GrTextureFlagBit) {
desc.writable()->fFlags = desc->fFlags & ~kNoStencil_GrTextureFlagBit;
} else {
break;
}
} while (true);
desc.writable()->fFlags = origFlags;
if (NULL == resource) {
desc.fFlags = inDesc.fFlags;
desc.fWidth = origWidth;
desc.fHeight = origHeight;
resource = create_scratch_texture(fGpu, fResourceCache, desc);
}
if (!this->createNewScratchTexture(*desc)) {
return NULL;
}
// If we got here then we didn't find a cached texture, but we just added one.
GrResourceKey key = GrTexturePriv::ComputeScratchKey(*desc);
GrGpuResource* resource = fResourceCache2->findAndRefScratchResource(key, calledDuringFlush);
SkASSERT(resource);
return static_cast<GrTexture*>(resource);
}
void GrContext::addExistingTextureToCache(GrTexture* texture) {
if (NULL == texture) {
return;
}
// This texture should already have a cache entry since it was once
// attached
SkASSERT(texture->getCacheEntry());
// Conceptually, the cache entry is going to assume responsibility
// for the creation ref. Assert refcnt == 1.
// Except that this also gets called when the texture is prematurely
// abandoned. In that case the ref count may be > 1.
// SkASSERT(texture->unique());
if (fGpu->caps()->reuseScratchTextures() || texture->asRenderTarget()) {
// Since this texture came from an AutoScratchTexture it should
// still be in the exclusive pile. Recycle it.
fResourceCache->makeNonExclusive(texture->getCacheEntry());
this->purgeCache();
} else {
// When we aren't reusing textures we know this scratch texture
// will never be reused and would be just wasting time in the cache
fResourceCache->makeNonExclusive(texture->getCacheEntry());
fResourceCache->deleteResource(texture->getCacheEntry());
}
}
void GrContext::unlockScratchTexture(GrTexture* texture) {
if (texture->wasDestroyed()) {
if (texture->getCacheEntry()->key().isScratch()) {
// This texture was detached from the cache but the cache still had a ref to it but
// not a pointer to it. This will unref the texture and delete its resource cache
// entry.
delete texture->getCacheEntry();
}
return;
}
ASSERT_OWNED_RESOURCE(texture);
SkASSERT(texture->getCacheEntry());
// If this is a scratch texture we detached it from the cache
// while it was locked (to avoid two callers simultaneously getting
// the same texture).
if (texture->getCacheEntry()->key().isScratch()) {
if (fGpu->caps()->reuseScratchTextures() || texture->asRenderTarget()) {
fResourceCache->makeNonExclusive(texture->getCacheEntry());
this->purgeCache();
} else if (texture->unique()) {
// Only the cache now knows about this texture. Since we're never
// reusing scratch textures (in this code path) it would just be
// wasting time sitting in the cache.
fResourceCache->makeNonExclusive(texture->getCacheEntry());
fResourceCache->deleteResource(texture->getCacheEntry());
} else {
// In this case (there is still a non-cache ref) but we don't really
// want to readd it to the cache (since it will never be reused).
// Instead, give up the cache's ref and leave the decision up to
// addExistingTextureToCache once its ref count reaches 0. For
// this to work we need to leave it in the exclusive list.
texture->texturePriv().setFlag((GrTextureFlags) GrTexture::kReturnToCache_FlagBit);
// Give up the cache's ref to the texture
texture->unref();
}
}
}
void GrContext::purgeCache() {
if (fResourceCache) {
fResourceCache->purgeAsNeeded();
}
}
bool GrContext::OverbudgetCB(void* data) {
SkASSERT(data);
@@ -245,6 +245,7 @@ void GrLayerCache::unlock(GrCachedLayer* layer) {
#endif
} else {
fContext->unlockScratchTexture(layer->texture());
layer->setTexture(NULL, GrIRect16::MakeEmpty());
}
@@ -82,10 +82,14 @@ GrResourceCache::GrResourceCache(int maxCount, size_t maxBytes) :
#if GR_CACHE_STATS
fHighWaterEntryCount = 0;
fHighWaterEntryBytes = 0;
fHighWaterClientDetachedCount = 0;
fHighWaterClientDetachedBytes = 0;
#endif
fEntryCount = 0;
fEntryBytes = 0;
fClientDetachedCount = 0;
fClientDetachedBytes = 0;
fPurging = false;
@@ -132,26 +136,55 @@ void GrResourceCache::setLimits(int maxResources, size_t maxResourceBytes) {
}
}
void GrResourceCache::internalDetach(GrResourceCacheEntry* entry) {
void GrResourceCache::internalDetach(GrResourceCacheEntry* entry,
BudgetBehaviors behavior) {
fList.remove(entry);
fEntryCount -= 1;
fEntryBytes -= entry->fCachedSize;
}
void GrResourceCache::attachToHead(GrResourceCacheEntry* entry) {
fList.addToHead(entry);
fEntryCount += 1;
fEntryBytes += entry->fCachedSize;
// update our stats
if (kIgnore_BudgetBehavior == behavior) {
fClientDetachedCount += 1;
fClientDetachedBytes += entry->fCachedSize;
#if GR_CACHE_STATS
if (fHighWaterEntryCount < fEntryCount) {
fHighWaterEntryCount = fEntryCount;
}
if (fHighWaterEntryBytes < fEntryBytes) {
fHighWaterEntryBytes = fEntryBytes;
}
if (fHighWaterClientDetachedCount < fClientDetachedCount) {
fHighWaterClientDetachedCount = fClientDetachedCount;
}
if (fHighWaterClientDetachedBytes < fClientDetachedBytes) {
fHighWaterClientDetachedBytes = fClientDetachedBytes;
}
#endif
} else {
SkASSERT(kAccountFor_BudgetBehavior == behavior);
fEntryCount -= 1;
fEntryBytes -= entry->fCachedSize;
}
}
void GrResourceCache::attachToHead(GrResourceCacheEntry* entry,
BudgetBehaviors behavior) {
fList.addToHead(entry);
// update our stats
if (kIgnore_BudgetBehavior == behavior) {
fClientDetachedCount -= 1;
fClientDetachedBytes -= entry->fCachedSize;
} else {
SkASSERT(kAccountFor_BudgetBehavior == behavior);
fEntryCount += 1;
fEntryBytes += entry->fCachedSize;
#if GR_CACHE_STATS
if (fHighWaterEntryCount < fEntryCount) {
fHighWaterEntryCount = fEntryCount;
}
if (fHighWaterEntryBytes < fEntryBytes) {
fHighWaterEntryBytes = fEntryBytes;
}
#endif
}
}
// This functor just searches for an entry with only a single ref (from
@@ -160,40 +193,41 @@ void GrResourceCache::attachToHead(GrResourceCacheEntry* entry) {
class GrTFindUnreffedFunctor {
public:
bool operator()(const GrResourceCacheEntry* entry) const {
return entry->resource()->isPurgable();
return entry->resource()->unique();
}
};
void GrResourceCache::makeResourceMRU(GrGpuResource* resource) {
GrResourceCacheEntry* entry = resource->getCacheEntry();
if (entry) {
this->internalDetach(entry);
this->attachToHead(entry);
}
}
GrGpuResource* GrResourceCache::find(const GrResourceKey& key) {
GrGpuResource* GrResourceCache::find(const GrResourceKey& key, uint32_t ownershipFlags) {
GrAutoResourceCacheValidate atcv(this);
GrResourceCacheEntry* entry = NULL;
entry = fCache.find(key);
if (ownershipFlags & kNoOtherOwners_OwnershipFlag) {
GrTFindUnreffedFunctor functor;
entry = fCache.find<GrTFindUnreffedFunctor>(key, functor);
} else {
entry = fCache.find(key);
}
if (NULL == entry) {
return NULL;
}
// Make this resource MRU
this->internalDetach(entry);
this->attachToHead(entry);
if (ownershipFlags & kHide_OwnershipFlag) {
this->makeExclusive(entry);
} else {
// Make this resource MRU
this->internalDetach(entry);
this->attachToHead(entry);
}
// GrResourceCache2 is responsible for scratch resources.
SkASSERT(GrIORef::kNo_IsScratch == entry->resource()->fIsScratch);
return entry->fResource;
}
void GrResourceCache::addResource(const GrResourceKey& key, GrGpuResource* resource) {
void GrResourceCache::addResource(const GrResourceKey& key,
GrGpuResource* resource,
uint32_t ownershipFlags) {
SkASSERT(NULL == resource->getCacheEntry());
// we don't expect to create new resources during a purge. In theory
// this could cause purgeAsNeeded() into an infinite loop (e.g.
@@ -207,15 +241,77 @@ void GrResourceCache::addResource(const GrResourceKey& key, GrGpuResource* resou
this->attachToHead(entry);
fCache.insert(key, entry);
if (ownershipFlags & kHide_OwnershipFlag) {
this->makeExclusive(entry);
}
}
void GrResourceCache::makeExclusive(GrResourceCacheEntry* entry) {
GrAutoResourceCacheValidate atcv(this);
SkASSERT(!entry->fIsExclusive);
entry->fIsExclusive = true;
// When scratch textures are detached (to hide them from future finds) they
// still count against the resource budget
this->internalDetach(entry, kIgnore_BudgetBehavior);
fCache.remove(entry->key(), entry);
#ifdef SK_DEBUG
fExclusiveList.addToHead(entry);
#endif
}
void GrResourceCache::removeInvalidResource(GrResourceCacheEntry* entry) {
// If the resource went invalid while it was detached then purge it
// This can happen when a 3D context was lost,
// the client called GrContext::abandonContext() to notify Gr,
// and then later an SkGpuDevice's destructor releases its backing
// texture (which was invalidated at contextDestroyed time).
// TODO: Safely delete the GrResourceCacheEntry as well.
fClientDetachedCount -= 1;
fEntryCount -= 1;
fClientDetachedBytes -= entry->fCachedSize;
fEntryBytes -= entry->fCachedSize;
entry->fCachedSize = 0;
}
void GrResourceCache::makeNonExclusive(GrResourceCacheEntry* entry) {
GrAutoResourceCacheValidate atcv(this);
#ifdef SK_DEBUG
fExclusiveList.remove(entry);
#endif
if (!entry->resource()->wasDestroyed()) {
// Since scratch textures still count against the cache budget even
// when they have been removed from the cache, re-adding them doesn't
// alter the budget information.
attachToHead(entry, kIgnore_BudgetBehavior);
fCache.insert(entry->key(), entry);
SkASSERT(entry->fIsExclusive);
entry->fIsExclusive = false;
} else {
this->removeInvalidResource(entry);
}
}
void GrResourceCache::didIncreaseResourceSize(const GrResourceCacheEntry* entry, size_t amountInc) {
fEntryBytes += amountInc;
if (entry->fIsExclusive) {
fClientDetachedBytes += amountInc;
}
this->purgeAsNeeded();
}
void GrResourceCache::didDecreaseResourceSize(const GrResourceCacheEntry* entry, size_t amountDec) {
fEntryBytes -= amountDec;
if (entry->fIsExclusive) {
fClientDetachedBytes -= amountDec;
}
#ifdef SK_DEBUG
this->validate();
#endif
@@ -263,6 +359,13 @@ void GrResourceCache::purgeInvalidated() {
fInvalidationInbox.poll(&invalidated);
for (int i = 0; i < invalidated.count(); i++) {
// We're somewhat missing an opportunity here. We could use the
// default find functor that gives us back resources whether we own
// them exclusively or not, and when they're not exclusively owned mark
// them for purging later when they do become exclusively owned.
//
// This is complicated and confusing. May try this in the future. For
// now, these resources are just LRU'd as if we never got the message.
while (GrResourceCacheEntry* entry = fCache.find(invalidated[i].key, GrTFindUnreffedFunctor())) {
this->deleteResource(entry);
}

@@ -270,7 +373,7 @@ void GrResourceCache::purgeInvalidated() {
}
void GrResourceCache::deleteResource(GrResourceCacheEntry* entry) {
SkASSERT(entry->fResource->isPurgable());
SkASSERT(entry->fResource->unique());
// remove from our cache
fCache.remove(entry->key(), entry);

@@ -309,7 +412,7 @@ void GrResourceCache::internalPurge(int extraCount, size_t extraBytes) {
}
GrResourceCacheEntry* prev = iter.prev();
if (entry->fResource->isPurgable()) {
if (entry->fResource->unique()) {
changed = true;
this->deleteResource(entry);
}

@@ -332,7 +435,14 @@ void GrResourceCache::purgeAllUnlocked() {
this->purgeAsNeeded();
#ifdef SK_DEBUG
SkASSERT(fExclusiveList.countEntries() == fClientDetachedCount);
SkASSERT(countBytes(fExclusiveList) == fClientDetachedBytes);
if (!fCache.count()) {
// Items may have been detached from the cache (such as the backing
// texture for an SkGpuDevice). The above purge would not have removed
// them.
SkASSERT(fEntryCount == fClientDetachedCount);
SkASSERT(fEntryBytes == fClientDetachedBytes);
SkASSERT(fList.isEmpty());
}
#endif

@@ -364,26 +474,43 @@ static bool both_zero_or_nonzero(int count, size_t bytes) {
void GrResourceCache::validate() const {
fList.validate();
fExclusiveList.validate();
SkASSERT(both_zero_or_nonzero(fEntryCount, fEntryBytes));
SkASSERT(fEntryCount == fCache.count());
SkASSERT(both_zero_or_nonzero(fClientDetachedCount, fClientDetachedBytes));
SkASSERT(fClientDetachedBytes <= fEntryBytes);
SkASSERT(fClientDetachedCount <= fEntryCount);
SkASSERT((fEntryCount - fClientDetachedCount) == fCache.count());
EntryList::Iter iter;
// check that the shareable entries are okay
const GrResourceCacheEntry* entry = iter.init(const_cast<EntryList&>(fList),
// check that the exclusively held entries are okay
const GrResourceCacheEntry* entry = iter.init(const_cast<EntryList&>(fExclusiveList),
EntryList::Iter::kHead_IterStart);
for ( ; entry; entry = iter.next()) {
entry->validate();
}
// check that the shareable entries are okay
entry = iter.init(const_cast<EntryList&>(fList), EntryList::Iter::kHead_IterStart);
int count = 0;
for ( ; entry; entry = iter.next()) {
entry->validate();
SkASSERT(fCache.find(entry->key()));
count += 1;
}
SkASSERT(count == fEntryCount);
SkASSERT(count == fEntryCount - fClientDetachedCount);
size_t bytes = this->countBytes(fList);
SkASSERT(bytes == fEntryBytes);
SkASSERT(fList.countEntries() == fEntryCount);
size_t bytes = countBytes(fList);
SkASSERT(bytes == fEntryBytes - fClientDetachedBytes);
bytes = countBytes(fExclusiveList);
SkASSERT(bytes == fClientDetachedBytes);
SkASSERT(fList.countEntries() == fEntryCount - fClientDetachedCount);
SkASSERT(fExclusiveList.countEntries() == fClientDetachedCount);
}
#endif // SK_DEBUG

@@ -407,6 +534,10 @@ void GrResourceCache::printStats() {
fEntryCount, locked, fHighWaterEntryCount);
SkDebugf("\t\tEntry Bytes: current %d high %d\n",
fEntryBytes, fHighWaterEntryBytes);
SkDebugf("\t\tDetached Entry Count: current %d high %d\n",
fClientDetachedCount, fHighWaterClientDetachedCount);
SkDebugf("\t\tDetached Bytes: current %d high %d\n",
fClientDetachedBytes, fHighWaterClientDetachedBytes);
}
#endif
@@ -141,13 +141,26 @@ public:
*/
int getCachedResourceCount() const { return fEntryCount; }
// For a found or added resource to be completely exclusive to the caller
// both the kNoOtherOwners and kHide flags need to be specified
enum OwnershipFlags {
kNoOtherOwners_OwnershipFlag = 0x1, // found/added resource has no other owners
kHide_OwnershipFlag = 0x2 // found/added resource is hidden from future 'find's
};
/**
* Search for an entry with the same Key. If found, return it.
* If not found, return null.
* If ownershipFlags includes kNoOtherOwners and a resource is returned
* then that resource has no other refs to it.
* If ownershipFlags includes kHide and a resource is returned then that
* resource will not be returned from future 'find' calls until it is
* 'freed' (and recycled) or makeNonExclusive is called.
* For a resource to be completely exclusive to a caller both kNoOtherOwners
* and kHide must be specified.
*/
GrGpuResource* find(const GrResourceKey& key);
void makeResourceMRU(GrGpuResource*);
GrGpuResource* find(const GrResourceKey& key,
uint32_t ownershipFlags = 0);
/**
* Add the new resource to the cache (by creating a new cache entry based

@@ -155,8 +168,14 @@ public:
*
* Ownership of the resource is transferred to the resource cache,
* which will unref() it when it is purged or deleted.
*
* If ownershipFlags includes kHide, subsequent calls to 'find' will not
* return 'resource' until it is 'freed' (and recycled) or makeNonExclusive
* is called.
*/
void addResource(const GrResourceKey& key, GrGpuResource* resource);
void addResource(const GrResourceKey& key,
GrGpuResource* resource,
uint32_t ownershipFlags = 0);
/**
* Determines if the cache contains an entry matching a key. If a matching

@@ -164,6 +183,20 @@ public:
*/
bool hasKey(const GrResourceKey& key) const { return SkToBool(fCache.find(key)); }
/**
* Hide 'entry' so that future searches will not find it. Such
* hidden entries will not be purged. The entry still counts against
* the cache's budget and should be made non-exclusive when exclusive access
* is no longer needed.
*/
void makeExclusive(GrResourceCacheEntry* entry);
/**
* Restore 'entry' so that it can be found by future searches. 'entry'
* will also be purgeable (provided its lock count is now 0.)
*/
void makeNonExclusive(GrResourceCacheEntry* entry);
/**
* Notify the cache that the size of a resource has changed.
*/

@@ -204,8 +237,15 @@ public:
#endif
private:
void internalDetach(GrResourceCacheEntry*);
void attachToHead(GrResourceCacheEntry*);
enum BudgetBehaviors {
kAccountFor_BudgetBehavior,
kIgnore_BudgetBehavior
};
void internalDetach(GrResourceCacheEntry*, BudgetBehaviors behavior = kAccountFor_BudgetBehavior);
void attachToHead(GrResourceCacheEntry*, BudgetBehaviors behavior = kAccountFor_BudgetBehavior);
void removeInvalidResource(GrResourceCacheEntry* entry);
SkTMultiMap<GrResourceCacheEntry, GrResourceKey> fCache;

@@ -213,6 +253,11 @@ private:
typedef SkTInternalLList<GrResourceCacheEntry> EntryList;
EntryList fList;
#ifdef SK_DEBUG
// These objects cannot be returned by a search
EntryList fExclusiveList;
#endif
// our budget, used in purgeAsNeeded()
int fMaxCount;
size_t fMaxBytes;

@@ -221,10 +266,14 @@ private:
#if GR_CACHE_STATS
int fHighWaterEntryCount;
size_t fHighWaterEntryBytes;
int fHighWaterClientDetachedCount;
size_t fHighWaterClientDetachedBytes;
#endif
int fEntryCount;
size_t fEntryBytes;
int fClientDetachedCount;
size_t fClientDetachedBytes;
// prevents recursive purging
bool fPurging;
@@ -8,8 +8,7 @@
#include "GrResourceCache2.h"
#include "GrGpuResource.h"
#include "SkRefCnt.h"
#include "GrGpuResource.h"
GrResourceCache2::~GrResourceCache2() {
this->releaseAll();

@@ -56,31 +55,3 @@ void GrResourceCache2::releaseAll() {
SkASSERT(!fScratchMap.count());
SkASSERT(!fCount);
}
class GrResourceCache2::AvailableForScratchUse {
public:
AvailableForScratchUse(bool calledDuringFlush) : fFlushing(calledDuringFlush) { }
bool operator()(const GrGpuResource* resource) const {
if (fFlushing) {
// If this request is coming during draw buffer flush then no refs are allowed
// either by drawing code or for pending io operations.
// This will be removed when flush no longer creates resources.
return resource->reffedOnlyByCache() && !resource->internalHasPendingIO() &&
GrIORef::kYes_IsScratch == resource->fIsScratch;
} else {
// Because duties are currently shared between GrResourceCache and GrResourceCache2, the
// current interpretation of this rule is that only GrResourceCache has a ref but that
// it has been marked as a scratch resource.
return resource->reffedOnlyByCache() && GrIORef::kYes_IsScratch == resource->fIsScratch;
}
}
private:
bool fFlushing;
};
GrGpuResource* GrResourceCache2::findAndRefScratchResource(const GrResourceKey& scratchKey,
bool calledDuringFlush) {
SkASSERT(scratchKey.isScratch());
return SkSafeRef(fScratchMap.find(scratchKey, AvailableForScratchUse(calledDuringFlush)));
}
@@ -24,17 +24,14 @@ public:
GrResourceCache2() : fCount(0) {};
~GrResourceCache2();
void insertResource(GrGpuResource*);
void insertResource(GrGpuResource* resource);
void removeResource(GrGpuResource*);
void removeResource(GrGpuResource* resource);
void abandonAll();
void releaseAll();
GrGpuResource* findAndRefScratchResource(const GrResourceKey& scratchKey,
bool calledDuringFlush);
private:
#ifdef SK_DEBUG
bool isInCache(const GrGpuResource* r) const {

@@ -42,8 +39,8 @@ private:
}
#endif
class AvailableForScratchUse;
void removeScratch(const GrGpuResource* resource);
struct ScratchMapTraits {
static const GrResourceKey& GetKey(const GrGpuResource& r) {
return r.getScratchKey();
@@ -21,6 +21,27 @@ GrTexture::~GrTexture() {
}
}
/**
* This method allows us to interrupt the normal deletion process and place
* textures back in the texture cache when their ref count goes to zero.
*/
void GrTexture::internal_dispose() const {
if (this->texturePriv().isSetFlag((GrTextureFlags) GrTexture::kReturnToCache_FlagBit) &&
this->INHERITED::getContext()) {
GrTexture* nonConstThis = const_cast<GrTexture *>(this);
this->ref(); // restore ref count to initial setting
nonConstThis->texturePriv().resetFlag((GrTextureFlags) kReturnToCache_FlagBit);
nonConstThis->INHERITED::getContext()->addExistingTextureToCache(nonConstThis);
// Note: "this" texture might be freed inside addExistingTextureToCache
// if it is purged.
return;
}
this->INHERITED::internal_dispose();
}
void GrTexture::dirtyMipMaps(bool mipMapsDirty) {
if (mipMapsDirty) {
if (kValid_MipMapsStatus == fMipMapsStatus) {

@@ -81,12 +102,27 @@ void GrTexture::writePixels(int left, int top, int width, int height,
pixelOpsFlags);
}
void GrTexture::abandonReleaseCommon() {
// In debug builds the resource cache tracks removed/exclusive textures and has an unref'ed ptr.
// After abandon() or release() the resource cache will be unreachable (getContext() == NULL).
// So we readd the texture to the cache here so that it is removed from the exclusive list and
// there is no longer an unref'ed ptr to the texture in the cache.
if (this->texturePriv().isSetFlag((GrTextureFlags)kReturnToCache_FlagBit)) {
SkASSERT(!this->wasDestroyed());
this->ref(); // restores the ref the resource cache gave up when it marked this exclusive.
this->texturePriv().resetFlag((GrTextureFlags) kReturnToCache_FlagBit);
this->getContext()->addExistingTextureToCache(this);
}
}
void GrTexture::onRelease() {
this->abandonReleaseCommon();
SkASSERT(!this->texturePriv().isSetFlag((GrTextureFlags) kReturnToCache_FlagBit));
INHERITED::onRelease();
}
void GrTexture::onAbandon() {
this->abandonReleaseCommon();
if (fRenderTarget.get()) {
fRenderTarget->abandon();
}
@@ -77,37 +77,50 @@ enum { kDefaultImageFilterCacheSize = 32 * 1024 * 1024 };
///////////////////////////////////////////////////////////////////////////////
// Helper for turning a bitmap into a texture. If the bitmap is GrTexture backed this
// just accesses the backing GrTexture. Otherwise, it creates a cached texture
// representation and releases it in the destructor.
class AutoBitmapTexture : public SkNoncopyable {
public:
AutoBitmapTexture() {}
AutoBitmapTexture(GrContext* context,
const SkBitmap& bitmap,
const GrTextureParams* params,
GrTexture** texture) {
SkASSERT(texture);
*texture = this->set(context, bitmap, params);
class SkGpuDevice::SkAutoCachedTexture : public ::SkNoncopyable {
public:
SkAutoCachedTexture()
: fDevice(NULL)
, fTexture(NULL) {
}
GrTexture* set(GrContext* context,
const SkBitmap& bitmap,
const GrTextureParams* params) {
// Either get the texture directly from the bitmap, or else use the cache and
// remember to unref it.
if (GrTexture* bmpTexture = bitmap.getTexture()) {
fTexture.reset(NULL);
return bmpTexture;
} else {
fTexture.reset(GrRefCachedBitmapTexture(context, bitmap, params));
return fTexture.get();
SkAutoCachedTexture(SkGpuDevice* device,
const SkBitmap& bitmap,
const GrTextureParams* params,
GrTexture** texture)
: fDevice(NULL)
, fTexture(NULL) {
SkASSERT(texture);
*texture = this->set(device, bitmap, params);
}
~SkAutoCachedTexture() {
if (fTexture) {
GrUnlockAndUnrefCachedBitmapTexture(fTexture);
}
}
GrTexture* set(SkGpuDevice* device,
const SkBitmap& bitmap,
const GrTextureParams* params) {
if (fTexture) {
GrUnlockAndUnrefCachedBitmapTexture(fTexture);
fTexture = NULL;
}
fDevice = device;
GrTexture* result = (GrTexture*)bitmap.getTexture();
if (NULL == result) {
// Cannot return the native texture so look it up in our cache
fTexture = GrLockAndRefCachedBitmapTexture(device->context(), bitmap, params);
result = fTexture;
}
return result;
}
private:
SkAutoTUnref<GrTexture> fTexture;
SkGpuDevice* fDevice;
GrTexture* fTexture;
};
///////////////////////////////////////////////////////////////////////////////
@@ -140,7 +153,8 @@ SkGpuDevice::SkGpuDevice(GrSurface* surface, const SkSurfaceProps& props, unsign
fRenderTarget = SkRef(surface->asRenderTarget());
SkImageInfo info = surface->surfacePriv().info();
SkPixelRef* pr = SkNEW_ARGS(SkGrPixelRef, (info, surface));
SkPixelRef* pr = SkNEW_ARGS(SkGrPixelRef,
(info, surface, SkToBool(flags & kCached_Flag)));
fLegacyBitmap.setInfo(info);
fLegacyBitmap.setPixelRef(pr)->unref();

@@ -1285,7 +1299,7 @@ void SkGpuDevice::internalDrawBitmap(const SkBitmap& bitmap,
bitmap.height() <= fContext->getMaxTextureSize());
GrTexture* texture;
AutoBitmapTexture abt(fContext, bitmap, &params, &texture);
SkAutoCachedTexture act(this, bitmap, &params, &texture);
if (NULL == texture) {
return;
}

@@ -1380,7 +1394,7 @@ void SkGpuDevice::drawSprite(const SkDraw& draw, const SkBitmap& bitmap,
GrTexture* texture;
// draw sprite uses the default texture params
AutoBitmapTexture abt(fContext, bitmap, NULL, &texture);
SkAutoCachedTexture act(this, bitmap, NULL, &texture);
SkImageFilter* filter = paint.getImageFilter();
// This bitmap will own the filtered result as a texture.

@@ -1557,7 +1571,7 @@ bool SkGpuDevice::filterImage(const SkImageFilter* filter, const SkBitmap& src,
GrTexture* texture;
// We assume here that the filter will not attempt to tile the src. Otherwise, this cache lookup
// must be pushed upstack.
AutoBitmapTexture abt(fContext, src, NULL, &texture);
SkAutoCachedTexture act(this, src, NULL, &texture);
return filter_texture(this, fContext, texture, filter, src.width(), src.height(), ctx,
result, offset);

@@ -1788,6 +1802,7 @@ SkBaseDevice* SkGpuDevice::onCreateDevice(const SkImageInfo& info, Usage usage)
#if CACHE_COMPATIBLE_DEVICE_TEXTURES
// layers are never draw in repeat modes, so we can request an approx
// match and ignore any padding.
flags |= kCached_Flag;
const GrContext::ScratchTexMatch match = (kSaveLayer_Usage == usage) ?
GrContext::kApprox_ScratchTexMatch :
GrContext::kExact_ScratchTexMatch;
@@ -34,7 +34,8 @@ class SK_API SkGpuDevice : public SkBaseDevice {
public:
enum Flags {
kNeedClear_Flag = 1 << 0, //!< Surface requires an initial clear
kDFFonts_Flag = 1 << 1, //!< Surface should render fonts using signed distance fields
kCached_Flag = 1 << 1, //!< Surface is cached and needs to be unlocked when released
kDFFonts_Flag = 1 << 2, //!< Surface should render fonts using signed distance fields
};
/**

@@ -116,6 +117,9 @@ public:
const SkImageFilter::Context&,
SkBitmap*, SkIPoint*) SK_OVERRIDE;
class SkAutoCachedTexture; // used internally
protected:
virtual bool onReadPixels(const SkImageInfo&, void*, size_t, int, int) SK_OVERRIDE;
virtual bool onWritePixels(const SkImageInfo&, const void*, size_t, int, int) SK_OVERRIDE;
@@ -369,9 +369,9 @@ bool GrIsBitmapInCache(const GrContext* ctx,
return ctx->isTextureInCache(desc, cacheID, params);
}
GrTexture* GrRefCachedBitmapTexture(GrContext* ctx,
const SkBitmap& bitmap,
const GrTextureParams* params) {
GrTexture* GrLockAndRefCachedBitmapTexture(GrContext* ctx,
const SkBitmap& bitmap,
const GrTextureParams* params) {
GrTexture* result = NULL;
bool cache = !bitmap.isVolatile();

@@ -397,6 +397,13 @@ GrTexture* GrRefCachedBitmapTexture(GrContext* ctx,
return result;
}
void GrUnlockAndUnrefCachedBitmapTexture(GrTexture* texture) {
SkASSERT(texture->getContext());
texture->getContext()->unlockScratchTexture(texture);
texture->unref();
}
///////////////////////////////////////////////////////////////////////////////
// alphatype is ignore for now, but if GrPixelConfig is expanded to encompass
@@ -99,7 +99,8 @@ static SkGrPixelRef* copy_to_new_texture_pixelref(GrTexture* texture, SkColorTyp
///////////////////////////////////////////////////////////////////////////////
SkGrPixelRef::SkGrPixelRef(const SkImageInfo& info, GrSurface* surface) : INHERITED(info) {
SkGrPixelRef::SkGrPixelRef(const SkImageInfo& info, GrSurface* surface,
bool transferCacheLock) : INHERITED(info) {
// For surfaces that are both textures and render targets, the texture owns the
// render target but not vice versa. So we ref the texture to keep both alive for
// the lifetime of this pixel ref.

@@ -107,6 +108,7 @@ SkGrPixelRef::SkGrPixelRef(const SkImageInfo& info, GrSurface* surface) : INHERI
if (NULL == fSurface) {
fSurface = SkSafeRef(surface);
}
fUnlock = transferCacheLock;
if (fSurface) {
SkASSERT(info.width() <= fSurface->width());

@@ -115,6 +117,13 @@ SkGrPixelRef::SkGrPixelRef(const SkImageInfo& info, GrSurface* surface) : INHERI
}
SkGrPixelRef::~SkGrPixelRef() {
if (fUnlock) {
GrContext* context = fSurface->getContext();
GrTexture* texture = fSurface->asTexture();
if (context && texture) {
context->unlockScratchTexture(texture);
}
}
SkSafeUnref(fSurface);
}
|
@ -216,6 +216,7 @@ void GrTextureStripAtlas::unlockTexture() {
|
||||
SkASSERT(fTexture && 0 == fLockedRows);
|
||||
fTexture->unref();
|
||||
fTexture = NULL;
|
||||
fDesc.fContext->purgeCache();
|
||||
}
|
||||
|
||||
void GrTextureStripAtlas::initLRU() {
|
||||
|
@@ -14,7 +14,7 @@ class SkSurface_Gpu : public SkSurface_Base {
public:
SK_DECLARE_INST_COUNT(SkSurface_Gpu)
SkSurface_Gpu(GrRenderTarget*, const SkSurfaceProps*, bool doClear);
SkSurface_Gpu(GrRenderTarget*, bool cached, const SkSurfaceProps*, bool doClear);
virtual ~SkSurface_Gpu();
virtual SkCanvas* onNewCanvas() SK_OVERRIDE;

@@ -33,10 +33,12 @@ private:
///////////////////////////////////////////////////////////////////////////////
SkSurface_Gpu::SkSurface_Gpu(GrRenderTarget* renderTarget, const SkSurfaceProps* props,
SkSurface_Gpu::SkSurface_Gpu(GrRenderTarget* renderTarget, bool cached, const SkSurfaceProps* props,
bool doClear)
: INHERITED(renderTarget->width(), renderTarget->height(), props) {
: INHERITED(renderTarget->width(), renderTarget->height(), props)
{
int deviceFlags = 0;
deviceFlags |= cached ? SkGpuDevice::kCached_Flag : 0;
deviceFlags |= this->props().isUseDistanceFieldFonts() ? SkGpuDevice::kDFFonts_Flag : 0;
fDevice = SkGpuDevice::Create(renderTarget, this->props(), deviceFlags);

@@ -109,7 +111,7 @@ SkSurface* SkSurface::NewRenderTargetDirect(GrRenderTarget* target, const SkSurf
if (NULL == target) {
return NULL;
}
return SkNEW_ARGS(SkSurface_Gpu, (target, props, false));
return SkNEW_ARGS(SkSurface_Gpu, (target, false, props, false));
}
SkSurface* SkSurface::NewRenderTarget(GrContext* ctx, const SkImageInfo& info, int sampleCount,

@@ -130,7 +132,7 @@ SkSurface* SkSurface::NewRenderTarget(GrContext* ctx, const SkImageInfo& info, i
return NULL;
}
return SkNEW_ARGS(SkSurface_Gpu, (tex->asRenderTarget(), props, true));
return SkNEW_ARGS(SkSurface_Gpu, (tex->asRenderTarget(), false, props, true));
}
SkSurface* SkSurface::NewScratchRenderTarget(GrContext* ctx, const SkImageInfo& info,

@@ -152,5 +154,5 @@ SkSurface* SkSurface::NewScratchRenderTarget(GrContext* ctx, const SkImageInfo&
return NULL;
}
return SkNEW_ARGS(SkSurface_Gpu, (tex->asRenderTarget(), props, true));
return SkNEW_ARGS(SkSurface_Gpu, (tex->asRenderTarget(), true, props, true));
}
|
@ -247,6 +247,37 @@ static void test_resource_size_changed(skiatest::Reporter* reporter,
|
||||
REPORTER_ASSERT(reporter, 201 == cache.getCachedResourceBytes());
|
||||
REPORTER_ASSERT(reporter, 1 == cache.getCachedResourceCount());
|
||||
}
|
||||
|
||||
// Test changing the size of an exclusively-held resource.
|
||||
{
|
||||
GrResourceCache cache(2, 300);
|
||||
|
||||
TestResource* a = new TestResource(context->getGpu(), 100);
|
||||
cache.addResource(key1, a);
|
||||
cache.makeExclusive(a->getCacheEntry());
|
||||
|
||||
TestResource* b = new TestResource(context->getGpu(), 100);
|
||||
cache.addResource(key2, b);
|
||||
b->unref();
|
||||
|
||||
REPORTER_ASSERT(reporter, 200 == cache.getCachedResourceBytes());
|
||||
REPORTER_ASSERT(reporter, 2 == cache.getCachedResourceCount());
|
||||
REPORTER_ASSERT(reporter, NULL == cache.find(key1));
|
||||
|
||||
a->setSize(200);
|
||||
|
||||
REPORTER_ASSERT(reporter, 300 == cache.getCachedResourceBytes());
|
||||
REPORTER_ASSERT(reporter, 2 == cache.getCachedResourceCount());
|
||||
// Internal resource cache validation will test the detached size (debug mode only).
|
||||
|
||||
cache.makeNonExclusive(a->getCacheEntry());
|
||||
a->unref();
|
||||
|
||||
REPORTER_ASSERT(reporter, 300 == cache.getCachedResourceBytes());
|
||||
REPORTER_ASSERT(reporter, 2 == cache.getCachedResourceCount());
|
||||
REPORTER_ASSERT(reporter, cache.find(key1));
|
||||
// Internal resource cache validation will test the detached size (debug mode only).
|
||||
}
|
||||
}
|
||||
|
||||
////////////////////////////////////////////////////////////////////////////////
|
||||
|