Move npot resizing out of GrContext and simplify GrContext texture functions.

Review URL: https://codereview.chromium.org/882223003
bsalomon authored 2015-01-30 12:43:44 -08:00; committed by Commit bot
parent c8fcafb3f0
commit 8a81003491
10 changed files with 330 additions and 419 deletions

View File

@@ -169,10 +169,10 @@ public:
*/
void purgeAllUnlockedResources();
/**
* Stores a custom resource in the cache, based on the specified key.
/** Sets a content key on the resource. The resource must not already have a content key and
* the key must not already be in use for this to succeed.
*/
void addResourceToCache(const GrContentKey&, GrGpuResource*);
bool addResourceToCache(const GrContentKey&, GrGpuResource*);
/**
* Finds a resource in the cache, based on the specified key. This is intended for use in
@@ -181,6 +181,25 @@ public:
*/
GrGpuResource* findAndRefCachedResource(const GrContentKey&);
/** Helper for casting resource to a texture. Caller must be sure that the resource cached
with the key is either NULL or a texture and not another resource type. */
GrTexture* findAndRefCachedTexture(const GrContentKey& key) {
GrGpuResource* resource = this->findAndRefCachedResource(key);
if (resource) {
GrTexture* texture = static_cast<GrSurface*>(resource)->asTexture();
SkASSERT(texture);
return texture;
}
return NULL;
}
/**
* Determines whether a resource is in the cache. If the resource is found it
* will not be locked or returned. This call does not affect the priority of
* the resource for deletion.
*/
bool isResourceInCache(const GrContentKey& key) const;
/**
* Creates a new text rendering context that is optimal for the
* render target and the context. Caller assumes the ownership
@@ -195,57 +214,28 @@ public:
// Textures
/**
* Creates a new entry, based on the specified key and texture and returns it. The caller owns a
* Creates a new texture in the resource cache and returns it. The caller owns a
* ref on the returned texture which must be balanced by a call to unref.
*
* TODO: Move resizing logic out of GrContext and have the caller set the content key on the
* returned texture rather than take it as a param.
*
* @param params The texture params used to draw a texture may help determine
* the cache entry used. (e.g. different versions may exist
* for different wrap modes on GPUs with limited NPOT
* texture support). NULL implies clamp wrap modes.
* @param desc Description of the texture properties.
* @param key Key to associate with the texture.
* @param srcData Pointer to the pixel values.
* @param rowBytes The number of bytes between rows of the texture. Zero
* implies tightly packed rows. For compressed pixel configs, this
* field is ignored.
* @param outKey (optional) If non-NULL, we'll write the cache key we used to cacheKey. this
* may differ from key on GPUs that don't support tiling NPOT textures.
*/
GrTexture* createTexture(const GrTextureParams* params,
const GrSurfaceDesc& desc,
const GrContentKey& key,
const void* srcData,
size_t rowBytes,
GrContentKey* outKey = NULL);
GrTexture* createTexture(const GrSurfaceDesc& desc, const void* srcData, size_t rowBytes);
GrTexture* createTexture(const GrSurfaceDesc& desc) {
return this->createTexture(desc, NULL, 0);
}
/**
* Search for an entry based on key and dimensions. If found, ref it and return it. The return
* value will be NULL if not found. The caller must balance with a call to unref.
* Creates a texture that is outside the cache. Does not count against
* the cache's budget.
*
* TODO: Remove this function and do lookups generically.
*
* @param desc Description of the texture properties.
* @param key key to use for texture look up.
* @param params The texture params used to draw a texture may help determine
* the cache entry used. (e.g. different versions may exist
* for different wrap modes on GPUs with limited NPOT
* texture support). NULL implies clamp wrap modes.
* TODO: Add a budgeted param to createTexture and remove this function.
*/
GrTexture* findAndRefTexture(const GrSurfaceDesc& desc,
const GrContentKey& key,
const GrTextureParams* params);
/**
* Determines whether a texture is in the cache. If the texture is found it
* will not be locked or returned. This call does not affect the priority of
* the texture for deletion.
*
* TODO: Remove this function and do cache checks generically.
*/
bool isTextureInCache(const GrSurfaceDesc& desc,
const GrContentKey& key,
const GrTextureParams* params) const;
GrTexture* createUncachedTexture(const GrSurfaceDesc& desc, void* srcData, size_t rowBytes);
/**
* Enum that determines how closely a returned scratch texture must match
@@ -284,26 +274,9 @@ public:
bool internalFlag = false);
/**
* Creates a texture that is outside the cache. Does not count against
* the cache's budget.
*
* Textures created by createTexture() hide the complications of
* tiling non-power-of-two textures on APIs that don't support this (e.g.
* unextended GLES2). NPOT uncached textures are not tilable on such APIs.
* Returns true if index8 textures are supported.
*/
GrTexture* createUncachedTexture(const GrSurfaceDesc& desc,
void* srcData,
size_t rowBytes);
/**
* Returns true if the specified use of an indexed texture is supported.
* Support may depend upon whether the texture params indicate that the
* texture will be tiled. Passing NULL for the texture params indicates
* clamp mode.
*/
bool supportsIndex8PixelConfig(const GrTextureParams*,
int width,
int height) const;
bool supportsIndex8PixelConfig() const;
/**
* Return the max width or height of a texture supported by the current GPU.

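A minimal sketch (the helper name is hypothetical, not part of the commit) of the find-or-create pattern the simplified API expects; the call sites updated in the files below (the blur effects, SkColorCubeFilter, GrTextureStripAtlas) all follow this shape:

static GrTexture* find_or_create_texture(GrContext* context,
                                         const GrContentKey& key,
                                         const GrSurfaceDesc& desc,
                                         const void* srcData,
                                         size_t rowBytes) {
    // The caller owns a ref on anything returned by either path below.
    GrTexture* texture = context->findAndRefCachedTexture(key);
    if (NULL == texture) {
        texture = context->createTexture(desc, srcData, rowBytes);
        if (texture) {
            // addResourceToCache() now returns false if the key is already
            // taken or the resource was destroyed; the updated call sites
            // treat that as a programming error via SkAssertResult.
            SkAssertResult(context->addResourceToCache(key, texture));
        }
    }
    return texture;
}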
View File

@@ -25,23 +25,15 @@
* created by subclassing GrProcessor.
*
* The primitive color computation starts with the color specified by setColor(). This color is the
* input to the first color stage. Each color stage feeds its output to the next color stage. The
* final color stage's output color is input to the color filter specified by
* setXfermodeColorFilter which produces the final source color, S.
* input to the first color stage. Each color stage feeds its output to the next color stage.
*
* Fractional pixel coverage follows a similar flow. The coverage is initially the value specified
* by setCoverage(). This is input to the first coverage stage. Coverage stages are chained
* together in the same manner as color stages. The output of the last stage is modulated by any
* fractional coverage produced by anti-aliasing. This last step produces the final coverage, C.
*
* setBlendFunc() specifies blending coefficients for S (described above) and D, the initial value
* of the destination pixel, labeled Bs and Bd respectively. The final value of the destination
* pixel is then D' = (1-C)*D + C*(Bd*D + Bs*S).
*
* Note that the coverage is applied after the blend. This is why they are computed as distinct
* values.
*
* TODO: Encapsulate setXfermodeColorFilter in a GrProcessor and remove from GrPaint.
* setXPFactory is used to control blending between the output color and dest. It also implements
* the application of fractional coverage from the coverage pipeline.
*/
class GrPaint {
public:

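A short sketch of the pipeline described above, using only GrPaint calls that appear elsewhere in this commit (setColor per the comment, addColorTextureProcessor as in SkGr.cpp); texture and params are assumed to be in scope:

GrPaint paint;
paint.setColor(GrColor_WHITE);  // input to the first color stage
// Each added processor is a color stage feeding its output to the next.
paint.addColorTextureProcessor(texture, SkMatrix::I(), params);
// Blending with the destination and the application of fractional coverage
// are now owned by the XP produced by the paint's XP factory (setXPFactory),
// rather than by separate blend-func state on the paint.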
View File

@@ -220,6 +220,7 @@ public:
Builder(GrContentKey* key, const GrContentKey& innerKey, Domain domain,
int extraData32Cnt)
: INHERITED::Builder(key, domain, Data32CntForInnerKey(innerKey) + extraData32Cnt) {
SkASSERT(&innerKey != key);
// add the inner key to the end of the key so that op[] can be indexed normally.
uint32_t* innerKeyData = &this->operator[](extraData32Cnt);
const uint32_t* srcData = innerKey.data();

View File

@@ -302,6 +302,22 @@ static inline bool GrPixelConfigIsCompressed(GrPixelConfig config) {
}
}
/** If the pixel config is compressed, return an equivalent uncompressed format. */
static inline GrPixelConfig GrMakePixelConfigUncompressed(GrPixelConfig config) {
switch (config) {
case kIndex_8_GrPixelConfig:
case kETC1_GrPixelConfig:
case kASTC_12x12_GrPixelConfig:
return kRGBA_8888_GrPixelConfig;
case kLATC_GrPixelConfig:
case kR11_EAC_GrPixelConfig:
return kAlpha_8_GrPixelConfig;
default:
SkASSERT(!GrPixelConfigIsCompressed(config));
return config;
}
}
// Returns true if the pixel config is 32 bits per pixel
static inline bool GrPixelConfigIs8888(GrPixelConfig config) {
switch (config) {

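As an example, the NPOT resize path added to SkGr.cpp in this commit uses the helper to pick an uncompressed destination config before rendering the stretched copy (condensed excerpt):

GrSurfaceDesc rtDesc = inputTexture->desc();
// e.g. kETC1 -> kRGBA_8888, kR11_EAC -> kAlpha_8; uncompressed configs
// pass through unchanged.
rtDesc.fConfig = GrMakePixelConfigUncompressed(rtDesc.fConfig);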
View File

@@ -750,7 +750,6 @@ void GrGLRectBlurEffect::setData(const GrGLProgramDataManager& pdman,
bool GrRectBlurEffect::CreateBlurProfileTexture(GrContext *context, float sigma,
GrTexture **blurProfileTexture) {
GrTextureParams params;
GrSurfaceDesc texDesc;
unsigned int profileSize = SkScalarCeilToInt(6*sigma);
@@ -768,18 +767,19 @@ bool GrRectBlurEffect::CreateBlurProfileTexture(GrContext *context, float sigma,
uint8_t *profile = NULL;
SkAutoTDeleteArray<uint8_t> ada(NULL);
*blurProfileTexture = context->findAndRefTexture(texDesc, key, &params);
*blurProfileTexture = context->findAndRefCachedTexture(key);
if (NULL == *blurProfileTexture) {
SkBlurMask::ComputeBlurProfile(sigma, &profile);
ada.reset(profile);
*blurProfileTexture = context->createTexture(&params, texDesc, key, profile, 0);
*blurProfileTexture = context->createTexture(texDesc, profile, 0);
if (NULL == *blurProfileTexture) {
return false;
}
SkAssertResult(context->addResourceToCache(key, *blurProfileTexture));
}
return true;
@@ -925,21 +925,13 @@ GrFragmentProcessor* GrRRectBlurEffect::Create(GrContext* context, float sigma,
builder[1] = cornerRadius;
builder.finish();
GrTextureParams params;
params.setFilterMode(GrTextureParams::kBilerp_FilterMode);
unsigned int smallRectSide = 2*(blurRadius + cornerRadius) + 1;
unsigned int texSide = smallRectSide + 2*blurRadius;
GrSurfaceDesc texDesc;
texDesc.fWidth = texSide;
texDesc.fHeight = texSide;
texDesc.fConfig = kAlpha_8_GrPixelConfig;
GrTexture *blurNinePatchTexture = context->findAndRefTexture(texDesc, key, &params);
GrTexture *blurNinePatchTexture = context->findAndRefCachedTexture(key);
if (NULL == blurNinePatchTexture) {
SkMask mask;
unsigned int smallRectSide = 2*(blurRadius + cornerRadius) + 1;
mask.fBounds = SkIRect::MakeWH(smallRectSide, smallRectSide);
mask.fFormat = SkMask::kA8_Format;
mask.fRowBytes = mask.fBounds.width();
@@ -957,12 +949,22 @@ GrFragmentProcessor* GrRRectBlurEffect::Create(GrContext* context, float sigma,
SkPath path;
path.addRRect( smallRRect );
SkDraw::DrawToMask(path, &mask.fBounds, NULL, NULL, &mask, SkMask::kJustRenderImage_CreateMode, SkPaint::kFill_Style);
SkDraw::DrawToMask(path, &mask.fBounds, NULL, NULL, &mask,
SkMask::kJustRenderImage_CreateMode, SkPaint::kFill_Style);
SkMask blurred_mask;
SkBlurMask::BoxBlur(&blurred_mask, mask, sigma, kNormal_SkBlurStyle, kHigh_SkBlurQuality, NULL, true );
SkBlurMask::BoxBlur(&blurred_mask, mask, sigma, kNormal_SkBlurStyle, kHigh_SkBlurQuality,
NULL, true );
unsigned int texSide = smallRectSide + 2*blurRadius;
GrSurfaceDesc texDesc;
texDesc.fWidth = texSide;
texDesc.fHeight = texSide;
texDesc.fConfig = kAlpha_8_GrPixelConfig;
blurNinePatchTexture = context->createTexture(texDesc, blurred_mask.fImage, 0);
SkAssertResult(context->addResourceToCache(key, blurNinePatchTexture));
blurNinePatchTexture = context->createTexture(&params, texDesc, key, blurred_mask.fImage, 0);
SkMask::FreeImage(blurred_mask.fImage);
}

View File

@@ -354,12 +354,12 @@ GrFragmentProcessor* SkColorCubeFilter::asFragmentProcessor(GrContext* context)
desc.fHeight = fCache.cubeDimension() * fCache.cubeDimension();
desc.fConfig = kRGBA_8888_GrPixelConfig;
GrSurface* surface = static_cast<GrSurface*>(context->findAndRefCachedResource(key));
SkAutoTUnref<GrTexture> textureCube;
if (surface) {
textureCube.reset(surface->asTexture());
} else {
textureCube.reset(context->createTexture(NULL, desc, key, fCubeData->data(), 0));
SkAutoTUnref<GrTexture> textureCube(context->findAndRefCachedTexture(key));
if (!textureCube) {
textureCube.reset(context->createTexture(desc, fCubeData->data(), 0));
if (textureCube) {
SkAssertResult(context->addResourceToCache(key, textureCube));
}
}
return textureCube ? GrColorCubeEffect::Create(textureCube) : NULL;

View File

@@ -223,219 +223,9 @@ GrTextContext* GrContext::createTextContext(GrRenderTarget* renderTarget,
////////////////////////////////////////////////////////////////////////////////
static void stretch_image(void* dst,
int dstW,
int dstH,
const void* src,
int srcW,
int srcH,
size_t bpp) {
SkFixed dx = (srcW << 16) / dstW;
SkFixed dy = (srcH << 16) / dstH;
SkFixed y = dy >> 1;
size_t dstXLimit = dstW*bpp;
for (int j = 0; j < dstH; ++j) {
SkFixed x = dx >> 1;
const uint8_t* srcRow = reinterpret_cast<const uint8_t *>(src) + (y>>16)*srcW*bpp;
uint8_t* dstRow = reinterpret_cast<uint8_t *>(dst) + j*dstW*bpp;
for (size_t i = 0; i < dstXLimit; i += bpp) {
memcpy(dstRow + i, srcRow + (x>>16)*bpp, bpp);
x += dx;
}
y += dy;
}
}
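For reference, the fixed-point stepping in the deleted function above is plain nearest-neighbor sampling: with dx = (srcW << 16) / dstW and the initial half-step x = dx >> 1, destination column i reads

    srcX(i) = (dx/2 + i*dx) >> 16  ~  floor((2*i + 1) * srcW / (2 * dstW))

that is, the source texel whose center is nearest the destination texel's center, and likewise for rows via dy.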
enum ResizeFlags {
/**
* The kStretchToPOT bit is set when the texture is NPOT and is being repeated or mipped but the
* hardware doesn't support that feature.
*/
kStretchToPOT_ResizeFlag = 0x1,
/**
* The kBilerp bit can only be set when the kStretchToPOT flag is set and indicates whether the
* stretched texture should be bilerped.
*/
kBilerp_ResizeFlag = 0x2,
};
static uint32_t get_texture_flags(const GrGpu* gpu,
const GrTextureParams* params,
const GrSurfaceDesc& desc) {
uint32_t flags = 0;
bool tiled = params && params->isTiled();
if (tiled && !gpu->caps()->npotTextureTileSupport()) {
if (!SkIsPow2(desc.fWidth) || !SkIsPow2(desc.fHeight)) {
flags |= kStretchToPOT_ResizeFlag;
switch(params->filterMode()) {
case GrTextureParams::kNone_FilterMode:
break;
case GrTextureParams::kBilerp_FilterMode:
case GrTextureParams::kMipMap_FilterMode:
flags |= kBilerp_ResizeFlag;
break;
}
}
}
return flags;
}
// The desired texture is NPOT and tiled but that isn't supported by
// the current hardware. Resize the texture to be a POT
GrTexture* GrContext::createResizedTexture(const GrSurfaceDesc& desc,
const GrContentKey& origKey,
const void* srcData,
size_t rowBytes,
bool filter) {
SkAutoTUnref<GrTexture> clampedTexture(this->findAndRefTexture(desc, origKey, NULL));
if (NULL == clampedTexture) {
clampedTexture.reset(this->createTexture(NULL, desc, origKey, srcData, rowBytes));
if (NULL == clampedTexture) {
return NULL;
}
clampedTexture->cacheAccess().setContentKey(origKey);
}
GrSurfaceDesc rtDesc = desc;
rtDesc.fFlags = rtDesc.fFlags |
kRenderTarget_GrSurfaceFlag |
kNoStencil_GrSurfaceFlag;
rtDesc.fWidth = GrNextPow2(desc.fWidth);
rtDesc.fHeight = GrNextPow2(desc.fHeight);
GrTexture* texture = fGpu->createTexture(rtDesc, true, NULL, 0);
if (texture) {
GrPipelineBuilder pipelineBuilder;
pipelineBuilder.setRenderTarget(texture->asRenderTarget());
// if filtering is not desired then we want to ensure all
// texels in the resampled image are copies of texels from
// the original.
GrTextureParams params(SkShader::kClamp_TileMode,
filter ? GrTextureParams::kBilerp_FilterMode :
GrTextureParams::kNone_FilterMode);
pipelineBuilder.addColorTextureProcessor(clampedTexture, SkMatrix::I(), params);
uint32_t flags = GrDefaultGeoProcFactory::kPosition_GPType |
GrDefaultGeoProcFactory::kLocalCoord_GPType;
SkAutoTUnref<const GrGeometryProcessor> gp(
GrDefaultGeoProcFactory::Create(flags, GrColor_WHITE));
GrDrawTarget::AutoReleaseGeometry arg(fDrawBuffer, 4, gp->getVertexStride(), 0);
SkASSERT(gp->getVertexStride() == 2 * sizeof(SkPoint));
if (arg.succeeded()) {
SkPoint* verts = (SkPoint*) arg.vertices();
verts[0].setIRectFan(0, 0, texture->width(), texture->height(), 2 * sizeof(SkPoint));
verts[1].setIRectFan(0, 0, 1, 1, 2 * sizeof(SkPoint));
fDrawBuffer->drawNonIndexed(&pipelineBuilder, gp, kTriangleFan_GrPrimitiveType, 0, 4);
} else {
texture->unref();
texture = NULL;
}
} else {
// TODO: Our CPU stretch doesn't filter. But we create separate
// stretched textures when the texture params is either filtered or
// not. Either implement filtered stretch blit on CPU or just create
// one when FBO case fails.
rtDesc.fFlags = kNone_GrSurfaceFlags;
// no longer need to clamp at min RT size.
rtDesc.fWidth = GrNextPow2(desc.fWidth);
rtDesc.fHeight = GrNextPow2(desc.fHeight);
// We shouldn't be resizing a compressed texture.
SkASSERT(!GrPixelConfigIsCompressed(desc.fConfig));
size_t bpp = GrBytesPerPixel(desc.fConfig);
GrAutoMalloc<128*128*4> stretchedPixels(bpp * rtDesc.fWidth * rtDesc.fHeight);
stretch_image(stretchedPixels.get(), rtDesc.fWidth, rtDesc.fHeight,
srcData, desc.fWidth, desc.fHeight, bpp);
size_t stretchedRowBytes = rtDesc.fWidth * bpp;
texture = fGpu->createTexture(rtDesc, true, stretchedPixels.get(), stretchedRowBytes);
SkASSERT(texture);
}
return texture;
}
static GrContentKey::Domain ResizeDomain() {
static const GrContentKey::Domain kDomain = GrContentKey::GenerateDomain();
return kDomain;
}
GrTexture* GrContext::createTexture(const GrTextureParams* params,
const GrSurfaceDesc& desc,
const GrContentKey& origKey,
const void* srcData,
size_t rowBytes,
GrContentKey* outKey) {
GrTexture* texture;
uint32_t flags = get_texture_flags(fGpu, params, desc);
SkTCopyOnFirstWrite<GrContentKey> key(origKey);
if (flags) {
// We don't have a code path to resize compressed textures.
if (GrPixelConfigIsCompressed(desc.fConfig)) {
return NULL;
}
texture = this->createResizedTexture(desc, origKey, srcData, rowBytes,
SkToBool(flags & kBilerp_ResizeFlag));
GrContentKey::Builder builder(key.writable(), origKey, ResizeDomain(), 1);
builder[0] = flags;
} else {
texture = fGpu->createTexture(desc, true, srcData, rowBytes);
}
if (texture) {
if (texture->cacheAccess().setContentKey(*key)) {
if (outKey) {
*outKey = *key;
}
} else {
texture->unref();
texture = NULL;
}
}
return texture;
}
GrTexture* GrContext::findAndRefTexture(const GrSurfaceDesc& desc,
const GrContentKey& origKey,
const GrTextureParams* params) {
uint32_t flags = get_texture_flags(fGpu, params, desc);
SkTCopyOnFirstWrite<GrContentKey> key(origKey);
if (flags) {
GrContentKey::Builder builder(key.writable(), origKey, ResizeDomain(), 1);
builder[0] = flags;
}
GrGpuResource* resource = this->findAndRefCachedResource(*key);
if (resource) {
SkASSERT(static_cast<GrSurface*>(resource)->asTexture());
return static_cast<GrSurface*>(resource)->asTexture();
}
return NULL;
}
bool GrContext::isTextureInCache(const GrSurfaceDesc& desc,
const GrContentKey& origKey,
const GrTextureParams* params) const {
uint32_t flags = get_texture_flags(fGpu, params, desc);
SkTCopyOnFirstWrite<GrContentKey> key(origKey);
if (flags) {
GrContentKey::Builder builder(key.writable(), origKey, ResizeDomain(), 1);
builder[0] = flags;
}
return fResourceCache2->hasContentKey(*key);
GrTexture* GrContext::createTexture(const GrSurfaceDesc& desc, const void* srcData,
size_t rowBytes) {
return fGpu->createTexture(desc, true, srcData, rowBytes);
}
GrTexture* GrContext::refScratchTexture(const GrSurfaceDesc& inDesc, ScratchTexMatch match,
@@ -526,19 +316,6 @@ GrTexture* GrContext::createUncachedTexture(const GrSurfaceDesc& desc,
return fGpu->createTexture(desc, false, srcData, rowBytes);
}
void GrContext::getResourceCacheLimits(int* maxTextures, size_t* maxTextureBytes) const {
if (maxTextures) {
*maxTextures = fResourceCache2->getMaxResourceCount();
}
if (maxTextureBytes) {
*maxTextureBytes = fResourceCache2->getMaxResourceBytes();
}
}
void GrContext::setResourceCacheLimits(int maxTextures, size_t maxTextureBytes) {
fResourceCache2->setLimits(maxTextures, maxTextureBytes);
}
int GrContext::getMaxTextureSize() const {
return SkTMin(fGpu->caps()->maxTextureSize(), fMaxTextureSizeOverride);
}
@@ -563,22 +340,9 @@ GrRenderTarget* GrContext::wrapBackendRenderTarget(const GrBackendRenderTargetDe
///////////////////////////////////////////////////////////////////////////////
bool GrContext::supportsIndex8PixelConfig(const GrTextureParams* params,
int width, int height) const {
bool GrContext::supportsIndex8PixelConfig() const {
const GrDrawTargetCaps* caps = fGpu->caps();
if (!caps->isConfigTexturable(kIndex_8_GrPixelConfig)) {
return false;
}
bool isPow2 = SkIsPow2(width) && SkIsPow2(height);
if (!isPow2) {
bool tiled = params && params->isTiled();
if (tiled && !caps->npotTextureTileSupport()) {
return false;
}
}
return true;
return caps->isConfigTexturable(kIndex_8_GrPixelConfig);
}
@@ -1767,14 +1531,39 @@ const GrFragmentProcessor* GrContext::createUPMToPMEffect(GrTexture* texture,
}
}
void GrContext::addResourceToCache(const GrContentKey& key, GrGpuResource* resource) {
resource->cacheAccess().setContentKey(key);
//////////////////////////////////////////////////////////////////////////////
void GrContext::getResourceCacheLimits(int* maxTextures, size_t* maxTextureBytes) const {
if (maxTextures) {
*maxTextures = fResourceCache2->getMaxResourceCount();
}
if (maxTextureBytes) {
*maxTextureBytes = fResourceCache2->getMaxResourceBytes();
}
}
void GrContext::setResourceCacheLimits(int maxTextures, size_t maxTextureBytes) {
fResourceCache2->setLimits(maxTextures, maxTextureBytes);
}
bool GrContext::addResourceToCache(const GrContentKey& key, GrGpuResource* resource) {
ASSERT_OWNED_RESOURCE(resource);
if (!resource || resource->wasDestroyed()) {
return false;
}
return resource->cacheAccess().setContentKey(key);
}
bool GrContext::isResourceInCache(const GrContentKey& key) const {
return fResourceCache2->hasContentKey(key);
}
GrGpuResource* GrContext::findAndRefCachedResource(const GrContentKey& key) {
return fResourceCache2->findAndRefContentResource(key);
}
//////////////////////////////////////////////////////////////////////////////
void GrContext::addGpuTraceMarker(const GrGpuTraceMarker* marker) {
fGpu->addGpuTraceMarker(marker);
if (fDrawBuffer) {

View File

@@ -9,6 +9,7 @@
#include "GrDrawTargetCaps.h"
#include "GrGpu.h"
#include "GrGpuResourceCacheAccess.h"
#include "GrXferProcessor.h"
#include "SkColorFilter.h"
#include "SkConfig8888.h"
@@ -86,7 +87,45 @@ static void build_index8_data(void* buffer, const SkBitmap& bitmap) {
////////////////////////////////////////////////////////////////////////////////
static void generate_bitmap_key(const SkBitmap& bitmap, GrContentKey* key) {
enum Stretch {
kNo_Stretch,
kBilerp_Stretch,
kNearest_Stretch
};
static Stretch get_stretch_type(const GrContext* ctx, int width, int height,
const GrTextureParams* params) {
if (params && params->isTiled()) {
const GrDrawTargetCaps* caps = ctx->getGpu()->caps();
if (!caps->npotTextureTileSupport() && (!SkIsPow2(width) || !SkIsPow2(height))) {
switch(params->filterMode()) {
case GrTextureParams::kNone_FilterMode:
return kNearest_Stretch;
case GrTextureParams::kBilerp_FilterMode:
case GrTextureParams::kMipMap_FilterMode:
return kBilerp_Stretch;
}
}
}
return kNo_Stretch;
}
static bool make_resize_key(const GrContentKey& origKey, Stretch stretch, GrContentKey* resizeKey) {
if (origKey.isValid() && kNo_Stretch != stretch) {
static const GrContentKey::Domain kDomain = GrContentKey::GenerateDomain();
GrContentKey::Builder builder(resizeKey, origKey, kDomain, 1);
builder[0] = stretch;
builder.finish();
return true;
}
SkASSERT(!resizeKey->isValid());
return false;
}
static void generate_bitmap_keys(const SkBitmap& bitmap,
Stretch stretch,
GrContentKey* key,
GrContentKey* resizedKey) {
// Our id includes the offset, width, and height so that bitmaps created by extractSubset()
// are unique.
uint32_t genID = bitmap.getGenerationID();
@@ -100,6 +139,11 @@ static void generate_bitmap_key(const SkBitmap& bitmap, GrContentKey* key) {
builder[1] = origin.fX;
builder[2] = origin.fY;
builder[3] = width | (height << 16);
builder.finish();
if (kNo_Stretch != stretch) {
make_resize_key(*key, stretch, resizedKey);
}
}
static void generate_bitmap_texture_desc(const SkBitmap& bitmap, GrSurfaceDesc* desc) {
@@ -127,45 +171,106 @@ private:
} // namespace
#if 0 // TODO: plug this back up
static void add_genID_listener(const GrContentKey& key, SkPixelRef* pixelRef) {
SkASSERT(pixelRef);
pixelRef->addGenIDChangeListener(SkNEW_ARGS(GrResourceInvalidator, (key)));
}
#endif
// creates a new texture that is the input texture scaled up to the next power of two in
// width or height. If optionalKey is valid it will be set on the new texture. stretch
// controls whether the scaling is done using nearest or bilerp filtering.
GrTexture* resize_texture(GrTexture* inputTexture, Stretch stretch,
const GrContentKey& optionalKey) {
SkASSERT(kNo_Stretch != stretch);
GrContext* context = inputTexture->getContext();
SkASSERT(context);
// Either it's a cache miss or the original wasn't cached to begin with.
GrSurfaceDesc rtDesc = inputTexture->desc();
rtDesc.fFlags = rtDesc.fFlags |
kRenderTarget_GrSurfaceFlag |
kNoStencil_GrSurfaceFlag;
rtDesc.fWidth = GrNextPow2(rtDesc.fWidth);
rtDesc.fHeight = GrNextPow2(rtDesc.fHeight);
rtDesc.fConfig = GrMakePixelConfigUncompressed(rtDesc.fConfig);
// If the config isn't renderable, try converting to either A8 or a 32-bit config. Otherwise,
// fail.
if (!context->isConfigRenderable(rtDesc.fConfig, false)) {
if (GrPixelConfigIsAlphaOnly(rtDesc.fConfig)) {
if (context->isConfigRenderable(kAlpha_8_GrPixelConfig, false)) {
rtDesc.fConfig = kAlpha_8_GrPixelConfig;
} else if (context->isConfigRenderable(kSkia8888_GrPixelConfig, false)) {
rtDesc.fConfig = kSkia8888_GrPixelConfig;
} else {
return NULL;
}
} else if (kRGB_GrColorComponentFlags ==
(kRGB_GrColorComponentFlags & GrPixelConfigComponentMask(rtDesc.fConfig))) {
if (context->isConfigRenderable(kSkia8888_GrPixelConfig, false)) {
rtDesc.fConfig = kSkia8888_GrPixelConfig;
} else {
return NULL;
}
} else {
return NULL;
}
}
GrTexture* resized = context->getGpu()->createTexture(rtDesc, true, NULL, 0);
if (!resized) {
return NULL;
}
GrPaint paint;
// If filtering is not desired then we want to ensure all texels in the resampled image are
// copies of texels from the original.
GrTextureParams params(SkShader::kClamp_TileMode,
kBilerp_Stretch == stretch ? GrTextureParams::kBilerp_FilterMode :
GrTextureParams::kNone_FilterMode);
paint.addColorTextureProcessor(inputTexture, SkMatrix::I(), params);
SkRect rect = SkRect::MakeWH(SkIntToScalar(rtDesc.fWidth), SkIntToScalar(rtDesc.fHeight));
SkRect localRect = SkRect::MakeWH(1.f, 1.f);
GrContext::AutoRenderTarget autoRT(context, resized->asRenderTarget());
GrContext::AutoClip ac(context, GrContext::AutoClip::kWideOpen_InitialClip);
context->drawNonAARectToRect(paint, SkMatrix::I(), rect, localRect);
if (optionalKey.isValid()) {
SkAssertResult(context->addResourceToCache(optionalKey, resized));
}
return resized;
}
static GrTexture* sk_gr_allocate_texture(GrContext* ctx,
bool cache,
const GrTextureParams* params,
const SkBitmap& bm,
const GrContentKey& optionalKey,
GrSurfaceDesc desc,
const void* pixels,
size_t rowBytes) {
GrTexture* result;
if (cache) {
// This texture is likely to be used again so leave it in the cache
GrContentKey key;
generate_bitmap_key(bm, &key);
result = ctx->createTexture(params, desc, key, pixels, rowBytes, &key);
if (optionalKey.isValid()) {
result = ctx->createTexture(desc, pixels, rowBytes);
if (result) {
add_genID_listener(key, bm.pixelRef());
SkAssertResult(ctx->addResourceToCache(optionalKey, result));
}
} else {
// This texture is unlikely to be used again (in its present form) so
// just use a scratch texture. This will remove the texture from the
// cache so no one else can find it. Additionally, once unlocked, the
// scratch texture will go to the end of the list for purging so will
// likely be available for this volatile bitmap the next time around.
} else {
result = ctx->refScratchTexture(desc, GrContext::kExact_ScratchTexMatch);
if (pixels) {
result->writePixels(0, 0, bm.width(), bm.height(), desc.fConfig, pixels, rowBytes);
if (pixels && result) {
result->writePixels(0, 0, desc.fWidth, desc.fHeight, desc.fConfig, pixels, rowBytes);
}
}
return result;
}
#ifndef SK_IGNORE_ETC1_SUPPORT
static GrTexture *load_etc1_texture(GrContext* ctx, bool cache,
const GrTextureParams* params,
static GrTexture *load_etc1_texture(GrContext* ctx, const GrContentKey& optionalKey,
const SkBitmap &bm, GrSurfaceDesc desc) {
SkAutoTUnref<SkData> data(bm.pixelRef()->refEncodedData());
@@ -210,11 +315,11 @@ static GrTexture *load_etc1_texture(GrContext* ctx, bool cache,
return NULL;
}
return sk_gr_allocate_texture(ctx, cache, params, bm, desc, bytes, 0);
return sk_gr_allocate_texture(ctx, optionalKey, desc, bytes, 0);
}
#endif // SK_IGNORE_ETC1_SUPPORT
static GrTexture *load_yuv_texture(GrContext* ctx, bool cache, const GrTextureParams* params,
static GrTexture* load_yuv_texture(GrContext* ctx, const GrContentKey& key,
const SkBitmap& bm, const GrSurfaceDesc& desc) {
// Subsets are not supported, the whole pixelRef is loaded when using YUV decoding
SkPixelRef* pixelRef = bm.pixelRef();
@@ -282,30 +387,31 @@ static GrTexture *load_yuv_texture(GrContext* ctx, bool cache, const GrTexturePa
kRenderTarget_GrSurfaceFlag |
kNoStencil_GrSurfaceFlag;
GrTexture* result = sk_gr_allocate_texture(ctx, cache, params, bm, rtDesc, NULL, 0);
GrRenderTarget* renderTarget = result ? result->asRenderTarget() : NULL;
if (renderTarget) {
SkAutoTUnref<GrFragmentProcessor> yuvToRgbProcessor(GrYUVtoRGBEffect::Create(
yuvTextures[0], yuvTextures[1], yuvTextures[2], yuvInfo.fColorSpace));
GrPaint paint;
paint.addColorProcessor(yuvToRgbProcessor);
SkRect r = SkRect::MakeWH(SkIntToScalar(yuvInfo.fSize[0].fWidth),
SkIntToScalar(yuvInfo.fSize[0].fHeight));
GrContext::AutoRenderTarget autoRT(ctx, renderTarget);
GrContext::AutoClip ac(ctx, GrContext::AutoClip::kWideOpen_InitialClip);
ctx->drawRect(paint, SkMatrix::I(), r);
} else {
SkSafeSetNull(result);
GrTexture* result = ctx->createTexture(rtDesc, NULL, 0);
if (!result) {
return NULL;
}
GrRenderTarget* renderTarget = result->asRenderTarget();
SkASSERT(renderTarget);
SkAutoTUnref<GrFragmentProcessor>
yuvToRgbProcessor(GrYUVtoRGBEffect::Create(yuvTextures[0], yuvTextures[1], yuvTextures[2],
yuvInfo.fColorSpace));
GrPaint paint;
paint.addColorProcessor(yuvToRgbProcessor);
SkRect r = SkRect::MakeWH(SkIntToScalar(yuvInfo.fSize[0].fWidth),
SkIntToScalar(yuvInfo.fSize[0].fHeight));
GrContext::AutoRenderTarget autoRT(ctx, renderTarget);
GrContext::AutoClip ac(ctx, GrContext::AutoClip::kWideOpen_InitialClip);
ctx->drawRect(paint, SkMatrix::I(), r);
return result;
}
static GrTexture* sk_gr_create_bitmap_texture(GrContext* ctx,
bool cache,
const GrTextureParams* params,
const SkBitmap& origBitmap) {
static GrTexture* create_unstretched_bitmap_texture(GrContext* ctx,
const SkBitmap& origBitmap,
const GrContentKey& optionalKey) {
SkBitmap tmpBitmap;
const SkBitmap* bitmap = &origBitmap;
@@ -314,9 +420,7 @@ static GrTexture* sk_gr_create_bitmap_texture(GrContext* ctx,
generate_bitmap_texture_desc(*bitmap, &desc);
if (kIndex_8_SkColorType == bitmap->colorType()) {
// build_compressed_data doesn't do npot->pot expansion
// and paletted textures can't be sub-updated
if (cache && ctx->supportsIndex8PixelConfig(params, bitmap->width(), bitmap->height())) {
if (ctx->supportsIndex8PixelConfig()) {
size_t imageSize = GrCompressedFormatDataSize(kIndex_8_GrPixelConfig,
bitmap->width(), bitmap->height());
SkAutoMalloc storage(imageSize);
@@ -324,8 +428,7 @@ static GrTexture* sk_gr_create_bitmap_texture(GrContext* ctx,
// our compressed data will be trimmed, so pass width() for its
// "rowBytes", since they are the same now.
return sk_gr_allocate_texture(ctx, cache, params, origBitmap,
desc, storage.get(), bitmap->width());
return sk_gr_allocate_texture(ctx, optionalKey, desc, storage.get(), bitmap->width());
} else {
origBitmap.copyTo(&tmpBitmap, kN32_SkColorType);
// now bitmap points to our temp, which has been promoted to 32bits
@@ -339,7 +442,7 @@ static GrTexture* sk_gr_create_bitmap_texture(GrContext* ctx,
else if (
// We do not support scratch ETC1 textures, hence they should all be at least
// trying to go to the cache.
cache
optionalKey.isValid()
// Make sure that the underlying device supports ETC1 textures before we go ahead
// and check the data.
&& ctx->getGpu()->caps()->isConfigTexturable(kETC1_GrPixelConfig)
@@ -348,7 +451,7 @@ static GrTexture* sk_gr_create_bitmap_texture(GrContext* ctx,
// the bitmap has available pixels, then they might not be what the decompressed
// data is.
&& !(bitmap->readyToDraw())) {
GrTexture *texture = load_etc1_texture(ctx, cache, params, *bitmap, desc);
GrTexture *texture = load_etc1_texture(ctx, optionalKey, *bitmap, desc);
if (texture) {
return texture;
}
@@ -356,7 +459,7 @@ static GrTexture* sk_gr_create_bitmap_texture(GrContext* ctx,
#endif // SK_IGNORE_ETC1_SUPPORT
else {
GrTexture *texture = load_yuv_texture(ctx, cache, params, *bitmap, desc);
GrTexture *texture = load_yuv_texture(ctx, optionalKey, *bitmap, desc);
if (texture) {
return texture;
}
@@ -366,8 +469,32 @@ static GrTexture* sk_gr_create_bitmap_texture(GrContext* ctx,
return NULL;
}
return sk_gr_allocate_texture(ctx, cache, params, origBitmap, desc,
bitmap->getPixels(), bitmap->rowBytes());
return sk_gr_allocate_texture(ctx, optionalKey, desc, bitmap->getPixels(), bitmap->rowBytes());
}
static GrTexture* create_bitmap_texture(GrContext* ctx,
const SkBitmap& bmp,
Stretch stretch,
const GrContentKey& unstretchedKey,
const GrContentKey& stretchedKey) {
if (kNo_Stretch != stretch) {
SkAutoTUnref<GrTexture> unstretched;
// Check if we have the unstretched version in the cache, if not create it.
if (unstretchedKey.isValid()) {
unstretched.reset(ctx->findAndRefCachedTexture(unstretchedKey));
}
if (!unstretched) {
unstretched.reset(create_unstretched_bitmap_texture(ctx, bmp, unstretchedKey));
if (!unstretched) {
return NULL;
}
}
GrTexture* resized = resize_texture(unstretched, stretch, stretchedKey);
return resized;
}
return create_unstretched_bitmap_texture(ctx, bmp, unstretchedKey);
}
static GrTexture* get_texture_backing_bmp(const SkBitmap& bitmap, const GrContext* context,
@@ -393,12 +520,23 @@ bool GrIsBitmapInCache(const GrContext* ctx,
return true;
}
GrContentKey key;
generate_bitmap_key(bitmap, &key);
// We don't cache volatile bitmaps
if (bitmap.isVolatile()) {
return false;
}
// If it is inherently texture backed, consider it in the cache
if (bitmap.getTexture()) {
return true;
}
Stretch stretch = get_stretch_type(ctx, bitmap.width(), bitmap.height(), params);
GrContentKey key, resizedKey;
generate_bitmap_keys(bitmap, stretch, &key, &resizedKey);
GrSurfaceDesc desc;
generate_bitmap_texture_desc(bitmap, &desc);
return ctx->isTextureInCache(desc, key, params);
return ctx->isResourceInCache((kNo_Stretch == stretch) ? key : resizedKey);
}
GrTexture* GrRefCachedBitmapTexture(GrContext* ctx,
@@ -409,29 +547,29 @@ GrTexture* GrRefCachedBitmapTexture(GrContext* ctx,
return SkRef(result);
}
bool cache = !bitmap.isVolatile();
Stretch stretch = get_stretch_type(ctx, bitmap.width(), bitmap.height(), params);
GrContentKey key, resizedKey;
if (cache) {
if (!bitmap.isVolatile()) {
// If the bitmap isn't changing try to find a cached copy first.
generate_bitmap_keys(bitmap, stretch, &key, &resizedKey);
GrContentKey key;
generate_bitmap_key(bitmap, &key);
result = ctx->findAndRefCachedTexture(resizedKey.isValid() ? resizedKey : key);
if (result) {
return result;
}
}
GrSurfaceDesc desc;
generate_bitmap_texture_desc(bitmap, &desc);
result = create_bitmap_texture(ctx, bitmap, stretch, key, resizedKey);
if (result) {
return result;
}
result = ctx->findAndRefTexture(desc, key, params);
}
if (NULL == result) {
result = sk_gr_create_bitmap_texture(ctx, cache, params, bitmap);
}
if (NULL == result) {
SkDebugf("---- failed to create texture for cache [%d %d]\n",
bitmap.width(), bitmap.height());
}
return result;
SkDebugf("---- failed to create texture for cache [%d %d]\n",
bitmap.width(), bitmap.height());
return NULL;
}
///////////////////////////////////////////////////////////////////////////////
// alphatype is ignored for now, but if GrPixelConfig is expanded to encompass

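Condensed, the new flow in this file reads roughly as follows (a simplification of GrRefCachedBitmapTexture above using the helpers introduced in this commit; the texture-backed-bitmap fast path and error logging are omitted):

GrTexture* ref_bitmap_texture(GrContext* ctx, const SkBitmap& bmp,
                              const GrTextureParams* params) {
    // Will tiling/filtering on this hardware force a power-of-two stretch?
    Stretch stretch = get_stretch_type(ctx, bmp.width(), bmp.height(), params);
    GrContentKey key, resizedKey;
    if (!bmp.isVolatile()) {
        // Build the unstretched key and, if stretching, the derived key too.
        generate_bitmap_keys(bmp, stretch, &key, &resizedKey);
        GrTexture* cached =
                ctx->findAndRefCachedTexture(resizedKey.isValid() ? resizedKey : key);
        if (cached) {
            return cached;
        }
    }
    // Upload (or find) the unstretched texture, then stretch it on the GPU.
    return create_bitmap_texture(ctx, bmp, stretch, key, resizedKey);
}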
View File

@@ -190,7 +190,6 @@ GrTextureStripAtlas::AtlasRow* GrTextureStripAtlas::getLRU() {
}
void GrTextureStripAtlas::lockTexture() {
GrTextureParams params;
GrSurfaceDesc texDesc;
texDesc.fWidth = fDesc.fWidth;
texDesc.fHeight = fDesc.fHeight;
@@ -202,9 +201,10 @@ void GrTextureStripAtlas::lockTexture() {
builder[0] = static_cast<uint32_t>(fCacheKey);
builder.finish();
fTexture = fDesc.fContext->findAndRefTexture(texDesc, key, &params);
fTexture = fDesc.fContext->findAndRefCachedTexture(key);
if (NULL == fTexture) {
fTexture = fDesc.fContext->createTexture(&params, texDesc, key, NULL, 0);
fTexture = fDesc.fContext->createTexture(texDesc, NULL, 0);
SkAssertResult(fDesc.fContext->addResourceToCache(key, fTexture));
// This is a new texture, so all of our cache info is now invalid
this->initLRU();
fKeyTable.rewind();

View File

@@ -114,14 +114,14 @@ static GrRenderTarget* random_render_target(GrContext* context, SkRandom* random
builder[0] = texDesc.fOrigin;
builder.finish();
SkAutoTUnref<GrTexture> texture(context->findAndRefTexture(texDesc, key, &params));
GrTexture* texture = context->findAndRefCachedTexture(key);
if (!texture) {
texture.reset(context->createTexture(&params, texDesc, key, 0, 0));
if (!texture) {
return NULL;
texture = context->createTexture(texDesc);
if (texture) {
SkAssertResult(context->addResourceToCache(key, texture));
}
}
return SkRef(texture->asRenderTarget());
return texture ? texture->asRenderTarget() : NULL;
}
static void set_random_xpf(GrContext* context, const GrDrawTargetCaps& caps,