Move resize functionality out of createAndLockTexture and into createResizedTexture

http://codereview.appspot.com/6459080/



git-svn-id: http://skia.googlecode.com/svn/trunk@5066 2bbb7eff-a529-9590-31e7-b0007b416f81
robertphillips@google.com 2012-08-13 18:00:36 +00:00
parent 9c2ea84635
commit 3319f33470
2 changed files with 97 additions and 80 deletions
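
For orientation before the hunks: the code being moved rounds a non-power-of-two (NPOT) texture up to power-of-two (POT) dimensions and, when it cannot stretch on the GPU by drawing into a render target, falls back to an unfiltered CPU stretch through stretchImage(). The sketch below is a minimal standalone illustration of that fallback idea only; nextPow2() and stretchNearest() are hypothetical stand-ins, not the GrNextPow2()/stretchImage() used in the diff.

// Standalone illustration, not Skia code. It mimics what the moved
// createResizedTexture() path does when the GPU (FBO) route is unavailable:
// round the dimensions up to powers of two and do an unfiltered CPU stretch.
// nextPow2() and stretchNearest() are hypothetical stand-ins for GrNextPow2()
// and stretchImage() in the hunks below.
#include <cstdint>
#include <cstdio>
#include <cstring>
#include <vector>

// Smallest power of two >= n. (The GPU path in the diff additionally clamps
// the input to a minimum render-target size of 64 before rounding.)
static int nextPow2(int n) {
    int p = 1;
    while (p < n) {
        p <<= 1;
    }
    return p;
}

// Unfiltered (point-sampled) stretch of a packed pixel buffer with bpp bytes
// per pixel, the same kind of stretch the diff's TODO notes is not filtered.
static void stretchNearest(std::vector<uint8_t>* dst, int dstW, int dstH,
                           const uint8_t* src, int srcW, int srcH, int bpp) {
    dst->resize(static_cast<size_t>(dstW) * dstH * bpp);
    for (int y = 0; y < dstH; ++y) {
        int srcY = y * srcH / dstH;
        for (int x = 0; x < dstW; ++x) {
            int srcX = x * srcW / dstW;
            std::memcpy(dst->data() + (static_cast<size_t>(y) * dstW + x) * bpp,
                        src + (static_cast<size_t>(srcY) * srcW + srcX) * bpp,
                        bpp);
        }
    }
}

int main() {
    const int srcW = 100, srcH = 60, bpp = 4;   // NPOT source, RGBA-sized pixels
    std::vector<uint8_t> src(static_cast<size_t>(srcW) * srcH * bpp, 0xff);

    int dstW = nextPow2(srcW);                  // 128
    int dstH = nextPow2(srcH);                  // 64
    std::vector<uint8_t> dst;
    stretchNearest(&dst, dstW, dstH, src.data(), srcW, srcH, bpp);

    std::printf("stretched %dx%d -> %dx%d (%zu bytes)\n",
                srcW, srcH, dstW, dstH, dst.size());
    return 0;
}

As the TODO in the moved code notes, this CPU path is point-sampled; only the render-target path can produce a filtered resize.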


@@ -806,6 +806,12 @@ private:
         kDontFlush_PixelOpsFlag = 0x1,
     };
 
+    GrTexture* createResizedTexture(const GrTextureDesc& desc,
+                                    const GrCacheData& cacheData,
+                                    void* srcData,
+                                    size_t rowBytes,
+                                    bool needsFiltering);
+
     bool internalReadRenderTargetPixels(GrRenderTarget* target,
                                         int left, int top,
                                         int width, int height,


@@ -308,6 +308,87 @@ static void stretchImage(void* dst,
         }
     }
 
+// The desired texture is NPOT and tiled but that isn't supported by
+// the current hardware. Resize the texture to be a POT
+GrTexture* GrContext::createResizedTexture(const GrTextureDesc& desc,
+                                           const GrCacheData& cacheData,
+                                           void* srcData,
+                                           size_t rowBytes,
+                                           bool needsFiltering) {
+    TextureCacheEntry clampEntry = this->findAndLockTexture(desc, cacheData, NULL);
+
+    if (NULL == clampEntry.texture()) {
+        clampEntry = this->createAndLockTexture(NULL, desc, cacheData, srcData, rowBytes);
+        GrAssert(NULL != clampEntry.texture());
+        if (NULL == clampEntry.texture()) {
+            return NULL;
+        }
+    }
+
+    GrTextureDesc rtDesc = desc;
+    rtDesc.fFlags = rtDesc.fFlags |
+                    kRenderTarget_GrTextureFlagBit |
+                    kNoStencil_GrTextureFlagBit;
+    rtDesc.fWidth = GrNextPow2(GrMax(desc.fWidth, 64));
+    rtDesc.fHeight = GrNextPow2(GrMax(desc.fHeight, 64));
+
+    GrTexture* texture = fGpu->createTexture(rtDesc, NULL, 0);
+
+    if (NULL != texture) {
+        GrDrawTarget::AutoStateRestore asr(fGpu, GrDrawTarget::kReset_ASRInit);
+        GrDrawState* drawState = fGpu->drawState();
+        drawState->setRenderTarget(texture->asRenderTarget());
+
+        // if filtering is not desired then we want to ensure all
+        // texels in the resampled image are copies of texels from
+        // the original.
+        drawState->sampler(0)->reset(SkShader::kClamp_TileMode,
+                                     needsFiltering);
+        drawState->createTextureEffect(0, clampEntry.texture());
+
+        static const GrVertexLayout layout =
+                            GrDrawTarget::StageTexCoordVertexLayoutBit(0,0);
+        GrDrawTarget::AutoReleaseGeometry arg(fGpu, layout, 4, 0);
+
+        if (arg.succeeded()) {
+            GrPoint* verts = (GrPoint*) arg.vertices();
+            verts[0].setIRectFan(0, 0,
+                                 texture->width(),
+                                 texture->height(),
+                                 2*sizeof(GrPoint));
+            verts[1].setIRectFan(0, 0, 1, 1, 2*sizeof(GrPoint));
+            fGpu->drawNonIndexed(kTriangleFan_GrPrimitiveType,
+                                 0, 4);
+        }
+        texture->releaseRenderTarget();
+    } else {
+        // TODO: Our CPU stretch doesn't filter. But we create separate
+        // stretched textures when the sampler state is either filtered or
+        // not. Either implement filtered stretch blit on CPU or just create
+        // one when FBO case fails.
+        rtDesc.fFlags = kNone_GrTextureFlags;
+        // no longer need to clamp at min RT size.
+        rtDesc.fWidth = GrNextPow2(desc.fWidth);
+        rtDesc.fHeight = GrNextPow2(desc.fHeight);
+        int bpp = GrBytesPerPixel(desc.fConfig);
+        SkAutoSMalloc<128*128*4> stretchedPixels(bpp *
+                                                 rtDesc.fWidth *
+                                                 rtDesc.fHeight);
+        stretchImage(stretchedPixels.get(), rtDesc.fWidth, rtDesc.fHeight,
+                     srcData, desc.fWidth, desc.fHeight, bpp);
+
+        size_t stretchedRowBytes = rtDesc.fWidth * bpp;
+
+        GrTexture* texture = fGpu->createTexture(rtDesc,
+                                                 stretchedPixels.get(),
+                                                 stretchedRowBytes);
+        GrAssert(NULL != texture);
+    }
+
+    fTextureCache->unlock(clampEntry.cacheEntry());
+
+    return texture;
+}
+
 GrContext::TextureCacheEntry GrContext::createAndLockTexture(
         const GrTextureParams* params,
         const GrTextureDesc& desc,
@@ -324,89 +405,19 @@ GrContext::TextureCacheEntry GrContext::createAndLockTexture(
     GrResourceKey resourceKey = GrTexture::ComputeKey(fGpu, params, desc, cacheData, false);
 
+    GrTexture* texture = NULL;
     if (GrTexture::NeedsResizing(resourceKey)) {
-        // The desired texture is NPOT and tiled but that isn't supported by
-        // the current hardware. Resize the texture to be a POT
-        GrAssert(NULL != params);
-        TextureCacheEntry clampEntry = this->findAndLockTexture(desc, cacheData, NULL);
-        if (NULL == clampEntry.texture()) {
-            clampEntry = this->createAndLockTexture(NULL, desc, cacheData, srcData, rowBytes);
-            GrAssert(NULL != clampEntry.texture());
-            if (NULL == clampEntry.texture()) {
-                return entry;
-            }
-        }
-        GrTextureDesc rtDesc = desc;
-        rtDesc.fFlags = rtDesc.fFlags |
-                        kRenderTarget_GrTextureFlagBit |
-                        kNoStencil_GrTextureFlagBit;
-        rtDesc.fWidth = GrNextPow2(GrMax(desc.fWidth, 64));
-        rtDesc.fHeight = GrNextPow2(GrMax(desc.fHeight, 64));
-        GrTexture* texture = fGpu->createTexture(rtDesc, NULL, 0);
-        if (NULL != texture) {
-            GrDrawTarget::AutoStateRestore asr(fGpu, GrDrawTarget::kReset_ASRInit);
-            GrDrawState* drawState = fGpu->drawState();
-            drawState->setRenderTarget(texture->asRenderTarget());
-            // if filtering is not desired then we want to ensure all
-            // texels in the resampled image are copies of texels from
-            // the original.
-            drawState->sampler(0)->reset(SkShader::kClamp_TileMode,
-                                         GrTexture::NeedsFiltering(resourceKey));
-            drawState->createTextureEffect(0, clampEntry.texture());
-            static const GrVertexLayout layout =
-                                GrDrawTarget::StageTexCoordVertexLayoutBit(0,0);
-            GrDrawTarget::AutoReleaseGeometry arg(fGpu, layout, 4, 0);
-            if (arg.succeeded()) {
-                GrPoint* verts = (GrPoint*) arg.vertices();
-                verts[0].setIRectFan(0, 0,
-                                     texture->width(),
-                                     texture->height(),
-                                     2*sizeof(GrPoint));
-                verts[1].setIRectFan(0, 0, 1, 1, 2*sizeof(GrPoint));
-                fGpu->drawNonIndexed(kTriangleFan_GrPrimitiveType,
-                                     0, 4);
-                entry.set(fTextureCache->createAndLock(resourceKey, texture));
-            }
-            texture->releaseRenderTarget();
-        } else {
-            // TODO: Our CPU stretch doesn't filter. But we create separate
-            // stretched textures when the sampler state is either filtered or
-            // not. Either implement filtered stretch blit on CPU or just create
-            // one when FBO case fails.
-            rtDesc.fFlags = kNone_GrTextureFlags;
-            // no longer need to clamp at min RT size.
-            rtDesc.fWidth = GrNextPow2(desc.fWidth);
-            rtDesc.fHeight = GrNextPow2(desc.fHeight);
-            int bpp = GrBytesPerPixel(desc.fConfig);
-            SkAutoSMalloc<128*128*4> stretchedPixels(bpp *
-                                                     rtDesc.fWidth *
-                                                     rtDesc.fHeight);
-            stretchImage(stretchedPixels.get(), rtDesc.fWidth, rtDesc.fHeight,
-                         srcData, desc.fWidth, desc.fHeight, bpp);
-            size_t stretchedRowBytes = rtDesc.fWidth * bpp;
-            GrTexture* texture = fGpu->createTexture(rtDesc,
-                                                     stretchedPixels.get(),
-                                                     stretchedRowBytes);
-            GrAssert(NULL != texture);
-            entry.set(fTextureCache->createAndLock(resourceKey, texture));
-        }
-        fTextureCache->unlock(clampEntry.cacheEntry());
+        texture = this->createResizedTexture(desc, cacheData,
+                                             srcData, rowBytes,
+                                             GrTexture::NeedsFiltering(resourceKey));
     } else {
-        GrTexture* texture = fGpu->createTexture(desc, srcData, rowBytes);
-        if (NULL != texture) {
-            entry.set(fTextureCache->createAndLock(resourceKey, texture));
-        }
+        texture = fGpu->createTexture(desc, srcData, rowBytes);
+    }
+
+    if (NULL != texture) {
+        entry.set(fTextureCache->createAndLock(resourceKey, texture));
     }
 
     return entry;
 }