rename GrTextureDesc->GrSurfaceDesc, GrTextureFlags->GrSurfaceFlags

Review URL: https://codereview.chromium.org/682223002
This commit is contained in:
bsalomon 2014-10-28 14:33:06 -07:00 committed by Commit bot
parent 0ec28af1a7
commit f2703d83da
60 changed files with 229 additions and 226 deletions

View File

@ -63,7 +63,7 @@ public:
return 100 + ((fID % 1 == 0) ? -40 : 33);
}
static GrResourceKey ComputeKey(const GrTextureDesc& desc) {
static GrResourceKey ComputeKey(const GrSurfaceDesc& desc) {
GrCacheID::Key key;
memset(&key, 0, sizeof(key));
key.fData32[0] = (desc.fWidth) | (desc.fHeight << 16);
@ -86,9 +86,9 @@ static void get_stencil(int i, int* w, int* h, int* s) {
*s = i % 1 == 0 ? 0 : 4;
}
static void get_texture_desc(int i, GrTextureDesc* desc) {
desc->fFlags = kRenderTarget_GrTextureFlagBit |
kNoStencil_GrTextureFlagBit;
static void get_texture_desc(int i, GrSurfaceDesc* desc) {
desc->fFlags = kRenderTarget_GrSurfaceFlag |
kNoStencil_GrSurfaceFlag;
desc->fWidth = i % 1024;
desc->fHeight = i * 2 % 1024;
desc->fConfig = static_cast<GrPixelConfig>(i % (kLast_GrPixelConfig + 1));
@ -107,7 +107,7 @@ static void populate_cache(GrResourceCache* cache, GrGpu* gpu, int resourceCount
}
for (int i = 0; i < resourceCount; ++i) {
GrTextureDesc desc;
GrSurfaceDesc desc;
get_texture_desc(i, &desc);
GrResourceKey key = TextureResource::ComputeKey(desc);
GrGpuResource* resource = SkNEW_ARGS(TextureResource, (gpu, i));
@ -120,7 +120,7 @@ static void populate_cache(GrResourceCache* cache, GrGpu* gpu, int resourceCount
static void check_cache_contents_or_die(GrResourceCache* cache, int k) {
// Benchmark find calls that succeed.
{
GrTextureDesc desc;
GrSurfaceDesc desc;
get_texture_desc(k, &desc);
GrResourceKey key = TextureResource::ComputeKey(desc);
GrGpuResource* item = cache->find(key);
@ -150,7 +150,7 @@ static void check_cache_contents_or_die(GrResourceCache* cache, int k) {
// Benchmark also find calls that always fail.
{
GrTextureDesc desc;
GrSurfaceDesc desc;
get_texture_desc(k, &desc);
desc.fHeight |= 1;
GrResourceKey key = TextureResource::ComputeKey(desc);

View File

@ -1759,9 +1759,9 @@ ErrorCombination run_multiple_configs(GMMain &gmmain, GM *gm,
bool grSuccess = false;
if (gr) {
// create a render target to back the device
GrTextureDesc desc;
GrSurfaceDesc desc;
desc.fConfig = kSkia8888_GrPixelConfig;
desc.fFlags = kRenderTarget_GrTextureFlagBit;
desc.fFlags = kRenderTarget_GrSurfaceFlag;
desc.fWidth = gm->getISize().width();
desc.fHeight = gm->getISize().height();
desc.fSampleCnt = config.fSampleCnt;

View File

@ -78,10 +78,10 @@ protected:
}
}
GrTextureDesc desc;
GrSurfaceDesc desc;
// use RT flag bit because in GL it makes the texture be bottom-up
desc.fFlags = i ? kRenderTarget_GrTextureFlagBit :
kNone_GrTextureFlags;
desc.fFlags = i ? kRenderTarget_GrSurfaceFlag :
kNone_GrSurfaceFlags;
desc.fConfig = kSkia8888_GrPixelConfig;
desc.fWidth = 2 * S;
desc.fHeight = 2 * S;

View File

@ -126,11 +126,11 @@ private:
#if SK_SUPPORT_GPU
GrContext* context = baseCanvas->getGrContext();
if (context) {
GrTextureDesc desc;
GrSurfaceDesc desc;
desc.fWidth = w;
desc.fHeight = h;
desc.fConfig = SkImageInfo2GrPixelConfig(baseCanvas->imageInfo());
desc.fFlags = kRenderTarget_GrTextureFlagBit;
desc.fFlags = kRenderTarget_GrSurfaceFlag;
SkAutoTUnref<GrSurface> surface(context->createUncachedTexture(desc, NULL, 0));
SkAutoTUnref<SkBaseDevice> device(SkGpuDevice::Create(surface.get(),
SkSurfaceProps(SkSurfaceProps::kLegacyFontHost_InitType)));

View File

@ -208,7 +208,7 @@ public:
* @param cacheKey (optional) If non-NULL, we'll write the cache key we used to cacheKey.
*/
GrTexture* createTexture(const GrTextureParams* params,
const GrTextureDesc& desc,
const GrSurfaceDesc& desc,
const GrCacheID& cacheID,
const void* srcData,
size_t rowBytes,
@ -224,7 +224,7 @@ public:
* for different wrap modes on GPUs with limited NPOT
* texture support). NULL implies clamp wrap modes.
*/
GrTexture* findAndRefTexture(const GrTextureDesc& desc,
GrTexture* findAndRefTexture(const GrSurfaceDesc& desc,
const GrCacheID& cacheID,
const GrTextureParams* params);
/**
@ -232,13 +232,13 @@ public:
* will not be locked or returned. This call does not affect the priority of
* the texture for deletion.
*/
bool isTextureInCache(const GrTextureDesc& desc,
bool isTextureInCache(const GrSurfaceDesc& desc,
const GrCacheID& cacheID,
const GrTextureParams* params) const;
/**
* Enum that determines how closely a returned scratch texture must match
* a provided GrTextureDesc.
* a provided GrSurfaceDesc.
*/
enum ScratchTexMatch {
/**
@ -269,7 +269,7 @@ public:
* internalFlag is a temporary workaround until changes in the internal
* architecture are complete. Use the default value.
*/
GrTexture* refScratchTexture(const GrTextureDesc&, ScratchTexMatch match,
GrTexture* refScratchTexture(const GrSurfaceDesc&, ScratchTexMatch match,
bool internalFlag = false);
/**
@ -280,7 +280,7 @@ public:
* tiling non-power-of-two textures on APIs that don't support this (e.g.
* unextended GLES2). NPOT uncached textures are not tilable on such APIs.
*/
GrTexture* createUncachedTexture(const GrTextureDesc& desc,
GrTexture* createUncachedTexture(const GrSurfaceDesc& desc,
void* srcData,
size_t rowBytes);
@ -1001,13 +1001,13 @@ private:
void internalDrawPath(GrDrawTarget* target, bool useAA, const SkPath& path,
const GrStrokeInfo& stroke);
GrTexture* createResizedTexture(const GrTextureDesc& desc,
GrTexture* createResizedTexture(const GrSurfaceDesc& desc,
const GrCacheID& cacheID,
const void* srcData,
size_t rowBytes,
bool filter);
GrTexture* createNewScratchTexture(const GrTextureDesc& desc);
GrTexture* createNewScratchTexture(const GrSurfaceDesc& desc);
/**
* These functions create premul <-> unpremul effects if it is possible to generate a pair

View File

@ -135,7 +135,7 @@ protected:
GrRenderTarget(GrGpu* gpu,
bool isWrapped,
GrTexture* texture,
const GrTextureDesc& desc)
const GrSurfaceDesc& desc)
: INHERITED(gpu, isWrapped, desc)
, fStencilBuffer(NULL)
, fTexture(texture) {

View File

@ -58,7 +58,7 @@ public:
/**
* Return the descriptor describing the surface
*/
const GrTextureDesc& desc() const { return fDesc; }
const GrSurfaceDesc& desc() const { return fDesc; }
/**
* @return the texture associated with the surface, may be NULL.
@ -136,12 +136,12 @@ protected:
// Provides access to methods that should be public within Skia code.
friend class GrSurfacePriv;
GrSurface(GrGpu* gpu, bool isWrapped, const GrTextureDesc& desc)
GrSurface(GrGpu* gpu, bool isWrapped, const GrSurfaceDesc& desc)
: INHERITED(gpu, isWrapped)
, fDesc(desc) {
}
GrTextureDesc fDesc;
GrSurfaceDesc fDesc;
private:
typedef GrGpuResource INHERITED;

View File

@ -58,7 +58,7 @@ protected:
// the subclass constructor to initialize this pointer.
SkAutoTUnref<GrRenderTarget> fRenderTarget;
GrTexture(GrGpu* gpu, bool isWrapped, const GrTextureDesc& desc);
GrTexture(GrGpu* gpu, bool isWrapped, const GrSurfaceDesc& desc);
virtual ~GrTexture();

View File

@ -419,34 +419,37 @@ static inline bool GrPixelConfigIsAlphaOnly(GrPixelConfig config) {
}
/**
* Optional bitfield flags that can be passed to createTexture.
* Optional bitfield flags that can be set on GrSurfaceDesc (below).
*/
enum GrTextureFlags {
kNone_GrTextureFlags = 0x0,
enum GrSurfaceFlags {
kNone_GrSurfaceFlags = 0x0,
/**
* Creates a texture that can be rendered to as a GrRenderTarget. Use
* GrTexture::asRenderTarget() to access.
*/
kRenderTarget_GrTextureFlagBit = 0x1,
kRenderTarget_GrSurfaceFlag = 0x1,
/**
* By default all render targets have an associated stencil buffer that
* may be required for path filling. This flag overrides stencil buffer
* creation.
* MAKE THIS PRIVATE?
*/
kNoStencil_GrTextureFlagBit = 0x2,
/**
* Hint that the CPU may modify this texture after creation.
*/
kDynamicUpdate_GrTextureFlagBit = 0x4,
kNoStencil_GrSurfaceFlag = 0x2,
/**
* Indicates that all allocations (color buffer, FBO completeness, etc)
* should be verified.
*/
kCheckAllocation_GrTextureFlagBit = 0x8,
kCheckAllocation_GrSurfaceFlag = 0x4,
};
GR_MAKE_BITFIELD_OPS(GrTextureFlags)
GR_MAKE_BITFIELD_OPS(GrSurfaceFlags)
// Legacy aliases
typedef GrSurfaceFlags GrTextureFlags;
static const GrSurfaceFlags kNone_GrTextureFlags = kNone_GrSurfaceFlags;
static const GrSurfaceFlags kRenderTarget_GrTextureFlagBit = kRenderTarget_GrSurfaceFlag;
static const GrSurfaceFlags kNoStencil_GrTextureFlagBit = kNoStencil_GrSurfaceFlag;
static const GrSurfaceFlags kCheckAllocation_GrTextureFlagBit = kCheckAllocation_GrSurfaceFlag;
/**
* Some textures will be stored such that the upper and left edges of the content meet at the
@ -462,11 +465,11 @@ enum GrSurfaceOrigin {
};
/**
* Describes a texture to be created.
* Describes a surface to be created.
*/
struct GrTextureDesc {
GrTextureDesc()
: fFlags(kNone_GrTextureFlags)
struct GrSurfaceDesc {
GrSurfaceDesc()
: fFlags(kNone_GrSurfaceFlags)
, fOrigin(kDefault_GrSurfaceOrigin)
, fWidth(0)
, fHeight(0)
@ -474,7 +477,7 @@ struct GrTextureDesc {
, fSampleCnt(0) {
}
GrTextureFlags fFlags; //!< bitfield of TextureFlags
GrSurfaceFlags fFlags; //!< bitfield of SurfaceFlags
GrSurfaceOrigin fOrigin; //!< origin of the texture
int fWidth; //!< Width of the texture
int fHeight; //!< Height of the texture
@ -487,7 +490,7 @@ struct GrTextureDesc {
/**
* The number of samples per pixel or 0 to disable full scene AA. This only
* applies if the kRenderTarget_GrTextureFlagBit is set. The actual number
* applies if the kRenderTarget_GrSurfaceFlag is set. The actual number
* of samples may not exactly match the request. The request will be rounded
* up to the next supported sample count, or down if it is larger than the
* max supported count.
@ -495,6 +498,9 @@ struct GrTextureDesc {
int fSampleCnt;
};
// Legacy alias
typedef GrSurfaceDesc GrTextureDesc;
/**
* GrCacheID is used to create and find cached GrResources (e.g. GrTextures). The ID has two parts:
* the domain and the key. Domains simply allow multiple clients to use 0-based indices as their
@ -585,15 +591,12 @@ enum GrBackendTextureFlags {
/**
* No flags enabled
*/
kNone_GrBackendTextureFlag = kNone_GrTextureFlags,
kNone_GrBackendTextureFlag = 0,
/**
* Indicates that the texture is also a render target, and thus should have
* a GrRenderTarget object.
*
* D3D (future): client must have created the texture with flags that allow
* it to be used as a render target.
*/
kRenderTarget_GrBackendTextureFlag = kRenderTarget_GrTextureFlagBit,
kRenderTarget_GrBackendTextureFlag = kRenderTarget_GrSurfaceFlag,
};
GR_MAKE_BITFIELD_OPS(GrBackendTextureFlags)

View File

@ -249,8 +249,8 @@ bool SkImageFilter::filterImageGPU(Proxy* proxy, const SkBitmap& src, const Cont
SkRect dstRect = SkRect::MakeWH(srcRect.width(), srcRect.height());
GrContext* context = srcTexture->getContext();
GrTextureDesc desc;
desc.fFlags = kRenderTarget_GrTextureFlagBit,
GrSurfaceDesc desc;
desc.fFlags = kRenderTarget_GrSurfaceFlag,
desc.fWidth = bounds.width();
desc.fHeight = bounds.height();
desc.fConfig = kRGBA_8888_GrPixelConfig;

View File

@ -272,13 +272,13 @@ bool SkAlphaThresholdFilterImpl::asFragmentProcessor(GrFragmentProcessor** fp,
const SkIRect&) const {
if (fp) {
GrContext* context = texture->getContext();
GrTextureDesc maskDesc;
GrSurfaceDesc maskDesc;
if (context->isConfigRenderable(kAlpha_8_GrPixelConfig, false)) {
maskDesc.fConfig = kAlpha_8_GrPixelConfig;
} else {
maskDesc.fConfig = kRGBA_8888_GrPixelConfig;
}
maskDesc.fFlags = kRenderTarget_GrTextureFlagBit | kNoStencil_GrTextureFlagBit;
maskDesc.fFlags = kRenderTarget_GrSurfaceFlag | kNoStencil_GrSurfaceFlag;
// Add one pixel of border to ensure that clamp mode will be all zeros
// the outside.
maskDesc.fWidth = texture->width();

View File

@ -767,7 +767,7 @@ void GrGLRectBlurEffect::setData(const GrGLProgramDataManager& pdman,
bool GrRectBlurEffect::CreateBlurProfileTexture(GrContext *context, float sigma,
GrTexture **blurProfileTexture) {
GrTextureParams params;
GrTextureDesc texDesc;
GrSurfaceDesc texDesc;
unsigned int profile_size = SkScalarCeilToInt(6*sigma);
@ -944,7 +944,7 @@ GrFragmentProcessor* GrRRectBlurEffect::Create(GrContext* context, float sigma,
unsigned int smallRectSide = 2*(blurRadius + cornerRadius) + 1;
unsigned int texSide = smallRectSide + 2*blurRadius;
GrTextureDesc texDesc;
GrSurfaceDesc texDesc;
texDesc.fWidth = texSide;
texDesc.fHeight = texSide;
texDesc.fConfig = kAlpha_8_GrPixelConfig;

View File

@ -351,7 +351,7 @@ GrFragmentProcessor* SkColorCubeFilter::asFragmentProcessor(GrContext* context)
key.fData64[1] = 0;
GrCacheID cacheID(gCubeDomain, key);
GrTextureDesc desc;
GrSurfaceDesc desc;
desc.fWidth = fCache.cubeDimension();
desc.fHeight = fCache.cubeDimension() * fCache.cubeDimension();
desc.fConfig = kRGBA_8888_GrPixelConfig;

View File

@ -408,8 +408,8 @@ bool SkDisplacementMapEffect::filterImageGPU(Proxy* proxy, const SkBitmap& src,
GrTexture* displacement = displacementBM.getTexture();
GrContext* context = color->getContext();
GrTextureDesc desc;
desc.fFlags = kRenderTarget_GrTextureFlagBit | kNoStencil_GrTextureFlagBit;
GrSurfaceDesc desc;
desc.fFlags = kRenderTarget_GrSurfaceFlag | kNoStencil_GrSurfaceFlag;
desc.fWidth = bounds.width();
desc.fHeight = bounds.height();
desc.fConfig = kSkia8888_GrPixelConfig;

View File

@ -172,8 +172,8 @@ GrTexture* GaussianBlur(GrContext* context,
kRGBA_8888_GrPixelConfig == srcTexture->config() ||
kAlpha_8_GrPixelConfig == srcTexture->config());
GrTextureDesc desc;
desc.fFlags = kRenderTarget_GrTextureFlagBit | kNoStencil_GrTextureFlagBit;
GrSurfaceDesc desc;
desc.fFlags = kRenderTarget_GrSurfaceFlag | kNoStencil_GrSurfaceFlag;
desc.fWidth = SkScalarFloorToInt(srcRect.width());
desc.fHeight = SkScalarFloorToInt(srcRect.height());
desc.fConfig = srcTexture->config();

View File

@ -513,8 +513,8 @@ bool apply_morphology(const SkBitmap& input,
SkIntToScalar(srcTexture->height())));
SkIRect dstRect = SkIRect::MakeWH(rect.width(), rect.height());
GrTextureDesc desc;
desc.fFlags = kRenderTarget_GrTextureFlagBit | kNoStencil_GrTextureFlagBit;
GrSurfaceDesc desc;
desc.fFlags = kRenderTarget_GrSurfaceFlag | kNoStencil_GrSurfaceFlag;
desc.fWidth = rect.width();
desc.fHeight = rect.height();
desc.fConfig = kSkia8888_GrPixelConfig;

View File

@ -134,8 +134,8 @@ bool SkXfermodeImageFilter::filterImageGPU(Proxy* proxy,
GrFragmentProcessor* xferProcessor = NULL;
GrTextureDesc desc;
desc.fFlags = kRenderTarget_GrTextureFlagBit | kNoStencil_GrTextureFlagBit;
GrSurfaceDesc desc;
desc.fFlags = kRenderTarget_GrSurfaceFlag | kNoStencil_GrSurfaceFlag;
desc.fWidth = src.width();
desc.fHeight = src.height();
desc.fConfig = kSkia8888_GrPixelConfig;

View File

@ -140,7 +140,7 @@ GrAADistanceFieldPathRenderer::PathData* GrAADistanceFieldPathRenderer::addPathT
if (NULL == fAtlas) {
SkISize textureSize = SkISize::Make(ATLAS_TEXTURE_WIDTH, ATLAS_TEXTURE_HEIGHT);
fAtlas = SkNEW_ARGS(GrAtlas, (fContext->getGpu(), kAlpha_8_GrPixelConfig,
kNone_GrTextureFlags, textureSize,
kNone_GrSurfaceFlags, textureSize,
NUM_PLOTS_X, NUM_PLOTS_Y, false));
if (NULL == fAtlas) {
return NULL;

View File

@ -143,7 +143,7 @@ void GrPlot::resetRects() {
///////////////////////////////////////////////////////////////////////////////
GrAtlas::GrAtlas(GrGpu* gpu, GrPixelConfig config, GrTextureFlags flags,
GrAtlas::GrAtlas(GrGpu* gpu, GrPixelConfig config, GrSurfaceFlags flags,
const SkISize& backingTextureSize,
int numPlotsX, int numPlotsY, bool batchUploads) {
fGpu = SkRef(gpu);
@ -219,8 +219,8 @@ GrPlot* GrAtlas::addToAtlas(ClientPlotUsage* usage,
// before we get a new plot, make sure we have a backing texture
if (NULL == fTexture) {
// TODO: Update this to use the cache rather than directly creating a texture.
GrTextureDesc desc;
desc.fFlags = fFlags | kDynamicUpdate_GrTextureFlagBit;
GrSurfaceDesc desc;
desc.fFlags = fFlags;
desc.fWidth = fBackingTextureSize.width();
desc.fHeight = fBackingTextureSize.height();
desc.fConfig = fPixelConfig;

View File

@ -93,7 +93,7 @@ public:
friend class GrAtlas;
};
GrAtlas(GrGpu*, GrPixelConfig, GrTextureFlags flags,
GrAtlas(GrGpu*, GrPixelConfig, GrSurfaceFlags flags,
const SkISize& backingTextureSize,
int numPlotsX, int numPlotsY, bool batchUploads);
~GrAtlas();
@ -135,7 +135,7 @@ private:
GrGpu* fGpu;
GrPixelConfig fPixelConfig;
GrTextureFlags fFlags;
GrSurfaceFlags fFlags;
GrTexture* fTexture;
SkISize fBackingTextureSize;
int fNumPlotsX;

View File

@ -114,7 +114,7 @@ public:
}
void acquireMask(int32_t clipGenID,
const GrTextureDesc& desc,
const GrSurfaceDesc& desc,
const SkIRect& bound) {
if (fStack.empty()) {
@ -199,7 +199,7 @@ private:
void acquireMask(GrContext* context,
int32_t clipGenID,
const GrTextureDesc& desc,
const GrSurfaceDesc& desc,
const SkIRect& bound) {
fLastClipGenID = clipGenID;
@ -215,7 +215,7 @@ private:
void reset () {
fLastClipGenID = SkClipStack::kInvalidGenID;
GrTextureDesc desc;
GrSurfaceDesc desc;
fLastMask.reset(NULL);
fLastBound.setEmpty();

View File

@ -481,8 +481,8 @@ void GrClipMaskManager::mergeMask(GrTexture* dstMask,
}
GrTexture* GrClipMaskManager::createTempMask(int width, int height) {
GrTextureDesc desc;
desc.fFlags = kRenderTarget_GrTextureFlagBit|kNoStencil_GrTextureFlagBit;
GrSurfaceDesc desc;
desc.fFlags = kRenderTarget_GrSurfaceFlag|kNoStencil_GrSurfaceFlag;
desc.fWidth = width;
desc.fHeight = height;
desc.fConfig = kAlpha_8_GrPixelConfig;
@ -512,8 +512,8 @@ GrTexture* GrClipMaskManager::allocMaskTexture(int32_t elementsGenID,
// currently cached mask so it can be reused.
fAACache.reset();
GrTextureDesc desc;
desc.fFlags = willUpload ? kNone_GrTextureFlags : kRenderTarget_GrTextureFlagBit;
GrSurfaceDesc desc;
desc.fFlags = willUpload ? kNone_GrSurfaceFlags : kRenderTarget_GrSurfaceFlag;
desc.fWidth = clipSpaceIBounds.width();
desc.fHeight = clipSpaceIBounds.height();
desc.fConfig = kRGBA_8888_GrPixelConfig;

View File

@ -261,7 +261,7 @@ GrTextContext* GrContext::createTextContext(GrRenderTarget* renderTarget,
////////////////////////////////////////////////////////////////////////////////
GrTexture* GrContext::findAndRefTexture(const GrTextureDesc& desc,
GrTexture* GrContext::findAndRefTexture(const GrSurfaceDesc& desc,
const GrCacheID& cacheID,
const GrTextureParams* params) {
GrResourceKey resourceKey = GrTexturePriv::ComputeKey(fGpu, params, desc, cacheID);
@ -270,7 +270,7 @@ GrTexture* GrContext::findAndRefTexture(const GrTextureDesc& desc,
return static_cast<GrTexture*>(resource);
}
bool GrContext::isTextureInCache(const GrTextureDesc& desc,
bool GrContext::isTextureInCache(const GrSurfaceDesc& desc,
const GrCacheID& cacheID,
const GrTextureParams* params) const {
GrResourceKey resourceKey = GrTexturePriv::ComputeKey(fGpu, params, desc, cacheID);
@ -332,7 +332,7 @@ extern const GrVertexAttrib gVertexAttribs[] = {
// The desired texture is NPOT and tiled but that isn't supported by
// the current hardware. Resize the texture to be a POT
GrTexture* GrContext::createResizedTexture(const GrTextureDesc& desc,
GrTexture* GrContext::createResizedTexture(const GrSurfaceDesc& desc,
const GrCacheID& cacheID,
const void* srcData,
size_t rowBytes,
@ -346,10 +346,10 @@ GrTexture* GrContext::createResizedTexture(const GrTextureDesc& desc,
}
}
GrTextureDesc rtDesc = desc;
GrSurfaceDesc rtDesc = desc;
rtDesc.fFlags = rtDesc.fFlags |
kRenderTarget_GrTextureFlagBit |
kNoStencil_GrTextureFlagBit;
kRenderTarget_GrSurfaceFlag |
kNoStencil_GrSurfaceFlag;
rtDesc.fWidth = GrNextPow2(desc.fWidth);
rtDesc.fHeight = GrNextPow2(desc.fHeight);
@ -384,7 +384,7 @@ GrTexture* GrContext::createResizedTexture(const GrTextureDesc& desc,
// not. Either implement filtered stretch blit on CPU or just create
// one when FBO case fails.
rtDesc.fFlags = kNone_GrTextureFlags;
rtDesc.fFlags = kNone_GrSurfaceFlags;
// no longer need to clamp at min RT size.
rtDesc.fWidth = GrNextPow2(desc.fWidth);
rtDesc.fHeight = GrNextPow2(desc.fHeight);
@ -407,7 +407,7 @@ GrTexture* GrContext::createResizedTexture(const GrTextureDesc& desc,
}
GrTexture* GrContext::createTexture(const GrTextureParams* params,
const GrTextureDesc& desc,
const GrSurfaceDesc& desc,
const GrCacheID& cacheID,
const void* srcData,
size_t rowBytes,
@ -437,7 +437,7 @@ GrTexture* GrContext::createTexture(const GrTextureParams* params,
return texture;
}
GrTexture* GrContext::createNewScratchTexture(const GrTextureDesc& desc) {
GrTexture* GrContext::createNewScratchTexture(const GrSurfaceDesc& desc) {
GrTexture* texture = fGpu->createTexture(desc, NULL, 0);
if (!texture) {
return NULL;
@ -446,24 +446,24 @@ GrTexture* GrContext::createNewScratchTexture(const GrTextureDesc& desc) {
return texture;
}
GrTexture* GrContext::refScratchTexture(const GrTextureDesc& inDesc, ScratchTexMatch match,
GrTexture* GrContext::refScratchTexture(const GrSurfaceDesc& inDesc, ScratchTexMatch match,
bool calledDuringFlush) {
// kNoStencil has no meaning if kRT isn't set.
SkASSERT((inDesc.fFlags & kRenderTarget_GrTextureFlagBit) ||
!(inDesc.fFlags & kNoStencil_GrTextureFlagBit));
SkASSERT((inDesc.fFlags & kRenderTarget_GrSurfaceFlag) ||
!(inDesc.fFlags & kNoStencil_GrSurfaceFlag));
// Make sure caller has checked for renderability if kRT is set.
SkASSERT(!(inDesc.fFlags & kRenderTarget_GrTextureFlagBit) ||
SkASSERT(!(inDesc.fFlags & kRenderTarget_GrSurfaceFlag) ||
this->isConfigRenderable(inDesc.fConfig, inDesc.fSampleCnt > 0));
SkTCopyOnFirstWrite<GrTextureDesc> desc(inDesc);
SkTCopyOnFirstWrite<GrSurfaceDesc> desc(inDesc);
if (fGpu->caps()->reuseScratchTextures() || (desc->fFlags & kRenderTarget_GrTextureFlagBit)) {
GrTextureFlags origFlags = desc->fFlags;
if (fGpu->caps()->reuseScratchTextures() || (desc->fFlags & kRenderTarget_GrSurfaceFlag)) {
GrSurfaceFlags origFlags = desc->fFlags;
if (kApprox_ScratchTexMatch == match) {
// bin by pow2 with a reasonable min
static const int MIN_SIZE = 16;
GrTextureDesc* wdesc = desc.writable();
GrSurfaceDesc* wdesc = desc.writable();
wdesc->fWidth = SkTMax(MIN_SIZE, GrNextPow2(desc->fWidth));
wdesc->fHeight = SkTMax(MIN_SIZE, GrNextPow2(desc->fHeight));
}
@ -473,7 +473,7 @@ GrTexture* GrContext::refScratchTexture(const GrTextureDesc& inDesc, ScratchTexM
uint32_t scratchFlags = 0;
if (calledDuringFlush) {
scratchFlags = GrResourceCache2::kRequireNoPendingIO_ScratchFlag;
} else if (!(desc->fFlags & kRenderTarget_GrTextureFlagBit)) {
} else if (!(desc->fFlags & kRenderTarget_GrSurfaceFlag)) {
// If it is not a render target then it will most likely be populated by
// writePixels() which will trigger a flush if the texture has pending IO.
scratchFlags = GrResourceCache2::kPreferNoPendingIO_ScratchFlag;
@ -492,8 +492,8 @@ GrTexture* GrContext::refScratchTexture(const GrTextureDesc& inDesc, ScratchTexM
// We no longer try to reuse textures that were previously used as render targets in
// situations where no RT is needed; doing otherwise can confuse the video driver and
// cause significant performance problems in some cases.
if (desc->fFlags & kNoStencil_GrTextureFlagBit) {
desc.writable()->fFlags = desc->fFlags & ~kNoStencil_GrTextureFlagBit;
if (desc->fFlags & kNoStencil_GrSurfaceFlag) {
desc.writable()->fFlags = desc->fFlags & ~kNoStencil_GrSurfaceFlag;
} else {
break;
}
@ -521,10 +521,10 @@ bool GrContext::OverbudgetCB(void* data) {
}
GrTexture* GrContext::createUncachedTexture(const GrTextureDesc& descIn,
GrTexture* GrContext::createUncachedTexture(const GrSurfaceDesc& descIn,
void* srcData,
size_t rowBytes) {
GrTextureDesc descCopy = descIn;
GrSurfaceDesc descCopy = descIn;
return fGpu->createTexture(descCopy, srcData, rowBytes);
}
@ -1315,7 +1315,7 @@ bool GrContext::writeSurfacePixels(GrSurface* surface,
swapRAndB = true;
}
GrTextureDesc desc;
GrSurfaceDesc desc;
desc.fWidth = width;
desc.fHeight = height;
desc.fConfig = writeConfig;
@ -1452,8 +1452,8 @@ bool GrContext::readRenderTargetPixels(GrRenderTarget* target,
GrTexture* src = target->asTexture();
if (src && (swapRAndB || unpremul || flipY)) {
// Make the scratch a render so we can read its pixels.
GrTextureDesc desc;
desc.fFlags = kRenderTarget_GrTextureFlagBit;
GrSurfaceDesc desc;
desc.fFlags = kRenderTarget_GrSurfaceFlag;
desc.fWidth = width;
desc.fHeight = height;
desc.fConfig = readConfig;

View File

@ -194,8 +194,8 @@ static void setup_gamma_texture(GrContext* context, const SkGlyphCache* cache,
SkScalerContext::GetGammaLUTData(contrast, paintGamma, deviceGamma, data.get());
// TODO: Update this to use the cache rather than directly creating a texture.
GrTextureDesc desc;
desc.fFlags = kDynamicUpdate_GrTextureFlagBit;
GrSurfaceDesc desc;
desc.fFlags = kNone_GrSurfaceFlags;
desc.fWidth = width;
desc.fHeight = height;
desc.fConfig = kAlpha_8_GrPixelConfig;

View File

@ -450,7 +450,7 @@ bool GrDrawTarget::setupDstReadIfNecessary(GrDeviceCoordTexture* dstCopy, const
// MSAA consideration: When there is support for reading MSAA samples in the shader we could
// have per-sample dst values by making the copy multisampled.
GrTextureDesc desc;
GrSurfaceDesc desc;
this->initCopySurfaceDstDesc(rt, &desc);
desc.fWidth = copyRect.width();
desc.fHeight = copyRect.height();
@ -995,10 +995,10 @@ bool GrDrawTarget::onCopySurface(GrSurface* dst,
return true;
}
void GrDrawTarget::initCopySurfaceDstDesc(const GrSurface* src, GrTextureDesc* desc) {
void GrDrawTarget::initCopySurfaceDstDesc(const GrSurface* src, GrSurfaceDesc* desc) {
// Make the dst of the copy be a render target because the default copySurface draws to the dst.
desc->fOrigin = kDefault_GrSurfaceOrigin;
desc->fFlags = kRenderTarget_GrTextureFlagBit | kNoStencil_GrTextureFlagBit;
desc->fFlags = kRenderTarget_GrSurfaceFlag | kNoStencil_GrSurfaceFlag;
desc->fConfig = src->config();
}

View File

@ -472,7 +472,7 @@ public:
* populate the origin, config, and flags fields of the desc such that copySurface is more
* likely to succeed and be efficient.
*/
virtual void initCopySurfaceDstDesc(const GrSurface* src, GrTextureDesc* desc);
virtual void initCopySurfaceDstDesc(const GrSurface* src, GrSurfaceDesc* desc);
/**

View File

@ -61,13 +61,13 @@ void GrGpu::contextAbandoned() {}
////////////////////////////////////////////////////////////////////////////////
GrTexture* GrGpu::createTexture(const GrTextureDesc& desc,
GrTexture* GrGpu::createTexture(const GrSurfaceDesc& desc,
const void* srcData, size_t rowBytes) {
if (!this->caps()->isConfigTexturable(desc.fConfig)) {
return NULL;
}
if ((desc.fFlags & kRenderTarget_GrTextureFlagBit) &&
if ((desc.fFlags & kRenderTarget_GrSurfaceFlag) &&
!this->caps()->isConfigRenderable(desc.fConfig, desc.fSampleCnt > 0)) {
return NULL;
}
@ -75,7 +75,7 @@ GrTexture* GrGpu::createTexture(const GrTextureDesc& desc,
GrTexture *tex = NULL;
if (GrPixelConfigIsCompressed(desc.fConfig)) {
// We shouldn't be rendering into this
SkASSERT((desc.fFlags & kRenderTarget_GrTextureFlagBit) == 0);
SkASSERT((desc.fFlags & kRenderTarget_GrSurfaceFlag) == 0);
if (!this->caps()->npotTextureTileSupport() &&
(!SkIsPow2(desc.fWidth) || !SkIsPow2(desc.fHeight))) {
@ -88,8 +88,8 @@ GrTexture* GrGpu::createTexture(const GrTextureDesc& desc,
this->handleDirtyContext();
tex = this->onCreateTexture(desc, srcData, rowBytes);
if (tex &&
(kRenderTarget_GrTextureFlagBit & desc.fFlags) &&
!(kNoStencil_GrTextureFlagBit & desc.fFlags)) {
(kRenderTarget_GrSurfaceFlag & desc.fFlags) &&
!(kNoStencil_GrSurfaceFlag & desc.fFlags)) {
SkASSERT(tex->asRenderTarget());
// TODO: defer this and attach dynamically
if (!this->attachStencilBufferToRenderTarget(tex->asRenderTarget())) {

View File

@ -105,7 +105,7 @@ public:
*
* @return The texture object if successful, otherwise NULL.
*/
GrTexture* createTexture(const GrTextureDesc& desc,
GrTexture* createTexture(const GrSurfaceDesc& desc,
const void* srcData, size_t rowBytes);
/**
@ -399,10 +399,10 @@ private:
virtual void onResetContext(uint32_t resetBits) = 0;
// overridden by backend-specific derived class to create objects.
virtual GrTexture* onCreateTexture(const GrTextureDesc& desc,
virtual GrTexture* onCreateTexture(const GrSurfaceDesc& desc,
const void* srcData,
size_t rowBytes) = 0;
virtual GrTexture* onCreateCompressedTexture(const GrTextureDesc& desc,
virtual GrTexture* onCreateCompressedTexture(const GrSurfaceDesc& desc,
const void* srcData) = 0;
virtual GrTexture* onWrapBackendTexture(const GrBackendTextureDesc&) = 0;
virtual GrRenderTarget* onWrapBackendRenderTarget(const GrBackendRenderTargetDesc&) = 0;

View File

@ -588,7 +588,7 @@ bool GrInOrderDrawBuffer::onCanCopySurface(GrSurface* dst,
return fDstGpu->canCopySurface(dst, src, srcRect, dstPoint);
}
void GrInOrderDrawBuffer::initCopySurfaceDstDesc(const GrSurface* src, GrTextureDesc* desc) {
void GrInOrderDrawBuffer::initCopySurfaceDstDesc(const GrSurface* src, GrSurfaceDesc* desc) {
fDstGpu->initCopySurfaceDstDesc(src, desc);
}

View File

@ -82,7 +82,7 @@ public:
virtual void discard(GrRenderTarget*) SK_OVERRIDE;
virtual void initCopySurfaceDstDesc(const GrSurface* src, GrTextureDesc* desc) SK_OVERRIDE;
virtual void initCopySurfaceDstDesc(const GrSurface* src, GrSurfaceDesc* desc) SK_OVERRIDE;
protected:
virtual void clipWillBeSet(const GrClipData* newClip) SK_OVERRIDE;

View File

@ -104,7 +104,7 @@ void GrLayerCache::initAtlas() {
SkISize textureSize = SkISize::Make(kAtlasTextureWidth, kAtlasTextureHeight);
fAtlas.reset(SkNEW_ARGS(GrAtlas, (fContext->getGpu(), kSkia8888_GrPixelConfig,
kRenderTarget_GrTextureFlagBit,
kRenderTarget_GrSurfaceFlag,
textureSize, kNumPlotsX, kNumPlotsY, false)));
}
@ -162,7 +162,7 @@ GrCachedLayer* GrLayerCache::findLayerOrCreate(uint32_t pictureID,
}
bool GrLayerCache::tryToAtlas(GrCachedLayer* layer,
const GrTextureDesc& desc,
const GrSurfaceDesc& desc,
bool* needsRendering) {
SkDEBUGCODE(GrAutoValidateLayer avl(fAtlas->getTexture(), layer);)
@ -222,7 +222,7 @@ bool GrLayerCache::tryToAtlas(GrCachedLayer* layer,
return false;
}
bool GrLayerCache::lock(GrCachedLayer* layer, const GrTextureDesc& desc, bool* needsRendering) {
bool GrLayerCache::lock(GrCachedLayer* layer, const GrSurfaceDesc& desc, bool* needsRendering) {
if (layer->locked()) {
// This layer is already locked
*needsRendering = false;

View File

@ -211,7 +211,7 @@ public:
// Attempt to place 'layer' in the atlas. Return true on success; false on failure.
// When true is returned, 'needsRendering' will indicate if the layer must be (re)drawn.
// Additionally, the GPU resources will be locked.
bool tryToAtlas(GrCachedLayer* layer, const GrTextureDesc& desc, bool* needsRendering);
bool tryToAtlas(GrCachedLayer* layer, const GrSurfaceDesc& desc, bool* needsRendering);
// Attempt to lock the GPU resources required for a layer. Return true on success;
// false on failure. When true is returned 'needsRendering' will indicate if the
@ -221,7 +221,7 @@ public:
// Currently, this path always uses a new scratch texture for non-Atlased layers
// and (thus) doesn't cache anything. This can yield a lot of re-rendering.
// TODO: allow rediscovery of free-floating layers that are still in the resource cache.
bool lock(GrCachedLayer* layer, const GrTextureDesc& desc, bool* needsRendering);
bool lock(GrCachedLayer* layer, const GrSurfaceDesc& desc, bool* needsRendering);
// addUse is just here to keep the API symmetric
void addUse(GrCachedLayer* layer) { layer->addUse(); }

View File

@ -33,8 +33,8 @@ static void prepare_for_hoisting(GrLayerCache* layerCache,
layerRect,
combined,
info.fPaint);
GrTextureDesc desc;
desc.fFlags = kRenderTarget_GrTextureFlagBit;
GrSurfaceDesc desc;
desc.fFlags = kRenderTarget_GrSurfaceFlag;
desc.fWidth = layerRect.width();
desc.fHeight = layerRect.height();
desc.fConfig = kSkia8888_GrPixelConfig;

View File

@ -181,7 +181,7 @@ void GrResourceCache::notifyPurgable(const GrGpuResource* resource) {
resource->getCacheEntry()->key().isScratch() &&
!fCaps->reuseScratchTextures() &&
!(static_cast<const GrTexture*>(resource)->desc().fFlags &
kRenderTarget_GrTextureFlagBit)) {
kRenderTarget_GrSurfaceFlag)) {
this->deleteResource(resource->getCacheEntry());
}
}

View File

@ -230,7 +230,7 @@ bool GrSWMaskHelper::init(const SkIRect& resultBounds,
* Get a texture (from the texture cache) of the correct size & format.
*/
GrTexture* GrSWMaskHelper::createTexture() {
GrTextureDesc desc;
GrSurfaceDesc desc;
desc.fWidth = fBM.width();
desc.fHeight = fBM.height();
desc.fConfig = kAlpha_8_GrPixelConfig;
@ -251,7 +251,7 @@ GrTexture* GrSWMaskHelper::createTexture() {
return fContext->refScratchTexture(desc, GrContext::kApprox_ScratchTexMatch);
}
void GrSWMaskHelper::sendTextureData(GrTexture *texture, const GrTextureDesc& desc,
void GrSWMaskHelper::sendTextureData(GrTexture *texture, const GrSurfaceDesc& desc,
const void *data, int rowbytes) {
// If we aren't reusing scratch textures we don't need to flush before
// writing since no one else will be using 'texture'
@ -266,7 +266,7 @@ void GrSWMaskHelper::sendTextureData(GrTexture *texture, const GrTextureDesc& de
reuseScratch ? 0 : GrContext::kDontFlush_PixelOpsFlag);
}
void GrSWMaskHelper::compressTextureData(GrTexture *texture, const GrTextureDesc& desc) {
void GrSWMaskHelper::compressTextureData(GrTexture *texture, const GrSurfaceDesc& desc) {
SkASSERT(GrPixelConfigIsCompressed(desc.fConfig));
SkASSERT(fmt_to_config(fCompressedFormat) == desc.fConfig);
@ -283,7 +283,7 @@ void GrSWMaskHelper::compressTextureData(GrTexture *texture, const GrTextureDesc
void GrSWMaskHelper::toTexture(GrTexture *texture) {
SkAutoLockPixels alp(fBM);
GrTextureDesc desc;
GrSurfaceDesc desc;
desc.fWidth = fBM.width();
desc.fHeight = fBM.height();
desc.fConfig = texture->config();

View File

@ -125,12 +125,12 @@ private:
// Actually sends the texture data to the GPU. This is called from
// toTexture with the data filled in depending on the texture config.
void sendTextureData(GrTexture *texture, const GrTextureDesc& desc,
void sendTextureData(GrTexture *texture, const GrSurfaceDesc& desc,
const void *data, int rowbytes);
// Compresses the bitmap stored in fBM and sends the compressed data
// to the GPU to be stored in 'texture' using sendTextureData.
void compressTextureData(GrTexture *texture, const GrTextureDesc& desc);
void compressTextureData(GrTexture *texture, const GrSurfaceDesc& desc);
typedef SkNoncopyable INHERITED;
};

View File

@ -126,7 +126,7 @@ GrPlot* GrFontCache::addToAtlas(GrMaskFormat format, GrAtlas::ClientPlotUsage* u
if (NULL == fAtlases[atlasIndex]) {
SkISize textureSize = SkISize::Make(GR_ATLAS_TEXTURE_WIDTH,
GR_ATLAS_TEXTURE_HEIGHT);
fAtlases[atlasIndex] = SkNEW_ARGS(GrAtlas, (fGpu, config, kNone_GrTextureFlags,
fAtlases[atlasIndex] = SkNEW_ARGS(GrAtlas, (fGpu, config, kNone_GrSurfaceFlags,
textureSize,
GR_NUM_PLOTS_X,
GR_NUM_PLOTS_Y,

View File

@ -68,18 +68,18 @@ void GrTexture::onAbandon() {
void GrTexture::validateDesc() const {
if (this->asRenderTarget()) {
// This texture has a render target
SkASSERT(0 != (fDesc.fFlags & kRenderTarget_GrTextureFlagBit));
SkASSERT(0 != (fDesc.fFlags & kRenderTarget_GrSurfaceFlag));
if (this->asRenderTarget()->getStencilBuffer()) {
SkASSERT(0 != (fDesc.fFlags & kNoStencil_GrTextureFlagBit));
SkASSERT(0 != (fDesc.fFlags & kNoStencil_GrSurfaceFlag));
} else {
SkASSERT(0 == (fDesc.fFlags & kNoStencil_GrTextureFlagBit));
SkASSERT(0 == (fDesc.fFlags & kNoStencil_GrSurfaceFlag));
}
SkASSERT(fDesc.fSampleCnt == this->asRenderTarget()->numSamples());
} else {
SkASSERT(0 == (fDesc.fFlags & kRenderTarget_GrTextureFlagBit));
SkASSERT(0 == (fDesc.fFlags & kNoStencil_GrTextureFlagBit));
SkASSERT(0 == (fDesc.fFlags & kRenderTarget_GrSurfaceFlag));
SkASSERT(0 == (fDesc.fFlags & kNoStencil_GrSurfaceFlag));
SkASSERT(0 == fDesc.fSampleCnt);
}
}
@ -104,7 +104,7 @@ enum TextureFlags {
namespace {
GrResourceKey::ResourceFlags get_texture_flags(const GrGpu* gpu,
const GrTextureParams* params,
const GrTextureDesc& desc) {
const GrSurfaceDesc& desc) {
GrResourceKey::ResourceFlags flags = 0;
bool tiled = params && params->isTiled();
if (tiled && !gpu->caps()->npotTextureTileSupport()) {
@ -124,11 +124,11 @@ GrResourceKey::ResourceFlags get_texture_flags(const GrGpu* gpu,
}
// FIXME: This should be refactored with the code in gl/GrGpuGL.cpp.
GrSurfaceOrigin resolve_origin(const GrTextureDesc& desc) {
GrSurfaceOrigin resolve_origin(const GrSurfaceDesc& desc) {
// By default, GrRenderTargets are GL's normal orientation so that they
// can be drawn to by the outside world without the client having
// to render upside down.
bool renderTarget = 0 != (desc.fFlags & kRenderTarget_GrTextureFlagBit);
bool renderTarget = 0 != (desc.fFlags & kRenderTarget_GrSurfaceFlag);
if (kDefault_GrSurfaceOrigin == desc.fOrigin) {
return renderTarget ? kBottomLeft_GrSurfaceOrigin : kTopLeft_GrSurfaceOrigin;
} else {
@ -138,7 +138,7 @@ GrSurfaceOrigin resolve_origin(const GrTextureDesc& desc) {
}
//////////////////////////////////////////////////////////////////////////////
GrTexture::GrTexture(GrGpu* gpu, bool isWrapped, const GrTextureDesc& desc)
GrTexture::GrTexture(GrGpu* gpu, bool isWrapped, const GrSurfaceDesc& desc)
: INHERITED(gpu, isWrapped, desc)
, fRenderTarget(NULL)
, fMipMapsStatus(kNotAllocated_MipMapsStatus) {
@ -150,13 +150,13 @@ GrTexture::GrTexture(GrGpu* gpu, bool isWrapped, const GrTextureDesc& desc)
GrResourceKey GrTexturePriv::ComputeKey(const GrGpu* gpu,
const GrTextureParams* params,
const GrTextureDesc& desc,
const GrSurfaceDesc& desc,
const GrCacheID& cacheID) {
GrResourceKey::ResourceFlags flags = get_texture_flags(gpu, params, desc);
return GrResourceKey(cacheID, ResourceType(), flags);
}
GrResourceKey GrTexturePriv::ComputeScratchKey(const GrTextureDesc& desc) {
GrResourceKey GrTexturePriv::ComputeScratchKey(const GrSurfaceDesc& desc) {
GrCacheID::Key idKey;
// Instead of a client-provided key of the texture contents we create a key from the
// descriptor.

View File

@ -17,15 +17,15 @@
implemented privately in GrTexture with a inline public method here). */
class GrTexturePriv {
public:
void setFlag(GrTextureFlags flags) {
void setFlag(GrSurfaceFlags flags) {
fTexture->fDesc.fFlags = fTexture->fDesc.fFlags | flags;
}
void resetFlag(GrTextureFlags flags) {
void resetFlag(GrSurfaceFlags flags) {
fTexture->fDesc.fFlags = fTexture->fDesc.fFlags & ~flags;
}
bool isSetFlag(GrTextureFlags flags) const {
bool isSetFlag(GrSurfaceFlags flags) const {
return 0 != (fTexture->fDesc.fFlags & flags);
}
@ -46,9 +46,9 @@ public:
static GrResourceKey ComputeKey(const GrGpu* gpu,
const GrTextureParams* params,
const GrTextureDesc& desc,
const GrSurfaceDesc& desc,
const GrCacheID& cacheID);
static GrResourceKey ComputeScratchKey(const GrTextureDesc& desc);
static GrResourceKey ComputeScratchKey(const GrSurfaceDesc& desc);
static bool NeedsResizing(const GrResourceKey& key);
static bool NeedsBilerp(const GrResourceKey& key);

View File

@ -171,8 +171,8 @@ SkGpuDevice* SkGpuDevice::Create(GrContext* context, const SkImageInfo& origInfo
}
const SkImageInfo info = SkImageInfo::Make(origInfo.width(), origInfo.height(), ct, at);
GrTextureDesc desc;
desc.fFlags = kRenderTarget_GrTextureFlagBit;
GrSurfaceDesc desc;
desc.fFlags = kRenderTarget_GrSurfaceFlag;
desc.fWidth = info.width();
desc.fHeight = info.height();
desc.fConfig = SkImageInfo2GrPixelConfig(info);
@ -607,7 +607,7 @@ bool draw_with_mask_filter(GrContext* context, const SkPath& devPath,
// we now have a device-aligned 8bit mask in dstM, ready to be drawn using
// the current clip (and identity matrix) and GrPaint settings
GrTextureDesc desc;
GrSurfaceDesc desc;
desc.fWidth = dstM.fBounds.width();
desc.fHeight = dstM.fBounds.height();
desc.fConfig = kAlpha_8_GrPixelConfig;
@ -632,8 +632,8 @@ GrTexture* create_mask_GPU(GrContext* context,
const GrStrokeInfo& strokeInfo,
bool doAA,
int sampleCnt) {
GrTextureDesc desc;
desc.fFlags = kRenderTarget_GrTextureFlagBit;
GrSurfaceDesc desc;
desc.fFlags = kRenderTarget_GrSurfaceFlag;
desc.fWidth = SkScalarCeilToInt(maskRect.width());
desc.fHeight = SkScalarCeilToInt(maskRect.height());
desc.fSampleCnt = doAA ? sampleCnt : 0;
@ -1751,9 +1751,9 @@ void SkGpuDevice::flush() {
///////////////////////////////////////////////////////////////////////////////
SkBaseDevice* SkGpuDevice::onCreateDevice(const SkImageInfo& info, Usage usage) {
GrTextureDesc desc;
GrSurfaceDesc desc;
desc.fConfig = fRenderTarget->config();
desc.fFlags = kRenderTarget_GrTextureFlagBit;
desc.fFlags = kRenderTarget_GrSurfaceFlag;
desc.fWidth = info.width();
desc.fHeight = info.height();
desc.fSampleCnt = fRenderTarget->numSamples();

View File

@ -105,8 +105,8 @@ static void generate_bitmap_cache_id(const SkBitmap& bitmap, GrCacheID* id) {
id->reset(gBitmapTextureDomain, key);
}
static void generate_bitmap_texture_desc(const SkBitmap& bitmap, GrTextureDesc* desc) {
desc->fFlags = kNone_GrTextureFlags;
static void generate_bitmap_texture_desc(const SkBitmap& bitmap, GrSurfaceDesc* desc) {
desc->fFlags = kNone_GrSurfaceFlags;
desc->fWidth = bitmap.width();
desc->fHeight = bitmap.height();
desc->fConfig = SkImageInfo2GrPixelConfig(bitmap.info());
@ -139,7 +139,7 @@ static GrTexture* sk_gr_allocate_texture(GrContext* ctx,
bool cache,
const GrTextureParams* params,
const SkBitmap& bm,
GrTextureDesc desc,
GrSurfaceDesc desc,
const void* pixels,
size_t rowBytes) {
GrTexture* result;
@ -170,7 +170,7 @@ static GrTexture* sk_gr_allocate_texture(GrContext* ctx,
#ifndef SK_IGNORE_ETC1_SUPPORT
static GrTexture *load_etc1_texture(GrContext* ctx, bool cache,
const GrTextureParams* params,
const SkBitmap &bm, GrTextureDesc desc) {
const SkBitmap &bm, GrSurfaceDesc desc) {
SkAutoTUnref<SkData> data(bm.pixelRef()->refEncodedData());
// Is this even encoded data?
@ -219,7 +219,7 @@ static GrTexture *load_etc1_texture(GrContext* ctx, bool cache,
#endif // SK_IGNORE_ETC1_SUPPORT
static GrTexture *load_yuv_texture(GrContext* ctx, bool cache, const GrTextureParams* params,
const SkBitmap& bm, const GrTextureDesc& desc) {
const SkBitmap& bm, const GrSurfaceDesc& desc) {
// Subsets are not supported, the whole pixelRef is loaded when using YUV decoding
if ((bm.pixelRef()->info().width() != bm.info().width()) ||
(bm.pixelRef()->info().height() != bm.info().height())) {
@ -252,7 +252,7 @@ static GrTexture *load_yuv_texture(GrContext* ctx, bool cache, const GrTexturePa
return NULL;
}
GrTextureDesc yuvDesc;
GrSurfaceDesc yuvDesc;
yuvDesc.fConfig = kAlpha_8_GrPixelConfig;
SkAutoTUnref<GrTexture> yuvTextures[3];
for (int i = 0; i < 3; ++i) {
@ -267,10 +267,10 @@ static GrTexture *load_yuv_texture(GrContext* ctx, bool cache, const GrTexturePa
}
}
GrTextureDesc rtDesc = desc;
GrSurfaceDesc rtDesc = desc;
rtDesc.fFlags = rtDesc.fFlags |
kRenderTarget_GrTextureFlagBit |
kNoStencil_GrTextureFlagBit;
kRenderTarget_GrSurfaceFlag |
kNoStencil_GrSurfaceFlag;
GrTexture* result = sk_gr_allocate_texture(ctx, cache, params, bm, rtDesc, NULL, 0);
@ -302,7 +302,7 @@ static GrTexture* sk_gr_create_bitmap_texture(GrContext* ctx,
const SkBitmap* bitmap = &origBitmap;
GrTextureDesc desc;
GrSurfaceDesc desc;
generate_bitmap_texture_desc(*bitmap, &desc);
if (kIndex_8_SkColorType == bitmap->colorType()) {
@ -368,7 +368,7 @@ bool GrIsBitmapInCache(const GrContext* ctx,
GrCacheID cacheID;
generate_bitmap_cache_id(bitmap, &cacheID);
GrTextureDesc desc;
GrSurfaceDesc desc;
generate_bitmap_texture_desc(bitmap, &desc);
return ctx->isTextureInCache(desc, cacheID, params);
}
@ -386,7 +386,7 @@ GrTexture* GrRefCachedBitmapTexture(GrContext* ctx,
GrCacheID cacheID;
generate_bitmap_cache_id(bitmap, &cacheID);
GrTextureDesc desc;
GrSurfaceDesc desc;
generate_bitmap_texture_desc(bitmap, &desc);
result = ctx->findAndRefTexture(desc, cacheID, params);

View File

@ -62,7 +62,7 @@ static SkGrPixelRef* copy_to_new_texture_pixelref(GrTexture* texture, SkColorTyp
if (NULL == context) {
return NULL;
}
GrTextureDesc desc;
GrSurfaceDesc desc;
SkIRect srcRect;
@ -77,7 +77,7 @@ static SkGrPixelRef* copy_to_new_texture_pixelref(GrTexture* texture, SkColorTyp
desc.fHeight = subset->height();
srcRect = *subset;
}
desc.fFlags = kRenderTarget_GrTextureFlagBit | kNoStencil_GrTextureFlagBit;
desc.fFlags = kRenderTarget_GrSurfaceFlag | kNoStencil_GrSurfaceFlag;
desc.fConfig = SkImageInfo2GrPixelConfig(dstCT, kPremul_SkAlphaType);
GrTexture* dst = context->createUncachedTexture(desc, NULL, 0);

View File

@ -174,9 +174,9 @@ void GrConfigConversionEffect::TestForPreservingPMConversions(GrContext* context
}
}
GrTextureDesc desc;
desc.fFlags = kRenderTarget_GrTextureFlagBit |
kNoStencil_GrTextureFlagBit;
GrSurfaceDesc desc;
desc.fFlags = kRenderTarget_GrSurfaceFlag |
kNoStencil_GrSurfaceFlag;
desc.fWidth = 256;
desc.fHeight = 256;
desc.fConfig = kRGBA_8888_GrPixelConfig;
@ -189,7 +189,7 @@ void GrConfigConversionEffect::TestForPreservingPMConversions(GrContext* context
if (!tempTex.get()) {
return;
}
desc.fFlags = kNone_GrTextureFlags;
desc.fFlags = kNone_GrSurfaceFlags;
SkAutoTUnref<GrTexture> dataTex(context->createUncachedTexture(desc, data, 0));
if (!dataTex.get()) {
return;

View File

@ -191,7 +191,7 @@ GrTextureStripAtlas::AtlasRow* GrTextureStripAtlas::getLRU() {
void GrTextureStripAtlas::lockTexture() {
GrTextureParams params;
GrTextureDesc texDesc;
GrSurfaceDesc texDesc;
texDesc.fWidth = fDesc.fWidth;
texDesc.fHeight = fDesc.fHeight;
texDesc.fConfig = fDesc.fConfig;

View File

@ -25,11 +25,11 @@ void GrGLRenderTarget::init(const Desc& desc,
}
namespace {
GrTextureDesc MakeDesc(GrTextureFlags flags,
GrSurfaceDesc MakeDesc(GrSurfaceFlags flags,
int width, int height,
GrPixelConfig config, int sampleCnt,
GrSurfaceOrigin origin) {
GrTextureDesc temp;
GrSurfaceDesc temp;
temp.fFlags = flags;
temp.fWidth = width;
temp.fHeight = height;
@ -49,7 +49,7 @@ GrGLRenderTarget::GrGLRenderTarget(GrGpuGL* gpu,
: INHERITED(gpu,
desc.fIsWrapped,
texture,
MakeDesc(kNone_GrTextureFlags,
MakeDesc(kNone_GrSurfaceFlags,
viewport.fWidth, viewport.fHeight,
desc.fConfig, desc.fSampleCnt,
desc.fOrigin)) {
@ -72,7 +72,7 @@ GrGLRenderTarget::GrGLRenderTarget(GrGpuGL* gpu,
: INHERITED(gpu,
desc.fIsWrapped,
NULL,
MakeDesc(kNone_GrTextureFlags,
MakeDesc(kNone_GrSurfaceFlags,
viewport.fWidth, viewport.fHeight,
desc.fConfig, desc.fSampleCnt,
desc.fOrigin)) {

View File

@ -57,7 +57,7 @@ public:
void invalidate() { memset(this, 0xff, sizeof(TexParams)); }
};
struct Desc : public GrTextureDesc {
struct Desc : public GrSurfaceDesc {
GrGLuint fTextureID;
bool fIsWrapped;
};

View File

@ -376,7 +376,7 @@ GrTexture* GrGpuGL::onWrapBackendTexture(const GrBackendTextureDesc& desc) {
GrGLTexture::Desc glTexDesc;
// next line relies on GrBackendTextureDesc's flags matching GrTexture's
glTexDesc.fFlags = (GrTextureFlags) desc.fFlags;
glTexDesc.fFlags = (GrSurfaceFlags) desc.fFlags;
glTexDesc.fWidth = desc.fWidth;
glTexDesc.fHeight = desc.fHeight;
glTexDesc.fConfig = desc.fConfig;
@ -530,8 +530,8 @@ bool adjust_pixel_ops_params(int surfaceWidth,
return true;
}
GrGLenum check_alloc_error(const GrTextureDesc& desc, const GrGLInterface* interface) {
if (SkToBool(desc.fFlags & kCheckAllocation_GrTextureFlagBit)) {
GrGLenum check_alloc_error(const GrSurfaceDesc& desc, const GrGLInterface* interface) {
if (SkToBool(desc.fFlags & kCheckAllocation_GrSurfaceFlag)) {
return GR_GL_GET_ERROR(interface);
} else {
return CHECK_ALLOC_ERROR(interface);
@ -941,7 +941,7 @@ static size_t as_size_t(int x) {
}
#endif
GrTexture* GrGpuGL::onCreateTexture(const GrTextureDesc& desc,
GrTexture* GrGpuGL::onCreateTexture(const GrSurfaceDesc& desc,
const void* srcData,
size_t rowBytes) {
@ -969,9 +969,9 @@ GrTexture* GrGpuGL::onCreateTexture(const GrTextureDesc& desc,
glRTDesc.fTexFBOID = 0;
glRTDesc.fIsWrapped = false;
glRTDesc.fConfig = glTexDesc.fConfig;
glRTDesc.fCheckAllocation = SkToBool(desc.fFlags & kCheckAllocation_GrTextureFlagBit);
glRTDesc.fCheckAllocation = SkToBool(desc.fFlags & kCheckAllocation_GrSurfaceFlag);
bool renderTarget = SkToBool(desc.fFlags & kRenderTarget_GrTextureFlagBit);
bool renderTarget = SkToBool(desc.fFlags & kRenderTarget_GrSurfaceFlag);
glTexDesc.fOrigin = resolve_origin(desc.fOrigin, renderTarget);
glRTDesc.fOrigin = glTexDesc.fOrigin;
@ -1064,10 +1064,10 @@ GrTexture* GrGpuGL::onCreateTexture(const GrTextureDesc& desc,
return tex;
}
GrTexture* GrGpuGL::onCreateCompressedTexture(const GrTextureDesc& desc,
GrTexture* GrGpuGL::onCreateCompressedTexture(const GrSurfaceDesc& desc,
const void* srcData) {
if(SkToBool(desc.fFlags & kRenderTarget_GrTextureFlagBit)) {
if(SkToBool(desc.fFlags & kRenderTarget_GrSurfaceFlag)) {
return return_null_texture();
}
@ -2411,7 +2411,7 @@ GrGLuint GrGpuGL::bindSurfaceAsFBO(GrSurface* surface, GrGLenum fboTarget, GrGLI
return tempFBOID;
}
void GrGpuGL::initCopySurfaceDstDesc(const GrSurface* src, GrTextureDesc* desc) {
void GrGpuGL::initCopySurfaceDstDesc(const GrSurface* src, GrSurfaceDesc* desc) {
// Check for format issues with glCopyTexSubImage2D
if (kGLES_GrGLStandard == this->glStandard() && this->glCaps().bgraIsInternalFormat() &&
kBGRA_8888_GrPixelConfig == src->config()) {
@ -2433,7 +2433,7 @@ void GrGpuGL::initCopySurfaceDstDesc(const GrSurface* src, GrTextureDesc* desc)
} else {
desc->fConfig = src->config();
desc->fOrigin = src->origin();
desc->fFlags = kNone_GrTextureFlags;
desc->fFlags = kNone_GrSurfaceFlags;
}
}

View File

@ -68,7 +68,7 @@ public:
size_t rowBytes) const SK_OVERRIDE;
virtual bool fullReadPixelsIsFasterThanPartial() const SK_OVERRIDE;
virtual void initCopySurfaceDstDesc(const GrSurface* src, GrTextureDesc* desc) SK_OVERRIDE;
virtual void initCopySurfaceDstDesc(const GrSurface* src, GrSurfaceDesc* desc) SK_OVERRIDE;
// These functions should be used to bind GL objects. They track the GL state and skip redundant
// bindings. Making the equivalent glBind calls directly will confuse the state tracking.
@ -109,10 +109,10 @@ private:
// GrGpu overrides
virtual void onResetContext(uint32_t resetBits) SK_OVERRIDE;
virtual GrTexture* onCreateTexture(const GrTextureDesc& desc,
virtual GrTexture* onCreateTexture(const GrSurfaceDesc& desc,
const void* srcData,
size_t rowBytes) SK_OVERRIDE;
virtual GrTexture* onCreateCompressedTexture(const GrTextureDesc& desc,
virtual GrTexture* onCreateCompressedTexture(const GrSurfaceDesc& desc,
const void* srcData) SK_OVERRIDE;
virtual GrVertexBuffer* onCreateVertexBuffer(size_t size, bool dynamic) SK_OVERRIDE;
virtual GrIndexBuffer* onCreateIndexBuffer(size_t size, bool dynamic) SK_OVERRIDE;

View File

@ -118,8 +118,8 @@ SkSurface* SkSurface::NewRenderTarget(GrContext* ctx, const SkImageInfo& info, i
return NULL;
}
GrTextureDesc desc;
desc.fFlags = kRenderTarget_GrTextureFlagBit | kCheckAllocation_GrTextureFlagBit;
GrSurfaceDesc desc;
desc.fFlags = kRenderTarget_GrSurfaceFlag | kCheckAllocation_GrSurfaceFlag;
desc.fWidth = info.width();
desc.fHeight = info.height();
desc.fConfig = SkImageInfo2GrPixelConfig(info);
@ -139,8 +139,8 @@ SkSurface* SkSurface::NewScratchRenderTarget(GrContext* ctx, const SkImageInfo&
return NULL;
}
GrTextureDesc desc;
desc.fFlags = kRenderTarget_GrTextureFlagBit | kCheckAllocation_GrTextureFlagBit;
GrSurfaceDesc desc;
desc.fFlags = kRenderTarget_GrSurfaceFlag | kCheckAllocation_GrSurfaceFlag;
desc.fWidth = info.width();
desc.fHeight = info.height();
desc.fConfig = SkImageInfo2GrPixelConfig(info);

View File

@ -284,9 +284,9 @@ static bool gpu_blur_path(GrContextFactory* factory, const SkPath& path,
return false;
}
GrTextureDesc desc;
GrSurfaceDesc desc;
desc.fConfig = kSkia8888_GrPixelConfig;
desc.fFlags = kRenderTarget_GrTextureFlagBit;
desc.fFlags = kRenderTarget_GrSurfaceFlag;
desc.fWidth = resultCount;
desc.fHeight = 30;
desc.fSampleCnt = 0;

View File

@ -22,10 +22,10 @@ static GrTexture* createTexture(GrContext* context) {
memset(textureData, 0, 4* X_SIZE * Y_SIZE);
GrTextureDesc desc;
GrSurfaceDesc desc;
// let Skia know we will be using this texture as a render target
desc.fFlags = kRenderTarget_GrTextureFlagBit;
desc.fFlags = kRenderTarget_GrSurfaceFlag;
desc.fConfig = kSkia8888_GrPixelConfig;
desc.fWidth = X_SIZE;
desc.fHeight = Y_SIZE;
@ -46,8 +46,8 @@ static void test_clip_bounds(skiatest::Reporter* reporter, GrContext* context) {
static const int kXSize = 100;
static const int kYSize = 100;
GrTextureDesc desc;
desc.fFlags = kRenderTarget_GrTextureFlagBit;
GrSurfaceDesc desc;
desc.fFlags = kRenderTarget_GrSurfaceFlag;
desc.fConfig = kAlpha_8_GrPixelConfig;
desc.fWidth = kXSize;
desc.fHeight = kYSize;
@ -148,8 +148,8 @@ static void test_cache(skiatest::Reporter* reporter, GrContext* context) {
SkClipStack clip1(bound1);
GrTextureDesc desc;
desc.fFlags = kRenderTarget_GrTextureFlagBit;
GrSurfaceDesc desc;
desc.fFlags = kRenderTarget_GrSurfaceFlag;
desc.fWidth = X_SIZE;
desc.fHeight = Y_SIZE;
desc.fConfig = kSkia8888_GrPixelConfig;

View File

@ -39,8 +39,8 @@ DEF_GPUTEST(FloatingPointTextureTest, reporter, factory) {
int glCtxTypeCnt = 1;
glCtxTypeCnt = GrContextFactory::kGLContextTypeCnt;
for (int glCtxType = 0; glCtxType < glCtxTypeCnt; ++glCtxType) {
GrTextureDesc desc;
desc.fFlags = kRenderTarget_GrTextureFlagBit;
GrSurfaceDesc desc;
desc.fFlags = kRenderTarget_GrSurfaceFlag;
desc.fWidth = DEV_W;
desc.fHeight = DEV_H;
desc.fConfig = kRGBA_float_GrPixelConfig;

View File

@ -103,10 +103,10 @@ static GrRenderTarget* random_render_target(GrGpuGL* gpu,
SkRandom* random) {
// setup render target
GrTextureParams params;
GrTextureDesc texDesc;
GrSurfaceDesc texDesc;
texDesc.fWidth = kRenderTargetWidth;
texDesc.fHeight = kRenderTargetHeight;
texDesc.fFlags = kRenderTarget_GrTextureFlagBit;
texDesc.fFlags = kRenderTarget_GrSurfaceFlag;
texDesc.fConfig = kRGBA_8888_GrPixelConfig;
texDesc.fOrigin = random->nextBool() == true ? kTopLeft_GrSurfaceOrigin :
kBottomLeft_GrSurfaceOrigin;
@ -375,13 +375,13 @@ static void set_random_stencil(GrGpuGL* gpu, SkRandom* random) {
bool GrGpuGL::programUnitTest(int maxStages) {
// setup dummy textures
GrTextureDesc dummyDesc;
dummyDesc.fFlags = kRenderTarget_GrTextureFlagBit;
GrSurfaceDesc dummyDesc;
dummyDesc.fFlags = kRenderTarget_GrSurfaceFlag;
dummyDesc.fConfig = kSkia8888_GrPixelConfig;
dummyDesc.fWidth = 34;
dummyDesc.fHeight = 18;
SkAutoTUnref<GrTexture> dummyTexture1(this->createTexture(dummyDesc, NULL, 0));
dummyDesc.fFlags = kNone_GrTextureFlags;
dummyDesc.fFlags = kNone_GrSurfaceFlags;
dummyDesc.fConfig = kAlpha_8_GrPixelConfig;
dummyDesc.fWidth = 16;
dummyDesc.fHeight = 22;

View File

@ -61,7 +61,7 @@ static void lock_layer(skiatest::Reporter* reporter,
GrLayerCache* cache,
GrCachedLayer* layer) {
// Make the layer 512x512 (so it can be atlased)
GrTextureDesc desc;
GrSurfaceDesc desc;
desc.fWidth = 512;
desc.fHeight = 512;
desc.fConfig = kSkia8888_GrPixelConfig;

View File

@ -18,15 +18,15 @@
DEF_GPUTEST(GrSurface, reporter, factory) {
GrContext* context = factory->get(GrContextFactory::kNull_GLContextType);
if (context) {
GrTextureDesc desc;
GrSurfaceDesc desc;
desc.fConfig = kSkia8888_GrPixelConfig;
desc.fFlags = kRenderTarget_GrTextureFlagBit;
desc.fFlags = kRenderTarget_GrSurfaceFlag;
desc.fWidth = 256;
desc.fHeight = 256;
desc.fSampleCnt = 0;
GrSurface* texRT1 = context->createUncachedTexture(desc, NULL, 0);
GrSurface* texRT2 = context->createUncachedTexture(desc, NULL, 0);
desc.fFlags = kNone_GrTextureFlags;
desc.fFlags = kNone_GrSurfaceFlags;
GrSurface* tex1 = context->createUncachedTexture(desc, NULL, 0);
REPORTER_ASSERT(reporter, texRT1->surfacePriv().isSameAs(texRT1));

View File

@ -312,8 +312,8 @@ DEF_GPUTEST(ReadPixels, reporter, factory) {
if (NULL == context) {
continue;
}
GrTextureDesc desc;
desc.fFlags = kRenderTarget_GrTextureFlagBit | kNoStencil_GrTextureFlagBit;
GrSurfaceDesc desc;
desc.fFlags = kRenderTarget_GrSurfaceFlag | kNoStencil_GrSurfaceFlag;
desc.fWidth = DEV_W;
desc.fHeight = DEV_H;
desc.fConfig = kSkia8888_GrPixelConfig;

View File

@ -30,10 +30,10 @@ DEF_GPUTEST(ReadWriteAlpha, reporter, factory) {
memset(textureData, 0, X_SIZE * Y_SIZE);
GrTextureDesc desc;
GrSurfaceDesc desc;
// let Skia know we will be using this texture as a render target
desc.fFlags = kRenderTarget_GrTextureFlagBit;
desc.fFlags = kRenderTarget_GrSurfaceFlag;
// it is a single channel texture
desc.fConfig = kAlpha_8_GrPixelConfig;
desc.fWidth = X_SIZE;

View File

@ -263,9 +263,9 @@ DEF_GPUTEST(ResourceCache, reporter, factory) {
continue;
}
GrTextureDesc desc;
GrSurfaceDesc desc;
desc.fConfig = kSkia8888_GrPixelConfig;
desc.fFlags = kRenderTarget_GrTextureFlagBit;
desc.fFlags = kRenderTarget_GrSurfaceFlag;
desc.fWidth = gWidth;
desc.fHeight = gHeight;
SkImageInfo info = SkImageInfo::MakeN32Premul(gWidth, gHeight);

View File

@ -312,8 +312,8 @@ static SkSurface* create_surface(const CanvasConfig& c, GrContext* grCtx) {
#if SK_SUPPORT_GPU
case kGpu_BottomLeft_DevType:
case kGpu_TopLeft_DevType:
GrTextureDesc desc;
desc.fFlags = kRenderTarget_GrTextureFlagBit;
GrSurfaceDesc desc;
desc.fFlags = kRenderTarget_GrSurfaceFlag;
desc.fWidth = DEV_W;
desc.fHeight = DEV_H;
desc.fConfig = kSkia8888_GrPixelConfig;

View File

@ -148,9 +148,9 @@ SkCanvas* PictureRenderer::setupCanvas(int width, int height) {
SkAutoTUnref<GrSurface> target;
if (fGrContext) {
// create a render target to back the device
GrTextureDesc desc;
GrSurfaceDesc desc;
desc.fConfig = kSkia8888_GrPixelConfig;
desc.fFlags = kRenderTarget_GrTextureFlagBit;
desc.fFlags = kRenderTarget_GrSurfaceFlag;
desc.fWidth = width;
desc.fHeight = height;
desc.fSampleCnt = fSampleCount;