Creating functions for uploading a mipmapped texture.
BUG=476416
GOLD_TRYBOT_URL= https://gold.skia.org/search2?unt=true&query=source_type%3Dgm&master=false&issue=1249543003
Review URL: https://codereview.chromium.org/1249543003

Commit 55f2d2d57f (parent e015c2646f)
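For context, a minimal caller-side sketch of the upload path this change introduces. The GrContext (ctx) and SkPixmap (pixmap) are assumed to exist and error handling is elided; the entry point, GrMipLevel fields, and SkMipMap usage are the ones shown in the diff below, so this is an illustrative sketch rather than additional API.

// Build a GrMipLevel array from an SkMipMap chain and hand it to the new
// GrTextureProvider::createMipMappedTexture() entry point (sketch).
SkAutoTDelete<SkMipMap> mipmaps(SkMipMap::Build(pixmap, nullptr));
const int mipLevelCount = mipmaps ? mipmaps->countLevels() + 1 : 1;

SkAutoTDeleteArray<GrMipLevel> texels(new GrMipLevel[mipLevelCount]);
texels[0].fPixels = pixmap.addr();        // base level
texels[0].fRowBytes = pixmap.rowBytes();
for (int i = 1; i < mipLevelCount; ++i) {
    SkMipMap::Level level;
    mipmaps->getLevel(i - 1, &level);     // levels ordered base -> smallest
    texels[i].fPixels = level.fPixmap.addr();
    texels[i].fRowBytes = level.fPixmap.rowBytes();
}

GrSurfaceDesc desc = GrImageInfoToSurfaceDesc(pixmap.info());
desc.fIsMipMapped = mipLevelCount > 1;
GrTexture* tex = ctx->textureProvider()->createMipMappedTexture(
        desc, SkBudgeted::kYes, texels.get(), mipLevelCount);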
@@ -45,7 +45,7 @@ public:
inline const GrTexturePriv texturePriv() const;
protected:
GrTexture(GrGpu*, LifeCycle, const GrSurfaceDesc&);
GrTexture(GrGpu*, LifeCycle, const GrSurfaceDesc&, bool wasMipMapDataProvided);
void validateDesc() const;

@@ -60,6 +60,7 @@ private:
};
MipMapsStatus fMipMapsStatus;
int fMaxMipMapLevel;
friend class GrTexturePriv;

@@ -9,6 +9,7 @@
#define GrTextureProvider_DEFINED
#include "GrTexture.h"
#include "GrTypes.h"
#include "SkImageFilter.h"
class GrSingleOwner;

@@ -22,8 +23,18 @@ public:
* Creates a new texture in the resource cache and returns it. The caller owns a
* ref on the returned texture which must be balanced by a call to unref.
*
* @param desc Description of the texture properties.
* @param budgeted Does the texture count against the resource cache budget?
* @param desc Description of the texture properties.
* @param budgeted Does the texture count against the resource cache budget?
* @param texels A contiguous array of mipmap levels
* @param mipLevelCount The number of elements in the texels array
*/
GrTexture* createMipMappedTexture(const GrSurfaceDesc& desc, SkBudgeted budgeted,
const GrMipLevel* texels, int mipLevelCount);
/**
* This function is a shim which creates a SkTArray<GrMipLevel> of size 1.
* It then calls createTexture with that SkTArray.
*
* @param srcData Pointer to the pixel values (optional).
* @param rowBytes The number of bytes between rows of the texture. Zero
* implies tightly packed rows. For compressed pixel configs, this

@@ -8,9 +8,9 @@
#ifndef GrTypes_DEFINED
#define GrTypes_DEFINED
#include "SkMath.h"
#include "SkTypes.h"
#include "GrConfig.h"
#include "SkMath.h"
////////////////////////////////////////////////////////////////////////////////

@@ -425,6 +425,11 @@ enum GrSurfaceOrigin {
kBottomLeft_GrSurfaceOrigin,
};
struct GrMipLevel {
const void* fPixels;
size_t fRowBytes;
};
/**
* An container of function pointers which consumers of Skia can fill in and
* pass to Skia. Skia will use these function pointers in place of its backend

@@ -487,7 +492,8 @@ struct GrSurfaceDesc {
, fWidth(0)
, fHeight(0)
, fConfig(kUnknown_GrPixelConfig)
, fSampleCnt(0) {
, fSampleCnt(0)
, fIsMipMapped(false) {
}
GrSurfaceFlags fFlags; //!< bitfield of TextureFlags

@@ -515,6 +521,8 @@ struct GrSurfaceDesc {
* usual texture creation method (e.g. TexImage2D in OpenGL).
*/
GrTextureStorageAllocator fTextureStorageAllocator;
bool fIsMipMapped; //!< Indicates if the texture has mipmaps
};
// Legacy alias
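A brief note on the two GrTypes.h additions above: GrMipLevel describes one mip level as a bare pixel pointer plus row stride, and GrSurfaceDesc::fIsMipMapped records whether a full chain accompanies the base level. A hypothetical two-level description might look like the following sketch (the pixel pointers are placeholders, not part of the diff):

// Hypothetical: describe a 2-level chain for a 64x64 kRGBA_8888 texture.
GrMipLevel levels[2];
levels[0].fPixels   = basePixels;      // 64x64 level, assumed to exist
levels[0].fRowBytes = 64 * 4;
levels[1].fPixels   = halfResPixels;   // 32x32 level, assumed to exist
levels[1].fRowBytes = 32 * 4;

GrSurfaceDesc desc;
desc.fWidth = 64;
desc.fHeight = 64;
desc.fConfig = kRGBA_8888_GrPixelConfig;
desc.fIsMipMapped = true;              // the new field defaults to false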
@@ -239,7 +239,8 @@ static GrTexture* set_key_and_return(GrTexture* tex, const GrUniqueKey& key) {
* 5. Ask the generator to return RGB(A) data, which the GPU can convert
*/
GrTexture* SkImageCacherator::lockTexture(GrContext* ctx, const GrUniqueKey& key,
const SkImage* client, SkImage::CachingHint chint) {
const SkImage* client, SkImage::CachingHint chint,
bool willBeMipped) {
// Values representing the various texture lock paths we can take. Used for logging the path
// taken to a histogram.
enum LockTexturePath {

@@ -301,7 +302,12 @@ GrTexture* SkImageCacherator::lockTexture(GrContext* ctx, const GrUniqueKey& key
// 5. Ask the generator to return RGB(A) data, which the GPU can convert
SkBitmap bitmap;
if (this->tryLockAsBitmap(&bitmap, client, chint)) {
GrTexture* tex = GrUploadBitmapToTexture(ctx, bitmap);
GrTexture* tex = nullptr;
if (willBeMipped) {
tex = GrGenerateMipMapsAndUploadToTexture(ctx, bitmap);
} else {
tex = GrUploadBitmapToTexture(ctx, bitmap);
}
if (tex) {
SK_HISTOGRAM_ENUMERATION("LockTexturePath", kRGBA_LockTexturePath,
kLockTexturePathCount);

@@ -75,7 +75,7 @@ private:
// Returns the texture. If the cacherator is generating the texture and wants to cache it,
// it should use the passed in key (if the key is valid).
GrTexture* lockTexture(GrContext*, const GrUniqueKey& key, const SkImage* client,
SkImage::CachingHint);
SkImage::CachingHint, bool willBeMipped);
#endif
class ScopedGenerator {

@@ -233,7 +233,7 @@ SkMipMap* SkMipMap::Build(const SkPixmap& src, SkDiscardableFactoryProc fact) {
// whip through our loop to compute the exact size needed
size_t size = 0;
int countLevels = 0;
int countLevels = 0;
{
int width = src.width();
int height = src.height();

@@ -782,6 +782,7 @@ GrTexture* GrRectBlurEffect::CreateBlurProfileTexture(GrTextureProvider* texture
texDesc.fWidth = profileSize;
texDesc.fHeight = 1;
texDesc.fConfig = kAlpha_8_GrPixelConfig;
texDesc.fIsMipMapped = false;
static const GrUniqueKey::Domain kDomain = GrUniqueKey::GenerateDomain();
GrUniqueKey key;

@@ -997,6 +998,7 @@ const GrFragmentProcessor* GrRRectBlurEffect::Create(GrTextureProvider* texProvi
texDesc.fWidth = texSide;
texDesc.fHeight = texSide;
texDesc.fConfig = kAlpha_8_GrPixelConfig;
texDesc.fIsMipMapped = false;
blurNinePatchTexture.reset(
texProvider->createTexture(texDesc, SkBudgeted::kYes , blurredMask.fImage, 0));

@@ -310,6 +310,7 @@ const GrFragmentProcessor* SkColorCubeFilter::asFragmentProcessor(GrContext* con
desc.fWidth = fCache.cubeDimension();
desc.fHeight = fCache.cubeDimension() * fCache.cubeDimension();
desc.fConfig = kRGBA_8888_GrPixelConfig;
desc.fIsMipMapped = false;
SkAutoTUnref<GrTexture> textureCube(
context->textureProvider()->findAndRefTextureByUniqueKey(key));

@@ -22,6 +22,7 @@
#include "GrTransferBuffer.h"
#include "GrVertexBuffer.h"
#include "GrVertices.h"
#include "SkTypes.h"
GrVertices& GrVertices::operator =(const GrVertices& di) {
fPrimitiveType = di.fPrimitiveType;
@@ -88,76 +89,107 @@ static GrSurfaceOrigin resolve_origin(GrSurfaceOrigin origin, bool renderTarget)
}
}
GrTexture* GrGpu::createTexture(const GrSurfaceDesc& origDesc, SkBudgeted budgeted,
const void* srcData, size_t rowBytes) {
GrSurfaceDesc desc = origDesc;
if (!this->caps()->isConfigTexturable(desc.fConfig)) {
return nullptr;
/**
* Prior to creating a texture, make sure the type of texture being created is
* supported by calling check_texture_creation_params.
*
* @param caps The capabilities of the GL device.
* @param desc The descriptor of the texture to create.
* @param isRT Indicates if the texture can be a render target.
*/
static bool check_texture_creation_params(const GrCaps& caps, const GrSurfaceDesc& desc,
bool* isRT) {
if (!caps.isConfigTexturable(desc.fConfig)) {
return false;
}
bool isRT = SkToBool(desc.fFlags & kRenderTarget_GrSurfaceFlag);
if (isRT && !this->caps()->isConfigRenderable(desc.fConfig, desc.fSampleCnt > 0)) {
return nullptr;
*isRT = SkToBool(desc.fFlags & kRenderTarget_GrSurfaceFlag);
if (*isRT && !caps.isConfigRenderable(desc.fConfig, desc.fSampleCnt > 0)) {
return false;
}
// We currently do not support multisampled textures
if (!isRT && desc.fSampleCnt > 0) {
if (!*isRT && desc.fSampleCnt > 0) {
return false;
}
if (*isRT) {
int maxRTSize = caps.maxRenderTargetSize();
if (desc.fWidth > maxRTSize || desc.fHeight > maxRTSize) {
return false;
}
} else {
int maxSize = caps.maxTextureSize();
if (desc.fWidth > maxSize || desc.fHeight > maxSize) {
return false;
}
}
return true;
}
GrTexture* GrGpu::createTexture(const GrSurfaceDesc& origDesc, SkBudgeted budgeted,
const SkTArray<GrMipLevel>& texels) {
GrSurfaceDesc desc = origDesc;
const GrCaps* caps = this->caps();
bool isRT = false;
bool textureCreationParamsValid = check_texture_creation_params(*caps, desc, &isRT);
if (!textureCreationParamsValid) {
return nullptr;
}
GrTexture *tex = nullptr;
if (isRT) {
int maxRTSize = this->caps()->maxRenderTargetSize();
if (desc.fWidth > maxRTSize || desc.fHeight > maxRTSize) {
return nullptr;
}
} else {
int maxSize = this->caps()->maxTextureSize();
if (desc.fWidth > maxSize || desc.fHeight > maxSize) {
return nullptr;
}
}
GrGpuResource::LifeCycle lifeCycle = SkBudgeted::kYes == budgeted ?
GrGpuResource::kCached_LifeCycle :
GrGpuResource::kUncached_LifeCycle;
desc.fSampleCnt = SkTMin(desc.fSampleCnt, this->caps()->maxSampleCount());
// Attempt to catch un- or wrongly initialized sample counts;
desc.fSampleCnt = SkTMin(desc.fSampleCnt, caps->maxSampleCount());
// Attempt to catch un- or wrongly initialized sample counts;
SkASSERT(desc.fSampleCnt >= 0 && desc.fSampleCnt <= 64);
desc.fOrigin = resolve_origin(desc.fOrigin, isRT);
GrTexture* tex = nullptr;
GrGpuResource::LifeCycle lifeCycle = SkBudgeted::kYes == budgeted ?
GrGpuResource::kCached_LifeCycle :
GrGpuResource::kUncached_LifeCycle;
if (GrPixelConfigIsCompressed(desc.fConfig)) {
// We shouldn't be rendering into this
SkASSERT(!isRT);
SkASSERT(0 == desc.fSampleCnt);
if (!this->caps()->npotTextureTileSupport() &&
if (!caps->npotTextureTileSupport() &&
(!SkIsPow2(desc.fWidth) || !SkIsPow2(desc.fHeight))) {
return nullptr;
}
this->handleDirtyContext();
tex = this->onCreateCompressedTexture(desc, lifeCycle, srcData);
tex = this->onCreateCompressedTexture(desc, lifeCycle, texels);
} else {
this->handleDirtyContext();
tex = this->onCreateTexture(desc, lifeCycle, srcData, rowBytes);
}
if (!this->caps()->reuseScratchTextures() && !isRT) {
tex->resourcePriv().removeScratchKey();
tex = this->onCreateTexture(desc, lifeCycle, texels);
}
if (tex) {
if (!caps->reuseScratchTextures() && !isRT) {
tex->resourcePriv().removeScratchKey();
}
fStats.incTextureCreates();
if (srcData) {
fStats.incTextureUploads();
if (!texels.empty()) {
if (texels[0].fPixels) {
fStats.incTextureUploads();
}
}
}
return tex;
}
GrTexture* GrGpu::createTexture(const GrSurfaceDesc& desc, SkBudgeted budgeted,
const void* srcData, size_t rowBytes) {
GrMipLevel level;
level.fPixels = srcData;
level.fRowBytes = rowBytes;
SkSTArray<1, GrMipLevel> levels;
levels.push_back(level);
return this->createTexture(desc, budgeted, levels);
}
GrTexture* GrGpu::wrapBackendTexture(const GrBackendTextureDesc& desc, GrWrapOwnership ownership) {
this->handleDirtyContext();
if (!this->caps()->isConfigTexturable(desc.fConfig)) {
@@ -355,20 +387,42 @@ bool GrGpu::readPixels(GrSurface* surface,
bool GrGpu::writePixels(GrSurface* surface,
int left, int top, int width, int height,
GrPixelConfig config, const void* buffer,
size_t rowBytes) {
if (!buffer || !surface) {
GrPixelConfig config, const SkTArray<GrMipLevel>& texels) {
if (!surface) {
return false;
}
bool validMipDataFound = false;
for (int currentMipLevel = 0; currentMipLevel < texels.count(); currentMipLevel++) {
if (texels[currentMipLevel].fPixels != nullptr) {
validMipDataFound = true;
break;
}
}
if (!validMipDataFound) {
return false;
}
this->handleDirtyContext();
if (this->onWritePixels(surface, left, top, width, height, config, buffer, rowBytes)) {
if (this->onWritePixels(surface, left, top, width, height, config, texels)) {
fStats.incTextureUploads();
return true;
}
return false;
}
bool GrGpu::writePixels(GrSurface* surface,
int left, int top, int width, int height,
GrPixelConfig config, const void* buffer,
size_t rowBytes) {
GrMipLevel mipLevel;
mipLevel.fPixels = buffer;
mipLevel.fRowBytes = rowBytes;
SkSTArray<1, GrMipLevel> texels;
texels.push_back(mipLevel);
return this->writePixels(surface, left, top, width, height, config, texels);
}
bool GrGpu::transferPixels(GrSurface* surface,
int left, int top, int width, int height,
GrPixelConfig config, GrTransferBuffer* buffer,

@@ -13,8 +13,10 @@
#include "GrStencil.h"
#include "GrSwizzle.h"
#include "GrTextureParamsAdjuster.h"
#include "GrTypes.h"
#include "GrXferProcessor.h"
#include "SkPath.h"
#include "SkTArray.h"
class GrBatchTracker;
class GrContext;

@@ -82,17 +84,31 @@ public:
*
* @param desc describes the texture to be created.
* @param budgeted does this texture count against the resource cache budget?
* @param srcData texel data to load texture. Begins with full-size
* palette data for paletted textures. For compressed
* formats it contains the compressed pixel data. Otherwise,
* it contains width*height texels. If nullptr texture data
* is uninitialized.
* @param rowBytes the number of bytes between consecutive rows. Zero
* means rows are tightly packed. This field is ignored
* for compressed formats.
*
* @param texels array of mipmap levels containing texel data to load.
* Each level begins with full-size palette data for paletted textures.
* For compressed formats the level contains the compressed pixel data.
* Otherwise, it contains width*height texels. If there is only one
* element and it contains nullptr fPixels, texture data is
* uninitialized.
* @return The texture object if successful, otherwise nullptr.
*/
GrTexture* createTexture(const GrSurfaceDesc& desc, SkBudgeted budgeted,
const SkTArray<GrMipLevel>& texels);
/**
* This function is a shim which creates a SkTArray<GrMipLevel> of size 1.
* It then calls createTexture with that SkTArray.
*
* @param srcData texel data to load texture. Begins with full-size
* palette data for paletted texture. For compressed
* formats it contains the compressed pixel data. Otherwise,
* it contains width*height texels. If nullptr texture data
* is uninitialized.
* @param rowBytes the number of bytes between consecutive rows. Zero
* means rows are tightly packed. This field is ignored
* for compressed pixel formats.
* @return The texture object if successful, otherwise, nullptr.
*/
GrTexture* createTexture(const GrSurfaceDesc& desc, SkBudgeted budgeted,
const void* srcData, size_t rowBytes);
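The texel-array overload documented above expects one entry per mip level, with index 0 treated as the base level elsewhere in this change. As a worked example of the sizing convention the per-level loops use later in this diff (each level is the base dimension divided by 2^level, clamped to at least 1), a 100x60 base level implies:

// Illustrative only: per-level dimensions for a 100x60 base, using
// currentWidth = SkTMax(1, width / (1 << level)) as in this change.
// level 0: 100 x 60
// level 1:  50 x 30
// level 2:  25 x 15
// level 3:  12 x  7
// level 4:   6 x  3
// level 5:   3 x  1
// level 6:   1 x  1   -> 7 levels in total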
@@ -260,9 +276,20 @@ public:
* @param width width of rectangle to write in pixels.
* @param height height of rectangle to write in pixels.
* @param config the pixel config of the source buffer
* @param buffer memory to read pixels from
* @param rowBytes number of bytes between consecutive rows. Zero
* means rows are tightly packed.
* @param texels array of mipmap levels containing texture data
*/
bool writePixels(GrSurface* surface,
int left, int top, int width, int height,
GrPixelConfig config,
const SkTArray<GrMipLevel>& texels);
/**
* This function is a shim which creates a SkTArray<GrMipLevel> of size 1.
* It then calls writePixels with that SkTArray.
*
* @param buffer memory to read pixels from.
* @param rowBytes number of bytes between consecutive rows. Zero
* means rows are tightly packed.
*/
bool writePixels(GrSurface* surface,
int left, int top, int width, int height,

@@ -510,10 +537,11 @@ private:
// onCreateTexture/CompressedTexture are called.
virtual GrTexture* onCreateTexture(const GrSurfaceDesc& desc,
GrGpuResource::LifeCycle lifeCycle,
const void* srcData, size_t rowBytes) = 0;
const SkTArray<GrMipLevel>& texels) = 0;
virtual GrTexture* onCreateCompressedTexture(const GrSurfaceDesc& desc,
GrGpuResource::LifeCycle lifeCycle,
const void* srcData) = 0;
const SkTArray<GrMipLevel>& texels) = 0;
virtual GrTexture* onWrapBackendTexture(const GrBackendTextureDesc&, GrWrapOwnership) = 0;
virtual GrRenderTarget* onWrapBackendRenderTarget(const GrBackendRenderTargetDesc&,
GrWrapOwnership) = 0;

@@ -555,8 +583,8 @@ private:
// overridden by backend-specific derived class to perform the surface write
virtual bool onWritePixels(GrSurface*,
int left, int top, int width, int height,
GrPixelConfig config, const void* buffer,
size_t rowBytes) = 0;
GrPixelConfig config,
const SkTArray<GrMipLevel>& texels) = 0;
// overridden by backend-specific derived class to perform the surface write
virtual bool onTransferPixels(GrSurface*,

@@ -81,7 +81,7 @@ GrBitmapTextureMaker::GrBitmapTextureMaker(GrContext* context, const SkBitmap& b
}
}
GrTexture* GrBitmapTextureMaker::refOriginalTexture() {
GrTexture* GrBitmapTextureMaker::refOriginalTexture(bool willBeMipped) {
GrTexture* tex;
if (fOriginalKey.isValid()) {

@@ -91,7 +91,11 @@ GrTexture* GrBitmapTextureMaker::refOriginalTexture() {
}
}
tex = GrUploadBitmapToTexture(this->context(), fBitmap);
if (willBeMipped) {
tex = GrGenerateMipMapsAndUploadToTexture(this->context(), fBitmap);
} else {
tex = GrUploadBitmapToTexture(this->context(), fBitmap);
}
if (tex && fOriginalKey.isValid()) {
tex->resourcePriv().setUniqueKey(fOriginalKey);
GrInstallBitmapUniqueKeyInvalidator(fOriginalKey, fBitmap.pixelRef());

@@ -126,8 +130,8 @@ GrImageTextureMaker::GrImageTextureMaker(GrContext* context, SkImageCacherator*
}
}
GrTexture* GrImageTextureMaker::refOriginalTexture() {
return fCacher->lockTexture(this->context(), fOriginalKey, fClient, fCachingHint);
GrTexture* GrImageTextureMaker::refOriginalTexture(bool willBeMipped) {
return fCacher->lockTexture(this->context(), fOriginalKey, fClient, fCachingHint, willBeMipped);
}
void GrImageTextureMaker::makeCopyKey(const CopyParams& stretch, GrUniqueKey* paramsCopyKey) {

@@ -55,7 +55,7 @@ public:
GrBitmapTextureMaker(GrContext* context, const SkBitmap& bitmap);
protected:
GrTexture* refOriginalTexture() override;
GrTexture* refOriginalTexture(bool willBeMipped) override;
void makeCopyKey(const CopyParams& copyParams, GrUniqueKey* copyKey) override;

@@ -80,7 +80,7 @@ protected:
// able to efficiently produce a "stretched" texture natively (e.g. picture-backed)
// GrTexture* generateTextureForParams(const CopyParams&) override;
GrTexture* refOriginalTexture() override;
GrTexture* refOriginalTexture(bool willBeMipped) override;
void makeCopyKey(const CopyParams& stretch, GrUniqueKey* paramsCopyKey) override;
void didCacheCopy(const GrUniqueKey& copyKey) override;

@@ -100,8 +100,8 @@ bool GrSurface::writePixels(int left, int top, int width, int height,
if (nullptr == context) {
return false;
}
return context->writeSurfacePixels(this, left, top, width, height, config, buffer, rowBytes,
pixelOpsFlags);
return context->writeSurfacePixels(this, left, top, width, height, config, buffer,
rowBytes, pixelOpsFlags);
}
bool GrSurface::readPixels(int left, int top, int width, int height,

@@ -328,12 +328,12 @@ private:
void xferBarrier(GrRenderTarget*, GrXferBarrierType) override {}
GrTexture* onCreateTexture(const GrSurfaceDesc& desc, GrGpuResource::LifeCycle lifeCycle,
const void* srcData, size_t rowBytes) override {
const SkTArray<GrMipLevel>& texels) override {
return nullptr;
}
GrTexture* onCreateCompressedTexture(const GrSurfaceDesc& desc, GrGpuResource::LifeCycle,
const void* srcData) override {
const SkTArray<GrMipLevel>& texels) override {
return nullptr;
}

@@ -372,8 +372,7 @@ private:
bool onWritePixels(GrSurface* surface,
int left, int top, int width, int height,
GrPixelConfig config, const void* buffer,
size_t rowBytes) override {
GrPixelConfig config, const SkTArray<GrMipLevel>& texels) override {
return false;
}

@@ -14,6 +14,10 @@
#include "GrRenderTargetPriv.h"
#include "GrTexture.h"
#include "GrTexturePriv.h"
#include "GrTypes.h"
#include "SkMath.h"
#include "SkMipMap.h"
#include "SkTypes.h"
void GrTexture::dirtyMipMaps(bool mipMapsDirty) {
if (mipMapsDirty) {

@@ -26,6 +30,8 @@ void GrTexture::dirtyMipMaps(bool mipMapsDirty) {
if (sizeChanged) {
// This must not be called until after changing fMipMapsStatus.
this->didChangeGpuMemorySize();
// TODO(http://skbug.com/4548) - The desc and scratch key should be
// updated to reflect the newly-allocated mipmaps.
}
}
}

@@ -81,16 +87,23 @@ GrSurfaceOrigin resolve_origin(const GrSurfaceDesc& desc) {
}
//////////////////////////////////////////////////////////////////////////////
GrTexture::GrTexture(GrGpu* gpu, LifeCycle lifeCycle, const GrSurfaceDesc& desc)
: INHERITED(gpu, lifeCycle, desc)
, fMipMapsStatus(kNotAllocated_MipMapsStatus) {
GrTexture::GrTexture(GrGpu* gpu, LifeCycle lifeCycle, const GrSurfaceDesc& desc,
bool wasMipMapDataProvided)
: INHERITED(gpu, lifeCycle, desc) {
if (!this->isExternal() && !GrPixelConfigIsCompressed(desc.fConfig) &&
!desc.fTextureStorageAllocator.fAllocateTextureStorage) {
GrScratchKey key;
GrTexturePriv::ComputeScratchKey(desc, &key);
this->setScratchKey(key);
}
if (wasMipMapDataProvided) {
fMipMapsStatus = kValid_MipMapsStatus;
fMaxMipMapLevel = SkMipMap::ComputeLevelCount(fDesc.fWidth, fDesc.fHeight);
} else {
fMipMapsStatus = kNotAllocated_MipMapsStatus;
fMaxMipMapLevel = 0;
}
}
void GrTexturePriv::ComputeScratchKey(const GrSurfaceDesc& desc, GrScratchKey* key) {

@@ -99,7 +112,9 @@ void GrTexturePriv::ComputeScratchKey(const GrSurfaceDesc& desc, GrScratchKey* k
GrSurfaceOrigin origin = resolve_origin(desc);
uint32_t flags = desc.fFlags & ~kCheckAllocation_GrSurfaceFlag;
SkASSERT(static_cast<int>(desc.fConfig) < (1 << 6));
// make sure desc.fConfig fits in 5 bits
SkASSERT(sk_float_log2(kLast_GrPixelConfig) <= 5);
SkASSERT(static_cast<int>(desc.fConfig) < (1 << 5));
SkASSERT(desc.fSampleCnt < (1 << 8));
SkASSERT(flags < (1 << 10));
SkASSERT(static_cast<int>(origin) < (1 << 8));

@@ -107,5 +122,6 @@ void GrTexturePriv::ComputeScratchKey(const GrSurfaceDesc& desc, GrScratchKey* k
GrScratchKey::Builder builder(key, kType, 3);
builder[0] = desc.fWidth;
builder[1] = desc.fHeight;
builder[2] = desc.fConfig | (desc.fSampleCnt << 6) | (flags << 14) | (origin << 24);
builder[2] = desc.fConfig | (desc.fIsMipMapped << 5) | (desc.fSampleCnt << 6) | (flags << 14)
| (origin << 24);
}
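The tightened assert and the extra term in builder[2] above pack the new mip flag into the scratch key. Reading the shifts and asserts in this hunk together (this layout is inferred from the code, not from separate documentation), the 32-bit word is roughly:

// builder[2] bit layout (sketch):
//   bits  0-4  : desc.fConfig       (assert: kLast_GrPixelConfig fits in 5 bits)
//   bit      5 : desc.fIsMipMapped
//   bits  6-13 : desc.fSampleCnt    (assert: < 1 << 8)
//   bits 14-23 : flags              (assert: < 1 << 10)
//   bits 24-31 : origin             (assert: < 1 << 8)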
@@ -432,9 +432,15 @@ const GrFragmentProcessor* GrTextureAdjuster::createFragmentProcessor(
GrTexture* GrTextureMaker::refTextureForParams(const GrTextureParams& params) {
CopyParams copyParams;
bool willBeMipped = params.filterMode() == GrTextureParams::kMipMap_FilterMode;
if (!fContext->caps()->mipMapSupport()) {
willBeMipped = false;
}
if (!fContext->getGpu()->makeCopyForTextureParams(this->width(), this->height(), params,
&copyParams)) {
return this->refOriginalTexture();
return this->refOriginalTexture(willBeMipped);
}
GrUniqueKey copyKey;
this->makeCopyKey(copyParams, &copyKey);

@@ -445,7 +451,7 @@ GrTexture* GrTextureMaker::refTextureForParams(const GrTextureParams& params) {
}
}
GrTexture* result = this->generateTextureForParams(copyParams);
GrTexture* result = this->generateTextureForParams(copyParams, willBeMipped);
if (!result) {
return nullptr;
}

@@ -499,8 +505,9 @@ const GrFragmentProcessor* GrTextureMaker::createFragmentProcessor(
filterOrNullForBicubic);
}
GrTexture* GrTextureMaker::generateTextureForParams(const CopyParams& copyParams) {
SkAutoTUnref<GrTexture> original(this->refOriginalTexture());
GrTexture* GrTextureMaker::generateTextureForParams(const CopyParams& copyParams,
bool willBeMipped) {
SkAutoTUnref<GrTexture> original(this->refOriginalTexture(willBeMipped));
if (!original) {
return nullptr;
}

@@ -185,7 +185,7 @@ protected:
* Return the maker's "original" texture. It is the responsibility of the maker to handle any
* caching of the original if desired.
*/
virtual GrTexture* refOriginalTexture() = 0;
virtual GrTexture* refOriginalTexture(bool willBeMipped) = 0;
/**
* Return a new (uncached) texture that is the stretch of the maker's original.

@@ -197,7 +197,7 @@ protected:
* Subclass may override this if they can handle creating the texture more directly than
* by copying.
*/
virtual GrTexture* generateTextureForParams(const CopyParams&);
virtual GrTexture* generateTextureForParams(const CopyParams&, bool willBeMipped);
GrContext* context() const { return fContext; }

@@ -39,6 +39,14 @@ public:
return GrTexture::kNotAllocated_MipMapsStatus != fTexture->fMipMapsStatus;
}
void setMaxMipMapLevel(int maxMipMapLevel) const {
fTexture->fMaxMipMapLevel = maxMipMapLevel;
}
int maxMipMapLevel() const {
return fTexture->fMaxMipMapLevel;
}
static void ComputeScratchKey(const GrSurfaceDesc&, GrScratchKey*);
private:

@@ -10,6 +10,7 @@
#include "GrResourceCache.h"
#include "GrGpu.h"
#include "../private/GrSingleOwner.h"
#include "SkTArray.h"
#define ASSERT_SINGLE_OWNER \
SkDEBUGCODE(GrSingleOwner::AutoEnforce debug_SingleOwner(fSingleOwner);)

@@ -29,9 +30,10 @@ GrTextureProvider::GrTextureProvider(GrGpu* gpu, GrResourceCache* cache, GrSingl
{
}
GrTexture* GrTextureProvider::createTexture(const GrSurfaceDesc& desc, SkBudgeted budgeted,
const void* srcData, size_t rowBytes) {
GrTexture* GrTextureProvider::createMipMappedTexture(const GrSurfaceDesc& desc, SkBudgeted budgeted,
const GrMipLevel* texels, int mipLevelCount) {
ASSERT_SINGLE_OWNER
if (this->isAbandoned()) {
return nullptr;
}

@@ -41,20 +43,38 @@ GrTexture* GrTextureProvider::createTexture(const GrSurfaceDesc& desc, SkBudgete
}
if (!GrPixelConfigIsCompressed(desc.fConfig) &&
!desc.fTextureStorageAllocator.fAllocateTextureStorage) {
static const uint32_t kFlags = kExact_ScratchTextureFlag |
kNoCreate_ScratchTextureFlag;
if (GrTexture* texture = this->refScratchTexture(desc, kFlags)) {
if (!srcData || texture->writePixels(0, 0, desc.fWidth, desc.fHeight, desc.fConfig,
srcData, rowBytes)) {
if (SkBudgeted::kNo == budgeted) {
texture->resourcePriv().makeUnbudgeted();
if (mipLevelCount < 2) {
const GrMipLevel& baseMipLevel = texels[0];
static const uint32_t kFlags = kExact_ScratchTextureFlag |
kNoCreate_ScratchTextureFlag;
if (GrTexture* texture = this->refScratchTexture(desc, kFlags)) {
if (texture->writePixels(0, 0, desc.fWidth, desc.fHeight, desc.fConfig,
baseMipLevel.fPixels, baseMipLevel.fRowBytes)) {
if (SkBudgeted::kNo == budgeted) {
texture->resourcePriv().makeUnbudgeted();
}
return texture;
}
return texture;
texture->unref();
}
texture->unref();
}
}
return fGpu->createTexture(desc, budgeted, srcData, rowBytes);
SkTArray<GrMipLevel> texelsShallowCopy(mipLevelCount);
for (int i = 0; i < mipLevelCount; ++i) {
texelsShallowCopy.push_back(texels[i]);
}
return fGpu->createTexture(desc, budgeted, texelsShallowCopy);
}
GrTexture* GrTextureProvider::createTexture(const GrSurfaceDesc& desc, SkBudgeted budgeted,
const void* srcData, size_t rowBytes) {
const int mipLevelCount = 1;
GrMipLevel texels[mipLevelCount];
texels[0].fPixels = srcData;
texels[0].fRowBytes = rowBytes;
return this->createMipMappedTexture(desc, budgeted, texels, mipLevelCount);
}
GrTexture* GrTextureProvider::createApproxTexture(const GrSurfaceDesc& desc) {

@@ -217,6 +217,7 @@ GrRenderTarget* SkGpuDevice::CreateRenderTarget(
desc.fConfig = SkImageInfo2GrPixelConfig(info);
desc.fSampleCnt = sampleCount;
desc.fTextureStorageAllocator = textureStorageAllocator;
desc.fIsMipMapped = false;
GrTexture* texture = context->textureProvider()->createTexture(desc, budgeted, nullptr, 0);
if (nullptr == texture) {
return nullptr;

@@ -10,9 +10,10 @@
#include "GrCaps.h"
#include "GrContext.h"
#include "GrTextureParamsAdjuster.h"
#include "GrGpuResourcePriv.h"
#include "GrImageIDTextureAdjuster.h"
#include "GrTextureParamsAdjuster.h"
#include "GrTypes.h"
#include "GrXferProcessor.h"
#include "GrYUVProvider.h"

@@ -23,8 +24,10 @@
#include "SkErrorInternals.h"
#include "SkGrPixelRef.h"
#include "SkMessageBus.h"
#include "SkMipMap.h"
#include "SkPixelRef.h"
#include "SkResourceCache.h"
#include "SkTemplates.h"
#include "SkTextureCompressor.h"
#include "SkYUVPlanesCache.h"
#include "effects/GrBicubicEffect.h"

@@ -285,6 +288,65 @@ void GrInstallBitmapUniqueKeyInvalidator(const GrUniqueKey& key, SkPixelRef* pix
pixelRef->addGenIDChangeListener(new Invalidator(key));
}
GrTexture* GrGenerateMipMapsAndUploadToTexture(GrContext* ctx, const SkBitmap& bitmap)
{
GrSurfaceDesc desc = GrImageInfoToSurfaceDesc(bitmap.info());
if (kIndex_8_SkColorType != bitmap.colorType() && !bitmap.readyToDraw()) {
GrTexture* texture = load_etc1_texture(ctx, bitmap, desc);
if (texture) {
return texture;
}
}
GrTexture* texture = create_texture_from_yuv(ctx, bitmap, desc);
if (texture) {
return texture;
}
SkASSERT(sizeof(int) <= sizeof(uint32_t));
if (bitmap.width() < 0 || bitmap.height() < 0) {
return nullptr;
}
SkAutoPixmapUnlock srcUnlocker;
if (!bitmap.requestLock(&srcUnlocker)) {
return nullptr;
}
const SkPixmap& pixmap = srcUnlocker.pixmap();
// Try to catch where we might have returned nullptr for src crbug.com/492818
if (nullptr == pixmap.addr()) {
sk_throw();
}
SkAutoTDelete<SkMipMap> mipmaps(SkMipMap::Build(pixmap, nullptr));
if (!mipmaps) {
return nullptr;
}
const int mipLevelCount = mipmaps->countLevels() + 1;
if (mipLevelCount < 1) {
return nullptr;
}
const bool isMipMapped = mipLevelCount > 1;
desc.fIsMipMapped = isMipMapped;
SkAutoTDeleteArray<GrMipLevel> texels(new GrMipLevel[mipLevelCount]);
texels[0].fPixels = pixmap.addr();
texels[0].fRowBytes = pixmap.rowBytes();
for (int i = 1; i < mipLevelCount; ++i) {
SkMipMap::Level generatedMipLevel;
mipmaps->getLevel(i - 1, &generatedMipLevel);
texels[i].fPixels = generatedMipLevel.fPixmap.addr();
texels[i].fRowBytes = generatedMipLevel.fPixmap.rowBytes();
}
return ctx->textureProvider()->createMipMappedTexture(desc, SkBudgeted::kYes, texels.get(),
mipLevelCount);
}
GrTexture* GrRefCachedBitmapTexture(GrContext* ctx, const SkBitmap& bitmap,
const GrTextureParams& params) {
if (bitmap.getTexture()) {
@@ -77,6 +77,7 @@ static SkGrPixelRef* copy_to_new_texture_pixelref(GrTexture* texture, SkColorTyp
desc.fFlags = kRenderTarget_GrSurfaceFlag;
desc.fConfig = SkImageInfo2GrPixelConfig(dstCT, kPremul_SkAlphaType, dstPT);
desc.fTextureStorageAllocator = texture->desc().fTextureStorageAllocator;
desc.fIsMipMapped = false;
GrTexture* dst = context->textureProvider()->createTexture(desc, SkBudgeted::kNo, nullptr, 0);
if (nullptr == dst) {

@@ -119,6 +119,8 @@ GrPixelConfig GrIsCompressedTextureDataSupported(GrContext* ctx, SkData* data,
*/
GrTexture* GrUploadBitmapToTexture(GrContext*, const SkBitmap&);
GrTexture* GrGenerateMipMapsAndUploadToTexture(GrContext*, const SkBitmap&);
//////////////////////////////////////////////////////////////////////////////
GR_STATIC_ASSERT((int)kZero_GrBlendCoeff == (int)SkXfermode::kZero_Coeff);

@@ -175,6 +175,7 @@ void GrConfigConversionEffect::TestForPreservingPMConversions(GrContext* context
desc.fWidth = 256;
desc.fHeight = 256;
desc.fConfig = kRGBA_8888_GrPixelConfig;
desc.fIsMipMapped = false;
SkAutoTUnref<GrTexture> readTex(context->textureProvider()->createTexture(
desc, SkBudgeted::kYes, nullptr, 0));

@@ -194,6 +194,7 @@ void GrTextureStripAtlas::lockTexture() {
texDesc.fWidth = fDesc.fWidth;
texDesc.fHeight = fDesc.fHeight;
texDesc.fConfig = fDesc.fConfig;
texDesc.fIsMipMapped = false;
static const GrUniqueKey::Domain kDomain = GrUniqueKey::GenerateDomain();
GrUniqueKey key;

@@ -212,6 +212,10 @@
#define GR_GL_PACK_ALIGNMENT 0x0D05
#define GR_GL_PACK_REVERSE_ROW_ORDER 0x93A4
#define GR_GL_MAX_TEXTURE_SIZE 0x0D33
#define GR_GL_TEXTURE_MIN_LOD 0x813A
#define GR_GL_TEXTURE_MAX_LOD 0x813B
#define GR_GL_TEXTURE_BASE_LEVEL 0x813C
#define GR_GL_TEXTURE_MAX_LEVEL 0x813D
#define GR_GL_MAX_VIEWPORT_DIMS 0x0D3A
#define GR_GL_SUBPIXEL_BITS 0x0D50
#define GR_GL_RED_BITS 0x0D52

@@ -21,8 +21,11 @@
#include "glsl/GrGLSL.h"
#include "glsl/GrGLSLCaps.h"
#include "glsl/GrGLSLPLSPathRendering.h"
#include "SkMipMap.h"
#include "SkPixmap.h"
#include "SkStrokeRec.h"
#include "SkTemplates.h"
#include "SkTypes.h"
#define GL_CALL(X) GR_GL_CALL(this->glInterface(), X)
#define GL_CALL_RET(RET, X) GR_GL_CALL_RET(this->glInterface(), RET, X)

@@ -555,7 +558,10 @@ GrTexture* GrGLGpu::onWrapBackendTexture(const GrBackendTextureDesc& desc,
#ifdef SK_IGNORE_GL_TEXTURE_TARGET
idDesc.fInfo.fID = static_cast<GrGLuint>(desc.fTextureHandle);
// We only support GL_TEXTURE_2D at the moment.
// When we create the texture, we only
// create GL_TEXTURE_2D at the moment.
// External clients can do something different.
idDesc.fInfo.fTarget = GR_GL_TEXTURE_2D;
#else
idDesc.fInfo = *info;

@@ -809,8 +815,8 @@ static bool check_write_and_transfer_input(GrGLTexture* glTex, GrSurface* surfac
bool GrGLGpu::onWritePixels(GrSurface* surface,
int left, int top, int width, int height,
GrPixelConfig config, const void* buffer,
size_t rowBytes) {
GrPixelConfig config,
const SkTArray<GrMipLevel>& texels) {
GrGLTexture* glTex = static_cast<GrGLTexture*>(surface->asTexture());
if (!check_write_and_transfer_input(glTex, surface, config)) {

@@ -824,19 +830,14 @@ bool GrGLGpu::onWritePixels(GrSurface* surface,
if (GrPixelConfigIsCompressed(glTex->desc().fConfig)) {
// We check that config == desc.fConfig in GrGLGpu::canWriteTexturePixels()
SkASSERT(config == glTex->desc().fConfig);
success = this->uploadCompressedTexData(glTex->desc(), glTex->target(), buffer,
success = this->uploadCompressedTexData(glTex->desc(), glTex->target(), texels,
kWrite_UploadType, left, top, width, height);
} else {
success = this->uploadTexData(glTex->desc(), glTex->target(), kWrite_UploadType,
left, top, width, height, config, buffer, rowBytes);
left, top, width, height, config, texels);
}
if (success) {
glTex->texturePriv().dirtyMipMaps(true);
return true;
}
return false;
return success;
}
bool GrGLGpu::onTransferPixels(GrSurface* surface,

@@ -865,9 +866,13 @@ bool GrGLGpu::onTransferPixels(GrSurface* surface,
GL_CALL(BindBuffer(glBuffer->bufferType(), glBuffer->bufferID()));
bool success = false;
GrMipLevel mipLevel;
mipLevel.fPixels = buffer;
mipLevel.fRowBytes = rowBytes;
SkSTArray<1, GrMipLevel> texels;
texels.push_back(mipLevel);
success = this->uploadTexData(glTex->desc(), glTex->target(), kTransfer_UploadType,
left, top, width, height, config, buffer, rowBytes);
left, top, width, height, config, texels);
if (success) {
glTex->texturePriv().dirtyMipMaps(true);
return true;

@@ -906,30 +911,178 @@ static inline GrGLenum check_alloc_error(const GrSurfaceDesc& desc,
}
}
/**
* Creates storage space for the texture and fills it with texels.
*
* @param desc The surface descriptor for the texture being created.
* @param interface The GL interface in use.
* @param target The GL target to which the texture is bound
* @param internalFormat The data format used for the internal storage of the texture.
* @param externalFormat The data format used for the external storage of the texture.
* @param externalType The type of the data used for the external storage of the texture.
* @param texels The texel data of the texture being created.
* @param baseWidth The width of the texture's base mipmap level
* @param baseHeight The height of the texture's base mipmap level
* @param succeeded Set to true if allocating and populating the texture completed
* without error.
*/
static void allocate_and_populate_uncompressed_texture(const GrSurfaceDesc& desc,
const GrGLInterface& interface,
GrGLenum target,
GrGLenum internalFormat,
GrGLenum externalFormat,
GrGLenum externalType,
const SkTArray<GrMipLevel>& texels,
int baseWidth, int baseHeight,
bool* succeeded) {
CLEAR_ERROR_BEFORE_ALLOC(&interface);
*succeeded = true;
for (int currentMipLevel = 0; currentMipLevel < texels.count(); currentMipLevel++) {
int twoToTheMipLevel = 1 << currentMipLevel;
int currentWidth = SkTMax(1, baseWidth / twoToTheMipLevel);
int currentHeight = SkTMax(1, baseHeight / twoToTheMipLevel);
const void* currentMipData = texels[currentMipLevel].fPixels;
// Even if currentMipData is nullptr, continue to call TexImage2D.
// This will allocate texture memory which we can later populate.
GL_ALLOC_CALL(&interface,
TexImage2D(target,
currentMipLevel,
internalFormat,
currentWidth,
currentHeight,
0, // border
externalFormat, externalType,
currentMipData));
GrGLenum error = check_alloc_error(desc, &interface);
if (error != GR_GL_NO_ERROR) {
*succeeded = false;
break;
}
}
}
/**
* Creates storage space for the texture and fills it with texels.
*
* @param desc The surface descriptor for the texture being created.
* @param interface The GL interface in use.
* @param target The GL target to which the texture is bound
* @param internalFormat The data format used for the internal storage of the texture.
* @param texels The texel data of the texture being created.
* @param baseWidth The width of the texture's base mipmap level
* @param baseHeight The height of the texture's base mipmap level
*/
static bool allocate_and_populate_compressed_texture(const GrSurfaceDesc& desc,
const GrGLInterface& interface,
GrGLenum target, GrGLenum internalFormat,
const SkTArray<GrMipLevel>& texels,
int baseWidth, int baseHeight) {
CLEAR_ERROR_BEFORE_ALLOC(&interface);
for (int currentMipLevel = 0; currentMipLevel < texels.count(); currentMipLevel++) {
int twoToTheMipLevel = 1 << currentMipLevel;
int currentWidth = SkTMax(1, baseWidth / twoToTheMipLevel);
int currentHeight = SkTMax(1, baseHeight / twoToTheMipLevel);
// Make sure that the width and height that we pass to OpenGL
// is a multiple of the block size.
size_t dataSize = GrCompressedFormatDataSize(desc.fConfig, baseWidth, baseHeight);
GL_ALLOC_CALL(&interface,
CompressedTexImage2D(target,
currentMipLevel,
internalFormat,
currentWidth,
currentHeight,
0, // border
SkToInt(dataSize),
texels[currentMipLevel].fPixels));
GrGLenum error = check_alloc_error(desc, &interface);
if (error != GR_GL_NO_ERROR) {
return false;
}
}
return true;
}
/**
* After a texture is created, any state which was altered during its creation
* needs to be restored.
*
* @param interface The GL interface to use.
* @param caps The capabilities of the GL device.
* @param restoreGLRowLength Should the row length unpacking be restored?
* @param glFlipY Did GL flip the texture vertically?
*/
static void restore_pixelstore_state(const GrGLInterface& interface, const GrGLCaps& caps,
bool restoreGLRowLength, bool glFlipY) {
if (restoreGLRowLength) {
SkASSERT(caps.unpackRowLengthSupport());
GR_GL_CALL(&interface, PixelStorei(GR_GL_UNPACK_ROW_LENGTH, 0));
}
if (glFlipY) {
GR_GL_CALL(&interface, PixelStorei(GR_GL_UNPACK_FLIP_Y, GR_GL_FALSE));
}
}
bool GrGLGpu::uploadTexData(const GrSurfaceDesc& desc,
GrGLenum target,
UploadType uploadType,
int left, int top, int width, int height,
GrPixelConfig dataConfig,
const void* dataOrOffset,
size_t rowBytes) {
SkASSERT(dataOrOffset || kNewTexture_UploadType == uploadType ||
kTransfer_UploadType == uploadType);
const SkTArray<GrMipLevel>& texels) {
// If we're uploading compressed data then we should be using uploadCompressedTexData
SkASSERT(!GrPixelConfigIsCompressed(dataConfig));
SkASSERT(this->caps()->isConfigTexturable(desc.fConfig));
// texels is const.
// But we may need to flip the texture vertically to prepare it.
// Rather than flip in place and alter the incoming data,
// we allocate a new buffer to flip into.
// This means we need to make a non-const shallow copy of texels.
SkTArray<GrMipLevel> texelsShallowCopy(texels);
for (int currentMipLevel = texelsShallowCopy.count() - 1; currentMipLevel >= 0;
currentMipLevel--) {
SkASSERT(texelsShallowCopy[currentMipLevel].fPixels ||
kNewTexture_UploadType == uploadType || kTransfer_UploadType == uploadType);
}
const GrGLInterface* interface = this->glInterface();
const GrGLCaps& caps = this->glCaps();
size_t bpp = GrBytesPerPixel(dataConfig);
if (!GrSurfacePriv::AdjustWritePixelParams(desc.fWidth, desc.fHeight, bpp, &left, &top,
&width, &height, &dataOrOffset, &rowBytes)) {
if (width == 0 || height == 0) {
return false;
}
size_t trimRowBytes = width * bpp;
// in case we need a temporary, trimmed copy of the src pixels
SkAutoSMalloc<128 * 128> tempStorage;
for (int currentMipLevel = 0; currentMipLevel < texelsShallowCopy.count(); currentMipLevel++) {
int twoToTheMipLevel = 1 << currentMipLevel;
int currentWidth = SkTMax(1, width / twoToTheMipLevel);
int currentHeight = SkTMax(1, height / twoToTheMipLevel);
if (texelsShallowCopy[currentMipLevel].fPixels == nullptr) {
continue;
}
if (currentHeight > SK_MaxS32 ||
currentWidth > SK_MaxS32) {
return false;
}
if (!GrSurfacePriv::AdjustWritePixelParams(desc.fWidth, desc.fHeight, bpp, &left, &top,
&currentWidth,
&currentHeight,
&texelsShallowCopy[currentMipLevel].fPixels,
&texelsShallowCopy[currentMipLevel].fRowBytes)) {
return false;
}
if (currentWidth < 0 || currentHeight < 0) {
return false;
}
}
// Internal format comes from the texture desc.
GrGLenum internalFormat;

@@ -949,31 +1102,67 @@ bool GrGLGpu::uploadTexData(const GrSurfaceDesc& desc,
bool restoreGLRowLength = false;
bool swFlipY = false;
bool glFlipY = false;
if (dataOrOffset) {
if (kBottomLeft_GrSurfaceOrigin == desc.fOrigin) {
if (this->glCaps().unpackFlipYSupport()) {
glFlipY = true;
} else {
swFlipY = true;
}
if (kBottomLeft_GrSurfaceOrigin == desc.fOrigin) {
if (caps.unpackFlipYSupport()) {
glFlipY = true;
} else {
swFlipY = true;
}
if (this->glCaps().unpackRowLengthSupport() && !swFlipY) {
}
// in case we need a temporary, trimmed copy of the src pixels
SkAutoSMalloc<128 * 128> tempStorage;
// find the combined size of all the mip levels and the relative offset of
// each into the collective buffer
size_t combined_buffer_size = 0;
SkTArray<size_t> individual_mip_offsets(texelsShallowCopy.count());
for (int currentMipLevel = 0; currentMipLevel < texelsShallowCopy.count(); currentMipLevel++) {
int twoToTheMipLevel = 1 << currentMipLevel;
int currentWidth = SkTMax(1, width / twoToTheMipLevel);
int currentHeight = SkTMax(1, height / twoToTheMipLevel);
const size_t trimmedSize = currentWidth * bpp * currentHeight;
individual_mip_offsets.push_back(combined_buffer_size);
combined_buffer_size += trimmedSize;
}
char* buffer = (char*)tempStorage.reset(combined_buffer_size);
for (int currentMipLevel = 0; currentMipLevel < texelsShallowCopy.count(); currentMipLevel++) {
if (texelsShallowCopy[currentMipLevel].fPixels == nullptr) {
continue;
}
int twoToTheMipLevel = 1 << currentMipLevel;
int currentWidth = SkTMax(1, width / twoToTheMipLevel);
int currentHeight = SkTMax(1, height / twoToTheMipLevel);
const size_t trimRowBytes = currentWidth * bpp;
/*
* check whether to allocate a temporary buffer for flipping y or
* because our srcData has extra bytes past each row. If so, we need
* to trim those off here, since GL ES may not let us specify
* GL_UNPACK_ROW_LENGTH.
*/
restoreGLRowLength = false;
const size_t rowBytes = texelsShallowCopy[currentMipLevel].fRowBytes;
if (caps.unpackRowLengthSupport() && !swFlipY) {
// can't use this for flipping, only non-neg values allowed. :(
if (rowBytes != trimRowBytes) {
GrGLint rowLength = static_cast<GrGLint>(rowBytes / bpp);
GL_CALL(PixelStorei(GR_GL_UNPACK_ROW_LENGTH, rowLength));
GR_GL_CALL(interface, PixelStorei(GR_GL_UNPACK_ROW_LENGTH, rowLength));
restoreGLRowLength = true;
}
} else if (kTransfer_UploadType != uploadType) {
if (trimRowBytes != rowBytes || swFlipY) {
// copy data into our new storage, skipping the trailing bytes
size_t trimSize = height * trimRowBytes;
const char* src = (const char*)dataOrOffset;
if (swFlipY) {
src += (height - 1) * rowBytes;
const char* src = (const char*)texelsShallowCopy[currentMipLevel].fPixels;
if (swFlipY && currentHeight >= 1) {
src += (currentHeight - 1) * rowBytes;
}
char* dst = (char*)tempStorage.reset(trimSize);
for (int y = 0; y < height; y++) {
char* dst = buffer + individual_mip_offsets[currentMipLevel];
for (int y = 0; y < currentHeight; y++) {
memcpy(dst, src, trimRowBytes);
if (swFlipY) {
src -= rowBytes;
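To make the combined-buffer bookkeeping above concrete, here is an illustrative calculation only (assuming 4 bytes per pixel, as for kRGBA_8888; the dimensions are hypothetical):

// Three levels supplied for a 100x60 base level, bpp = 4:
//   level 0: 100 x 60 -> trimmedSize = 100 * 4 * 60 = 24000, offset = 0
//   level 1:  50 x 30 -> trimmedSize =  50 * 4 * 30 =  6000, offset = 24000
//   level 2:  25 x 15 -> trimmedSize =  25 * 4 * 15 =  1500, offset = 30000
// combined_buffer_size = 31500 bytes; each trimmed/flipped level is copied to
// buffer + individual_mip_offsets[level].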
@ -983,59 +1172,54 @@ bool GrGLGpu::uploadTexData(const GrSurfaceDesc& desc,
|
||||
dst += trimRowBytes;
|
||||
}
|
||||
// now point data to our copied version
|
||||
dataOrOffset = tempStorage.get();
|
||||
texelsShallowCopy[currentMipLevel].fPixels = buffer +
|
||||
individual_mip_offsets[currentMipLevel];
|
||||
texelsShallowCopy[currentMipLevel].fRowBytes = trimRowBytes;
|
||||
}
|
||||
} else {
|
||||
return false;
|
||||
}
|
||||
if (glFlipY) {
|
||||
GL_CALL(PixelStorei(GR_GL_UNPACK_FLIP_Y, GR_GL_TRUE));
|
||||
GR_GL_CALL(interface, PixelStorei(GR_GL_UNPACK_FLIP_Y, GR_GL_TRUE));
|
||||
}
|
||||
GL_CALL(PixelStorei(GR_GL_UNPACK_ALIGNMENT, config_alignment(dataConfig)));
|
||||
GR_GL_CALL(interface, PixelStorei(GR_GL_UNPACK_ALIGNMENT,
|
||||
config_alignment(desc.fConfig)));
|
||||
}
|
||||
|
||||
bool succeeded = true;
|
||||
if (kNewTexture_UploadType == uploadType) {
|
||||
if (dataOrOffset &&
|
||||
!(0 == left && 0 == top && desc.fWidth == width && desc.fHeight == height)) {
|
||||
succeeded = false;
|
||||
} else {
|
||||
if (desc.fTextureStorageAllocator.fAllocateTextureStorage) {
|
||||
if (dataOrOffset) {
|
||||
GL_CALL(TexSubImage2D(target,
|
||||
0, // level
|
||||
left, top,
|
||||
width, height,
|
||||
externalFormat, externalType, dataOrOffset));
|
||||
}
|
||||
} else {
|
||||
CLEAR_ERROR_BEFORE_ALLOC(this->glInterface());
|
||||
GL_ALLOC_CALL(this->glInterface(), TexImage2D(
|
||||
target, 0, internalFormat, desc.fWidth, desc.fHeight, 0, externalFormat,
|
||||
externalType, dataOrOffset));
|
||||
GrGLenum error = check_alloc_error(desc, this->glInterface());
|
||||
if (error != GR_GL_NO_ERROR) {
|
||||
succeeded = false;
|
||||
}
|
||||
}
|
||||
}
|
||||
if (kNewTexture_UploadType == uploadType &&
|
||||
0 == left && 0 == top &&
|
||||
desc.fWidth == width && desc.fHeight == height &&
|
||||
!desc.fTextureStorageAllocator.fAllocateTextureStorage) {
|
||||
allocate_and_populate_uncompressed_texture(desc, *interface, target,
|
||||
internalFormat, externalFormat,
|
||||
externalType, texelsShallowCopy,
|
||||
width, height, &succeeded);
|
||||
} else {
|
||||
if (swFlipY || glFlipY) {
|
||||
top = desc.fHeight - (top + height);
|
||||
}
|
||||
GL_CALL(TexSubImage2D(target,
|
||||
0, // level
|
||||
left, top,
|
||||
width, height,
|
||||
externalFormat, externalType, dataOrOffset));
|
||||
for (int currentMipLevel = 0; currentMipLevel < texelsShallowCopy.count();
|
||||
currentMipLevel++) {
|
||||
int twoToTheMipLevel = 1 << currentMipLevel;
|
||||
int currentWidth = SkTMax(1, width / twoToTheMipLevel);
|
||||
int currentHeight = SkTMax(1, height / twoToTheMipLevel);
|
||||
if (texelsShallowCopy[currentMipLevel].fPixels == nullptr) {
|
||||
continue;
|
||||
}
|
||||
|
||||
GL_CALL(TexSubImage2D(target,
|
||||
currentMipLevel,
|
||||
left, top,
|
||||
currentWidth,
|
||||
currentHeight,
|
||||
externalFormat, externalType,
|
||||
texelsShallowCopy[currentMipLevel].fPixels));
|
||||
}
|
||||
}
|
||||
|
||||
if (restoreGLRowLength) {
|
||||
SkASSERT(this->glCaps().unpackRowLengthSupport());
|
||||
GL_CALL(PixelStorei(GR_GL_UNPACK_ROW_LENGTH, 0));
|
||||
}
|
||||
if (glFlipY) {
|
||||
GL_CALL(PixelStorei(GR_GL_UNPACK_FLIP_Y, GR_GL_FALSE));
|
||||
}
|
||||
restore_pixelstore_state(*interface, caps, restoreGLRowLength, glFlipY);
|
||||
|
||||
return succeeded;
|
||||
}
|
||||
|
||||
@ -1046,16 +1230,19 @@ bool GrGLGpu::uploadTexData(const GrSurfaceDesc& desc,
// see fit if they want to go against the "standard" way to do it.
bool GrGLGpu::uploadCompressedTexData(const GrSurfaceDesc& desc,
GrGLenum target,
const void* data,
const SkTArray<GrMipLevel>& texels,
UploadType uploadType,
int left, int top, int width, int height) {
SkASSERT(this->caps()->isConfigTexturable(desc.fConfig));
SkASSERT(kTransfer_UploadType != uploadType &&
(data || kNewTexture_UploadType != uploadType));
SkASSERT(kTransfer_UploadType != uploadType &&
(texels[0].fPixels || kNewTexture_UploadType != uploadType));

// No support for software flip y, yet...
SkASSERT(kBottomLeft_GrSurfaceOrigin != desc.fOrigin);

const GrGLInterface* interface = this->glInterface();
const GrGLCaps& caps = this->glCaps();

if (-1 == width) {
width = desc.fWidth;
}
@ -1074,42 +1261,42 @@ bool GrGLGpu::uploadCompressedTexData(const GrSurfaceDesc& desc,
}
#endif

// Make sure that the width and height that we pass to OpenGL
// is a multiple of the block size.
size_t dataSize = GrCompressedFormatDataSize(desc.fConfig, width, height);

// We only need the internal format for compressed 2D textures.
GrGLenum internalFormat;
if (!this->glCaps().getCompressedTexImageFormats(desc.fConfig, &internalFormat)) {
if (!caps.getCompressedTexImageFormats(desc.fConfig, &internalFormat)) {
return false;
}

if (kNewTexture_UploadType == uploadType) {
CLEAR_ERROR_BEFORE_ALLOC(this->glInterface());
GL_ALLOC_CALL(this->glInterface(),
CompressedTexImage2D(target,
0, // level
internalFormat,
width, height,
0, // border
SkToInt(dataSize),
data));
GrGLenum error = check_alloc_error(desc, this->glInterface());
if (error != GR_GL_NO_ERROR) {
return false;
}
return allocate_and_populate_compressed_texture(desc, *interface, target, internalFormat,
texels, width, height);
} else {
// Paletted textures can't be updated.
if (GR_GL_PALETTE8_RGBA8 == internalFormat) {
return false;
}
GL_CALL(CompressedTexSubImage2D(target,
0, // level
left, top,
width, height,
internalFormat,
SkToInt(dataSize),
data));
for (int currentMipLevel = 0; currentMipLevel < texels.count(); currentMipLevel++) {
if (texels[currentMipLevel].fPixels == nullptr) {
continue;
}

int twoToTheMipLevel = 1 << currentMipLevel;
int currentWidth = SkTMax(1, width / twoToTheMipLevel);
int currentHeight = SkTMax(1, height / twoToTheMipLevel);

// Make sure that the width and height that we pass to OpenGL
// is a multiple of the block size.
size_t dataSize = GrCompressedFormatDataSize(desc.fConfig, currentWidth,
currentHeight);
GL_CALL(CompressedTexSubImage2D(target,
currentMipLevel,
left, top,
currentWidth,
currentHeight,
internalFormat,
SkToInt(dataSize),
texels[currentMipLevel].fPixels));
}
}

return true;
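GrCompressedFormatDataSize is re-evaluated for every level because compressed formats are sized in whole blocks rather than pixels, so each level's byte count must be rounded up to the block grid. A standalone sketch of that rounding, not part of the patch, assuming a 4x4-block format at 8 bytes per block (roughly ETC1/DXT1); the real sizes come from GrCompressedFormatDataSize and depend on desc.fConfig:

// Standalone sketch, not Skia code: per-level compressed data size for an
// assumed 4x4-block format with 8 bytes per block. Skia's actual numbers come
// from GrCompressedFormatDataSize and vary with the pixel config.
#include <algorithm>
#include <cstddef>
#include <cstdio>

static size_t block_compressed_size(int w, int h, size_t bytesPerBlock) {
    const int kBlockDim = 4;                            // assumed block edge
    size_t blocksX = (w + kBlockDim - 1) / kBlockDim;   // round up to whole blocks
    size_t blocksY = (h + kBlockDim - 1) / kBlockDim;
    return blocksX * blocksY * bytesPerBlock;
}

int main() {
    int w = 64, h = 48;                                 // example base size
    for (int level = 0; ; ++level) {
        std::printf("level %d: %dx%d -> %zu bytes\n",
                    level, w, h, block_compressed_size(w, h, 8));
        if (1 == w && 1 == h) {
            break;
        }
        w = std::max(1, w / 2);
        h = std::max(1, h / 2);
    }
    return 0;
}
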
@ -1273,9 +1460,48 @@ static size_t as_size_t(int x) {
}
#endif

static GrGLTexture::IDDesc generate_gl_texture(const GrGLInterface* interface,
GrGpuResource::LifeCycle lifeCycle) {
GrGLTexture::IDDesc idDesc;
idDesc.fInfo.fID = 0;
GR_GL_CALL(interface, GenTextures(1, &idDesc.fInfo.fID));
idDesc.fLifeCycle = lifeCycle;
// When we create the texture, we only
// create GL_TEXTURE_2D at the moment.
// External clients can do something different.
idDesc.fInfo.fTarget = GR_GL_TEXTURE_2D;
return idDesc;
}

static void set_initial_texture_params(const GrGLInterface* interface,
const GrGLTextureInfo& info,
GrGLTexture::TexParams* initialTexParams) {
// Some drivers like to know filter/wrap before seeing glTexImage2D. Some
// drivers have a bug where an FBO won't be complete if it includes a
// texture that is not mipmap complete (considering the filter in use).
// we only set a subset here so invalidate first
initialTexParams->invalidate();
initialTexParams->fMinFilter = GR_GL_NEAREST;
initialTexParams->fMagFilter = GR_GL_NEAREST;
initialTexParams->fWrapS = GR_GL_CLAMP_TO_EDGE;
initialTexParams->fWrapT = GR_GL_CLAMP_TO_EDGE;
GR_GL_CALL(interface, TexParameteri(info.fTarget,
GR_GL_TEXTURE_MAG_FILTER,
initialTexParams->fMagFilter));
GR_GL_CALL(interface, TexParameteri(info.fTarget,
GR_GL_TEXTURE_MIN_FILTER,
initialTexParams->fMinFilter));
GR_GL_CALL(interface, TexParameteri(info.fTarget,
GR_GL_TEXTURE_WRAP_S,
initialTexParams->fWrapS));
GR_GL_CALL(interface, TexParameteri(info.fTarget,
GR_GL_TEXTURE_WRAP_T,
initialTexParams->fWrapT));
}

GrTexture* GrGLGpu::onCreateTexture(const GrSurfaceDesc& desc,
GrGpuResource::LifeCycle lifeCycle,
const void* srcData, size_t rowBytes) {
const SkTArray<GrMipLevel>& texels) {
// We fail if the MSAA was requested and is not available.
if (GrGLCaps::kNone_MSFBOType == this->glCaps().msFBOType() && desc.fSampleCnt) {
//SkDebugf("MSAA RT requested but not supported on this platform.");
@ -1287,8 +1513,7 @@ GrTexture* GrGLGpu::onCreateTexture(const GrSurfaceDesc& desc,
GrGLTexture::IDDesc idDesc;
idDesc.fLifeCycle = lifeCycle;
GrGLTexture::TexParams initialTexParams;
if (!this->createTextureImpl(desc, &idDesc.fInfo, renderTarget, srcData,
&initialTexParams, rowBytes)) {
if (!this->createTextureImpl(desc, &idDesc.fInfo, renderTarget, &initialTexParams, texels)) {
return return_null_texture();
}

@ -1304,31 +1529,29 @@ GrTexture* GrGLGpu::onCreateTexture(const GrSurfaceDesc& desc,
}
tex = new GrGLTextureRenderTarget(this, desc, idDesc, rtIDDesc);
} else {
tex = new GrGLTexture(this, desc, idDesc);
bool wasMipMapDataProvided = false;
if (texels.count() > 1) {
wasMipMapDataProvided = true;
}
tex = new GrGLTexture(this, desc, idDesc, wasMipMapDataProvided);
}
tex->setCachedTexParams(initialTexParams, this->getResetTimestamp());
#ifdef TRACE_TEXTURE_CREATION
SkDebugf("--- new texture [%d] size=(%d %d) config=%d\n",
glTexDesc.fTextureID, desc.fWidth, desc.fHeight, desc.fConfig);
glTexDesc.fInfo.fID, desc.fWidth, desc.fHeight, desc.fConfig);
#endif
return tex;
}

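onCreateTexture now receives the whole mip chain as an SkTArray<GrMipLevel> and records whether more than one level was supplied. A hedged caller-side sketch of assembling that array, not part of the patch (the helper name, pixel buffers, and level count are placeholders); GrMipLevel and SkTArray are the real types introduced and used by this change:

// Hedged caller-side sketch, not part of the patch: building the texel array
// consumed by onCreateTexture. The buffers and level count are placeholders.
#include "GrTypes.h"
#include "SkTArray.h"

static SkTArray<GrMipLevel> build_mip_levels(const void* const* levelPixels,
                                             const size_t* levelRowBytes,
                                             int levelCount) {
    SkTArray<GrMipLevel> texels(levelCount);
    for (int i = 0; i < levelCount; ++i) {
        GrMipLevel level;
        level.fPixels = levelPixels[i];      // may be nullptr; the upload loop skips such levels
        level.fRowBytes = levelRowBytes[i];  // 0 typically means tightly packed, as with the old rowBytes
        texels.push_back(level);
    }
    return texels;
}
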
GrTexture* GrGLGpu::onCreateCompressedTexture(const GrSurfaceDesc& desc,
GrGpuResource::LifeCycle lifeCycle,
const void* srcData) {
const SkTArray<GrMipLevel>& texels) {
// Make sure that we're not flipping Y.
if (kBottomLeft_GrSurfaceOrigin == desc.fOrigin) {
return return_null_texture();
}

GrGLTexture::IDDesc idDesc;
idDesc.fInfo.fID = 0;
GL_CALL(GenTextures(1, &idDesc.fInfo.fID));
idDesc.fLifeCycle = lifeCycle;
// We only support GL_TEXTURE_2D at the moment.
idDesc.fInfo.fTarget = GR_GL_TEXTURE_2D;

GrGLTexture::IDDesc idDesc = generate_gl_texture(this->glInterface(), lifeCycle);
if (!idDesc.fInfo.fID) {
return return_null_texture();
}
@ -1336,30 +1559,10 @@ GrTexture* GrGLGpu::onCreateCompressedTexture(const GrSurfaceDesc& desc,
this->setScratchTextureUnit();
GL_CALL(BindTexture(idDesc.fInfo.fTarget, idDesc.fInfo.fID));

// Some drivers like to know filter/wrap before seeing glTexImage2D. Some
// drivers have a bug where an FBO won't be complete if it includes a
// texture that is not mipmap complete (considering the filter in use).
GrGLTexture::TexParams initialTexParams;
// we only set a subset here so invalidate first
initialTexParams.invalidate();
initialTexParams.fMinFilter = GR_GL_NEAREST;
initialTexParams.fMagFilter = GR_GL_NEAREST;
initialTexParams.fWrapS = GR_GL_CLAMP_TO_EDGE;
initialTexParams.fWrapT = GR_GL_CLAMP_TO_EDGE;
GL_CALL(TexParameteri(idDesc.fInfo.fTarget,
GR_GL_TEXTURE_MAG_FILTER,
initialTexParams.fMagFilter));
GL_CALL(TexParameteri(idDesc.fInfo.fTarget,
GR_GL_TEXTURE_MIN_FILTER,
initialTexParams.fMinFilter));
GL_CALL(TexParameteri(idDesc.fInfo.fTarget,
GR_GL_TEXTURE_WRAP_S,
initialTexParams.fWrapS));
GL_CALL(TexParameteri(idDesc.fInfo.fTarget,
GR_GL_TEXTURE_WRAP_T,
initialTexParams.fWrapT));
set_initial_texture_params(this->glInterface(), idDesc.fInfo, &initialTexParams);

if (!this->uploadCompressedTexData(desc, idDesc.fInfo.fTarget, srcData)) {
if (!this->uploadCompressedTexData(desc, idDesc.fInfo.fTarget, texels)) {
GL_CALL(DeleteTextures(1, &idDesc.fInfo.fID));
return return_null_texture();
}
@ -1369,7 +1572,7 @@ GrTexture* GrGLGpu::onCreateCompressedTexture(const GrSurfaceDesc& desc,
tex->setCachedTexParams(initialTexParams, this->getResetTimestamp());
#ifdef TRACE_TEXTURE_CREATION
SkDebugf("--- new compressed texture [%d] size=(%d %d) config=%d\n",
glTexDesc.fTextureID, desc.fWidth, desc.fHeight, desc.fConfig);
glTexDesc.fInfo.fID, desc.fWidth, desc.fHeight, desc.fConfig);
#endif
return tex;
}
@ -1512,21 +1715,10 @@ int GrGLGpu::getCompatibleStencilIndex(GrPixelConfig config) {
}

bool GrGLGpu::createTextureImpl(const GrSurfaceDesc& desc, GrGLTextureInfo* info,
bool renderTarget, const void* srcData,
GrGLTexture::TexParams* initialTexParams, size_t rowBytes) {
// Some drivers like to know filter/wrap before seeing glTexImage2D. Some
// drivers have a bug where an FBO won't be complete if it includes a
// texture that is not mipmap complete (considering the filter in use).

// we only set a subset here so invalidate first
initialTexParams->invalidate();
initialTexParams->fMinFilter = GR_GL_NEAREST;
initialTexParams->fMagFilter = GR_GL_NEAREST;
initialTexParams->fWrapS = GR_GL_CLAMP_TO_EDGE;
initialTexParams->fWrapT = GR_GL_CLAMP_TO_EDGE;

bool renderTarget, GrGLTexture::TexParams* initialTexParams,
const SkTArray<GrMipLevel>& texels) {
if (desc.fTextureStorageAllocator.fAllocateTextureStorage) {
return this->createTextureExternalAllocatorImpl(desc, info, srcData, rowBytes);
return this->createTextureExternalAllocatorImpl(desc, info, texels);
}

info->fID = 0;
@ -1547,32 +1739,32 @@ bool GrGLGpu::createTextureImpl(const GrSurfaceDesc& desc, GrGLTextureInfo* info
GR_GL_FRAMEBUFFER_ATTACHMENT));
}

GL_CALL(TexParameteri(info->fTarget,
GR_GL_TEXTURE_MAG_FILTER,
initialTexParams->fMagFilter));
GL_CALL(TexParameteri(info->fTarget,
GR_GL_TEXTURE_MIN_FILTER,
initialTexParams->fMinFilter));
GL_CALL(TexParameteri(info->fTarget,
GR_GL_TEXTURE_WRAP_S,
initialTexParams->fWrapS));
GL_CALL(TexParameteri(info->fTarget,
GR_GL_TEXTURE_WRAP_T,
initialTexParams->fWrapT));
if (info) {
set_initial_texture_params(this->glInterface(), *info, initialTexParams);
}
if (!this->uploadTexData(desc, info->fTarget, kNewTexture_UploadType, 0, 0,
desc.fWidth, desc.fHeight,
desc.fConfig, srcData, rowBytes)) {
desc.fConfig, texels)) {
GL_CALL(DeleteTextures(1, &(info->fID)));
return false;
}
return true;
}

bool GrGLGpu::createTextureExternalAllocatorImpl(
const GrSurfaceDesc& desc, GrGLTextureInfo* info, const void* srcData, size_t rowBytes) {
bool GrGLGpu::createTextureExternalAllocatorImpl(const GrSurfaceDesc& desc,
GrGLTextureInfo* info,
const SkTArray<GrMipLevel>& texels) {
// We do not make SkTArray available outside of Skia,
// and so we do not want to allow mipmaps to external
// allocators just yet.
SkASSERT(texels.count() == 1);
SkSTArray<1, GrMipLevel> texelsShallowCopy(1);
texelsShallowCopy.push_back(texels[0]);

switch (desc.fTextureStorageAllocator.fAllocateTextureStorage(
desc.fTextureStorageAllocator.fCtx, reinterpret_cast<GrBackendObject>(info),
desc.fWidth, desc.fHeight, desc.fConfig, srcData, desc.fOrigin)) {
desc.fWidth, desc.fHeight, desc.fConfig, texelsShallowCopy[0].fPixels,
desc.fOrigin)) {
case GrTextureStorageAllocator::Result::kSucceededAndUploaded:
return true;
case GrTextureStorageAllocator::Result::kFailed:
@ -1583,7 +1775,7 @@ bool GrGLGpu::createTextureExternalAllocatorImpl(

if (!this->uploadTexData(desc, info->fTarget, kNewTexture_UploadType, 0, 0,
desc.fWidth, desc.fHeight,
desc.fConfig, srcData, rowBytes)) {
desc.fConfig, texelsShallowCopy)) {
desc.fTextureStorageAllocator.fDeallocateTextureStorage(
desc.fTextureStorageAllocator.fCtx, reinterpret_cast<GrBackendObject>(info));
return false;
@ -3039,12 +3231,17 @@ void GrGLGpu::bindTexture(int unitIdx, const GrTextureParams& params, GrGLTextur
newTexParams.fMinFilter = glMinFilterModes[filterMode];
newTexParams.fMagFilter = glMagFilterModes[filterMode];

if (GrTextureParams::kMipMap_FilterMode == filterMode &&
texture->texturePriv().mipMapsAreDirty()) {
GL_CALL(GenerateMipmap(target));
texture->texturePriv().dirtyMipMaps(false);
if (GrTextureParams::kMipMap_FilterMode == filterMode) {
if (texture->texturePriv().mipMapsAreDirty()) {
GL_CALL(GenerateMipmap(target));
texture->texturePriv().dirtyMipMaps(false);
texture->texturePriv().setMaxMipMapLevel(SkMipMap::ComputeLevelCount(
texture->width(), texture->height()));
}
}

newTexParams.fMaxMipMapLevel = texture->texturePriv().maxMipMapLevel();

newTexParams.fWrapS = tile_to_gl_wrap(params.getTileModeX());
newTexParams.fWrapT = tile_to_gl_wrap(params.getTileModeY());
get_tex_param_swizzle(texture->config(), this->glCaps(), newTexParams.fSwizzleRGBA);
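When the mips are regenerated, the texture now also remembers its deepest level via SkMipMap::ComputeLevelCount. A standalone sketch of the level-count math this presumably relies on, not part of the patch, assuming the count is floor(log2(max(width, height))) and excludes the base level; the authoritative definition is SkMipMap::ComputeLevelCount in Skia itself:

// Standalone sketch, not Skia code: assumed level count, i.e.
// floor(log2(max(width, height))), not counting the base level.
#include <algorithm>
#include <cstdio>

static int compute_level_count(int width, int height) {
    int levels = 0;
    int largest = std::max(width, height);
    while (largest > 1) {
        largest >>= 1;
        ++levels;
    }
    return levels;
}

int main() {
    std::printf("%d\n", compute_level_count(256, 100));  // prints 8
    return 0;
}
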
@ -3056,6 +3253,17 @@ void GrGLGpu::bindTexture(int unitIdx, const GrTextureParams& params, GrGLTextur
this->setTextureUnit(unitIdx);
GL_CALL(TexParameteri(target, GR_GL_TEXTURE_MIN_FILTER, newTexParams.fMinFilter));
}
if (setAll || newTexParams.fMaxMipMapLevel != oldTexParams.fMaxMipMapLevel) {
if (newTexParams.fMaxMipMapLevel != 0) {
this->setTextureUnit(unitIdx);
GL_CALL(TexParameteri(target, GR_GL_TEXTURE_MIN_LOD, 0));
GL_CALL(TexParameteri(target, GR_GL_TEXTURE_BASE_LEVEL, 0));
GL_CALL(TexParameteri(target, GR_GL_TEXTURE_MAX_LOD,
newTexParams.fMaxMipMapLevel));
GL_CALL(TexParameteri(target, GR_GL_TEXTURE_MAX_LEVEL,
newTexParams.fMaxMipMapLevel));
}
}
if (setAll || newTexParams.fWrapS != oldTexParams.fWrapS) {
this->setTextureUnit(unitIdx);
GL_CALL(TexParameteri(target, GR_GL_TEXTURE_WRAP_S, newTexParams.fWrapS));
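The new block above pins the sampled mip range to [0, fMaxMipMapLevel]. A standalone sketch of the same clamp in plain OpenGL terms, not part of the patch and not using Skia's GL_CALL machinery (it assumes a GL header or loader that exposes these enums):

// Standalone sketch, not Skia code: clamping the mip range on a bound texture.
// BASE_LEVEL/MAX_LEVEL bound which levels are considered for completeness;
// MIN_LOD/MAX_LOD clamp the level of detail chosen at sample time.
#include <GL/gl.h>
#include <GL/glext.h>   // some platforms need this for the BASE_LEVEL/MAX_LEVEL enums

static void clamp_mip_range(GLenum target, GLint maxLevel) {
    glTexParameteri(target, GL_TEXTURE_BASE_LEVEL, 0);
    glTexParameteri(target, GL_TEXTURE_MAX_LEVEL, maxLevel);
    glTexParameteri(target, GL_TEXTURE_MIN_LOD, 0);
    glTexParameteri(target, GL_TEXTURE_MAX_LOD, maxLevel);
}
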

@ -21,7 +21,9 @@
#include "GrGLVertexBuffer.h"
#include "GrGpu.h"
#include "GrPipelineBuilder.h"
#include "GrTypes.h"
#include "GrXferProcessor.h"
#include "SkTArray.h"
#include "SkTypes.h"

class GrPipeline;
@ -145,10 +147,11 @@ private:
void xferBarrier(GrRenderTarget*, GrXferBarrierType) override;

GrTexture* onCreateTexture(const GrSurfaceDesc& desc, GrGpuResource::LifeCycle lifeCycle,
const void* srcData, size_t rowBytes) override;
const SkTArray<GrMipLevel>& texels) override;
GrTexture* onCreateCompressedTexture(const GrSurfaceDesc& desc,
GrGpuResource::LifeCycle lifeCycle,
const void* srcData) override;
const SkTArray<GrMipLevel>& texels) override;

GrVertexBuffer* onCreateVertexBuffer(size_t size, bool dynamic) override;
GrIndexBuffer* onCreateIndexBuffer(size_t size, bool dynamic) override;
GrTransferBuffer* onCreateTransferBuffer(size_t size, TransferType type) override;
@ -165,13 +168,13 @@ private:
// texture. Otherwise, create the texture directly.
// Returns whether the texture is successfully created. On success, the
// result is stored in |info|.
// The texture is populated with |srcData|, if it exists.
// The texture is populated with |texels|, if it exists.
// The texture parameters are cached in |initialTexParams|.
bool createTextureImpl(const GrSurfaceDesc& desc, GrGLTextureInfo* info,
bool renderTarget, const void* srcData,
GrGLTexture::TexParams* initialTexParams, size_t rowBytes);
bool renderTarget, GrGLTexture::TexParams* initialTexParams,
const SkTArray<GrMipLevel>& texels);
bool createTextureExternalAllocatorImpl(const GrSurfaceDesc& desc, GrGLTextureInfo* info,
const void* srcData, size_t rowBytes);
const SkTArray<GrMipLevel>& texels);

void onClear(GrRenderTarget*, const SkIRect& rect, GrColor color) override;

@ -203,8 +206,8 @@ private:

bool onWritePixels(GrSurface*,
int left, int top, int width, int height,
GrPixelConfig config, const void* buffer,
size_t rowBytes) override;
GrPixelConfig config,
const SkTArray<GrMipLevel>& texels) override;

bool onTransferPixels(GrSurface*,
int left, int top, int width, int height,
@ -344,8 +347,7 @@ private:
UploadType uploadType,
int left, int top, int width, int height,
GrPixelConfig dataConfig,
const void* data,
size_t rowBytes);
const SkTArray<GrMipLevel>& texels);

// helper for onCreateCompressedTexture. If width and height are
// set to -1, then this function will use desc.fWidth and desc.fHeight
@ -355,7 +357,7 @@ private:
// with new data.
bool uploadCompressedTexData(const GrSurfaceDesc& desc,
GrGLenum target,
const void* data,
const SkTArray<GrMipLevel>& texels,
UploadType uploadType = kNewTexture_UploadType,
int left = 0, int top = 0,
int width = -1, int height = -1);

@ -15,14 +15,22 @@
// Because this class is virtually derived from GrSurface we must explicitly call its constructor.
GrGLTexture::GrGLTexture(GrGLGpu* gpu, const GrSurfaceDesc& desc, const IDDesc& idDesc)
: GrSurface(gpu, idDesc.fLifeCycle, desc)
, INHERITED(gpu, idDesc.fLifeCycle, desc) {
, INHERITED(gpu, idDesc.fLifeCycle, desc, false) {
this->init(desc, idDesc);
this->registerWithCache();
}

GrGLTexture::GrGLTexture(GrGLGpu* gpu, const GrSurfaceDesc& desc, const IDDesc& idDesc,
bool wasMipMapDataProvided)
: GrSurface(gpu, idDesc.fLifeCycle, desc)
, INHERITED(gpu, idDesc.fLifeCycle, desc, wasMipMapDataProvided) {
this->init(desc, idDesc);
this->registerWithCache();
}

GrGLTexture::GrGLTexture(GrGLGpu* gpu, const GrSurfaceDesc& desc, const IDDesc& idDesc, Derived)
: GrSurface(gpu, idDesc.fLifeCycle, desc)
, INHERITED(gpu, idDesc.fLifeCycle, desc) {
, INHERITED(gpu, idDesc.fLifeCycle, desc, false) {
this->init(desc, idDesc);
}

@ -23,6 +23,7 @@ public:
GrGLenum fMagFilter;
GrGLenum fWrapS;
GrGLenum fWrapT;
GrGLenum fMaxMipMapLevel;
GrGLenum fSwizzleRGBA[4];
void invalidate() { memset(this, 0xff, sizeof(TexParams)); }
};
@ -33,6 +34,7 @@ public:
};

GrGLTexture(GrGLGpu*, const GrSurfaceDesc&, const IDDesc&);
GrGLTexture(GrGLGpu*, const GrSurfaceDesc&, const IDDesc&, bool wasMipMapDataProvided);

GrBackendObject getTextureHandle() const override;