Replace uses of GrAssert by SkASSERT.
R=bsalomon@google.com Review URL: https://codereview.chromium.org/22850006 git-svn-id: http://skia.googlecode.com/svn/trunk@10789 2bbb7eff-a529-9590-31e7-b0007b416f81
This commit is contained in:
parent
069975678a
commit
f6de475e5c
@ -62,7 +62,7 @@ bool GrAndroidPathRenderer::onDrawPath(const SkPath& origPath,
|
||||
|
||||
// copy android verts to our vertex buffer
|
||||
if (antiAlias) {
|
||||
GrAssert(sizeof(ColorVertex) == drawState->getVertexSize());
|
||||
SkASSERT(sizeof(ColorVertex) == drawState->getVertexSize());
|
||||
ColorVertex* outVert = reinterpret_cast<ColorVertex*>(geo.vertices());
|
||||
android::uirenderer::AlphaVertex* inVert =
|
||||
reinterpret_cast<android::uirenderer::AlphaVertex*>(vertices.getBuffer());
|
||||
|
@ -266,10 +266,10 @@ bool GrStrokePathRenderer::onDrawPath(const SkPath& origPath,
|
||||
break;
|
||||
case SkPath::kQuad_Verb:
|
||||
case SkPath::kCubic_Verb:
|
||||
GrAssert(!"Curves not supported!");
|
||||
SkASSERT(!"Curves not supported!");
|
||||
default:
|
||||
// Unhandled cases
|
||||
GrAssert(false);
|
||||
SkASSERT(false);
|
||||
}
|
||||
}
|
||||
|
||||
@ -283,14 +283,14 @@ bool GrStrokePathRenderer::onDrawPath(const SkPath& origPath,
|
||||
lastPt [1] += dir;
|
||||
break;
|
||||
case SkPaint::kRound_Cap:
|
||||
GrAssert(!"Round caps not supported!");
|
||||
SkASSERT(!"Round caps not supported!");
|
||||
default: // No cap
|
||||
break;
|
||||
}
|
||||
}
|
||||
|
||||
GrAssert(vCount <= maxVertexCount);
|
||||
GrAssert(iCount <= maxIndexCount);
|
||||
SkASSERT(vCount <= maxVertexCount);
|
||||
SkASSERT(iCount <= maxIndexCount);
|
||||
|
||||
if (vCount > 0) {
|
||||
target->drawIndexed(kTriangles_GrPrimitiveType,
|
||||
|
@ -73,7 +73,7 @@ protected:
|
||||
// atomic inc returns the old value not the incremented value. So we add
|
||||
// 1 to the returned value.
|
||||
int32_t id = sk_atomic_inc(&fCurrEffectClassID) + 1;
|
||||
GrAssert(id < (1 << kClassIDBits));
|
||||
SkASSERT(id < (1 << kClassIDBits));
|
||||
return static_cast<EffectKey>(id);
|
||||
}
|
||||
|
||||
|
@ -35,10 +35,10 @@ typedef uint32_t GrColor;
|
||||
*/
|
||||
static inline GrColor GrColorPackRGBA(unsigned r, unsigned g,
|
||||
unsigned b, unsigned a) {
|
||||
GrAssert((uint8_t)r == r);
|
||||
GrAssert((uint8_t)g == g);
|
||||
GrAssert((uint8_t)b == b);
|
||||
GrAssert((uint8_t)a == a);
|
||||
SkASSERT((uint8_t)r == r);
|
||||
SkASSERT((uint8_t)g == g);
|
||||
SkASSERT((uint8_t)b == b);
|
||||
SkASSERT((uint8_t)a == a);
|
||||
return (r << GrColor_SHIFT_R) |
|
||||
(g << GrColor_SHIFT_G) |
|
||||
(b << GrColor_SHIFT_B) |
|
||||
@ -85,7 +85,7 @@ enum GrColorComponentFlags {
|
||||
};
|
||||
|
||||
static inline char GrColorComponentFlagToChar(GrColorComponentFlags component) {
|
||||
GrAssert(GrIsPow2(component));
|
||||
SkASSERT(GrIsPow2(component));
|
||||
switch (component) {
|
||||
case kR_GrColorComponentFlag:
|
||||
return 'r';
|
||||
@ -102,7 +102,7 @@ static inline char GrColorComponentFlagToChar(GrColorComponentFlags component) {
|
||||
}
|
||||
|
||||
static inline uint32_t GrPixelConfigComponentMask(GrPixelConfig config) {
|
||||
GrAssert(config >= 0 && config < kGrPixelConfigCnt);
|
||||
SkASSERT(config >= 0 && config < kGrPixelConfigCnt);
|
||||
static const uint32_t kFlags[] = {
|
||||
0, // kUnknown_GrPixelConfig
|
||||
kA_GrColorComponentFlag, // kAlpha_8_GrPixelConfig
|
||||
|
@ -276,7 +276,6 @@ typedef unsigned __int64 uint64_t;
|
||||
/**
|
||||
* Prettier forms of the above macros.
|
||||
*/
|
||||
#define GrAssert(COND) GR_DEBUGASSERT(COND)
|
||||
#define GrAlwaysAssert(COND) GR_ALWAYSASSERT(COND)
|
||||
|
||||
/**
|
||||
@ -285,8 +284,8 @@ typedef unsigned __int64 uint64_t;
|
||||
*/
|
||||
inline void GrCrash() { GrAlwaysAssert(false); }
|
||||
inline void GrCrash(const char* msg) { GrPrintf(msg); GrAlwaysAssert(false); }
|
||||
inline void GrDebugCrash() { GrAssert(false); }
|
||||
inline void GrDebugCrash(const char* msg) { GrPrintf(msg); GrAssert(false); }
|
||||
inline void GrDebugCrash() { SkASSERT(false); }
|
||||
inline void GrDebugCrash(const char* msg) { GrPrintf(msg); SkASSERT(false); }
|
||||
|
||||
/**
|
||||
* GR_DEBUGCODE compiles the code X in debug builds only
|
||||
|
@ -678,7 +678,7 @@ public:
|
||||
* Initializes by pre-concat'ing the context's current matrix with the preConcat param.
|
||||
*/
|
||||
void setPreConcat(GrContext* context, const SkMatrix& preConcat, GrPaint* paint = NULL) {
|
||||
GrAssert(NULL != context);
|
||||
SkASSERT(NULL != context);
|
||||
|
||||
this->restore();
|
||||
|
||||
@ -692,7 +692,7 @@ public:
|
||||
* update a paint but the matrix cannot be inverted.
|
||||
*/
|
||||
bool setIdentity(GrContext* context, GrPaint* paint = NULL) {
|
||||
GrAssert(NULL != context);
|
||||
SkASSERT(NULL != context);
|
||||
|
||||
this->restore();
|
||||
|
||||
@ -772,7 +772,7 @@ public:
|
||||
|
||||
AutoClip(GrContext* context, InitialClip initialState)
|
||||
: fContext(context) {
|
||||
GrAssert(kWideOpen_InitialClip == initialState);
|
||||
SkASSERT(kWideOpen_InitialClip == initialState);
|
||||
fNewClipData.fClipStack = &fNewClipStack;
|
||||
|
||||
fOldClip = context->getClip();
|
||||
@ -808,7 +808,7 @@ public:
|
||||
, fAutoRT(ctx, rt) {
|
||||
fAutoMatrix.setIdentity(ctx);
|
||||
// should never fail with no paint param.
|
||||
GrAssert(fAutoMatrix.succeeded());
|
||||
SkASSERT(fAutoMatrix.succeeded());
|
||||
}
|
||||
|
||||
private:
|
||||
@ -987,10 +987,10 @@ public:
|
||||
// The cache also has a ref which we are lending to the caller of detach(). When the caller
|
||||
// lets go of the ref and the ref count goes to 0 internal_dispose will see this flag is
|
||||
// set and re-ref the texture, thereby restoring the cache's ref.
|
||||
GrAssert(texture->getRefCnt() > 1);
|
||||
SkASSERT(texture->getRefCnt() > 1);
|
||||
texture->setFlag((GrTextureFlags) GrTexture::kReturnToCache_FlagBit);
|
||||
texture->unref();
|
||||
GrAssert(NULL != texture->getCacheEntry());
|
||||
SkASSERT(NULL != texture->getCacheEntry());
|
||||
|
||||
return texture;
|
||||
}
|
||||
|
@ -22,8 +22,8 @@ public:
|
||||
GrDrawEffect(const GrEffectStage& stage, bool explicitLocalCoords)
|
||||
: fEffectStage(&stage)
|
||||
, fExplicitLocalCoords(explicitLocalCoords) {
|
||||
GrAssert(NULL != fEffectStage);
|
||||
GrAssert(NULL != fEffectStage->getEffect());
|
||||
SkASSERT(NULL != fEffectStage);
|
||||
SkASSERT(NULL != fEffectStage->getEffect());
|
||||
}
|
||||
const GrEffectRef* effect() const { return fEffectStage->getEffect(); }
|
||||
|
||||
|
@ -167,7 +167,7 @@ public:
|
||||
/** Useful for effects that want to insert a texture matrix that is implied by the texture
|
||||
dimensions */
|
||||
static inline SkMatrix MakeDivByTextureWHMatrix(const GrTexture* texture) {
|
||||
GrAssert(NULL != texture);
|
||||
SkASSERT(NULL != texture);
|
||||
SkMatrix mat;
|
||||
mat.setIDiv(texture->width(), texture->height());
|
||||
return mat;
|
||||
@ -235,7 +235,7 @@ protected:
|
||||
|
||||
/** Used by GR_CREATE_STATIC_EFFECT below */
|
||||
static GrEffectRef* CreateStaticEffectRef(void* refStorage, GrEffect* effect) {
|
||||
GrAssert(NULL == effect->fEffectRef);
|
||||
SkASSERT(NULL == effect->fEffectRef);
|
||||
effect->fEffectRef = SkNEW_PLACEMENT_ARGS(refStorage, GrEffectRef, (effect));
|
||||
return effect->fEffectRef;
|
||||
}
|
||||
@ -289,9 +289,9 @@ private:
|
||||
bool result = this->onIsEqual(other);
|
||||
#if GR_DEBUG
|
||||
if (result) {
|
||||
GrAssert(this->numTextures() == other.numTextures());
|
||||
SkASSERT(this->numTextures() == other.numTextures());
|
||||
for (int i = 0; i < this->numTextures(); ++i) {
|
||||
GrAssert(*fTextureAccesses[i] == *other.fTextureAccesses[i]);
|
||||
SkASSERT(*fTextureAccesses[i] == *other.fTextureAccesses[i]);
|
||||
}
|
||||
}
|
||||
#endif
|
||||
@ -320,7 +320,7 @@ private:
|
||||
};
|
||||
|
||||
inline GrEffectRef::GrEffectRef(GrEffect* effect) {
|
||||
GrAssert(NULL != effect);
|
||||
SkASSERT(NULL != effect);
|
||||
effect->ref();
|
||||
fEffect = effect;
|
||||
}
|
||||
|
@ -48,8 +48,8 @@ public:
|
||||
}
|
||||
|
||||
bool operator== (const GrEffectStage& other) const {
|
||||
GrAssert(NULL != fEffectRef.get());
|
||||
GrAssert(NULL != other.fEffectRef.get());
|
||||
SkASSERT(NULL != fEffectRef.get());
|
||||
SkASSERT(NULL != other.fEffectRef.get());
|
||||
|
||||
if (!(*this->getEffect())->isEqual(*other.getEffect())) {
|
||||
return false;
|
||||
@ -103,7 +103,7 @@ public:
|
||||
if (fCoordChangeMatrixSet) {
|
||||
savedCoordChange->fCoordChangeMatrix = fCoordChangeMatrix;
|
||||
}
|
||||
GrAssert(NULL == savedCoordChange->fEffectRef.get());
|
||||
SkASSERT(NULL == savedCoordChange->fEffectRef.get());
|
||||
GR_DEBUGCODE(SkRef(fEffectRef.get());)
|
||||
GR_DEBUGCODE(savedCoordChange->fEffectRef.reset(fEffectRef.get());)
|
||||
}
|
||||
@ -116,7 +116,7 @@ public:
|
||||
if (fCoordChangeMatrixSet) {
|
||||
fCoordChangeMatrix = savedCoordChange.fCoordChangeMatrix;
|
||||
}
|
||||
GrAssert(savedCoordChange.fEffectRef.get() == fEffectRef);
|
||||
SkASSERT(savedCoordChange.fEffectRef.get() == fEffectRef);
|
||||
GR_DEBUGCODE(savedCoordChange.fEffectRef.reset(NULL);)
|
||||
}
|
||||
|
||||
@ -137,8 +137,8 @@ public:
|
||||
}
|
||||
|
||||
void saveFrom(const GrEffectStage& stage) {
|
||||
GrAssert(!fInitialized);
|
||||
GrAssert(NULL != stage.fEffectRef.get());
|
||||
SkASSERT(!fInitialized);
|
||||
SkASSERT(NULL != stage.fEffectRef.get());
|
||||
stage.fEffectRef->get()->incDeferredRefCounts();
|
||||
fEffect = stage.fEffectRef->get();
|
||||
fCoordChangeMatrixSet = stage.fCoordChangeMatrixSet;
|
||||
@ -151,7 +151,7 @@ public:
|
||||
}
|
||||
|
||||
void restoreTo(GrEffectStage* stage) const {
|
||||
GrAssert(fInitialized);
|
||||
SkASSERT(fInitialized);
|
||||
stage->fEffectRef.reset(GrEffect::CreateEffectRef(fEffect));
|
||||
stage->fCoordChangeMatrixSet = fCoordChangeMatrixSet;
|
||||
if (fCoordChangeMatrixSet) {
|
||||
|
@ -109,7 +109,7 @@ public:
|
||||
* Appends an additional color effect to the color computation.
|
||||
*/
|
||||
const GrEffectRef* addColorEffect(const GrEffectRef* effect, int attr0 = -1, int attr1 = -1) {
|
||||
GrAssert(NULL != effect);
|
||||
SkASSERT(NULL != effect);
|
||||
SkNEW_APPEND_TO_TARRAY(&fColorStages, GrEffectStage, (effect, attr0, attr1));
|
||||
return effect;
|
||||
}
|
||||
@ -118,7 +118,7 @@ public:
|
||||
* Appends an additional coverage effect to the coverage computation.
|
||||
*/
|
||||
const GrEffectRef* addCoverageEffect(const GrEffectRef* effect, int attr0 = -1, int attr1 = -1) {
|
||||
GrAssert(NULL != effect);
|
||||
SkASSERT(NULL != effect);
|
||||
SkNEW_APPEND_TO_TARRAY(&fCoverageStages, GrEffectStage, (effect, attr0, attr1));
|
||||
return effect;
|
||||
}
|
||||
|
@ -156,7 +156,7 @@ private:
|
||||
friend class GrTexture;
|
||||
// called by ~GrTexture to remove the non-ref'ed back ptr.
|
||||
void owningTextureDestroyed() {
|
||||
GrAssert(NULL != fTexture);
|
||||
SkASSERT(NULL != fTexture);
|
||||
fTexture = NULL;
|
||||
}
|
||||
|
||||
|
@ -69,8 +69,8 @@ public:
|
||||
void setCacheEntry(GrResourceEntry* cacheEntry) { fCacheEntry = cacheEntry; }
|
||||
GrResourceEntry* getCacheEntry() { return fCacheEntry; }
|
||||
|
||||
void incDeferredRefCount() const { GrAssert(fDeferredRefCount >= 0); ++fDeferredRefCount; }
|
||||
void decDeferredRefCount() const { GrAssert(fDeferredRefCount > 0); --fDeferredRefCount; }
|
||||
void incDeferredRefCount() const { SkASSERT(fDeferredRefCount >= 0); ++fDeferredRefCount; }
|
||||
void decDeferredRefCount() const { SkASSERT(fDeferredRefCount > 0); --fDeferredRefCount; }
|
||||
|
||||
protected:
|
||||
/**
|
||||
|
@ -41,7 +41,7 @@ public:
|
||||
SkIntToScalar(this->height())); }
|
||||
|
||||
GrSurfaceOrigin origin() const {
|
||||
GrAssert(kTopLeft_GrSurfaceOrigin == fDesc.fOrigin || kBottomLeft_GrSurfaceOrigin == fDesc.fOrigin);
|
||||
SkASSERT(kTopLeft_GrSurfaceOrigin == fDesc.fOrigin || kBottomLeft_GrSurfaceOrigin == fDesc.fOrigin);
|
||||
return fDesc.fOrigin;
|
||||
}
|
||||
|
||||
@ -81,7 +81,7 @@ public:
|
||||
return thisRT == other->asRenderTarget();
|
||||
} else {
|
||||
const GrTexture* thisTex = this->asTexture();
|
||||
GrAssert(NULL != thisTex); // We must be one or the other
|
||||
SkASSERT(NULL != thisTex); // We must be one or the other
|
||||
return thisTex == other->asTexture();
|
||||
}
|
||||
}
|
||||
|
@ -32,19 +32,19 @@ public:
|
||||
GLSL code generation. */
|
||||
virtual EffectKey glEffectKey(const GrDrawEffect& drawEffect,
|
||||
const GrGLCaps& caps) const SK_OVERRIDE {
|
||||
GrAssert(kIllegalEffectClassID != fEffectClassID);
|
||||
SkASSERT(kIllegalEffectClassID != fEffectClassID);
|
||||
EffectKey effectKey = GLEffect::GenKey(drawEffect, caps);
|
||||
EffectKey textureKey = GLEffect::GenTextureKey(drawEffect, caps);
|
||||
EffectKey attribKey = GLEffect::GenAttribKey(drawEffect);
|
||||
#if GR_DEBUG
|
||||
static const EffectKey kIllegalIDMask = (uint16_t) (~((1U << kEffectKeyBits) - 1));
|
||||
GrAssert(!(kIllegalIDMask & effectKey));
|
||||
SkASSERT(!(kIllegalIDMask & effectKey));
|
||||
|
||||
static const EffectKey kIllegalTextureKeyMask = (uint16_t) (~((1U << kTextureKeyBits) - 1));
|
||||
GrAssert(!(kIllegalTextureKeyMask & textureKey));
|
||||
SkASSERT(!(kIllegalTextureKeyMask & textureKey));
|
||||
|
||||
static const EffectKey kIllegalAttribKeyMask = (uint16_t) (~((1U << kAttribKeyBits) - 1));
|
||||
GrAssert(!(kIllegalAttribKeyMask & textureKey));
|
||||
SkASSERT(!(kIllegalAttribKeyMask & textureKey));
|
||||
#endif
|
||||
return fEffectClassID | (attribKey << (kEffectKeyBits+kTextureKeyBits)) |
|
||||
(textureKey << kEffectKeyBits) | effectKey;
|
||||
|
@ -100,11 +100,11 @@ public:
|
||||
* only.
|
||||
*/
|
||||
GrFixed normalizeFixedX(GrFixed x) const {
|
||||
GrAssert(GrIsPow2(fDesc.fWidth));
|
||||
SkASSERT(GrIsPow2(fDesc.fWidth));
|
||||
return x >> fShiftFixedX;
|
||||
}
|
||||
GrFixed normalizeFixedY(GrFixed y) const {
|
||||
GrAssert(GrIsPow2(fDesc.fHeight));
|
||||
SkASSERT(GrIsPow2(fDesc.fHeight));
|
||||
return y >> fShiftFixedY;
|
||||
}
|
||||
|
||||
|
@ -151,7 +151,7 @@ public:
|
||||
bool operator== (const GrTextureAccess& other) const {
|
||||
#if GR_DEBUG
|
||||
// below assumes all chars in fSwizzle are initialized even if string is < 4 chars long.
|
||||
GrAssert(memcmp(fSwizzle, other.fSwizzle, sizeof(fSwizzle)-1) ==
|
||||
SkASSERT(memcmp(fSwizzle, other.fSwizzle, sizeof(fSwizzle)-1) ==
|
||||
strcmp(fSwizzle, other.fSwizzle));
|
||||
#endif
|
||||
return fParams == other.fParams &&
|
||||
|
@ -74,7 +74,7 @@ template <typename T> const T& GrMax(const T& a, const T& b) {
|
||||
* divide, rounding up
|
||||
*/
|
||||
static inline int32_t GrIDivRoundUp(int x, int y) {
|
||||
GrAssert(y > 0);
|
||||
SkASSERT(y > 0);
|
||||
return (x + (y-1)) / y;
|
||||
}
|
||||
static inline uint32_t GrUIDivRoundUp(uint32_t x, uint32_t y) {
|
||||
@ -152,7 +152,7 @@ static inline uint32_t GrNextPow2(uint32_t n) {
|
||||
}
|
||||
|
||||
static inline int GrNextPow2(int n) {
|
||||
GrAssert(n >= 0); // this impl only works for non-neg.
|
||||
SkASSERT(n >= 0); // this impl only works for non-neg.
|
||||
return n ? (1 << (32 - SkCLZ(n - 1))) : 1;
|
||||
}
|
||||
|
||||
@ -166,7 +166,7 @@ typedef int32_t GrFixed;
|
||||
#if GR_DEBUG
|
||||
|
||||
static inline int16_t GrToS16(intptr_t x) {
|
||||
GrAssert((int16_t)x == x);
|
||||
SkASSERT((int16_t)x == x);
|
||||
return (int16_t)x;
|
||||
}
|
||||
|
||||
@ -256,7 +256,7 @@ enum GrMaskFormat {
|
||||
* Return the number of bytes-per-pixel for the specified mask format.
|
||||
*/
|
||||
static inline int GrMaskFormatBytesPerPixel(GrMaskFormat format) {
|
||||
GrAssert((unsigned)format <= 2);
|
||||
SkASSERT((unsigned)format <= 2);
|
||||
// kA8 (0) -> 1
|
||||
// kA565 (1) -> 2
|
||||
// kA888 (2) -> 4
|
||||
@ -472,7 +472,7 @@ public:
|
||||
* Initialize the cache ID to a domain and key.
|
||||
*/
|
||||
GrCacheID(Domain domain, const Key& key) {
|
||||
GrAssert(kInvalid_Domain != domain);
|
||||
SkASSERT(kInvalid_Domain != domain);
|
||||
this->reset(domain, key);
|
||||
}
|
||||
|
||||
@ -484,8 +484,8 @@ public:
|
||||
/** Has this been initialized to a valid domain */
|
||||
bool isValid() const { return kInvalid_Domain != fDomain; }
|
||||
|
||||
const Key& getKey() const { GrAssert(this->isValid()); return fKey; }
|
||||
Domain getDomain() const { GrAssert(this->isValid()); return fDomain; }
|
||||
const Key& getKey() const { SkASSERT(this->isValid()); return fKey; }
|
||||
Domain getDomain() const { SkASSERT(this->isValid()); return fDomain; }
|
||||
|
||||
/** Creates a new unique ID domain. */
|
||||
static Domain GenerateDomain();
|
||||
|
@ -33,7 +33,7 @@ static const int kGrSLTypeCount = kLast_GrSLType + 1;
|
||||
* Gets the vector size of the SLType. Returns -1 for void, matrices, and samplers.
|
||||
*/
|
||||
static inline int GrSLTypeVectorCount(GrSLType type) {
|
||||
GrAssert(type >= 0 && type < static_cast<GrSLType>(kGrSLTypeCount));
|
||||
SkASSERT(type >= 0 && type < static_cast<GrSLType>(kGrSLTypeCount));
|
||||
static const int kCounts[] = { -1, 1, 2, 3, 4, -1, -1, -1 };
|
||||
return kCounts[type];
|
||||
|
||||
@ -51,7 +51,7 @@ static inline int GrSLTypeVectorCount(GrSLType type) {
|
||||
/** Return the type enum for a vector of floats of length n (1..4),
|
||||
e.g. 1 -> kFloat_GrSLType, 2 -> kVec2_GrSLType, ... */
|
||||
static inline GrSLType GrSLFloatVectorType(int count) {
|
||||
GrAssert(count > 0 && count <= 4);
|
||||
SkASSERT(count > 0 && count <= 4);
|
||||
return (GrSLType)(count);
|
||||
|
||||
GR_STATIC_ASSERT(kFloat_GrSLType == 1);
|
||||
@ -78,7 +78,7 @@ static const int kGrVertexAttribTypeCount = kLast_GrVertexAttribType + 1;
|
||||
* Returns the vector size of the type.
|
||||
*/
|
||||
static inline int GrVertexAttribTypeVectorCount(GrVertexAttribType type) {
|
||||
GrAssert(type >= 0 && type < kGrVertexAttribTypeCount);
|
||||
SkASSERT(type >= 0 && type < kGrVertexAttribTypeCount);
|
||||
static const int kCounts[] = { 1, 2, 3, 4, 4 };
|
||||
return kCounts[type];
|
||||
|
||||
@ -94,7 +94,7 @@ static inline int GrVertexAttribTypeVectorCount(GrVertexAttribType type) {
|
||||
* Returns the size of the attrib type in bytes.
|
||||
*/
|
||||
static inline size_t GrVertexAttribTypeSize(GrVertexAttribType type) {
|
||||
GrAssert(type >= 0 && type < kGrVertexAttribTypeCount);
|
||||
SkASSERT(type >= 0 && type < kGrVertexAttribTypeCount);
|
||||
static const size_t kSizes[] = {
|
||||
sizeof(float), // kFloat_GrVertexAttribType
|
||||
2*sizeof(float), // kVec2f_GrVertexAttribType
|
||||
@ -136,7 +136,7 @@ static const int kGrFixedFunctionVertexAttribBindingCnt =
|
||||
kLastFixedFunction_GrVertexAttribBinding + 1;
|
||||
|
||||
static inline int GrFixedFunctionVertexAttribVectorCount(GrVertexAttribBinding binding) {
|
||||
GrAssert(binding >= 0 && binding < kGrFixedFunctionVertexAttribBindingCnt);
|
||||
SkASSERT(binding >= 0 && binding < kGrFixedFunctionVertexAttribBindingCnt);
|
||||
static const int kVecCounts[] = { 2, 2, 4, 4 };
|
||||
|
||||
return kVecCounts[binding];
|
||||
|
@ -20,7 +20,7 @@
|
||||
class GrGLExtensions {
|
||||
public:
|
||||
bool init(GrGLBinding binding, const GrGLInterface* iface) {
|
||||
GrAssert(binding & iface->fBindingsExported);
|
||||
SkASSERT(binding & iface->fBindingsExported);
|
||||
return this->init(binding, iface->fGetString, iface->fGetStringi, iface->fGetIntegerv);
|
||||
}
|
||||
/**
|
||||
|
@ -35,7 +35,7 @@ public:
|
||||
virtual void makeCurrent() const = 0;
|
||||
|
||||
bool hasExtension(const char* extensionName) const {
|
||||
GrAssert(NULL != fGL);
|
||||
SkASSERT(NULL != fGL);
|
||||
return fExtensions.has(extensionName);
|
||||
}
|
||||
|
||||
|
@ -960,7 +960,7 @@ public:
|
||||
} else {
|
||||
dstColor = builder->dstColor();
|
||||
}
|
||||
GrAssert(NULL != dstColor);
|
||||
SkASSERT(NULL != dstColor);
|
||||
|
||||
// We don't try to optimize for this case at all
|
||||
if (NULL == inputColor) {
|
||||
|
@ -103,7 +103,7 @@ static void center_of_mass(const SegmentArray& segments, SkPoint* c) {
|
||||
// undo the translate of p0 to the origin.
|
||||
*c = center + p0;
|
||||
}
|
||||
GrAssert(!SkScalarIsNaN(c->fX) && !SkScalarIsNaN(c->fY));
|
||||
SkASSERT(!SkScalarIsNaN(c->fX) && !SkScalarIsNaN(c->fY));
|
||||
}
|
||||
|
||||
static void compute_vectors(SegmentArray* segments,
|
||||
@ -209,7 +209,7 @@ static inline bool get_direction(const SkPath& path, const SkMatrix& m, SkPath::
|
||||
return false;
|
||||
}
|
||||
// check whether m reverses the orientation
|
||||
GrAssert(!m.hasPerspective());
|
||||
SkASSERT(!m.hasPerspective());
|
||||
SkScalar det2x2 = SkScalarMul(m.get(SkMatrix::kMScaleX), m.get(SkMatrix::kMScaleY)) -
|
||||
SkScalarMul(m.get(SkMatrix::kMSkewX), m.get(SkMatrix::kMSkewY));
|
||||
if (det2x2 < 0) {
|
||||
@ -656,7 +656,7 @@ bool GrAAConvexPathRenderer::onDrawPath(const SkPath& origPath,
|
||||
if (!arg.succeeded()) {
|
||||
return false;
|
||||
}
|
||||
GrAssert(sizeof(QuadVertex) == drawState->getVertexSize());
|
||||
SkASSERT(sizeof(QuadVertex) == drawState->getVertexSize());
|
||||
verts = reinterpret_cast<QuadVertex*>(arg.vertices());
|
||||
idxs = reinterpret_cast<uint16_t*>(arg.indices());
|
||||
|
||||
@ -678,7 +678,7 @@ bool GrAAConvexPathRenderer::onDrawPath(const SkPath& origPath,
|
||||
for (int i = 2; i < vCount; ++i) {
|
||||
actualBounds.growToInclude(verts[i].fPos.fX, verts[i].fPos.fY);
|
||||
}
|
||||
GrAssert(tolDevBounds.contains(actualBounds));
|
||||
SkASSERT(tolDevBounds.contains(actualBounds));
|
||||
#endif
|
||||
|
||||
int vOffset = 0;
|
||||
|
@ -171,17 +171,17 @@ int get_float_exp(float x) {
|
||||
static bool tested;
|
||||
if (!tested) {
|
||||
tested = true;
|
||||
GrAssert(get_float_exp(0.25f) == -2);
|
||||
GrAssert(get_float_exp(0.3f) == -2);
|
||||
GrAssert(get_float_exp(0.5f) == -1);
|
||||
GrAssert(get_float_exp(1.f) == 0);
|
||||
GrAssert(get_float_exp(2.f) == 1);
|
||||
GrAssert(get_float_exp(2.5f) == 1);
|
||||
GrAssert(get_float_exp(8.f) == 3);
|
||||
GrAssert(get_float_exp(100.f) == 6);
|
||||
GrAssert(get_float_exp(1000.f) == 9);
|
||||
GrAssert(get_float_exp(1024.f) == 10);
|
||||
GrAssert(get_float_exp(3000000.f) == 21);
|
||||
SkASSERT(get_float_exp(0.25f) == -2);
|
||||
SkASSERT(get_float_exp(0.3f) == -2);
|
||||
SkASSERT(get_float_exp(0.5f) == -1);
|
||||
SkASSERT(get_float_exp(1.f) == 0);
|
||||
SkASSERT(get_float_exp(2.f) == 1);
|
||||
SkASSERT(get_float_exp(2.5f) == 1);
|
||||
SkASSERT(get_float_exp(8.f) == 3);
|
||||
SkASSERT(get_float_exp(100.f) == 6);
|
||||
SkASSERT(get_float_exp(1000.f) == 9);
|
||||
SkASSERT(get_float_exp(1024.f) == 10);
|
||||
SkASSERT(get_float_exp(3000000.f) == 21);
|
||||
}
|
||||
#endif
|
||||
const int* iptr = (const int*)&x;
|
||||
@ -392,7 +392,7 @@ int generate_lines_and_quads(const SkPath& path,
|
||||
|
||||
if (SkIRect::Intersects(devClipBounds, ibounds)) {
|
||||
int subdiv = num_quad_subdivs(devPts);
|
||||
GrAssert(subdiv >= -1);
|
||||
SkASSERT(subdiv >= -1);
|
||||
if (-1 == subdiv) {
|
||||
SkPoint* pts = lines->push_back_n(4);
|
||||
pts[0] = devPts[0];
|
||||
@ -448,7 +448,7 @@ int generate_lines_and_quads(const SkPath& path,
|
||||
bounds.roundOut(&ibounds);
|
||||
if (SkIRect::Intersects(devClipBounds, ibounds)) {
|
||||
int subdiv = num_quad_subdivs(qInDevSpace);
|
||||
GrAssert(subdiv >= -1);
|
||||
SkASSERT(subdiv >= -1);
|
||||
if (-1 == subdiv) {
|
||||
SkPoint* pts = lines->push_back_n(4);
|
||||
// lines should always be in device coords
|
||||
@ -527,7 +527,7 @@ void set_uv_quad(const SkPoint qpts[3], BezierVertex verts[kVertsPerQuad]) {
|
||||
void bloat_quad(const SkPoint qpts[3], const SkMatrix* toDevice,
|
||||
const SkMatrix* toSrc, BezierVertex verts[kVertsPerQuad],
|
||||
SkRect* devBounds) {
|
||||
GrAssert(!toDevice == !toSrc);
|
||||
SkASSERT(!toDevice == !toSrc);
|
||||
// original quad is specified by tri a,b,c
|
||||
SkPoint a = qpts[0];
|
||||
SkPoint b = qpts[1];
|
||||
@ -564,7 +564,7 @@ void bloat_quad(const SkPoint qpts[3], const SkMatrix* toDevice,
|
||||
cb -= c;
|
||||
|
||||
// We should have already handled degenerates
|
||||
GrAssert(ab.length() > 0 && cb.length() > 0);
|
||||
SkASSERT(ab.length() > 0 && cb.length() > 0);
|
||||
|
||||
ab.normalize();
|
||||
SkVector abN;
|
||||
@ -629,7 +629,7 @@ void calc_conic_klm(const SkPoint p[3], const SkScalar weight,
|
||||
scale = SkMaxScalar(scale, SkScalarAbs(l[i]));
|
||||
scale = SkMaxScalar(scale, SkScalarAbs(m[i]));
|
||||
}
|
||||
GrAssert(scale > 0);
|
||||
SkASSERT(scale > 0);
|
||||
scale /= 10.0f;
|
||||
k[0] /= scale;
|
||||
k[1] /= scale;
|
||||
@ -681,7 +681,7 @@ void add_quads(const SkPoint p[3],
|
||||
const SkMatrix* toSrc,
|
||||
BezierVertex** vert,
|
||||
SkRect* devBounds) {
|
||||
GrAssert(subdiv >= 0);
|
||||
SkASSERT(subdiv >= 0);
|
||||
if (subdiv) {
|
||||
SkPoint newP[5];
|
||||
SkChopQuadAtHalf(p, newP);
|
||||
@ -1015,7 +1015,7 @@ bool GrAAHairLinePathRenderer::createLineGeom(
|
||||
int vertCnt = kVertsPerLineSeg * lineCnt;
|
||||
|
||||
target->drawState()->setVertexAttribs<gHairlineLineAttribs>(SK_ARRAY_COUNT(gHairlineLineAttribs));
|
||||
GrAssert(sizeof(LineVertex) == target->getDrawState().getVertexSize());
|
||||
SkASSERT(sizeof(LineVertex) == target->getDrawState().getVertexSize());
|
||||
|
||||
if (!arg->set(target, vertCnt, 0)) {
|
||||
return false;
|
||||
@ -1064,7 +1064,7 @@ bool GrAAHairLinePathRenderer::createBezierGeom(
|
||||
int vertCnt = kVertsPerQuad * quadCnt + kVertsPerQuad * conicCnt;
|
||||
|
||||
target->drawState()->setVertexAttribs<gHairlineBezierAttribs>(SK_ARRAY_COUNT(gHairlineBezierAttribs));
|
||||
GrAssert(sizeof(BezierVertex) == target->getDrawState().getVertexSize());
|
||||
SkASSERT(sizeof(BezierVertex) == target->getDrawState().getVertexSize());
|
||||
|
||||
if (!arg->set(target, vertCnt, 0)) {
|
||||
return false;
|
||||
@ -1085,7 +1085,7 @@ bool GrAAHairLinePathRenderer::createBezierGeom(
|
||||
|
||||
int unsubdivQuadCnt = quads.count() / 3;
|
||||
for (int i = 0; i < unsubdivQuadCnt; ++i) {
|
||||
GrAssert(qSubdivs[i] >= 0);
|
||||
SkASSERT(qSubdivs[i] >= 0);
|
||||
add_quads(&quads[3*i], qSubdivs[i], toDevice, toSrc, &verts, devBounds);
|
||||
}
|
||||
|
||||
|
@ -398,7 +398,7 @@ void GrAARectRenderer::geometryFillAARect(GrGpu* gpu,
|
||||
|
||||
intptr_t verts = reinterpret_cast<intptr_t>(geo.vertices());
|
||||
size_t vsize = drawState->getVertexSize();
|
||||
GrAssert(sizeof(GrPoint) + sizeof(GrColor) == vsize);
|
||||
SkASSERT(sizeof(GrPoint) + sizeof(GrColor) == vsize);
|
||||
|
||||
GrPoint* fan0Pos = reinterpret_cast<GrPoint*>(verts);
|
||||
GrPoint* fan1Pos = reinterpret_cast<GrPoint*>(verts + 4 * vsize);
|
||||
@ -545,7 +545,7 @@ void GrAARectRenderer::shaderFillAARect(GrGpu* gpu,
|
||||
SkScalar newWidth = SkScalarHalf(rect.width() * vec[0].length()) + SK_ScalarHalf;
|
||||
SkScalar newHeight = SkScalarHalf(rect.height() * vec[1].length()) + SK_ScalarHalf;
|
||||
drawState->setVertexAttribs<gAARectVertexAttribs>(SK_ARRAY_COUNT(gAARectVertexAttribs));
|
||||
GrAssert(sizeof(RectVertex) == drawState->getVertexSize());
|
||||
SkASSERT(sizeof(RectVertex) == drawState->getVertexSize());
|
||||
|
||||
GrDrawTarget::AutoReleaseGeometry geo(target, 4, 0);
|
||||
if (!geo.succeeded()) {
|
||||
@ -595,7 +595,7 @@ void GrAARectRenderer::shaderFillAlignedAARect(GrGpu* gpu,
|
||||
SkASSERT(combinedMatrix.rectStaysRect());
|
||||
|
||||
drawState->setVertexAttribs<gAAAARectVertexAttribs>(SK_ARRAY_COUNT(gAAAARectVertexAttribs));
|
||||
GrAssert(sizeof(AARectVertex) == drawState->getVertexSize());
|
||||
SkASSERT(sizeof(AARectVertex) == drawState->getVertexSize());
|
||||
|
||||
GrDrawTarget::AutoReleaseGeometry geo(target, 4, 0);
|
||||
if (!geo.succeeded()) {
|
||||
@ -718,7 +718,7 @@ void GrAARectRenderer::geometryStrokeAARect(GrGpu* gpu,
|
||||
|
||||
intptr_t verts = reinterpret_cast<intptr_t>(geo.vertices());
|
||||
size_t vsize = drawState->getVertexSize();
|
||||
GrAssert(sizeof(GrPoint) + sizeof(GrColor) == vsize);
|
||||
SkASSERT(sizeof(GrPoint) + sizeof(GrColor) == vsize);
|
||||
|
||||
// We create vertices for four nested rectangles. There are two ramps from 0 to full
|
||||
// coverage, one on the exterior of the stroke and the other on the interior.
|
||||
|
@ -19,7 +19,7 @@ struct GrAllocPool::Block {
|
||||
size_t fBytesTotal;
|
||||
|
||||
static Block* Create(size_t size, Block* next) {
|
||||
GrAssert(size >= GrAllocPool_MIN_BLOCK_SIZE);
|
||||
SkASSERT(size >= GrAllocPool_MIN_BLOCK_SIZE);
|
||||
|
||||
Block* block = (Block*)GrMalloc(sizeof(Block) + size);
|
||||
block->fNext = next;
|
||||
@ -34,7 +34,7 @@ struct GrAllocPool::Block {
|
||||
}
|
||||
|
||||
void* alloc(size_t bytes) {
|
||||
GrAssert(bytes <= fBytesFree);
|
||||
SkASSERT(bytes <= fBytesFree);
|
||||
fBytesFree -= bytes;
|
||||
void* ptr = fPtr;
|
||||
fPtr += bytes;
|
||||
@ -42,7 +42,7 @@ struct GrAllocPool::Block {
|
||||
}
|
||||
|
||||
size_t release(size_t bytes) {
|
||||
GrAssert(bytes > 0);
|
||||
SkASSERT(bytes > 0);
|
||||
size_t free = GrMin(bytes, fBytesTotal - fBytesFree);
|
||||
fBytesFree += free;
|
||||
fPtr -= free;
|
||||
@ -112,7 +112,7 @@ void GrAllocPool::validate() const {
|
||||
count += 1;
|
||||
block = block->fNext;
|
||||
}
|
||||
GrAssert(fBlocksAllocated == count);
|
||||
SkASSERT(fBlocksAllocated == count);
|
||||
}
|
||||
|
||||
#endif
|
||||
|
@ -35,7 +35,7 @@ public:
|
||||
fItemsPerBlock(itemsPerBlock),
|
||||
fOwnFirstBlock(NULL == initialBlock),
|
||||
fCount(0) {
|
||||
GrAssert(itemsPerBlock > 0);
|
||||
SkASSERT(itemsPerBlock > 0);
|
||||
fBlockSize = fItemSize * fItemsPerBlock;
|
||||
fBlocks.push_back() = initialBlock;
|
||||
GR_DEBUGCODE(if (!fOwnFirstBlock) {*((char*)initialBlock+fBlockSize-1)='a';} );
|
||||
@ -95,7 +95,7 @@ public:
|
||||
* access last item, only call if count() != 0
|
||||
*/
|
||||
void* back() {
|
||||
GrAssert(fCount);
|
||||
SkASSERT(fCount);
|
||||
return (*this)[fCount-1];
|
||||
}
|
||||
|
||||
@ -103,7 +103,7 @@ public:
|
||||
* access last item, only call if count() != 0
|
||||
*/
|
||||
const void* back() const {
|
||||
GrAssert(fCount);
|
||||
SkASSERT(fCount);
|
||||
return (*this)[fCount-1];
|
||||
}
|
||||
|
||||
@ -111,7 +111,7 @@ public:
|
||||
* access item by index.
|
||||
*/
|
||||
void* operator[] (int i) {
|
||||
GrAssert(i >= 0 && i < fCount);
|
||||
SkASSERT(i >= 0 && i < fCount);
|
||||
return (char*)fBlocks[i / fItemsPerBlock] +
|
||||
fItemSize * (i % fItemsPerBlock);
|
||||
}
|
||||
@ -120,7 +120,7 @@ public:
|
||||
* access item by index.
|
||||
*/
|
||||
const void* operator[] (int i) const {
|
||||
GrAssert(i >= 0 && i < fCount);
|
||||
SkASSERT(i >= 0 && i < fCount);
|
||||
return (const char*)fBlocks[i / fItemsPerBlock] +
|
||||
fItemSize * (i % fItemsPerBlock);
|
||||
}
|
||||
@ -162,14 +162,14 @@ public:
|
||||
*/
|
||||
T& push_back() {
|
||||
void* item = fAllocator.push_back();
|
||||
GrAssert(NULL != item);
|
||||
SkASSERT(NULL != item);
|
||||
SkNEW_PLACEMENT(item, T);
|
||||
return *(T*)item;
|
||||
}
|
||||
|
||||
T& push_back(const T& t) {
|
||||
void* item = fAllocator.push_back();
|
||||
GrAssert(NULL != item);
|
||||
SkASSERT(NULL != item);
|
||||
SkNEW_PLACEMENT_ARGS(item, T, (t));
|
||||
return *(T*)item;
|
||||
}
|
||||
|
@ -198,7 +198,7 @@ GrAtlas* GrAtlasMgr::addToAtlas(GrAtlas** atlas,
|
||||
int width, int height, const void* image,
|
||||
GrMaskFormat format,
|
||||
GrIPoint16* loc) {
|
||||
GrAssert(NULL == *atlas || (*atlas)->getMaskFormat() == format);
|
||||
SkASSERT(NULL == *atlas || (*atlas)->getMaskFormat() == format);
|
||||
|
||||
// iterate through entire atlas list, see if we can find a hole
|
||||
GrAtlas* atlasIter = *atlas;
|
||||
|
@ -72,7 +72,7 @@ public:
|
||||
void deleteAtlas(GrAtlas* atlas) { delete atlas; }
|
||||
|
||||
GrTexture* getTexture(GrMaskFormat format) const {
|
||||
GrAssert((unsigned)format < kCount_GrMaskFormats);
|
||||
SkASSERT((unsigned)format < kCount_GrMaskFormats);
|
||||
return fTexture[format];
|
||||
}
|
||||
|
||||
|
@ -52,7 +52,7 @@ public:
|
||||
}
|
||||
|
||||
void setKeyData(const uint32_t* SK_RESTRICT data) {
|
||||
GrAssert(GrIsALIGN4(KEY_SIZE));
|
||||
SkASSERT(GrIsALIGN4(KEY_SIZE));
|
||||
memcpy(&fData, data, KEY_SIZE);
|
||||
|
||||
uint32_t hash = 0;
|
||||
@ -73,27 +73,27 @@ public:
|
||||
}
|
||||
|
||||
int compare(const GrTBinHashKey<ENTRY, KEY_SIZE>& key) const {
|
||||
GrAssert(fIsValid && key.fIsValid);
|
||||
SkASSERT(fIsValid && key.fIsValid);
|
||||
return memcmp(fData, key.fData, KEY_SIZE);
|
||||
}
|
||||
|
||||
static bool EQ(const ENTRY& entry, const GrTBinHashKey<ENTRY, KEY_SIZE>& key) {
|
||||
GrAssert(key.fIsValid);
|
||||
SkASSERT(key.fIsValid);
|
||||
return 0 == entry.compare(key);
|
||||
}
|
||||
|
||||
static bool LT(const ENTRY& entry, const GrTBinHashKey<ENTRY, KEY_SIZE>& key) {
|
||||
GrAssert(key.fIsValid);
|
||||
SkASSERT(key.fIsValid);
|
||||
return entry.compare(key) < 0;
|
||||
}
|
||||
|
||||
uint32_t getHash() const {
|
||||
GrAssert(fIsValid);
|
||||
SkASSERT(fIsValid);
|
||||
return fHash;
|
||||
}
|
||||
|
||||
const uint8_t* getData() const {
|
||||
GrAssert(fIsValid);
|
||||
SkASSERT(fIsValid);
|
||||
return fData;
|
||||
}
|
||||
|
||||
|
@ -58,8 +58,8 @@ static GrColor simplify_blend_term(GrBlendCoeff* srcCoeff,
|
||||
GrColor dstColor, uint32_t dstCompFlags,
|
||||
GrColor constantColor) {
|
||||
|
||||
GrAssert(!GrBlendCoeffRefsSrc(*srcCoeff));
|
||||
GrAssert(NULL != srcCoeff);
|
||||
SkASSERT(!GrBlendCoeffRefsSrc(*srcCoeff));
|
||||
SkASSERT(NULL != srcCoeff);
|
||||
|
||||
// Check whether srcCoeff can be reduced to kOne or kZero based on known color inputs.
|
||||
// We could pick out the coeff r,g,b,a values here and use them to compute the blend term color,
|
||||
|
@ -30,7 +30,7 @@ GrBufferAllocPool::GrBufferAllocPool(GrGpu* gpu,
|
||||
int preallocBufferCnt) :
|
||||
fBlocks(GrMax(8, 2*preallocBufferCnt)) {
|
||||
|
||||
GrAssert(NULL != gpu);
|
||||
SkASSERT(NULL != gpu);
|
||||
fGpu = gpu;
|
||||
fGpu->ref();
|
||||
fGpuIsReffed = true;
|
||||
@ -97,7 +97,7 @@ void GrBufferAllocPool::reset() {
|
||||
// we may have created a large cpu mirror of a large VB. Reset the size
|
||||
// to match our pre-allocated VBs.
|
||||
fCpuData.reset(fMinBlockSize);
|
||||
GrAssert(0 == fPreallocBuffersInUse);
|
||||
SkASSERT(0 == fPreallocBuffersInUse);
|
||||
VALIDATE();
|
||||
}
|
||||
|
||||
@ -120,32 +120,32 @@ void GrBufferAllocPool::unlock() {
|
||||
#if GR_DEBUG
|
||||
void GrBufferAllocPool::validate(bool unusedBlockAllowed) const {
|
||||
if (NULL != fBufferPtr) {
|
||||
GrAssert(!fBlocks.empty());
|
||||
SkASSERT(!fBlocks.empty());
|
||||
if (fBlocks.back().fBuffer->isLocked()) {
|
||||
GrGeometryBuffer* buf = fBlocks.back().fBuffer;
|
||||
GrAssert(buf->lockPtr() == fBufferPtr);
|
||||
SkASSERT(buf->lockPtr() == fBufferPtr);
|
||||
} else {
|
||||
GrAssert(fCpuData.get() == fBufferPtr);
|
||||
SkASSERT(fCpuData.get() == fBufferPtr);
|
||||
}
|
||||
} else {
|
||||
GrAssert(fBlocks.empty() || !fBlocks.back().fBuffer->isLocked());
|
||||
SkASSERT(fBlocks.empty() || !fBlocks.back().fBuffer->isLocked());
|
||||
}
|
||||
size_t bytesInUse = 0;
|
||||
for (int i = 0; i < fBlocks.count() - 1; ++i) {
|
||||
GrAssert(!fBlocks[i].fBuffer->isLocked());
|
||||
SkASSERT(!fBlocks[i].fBuffer->isLocked());
|
||||
}
|
||||
for (int i = 0; i < fBlocks.count(); ++i) {
|
||||
size_t bytes = fBlocks[i].fBuffer->sizeInBytes() - fBlocks[i].fBytesFree;
|
||||
bytesInUse += bytes;
|
||||
GrAssert(bytes || unusedBlockAllowed);
|
||||
SkASSERT(bytes || unusedBlockAllowed);
|
||||
}
|
||||
|
||||
GrAssert(bytesInUse == fBytesInUse);
|
||||
SkASSERT(bytesInUse == fBytesInUse);
|
||||
if (unusedBlockAllowed) {
|
||||
GrAssert((fBytesInUse && !fBlocks.empty()) ||
|
||||
SkASSERT((fBytesInUse && !fBlocks.empty()) ||
|
||||
(!fBytesInUse && (fBlocks.count() < 2)));
|
||||
} else {
|
||||
GrAssert((0 == fBytesInUse) == fBlocks.empty());
|
||||
SkASSERT((0 == fBytesInUse) == fBlocks.empty());
|
||||
}
|
||||
}
|
||||
#endif
|
||||
@ -156,8 +156,8 @@ void* GrBufferAllocPool::makeSpace(size_t size,
|
||||
size_t* offset) {
|
||||
VALIDATE();
|
||||
|
||||
GrAssert(NULL != buffer);
|
||||
GrAssert(NULL != offset);
|
||||
SkASSERT(NULL != buffer);
|
||||
SkASSERT(NULL != offset);
|
||||
|
||||
if (NULL != fBufferPtr) {
|
||||
BufferBlock& back = fBlocks.back();
|
||||
@ -186,7 +186,7 @@ void* GrBufferAllocPool::makeSpace(size_t size,
|
||||
if (!createBlock(size)) {
|
||||
return NULL;
|
||||
}
|
||||
GrAssert(NULL != fBufferPtr);
|
||||
SkASSERT(NULL != fBufferPtr);
|
||||
|
||||
*offset = 0;
|
||||
BufferBlock& back = fBlocks.back();
|
||||
@ -229,7 +229,7 @@ void GrBufferAllocPool::putBack(size_t bytes) {
|
||||
|
||||
while (bytes) {
|
||||
// caller shouldnt try to put back more than they've taken
|
||||
GrAssert(!fBlocks.empty());
|
||||
SkASSERT(!fBlocks.empty());
|
||||
BufferBlock& block = fBlocks.back();
|
||||
size_t bytesUsed = block.fBuffer->sizeInBytes() - block.fBytesFree;
|
||||
if (bytes >= bytesUsed) {
|
||||
@ -259,7 +259,7 @@ void GrBufferAllocPool::putBack(size_t bytes) {
|
||||
bool GrBufferAllocPool::createBlock(size_t requestSize) {
|
||||
|
||||
size_t size = GrMax(requestSize, fMinBlockSize);
|
||||
GrAssert(size >= GrBufferAllocPool_MIN_BLOCK_SIZE);
|
||||
SkASSERT(size >= GrBufferAllocPool_MIN_BLOCK_SIZE);
|
||||
|
||||
VALIDATE();
|
||||
|
||||
@ -284,7 +284,7 @@ bool GrBufferAllocPool::createBlock(size_t requestSize) {
|
||||
|
||||
block.fBytesFree = size;
|
||||
if (NULL != fBufferPtr) {
|
||||
GrAssert(fBlocks.count() > 1);
|
||||
SkASSERT(fBlocks.count() > 1);
|
||||
BufferBlock& prev = fBlocks.fromBack(1);
|
||||
if (prev.fBuffer->isLocked()) {
|
||||
prev.fBuffer->unlock();
|
||||
@ -295,7 +295,7 @@ bool GrBufferAllocPool::createBlock(size_t requestSize) {
|
||||
fBufferPtr = NULL;
|
||||
}
|
||||
|
||||
GrAssert(NULL == fBufferPtr);
|
||||
SkASSERT(NULL == fBufferPtr);
|
||||
|
||||
// If the buffer is CPU-backed we lock it because it is free to do so and saves a copy.
|
||||
// Otherwise when buffer locking is supported:
|
||||
@ -325,7 +325,7 @@ bool GrBufferAllocPool::createBlock(size_t requestSize) {
|
||||
}
|
||||
|
||||
void GrBufferAllocPool::destroyBlock() {
|
||||
GrAssert(!fBlocks.empty());
|
||||
SkASSERT(!fBlocks.empty());
|
||||
|
||||
BufferBlock& block = fBlocks.back();
|
||||
if (fPreallocBuffersInUse > 0) {
|
||||
@ -337,7 +337,7 @@ void GrBufferAllocPool::destroyBlock() {
|
||||
--fPreallocBuffersInUse;
|
||||
}
|
||||
}
|
||||
GrAssert(!block.fBuffer->isLocked());
|
||||
SkASSERT(!block.fBuffer->isLocked());
|
||||
block.fBuffer->unref();
|
||||
fBlocks.pop_back();
|
||||
fBufferPtr = NULL;
|
||||
@ -345,10 +345,10 @@ void GrBufferAllocPool::destroyBlock() {
|
||||
|
||||
void GrBufferAllocPool::flushCpuData(GrGeometryBuffer* buffer,
|
||||
size_t flushSize) {
|
||||
GrAssert(NULL != buffer);
|
||||
GrAssert(!buffer->isLocked());
|
||||
GrAssert(fCpuData.get() == fBufferPtr);
|
||||
GrAssert(flushSize <= buffer->sizeInBytes());
|
||||
SkASSERT(NULL != buffer);
|
||||
SkASSERT(!buffer->isLocked());
|
||||
SkASSERT(fCpuData.get() == fBufferPtr);
|
||||
SkASSERT(flushSize <= buffer->sizeInBytes());
|
||||
VALIDATE(true);
|
||||
|
||||
if (fGpu->caps()->bufferLockSupport() &&
|
||||
@ -368,7 +368,7 @@ GrGeometryBuffer* GrBufferAllocPool::createBuffer(size_t size) {
|
||||
if (kIndex_BufferType == fBufferType) {
|
||||
return fGpu->createIndexBuffer(size, true);
|
||||
} else {
|
||||
GrAssert(kVertex_BufferType == fBufferType);
|
||||
SkASSERT(kVertex_BufferType == fBufferType);
|
||||
return fGpu->createVertexBuffer(size, true);
|
||||
}
|
||||
}
|
||||
@ -391,9 +391,9 @@ void* GrVertexBufferAllocPool::makeSpace(size_t vertexSize,
|
||||
const GrVertexBuffer** buffer,
|
||||
int* startVertex) {
|
||||
|
||||
GrAssert(vertexCount >= 0);
|
||||
GrAssert(NULL != buffer);
|
||||
GrAssert(NULL != startVertex);
|
||||
SkASSERT(vertexCount >= 0);
|
||||
SkASSERT(NULL != buffer);
|
||||
SkASSERT(NULL != startVertex);
|
||||
|
||||
size_t offset = 0; // assign to suppress warning
|
||||
const GrGeometryBuffer* geomBuffer = NULL; // assign to suppress warning
|
||||
@ -403,7 +403,7 @@ void* GrVertexBufferAllocPool::makeSpace(size_t vertexSize,
|
||||
&offset);
|
||||
|
||||
*buffer = (const GrVertexBuffer*) geomBuffer;
|
||||
GrAssert(0 == offset % vertexSize);
|
||||
SkASSERT(0 == offset % vertexSize);
|
||||
*startVertex = offset / vertexSize;
|
||||
return ptr;
|
||||
}
|
||||
@ -449,9 +449,9 @@ void* GrIndexBufferAllocPool::makeSpace(int indexCount,
|
||||
const GrIndexBuffer** buffer,
|
||||
int* startIndex) {
|
||||
|
||||
GrAssert(indexCount >= 0);
|
||||
GrAssert(NULL != buffer);
|
||||
GrAssert(NULL != startIndex);
|
||||
SkASSERT(indexCount >= 0);
|
||||
SkASSERT(NULL != buffer);
|
||||
SkASSERT(NULL != startIndex);
|
||||
|
||||
size_t offset = 0; // assign to suppress warning
|
||||
const GrGeometryBuffer* geomBuffer = NULL; // assign to suppress warning
|
||||
@ -461,7 +461,7 @@ void* GrIndexBufferAllocPool::makeSpace(int indexCount,
|
||||
&offset);
|
||||
|
||||
*buffer = (const GrIndexBuffer*) geomBuffer;
|
||||
GrAssert(0 == offset % sizeof(uint16_t));
|
||||
SkASSERT(0 == offset % sizeof(uint16_t));
|
||||
*startIndex = offset / sizeof(uint16_t);
|
||||
return ptr;
|
||||
}
|
||||
|
@ -55,7 +55,7 @@ public:
|
||||
|
||||
void reset() {
|
||||
if (fStack.empty()) {
|
||||
// GrAssert(false);
|
||||
// SkASSERT(false);
|
||||
return;
|
||||
}
|
||||
|
||||
@ -73,7 +73,7 @@ public:
|
||||
void push();
|
||||
|
||||
void pop() {
|
||||
//GrAssert(!fStack.empty());
|
||||
//SkASSERT(!fStack.empty());
|
||||
|
||||
if (!fStack.empty()) {
|
||||
GrClipStackFrame* back = (GrClipStackFrame*) fStack.back();
|
||||
@ -95,7 +95,7 @@ public:
|
||||
GrTexture* getLastMask() {
|
||||
|
||||
if (fStack.empty()) {
|
||||
GrAssert(false);
|
||||
SkASSERT(false);
|
||||
return NULL;
|
||||
}
|
||||
|
||||
@ -107,7 +107,7 @@ public:
|
||||
const GrTexture* getLastMask() const {
|
||||
|
||||
if (fStack.empty()) {
|
||||
GrAssert(false);
|
||||
SkASSERT(false);
|
||||
return NULL;
|
||||
}
|
||||
|
||||
@ -121,7 +121,7 @@ public:
|
||||
const SkIRect& bound) {
|
||||
|
||||
if (fStack.empty()) {
|
||||
GrAssert(false);
|
||||
SkASSERT(false);
|
||||
return;
|
||||
}
|
||||
|
||||
@ -133,7 +133,7 @@ public:
|
||||
int getLastMaskWidth() const {
|
||||
|
||||
if (fStack.empty()) {
|
||||
GrAssert(false);
|
||||
SkASSERT(false);
|
||||
return -1;
|
||||
}
|
||||
|
||||
@ -149,7 +149,7 @@ public:
|
||||
int getLastMaskHeight() const {
|
||||
|
||||
if (fStack.empty()) {
|
||||
GrAssert(false);
|
||||
SkASSERT(false);
|
||||
return -1;
|
||||
}
|
||||
|
||||
@ -165,7 +165,7 @@ public:
|
||||
void getLastBound(SkIRect* bound) const {
|
||||
|
||||
if (fStack.empty()) {
|
||||
GrAssert(false);
|
||||
SkASSERT(false);
|
||||
bound->setEmpty();
|
||||
return;
|
||||
}
|
||||
|
@ -35,7 +35,7 @@ void setup_drawstate_aaclip(GrGpu* gpu,
|
||||
GrTexture* result,
|
||||
const SkIRect &devBound) {
|
||||
GrDrawState* drawState = gpu->drawState();
|
||||
GrAssert(drawState);
|
||||
SkASSERT(drawState);
|
||||
|
||||
SkMatrix mat;
|
||||
// We want to use device coords to compute the texture coordinates. We set our matrix to be
|
||||
@ -121,7 +121,7 @@ bool GrClipMaskManager::setupClipping(const GrClipData* clipDataIn,
|
||||
|
||||
const GrRenderTarget* rt = drawState->getRenderTarget();
|
||||
// GrDrawTarget should have filtered this for us
|
||||
GrAssert(NULL != rt);
|
||||
SkASSERT(NULL != rt);
|
||||
|
||||
bool ignoreClip = !drawState->isClipState() || clipDataIn->fClipStack->isWideOpen();
|
||||
|
||||
@ -258,7 +258,7 @@ void setup_boolean_blendcoeffs(GrDrawState* drawState, SkRegion::Op op) {
|
||||
drawState->setBlendFunc(kIDC_GrBlendCoeff, kZero_GrBlendCoeff);
|
||||
break;
|
||||
default:
|
||||
GrAssert(false);
|
||||
SkASSERT(false);
|
||||
break;
|
||||
}
|
||||
}
|
||||
@ -423,7 +423,7 @@ GrTexture* GrClipMaskManager::createAlphaClipMask(int32_t clipStackGenID,
|
||||
InitialState initialState,
|
||||
const ElementList& elements,
|
||||
const SkIRect& clipSpaceIBounds) {
|
||||
GrAssert(kNone_ClipMaskType == fCurrClipMaskType);
|
||||
SkASSERT(kNone_ClipMaskType == fCurrClipMaskType);
|
||||
|
||||
GrTexture* result;
|
||||
if (this->getMaskTexture(clipStackGenID, clipSpaceIBounds, &result)) {
|
||||
@ -570,13 +570,13 @@ bool GrClipMaskManager::createStencilClipMask(InitialState initialState,
|
||||
const SkIRect& clipSpaceIBounds,
|
||||
const SkIPoint& clipSpaceToStencilOffset) {
|
||||
|
||||
GrAssert(kNone_ClipMaskType == fCurrClipMaskType);
|
||||
SkASSERT(kNone_ClipMaskType == fCurrClipMaskType);
|
||||
|
||||
GrDrawState* drawState = fGpu->drawState();
|
||||
GrAssert(drawState->isClipState());
|
||||
SkASSERT(drawState->isClipState());
|
||||
|
||||
GrRenderTarget* rt = drawState->getRenderTarget();
|
||||
GrAssert(NULL != rt);
|
||||
SkASSERT(NULL != rt);
|
||||
|
||||
// TODO: dynamically attach a SB when needed.
|
||||
GrStencilBuffer* stencilBuffer = rt->getStencilBuffer();
|
||||
@ -643,7 +643,7 @@ bool GrClipMaskManager::createStencilClipMask(InitialState initialState,
|
||||
stencilSupport = GrPathRenderer::kNoRestriction_StencilSupport;
|
||||
fillInverted = false;
|
||||
} else {
|
||||
GrAssert(Element::kPath_Type == element->getType());
|
||||
SkASSERT(Element::kPath_Type == element->getType());
|
||||
clipPath.init(element->getPath());
|
||||
fillInverted = clipPath->isInverseFillType();
|
||||
if (fillInverted) {
|
||||
@ -690,7 +690,7 @@ bool GrClipMaskManager::createStencilClipMask(InitialState initialState,
|
||||
*drawState->stencil() = gDrawToStencil;
|
||||
fGpu->drawSimpleRect(element->getRect(), NULL);
|
||||
} else {
|
||||
GrAssert(Element::kPath_Type == element->getType());
|
||||
SkASSERT(Element::kPath_Type == element->getType());
|
||||
if (!clipPath->isEmpty()) {
|
||||
if (canRenderDirectToStencil) {
|
||||
*drawState->stencil() = gDrawToStencil;
|
||||
@ -712,7 +712,7 @@ bool GrClipMaskManager::createStencilClipMask(InitialState initialState,
|
||||
SET_RANDOM_COLOR
|
||||
fGpu->drawSimpleRect(element->getRect(), NULL);
|
||||
} else {
|
||||
GrAssert(Element::kPath_Type == element->getType());
|
||||
SkASSERT(Element::kPath_Type == element->getType());
|
||||
SET_RANDOM_COLOR
|
||||
pr->drawPath(*clipPath, stroke, fGpu, false);
|
||||
}
|
||||
@ -726,7 +726,7 @@ bool GrClipMaskManager::createStencilClipMask(InitialState initialState,
|
||||
}
|
||||
}
|
||||
// set this last because recursive draws may overwrite it back to kNone.
|
||||
GrAssert(kNone_ClipMaskType == fCurrClipMaskType);
|
||||
SkASSERT(kNone_ClipMaskType == fCurrClipMaskType);
|
||||
fCurrClipMaskType = kStencil_ClipMaskType;
|
||||
return true;
|
||||
}
|
||||
@ -798,7 +798,7 @@ void GrClipMaskManager::setGpuStencil() {
|
||||
if (this->isClipInStencil() && drawState.isClipState()) {
|
||||
clipMode = GrClipMaskManager::kRespectClip_StencilClipMode;
|
||||
// We can't be modifying the clip and respecting it at the same time.
|
||||
GrAssert(!drawState.isStateFlagEnabled(
|
||||
SkASSERT(!drawState.isStateFlagEnabled(
|
||||
GrGpu::kModifyStencilClip_StateBit));
|
||||
} else if (drawState.isStateFlagEnabled(
|
||||
GrGpu::kModifyStencilClip_StateBit)) {
|
||||
@ -829,8 +829,8 @@ void GrClipMaskManager::setGpuStencil() {
|
||||
stencilBits = stencilBuffer->bits();
|
||||
}
|
||||
|
||||
GrAssert(fGpu->caps()->stencilWrapOpsSupport() || !settings.usesWrapOp());
|
||||
GrAssert(fGpu->caps()->twoSidedStencilSupport() || !settings.isTwoSided());
|
||||
SkASSERT(fGpu->caps()->stencilWrapOpsSupport() || !settings.usesWrapOp());
|
||||
SkASSERT(fGpu->caps()->twoSidedStencilSupport() || !settings.isTwoSided());
|
||||
this->adjustStencilParams(&settings, clipMode, stencilBits);
|
||||
fGpu->setStencilSettings(settings);
|
||||
}
|
||||
@ -838,7 +838,7 @@ void GrClipMaskManager::setGpuStencil() {
|
||||
void GrClipMaskManager::adjustStencilParams(GrStencilSettings* settings,
|
||||
StencilClipMode mode,
|
||||
int stencilBitCnt) {
|
||||
GrAssert(stencilBitCnt > 0);
|
||||
SkASSERT(stencilBitCnt > 0);
|
||||
|
||||
if (kModifyClip_StencilClipMode == mode) {
|
||||
// We assume that this clip manager itself is drawing to the GrGpu and
|
||||
@ -859,7 +859,7 @@ void GrClipMaskManager::adjustStencilParams(GrStencilSettings* settings,
|
||||
uint16_t funcMask = settings->funcMask(face);
|
||||
uint16_t funcRef = settings->funcRef(face);
|
||||
|
||||
GrAssert((unsigned) func < kStencilFuncCount);
|
||||
SkASSERT((unsigned) func < kStencilFuncCount);
|
||||
|
||||
writeMask &= userBits;
|
||||
|
||||
@ -867,7 +867,7 @@ void GrClipMaskManager::adjustStencilParams(GrStencilSettings* settings,
|
||||
int respectClip = kRespectClip_StencilClipMode == mode;
|
||||
if (respectClip) {
|
||||
// The GrGpu class should have checked this
|
||||
GrAssert(this->isClipInStencil());
|
||||
SkASSERT(this->isClipInStencil());
|
||||
switch (func) {
|
||||
case kAlwaysIfInClip_StencilFunc:
|
||||
funcMask = clipBit;
|
||||
@ -893,7 +893,7 @@ void GrClipMaskManager::adjustStencilParams(GrStencilSettings* settings,
|
||||
const GrStencilFunc* table =
|
||||
gSpecialToBasicStencilFunc[respectClip];
|
||||
func = table[func - kBasicStencilFuncCount];
|
||||
GrAssert(func >= 0 && func < kBasicStencilFuncCount);
|
||||
SkASSERT(func >= 0 && func < kBasicStencilFuncCount);
|
||||
} else {
|
||||
funcMask &= userBits;
|
||||
funcRef &= userBits;
|
||||
@ -921,7 +921,7 @@ GrTexture* GrClipMaskManager::createSoftwareClipMask(int32_t clipStackGenID,
|
||||
GrReducedClip::InitialState initialState,
|
||||
const GrReducedClip::ElementList& elements,
|
||||
const SkIRect& clipSpaceIBounds) {
|
||||
GrAssert(kNone_ClipMaskType == fCurrClipMaskType);
|
||||
SkASSERT(kNone_ClipMaskType == fCurrClipMaskType);
|
||||
|
||||
GrTexture* result;
|
||||
if (this->getMaskTexture(clipStackGenID, clipSpaceIBounds, &result)) {
|
||||
@ -974,7 +974,7 @@ GrTexture* GrClipMaskManager::createSoftwareClipMask(int32_t clipStackGenID,
|
||||
element->isAA(),
|
||||
0x00);
|
||||
} else {
|
||||
GrAssert(Element::kPath_Type == element->getType());
|
||||
SkASSERT(Element::kPath_Type == element->getType());
|
||||
SkPath clipPath = element->getPath();
|
||||
clipPath.toggleInverseFillType();
|
||||
helper.draw(clipPath, stroke,
|
||||
@ -991,7 +991,7 @@ GrTexture* GrClipMaskManager::createSoftwareClipMask(int32_t clipStackGenID,
|
||||
if (Element::kRect_Type == element->getType()) {
|
||||
helper.draw(element->getRect(), op, element->isAA(), 0xFF);
|
||||
} else {
|
||||
GrAssert(Element::kPath_Type == element->getType());
|
||||
SkASSERT(Element::kPath_Type == element->getType());
|
||||
helper.draw(element->getPath(), stroke, op, element->isAA(), 0xFF);
|
||||
}
|
||||
}
|
||||
|
@ -62,7 +62,7 @@ static const int DRAW_BUFFER_VBPOOL_PREALLOC_BUFFERS = 4;
|
||||
static const size_t DRAW_BUFFER_IBPOOL_BUFFER_SIZE = 1 << 11;
|
||||
static const int DRAW_BUFFER_IBPOOL_PREALLOC_BUFFERS = 4;
|
||||
|
||||
#define ASSERT_OWNED_RESOURCE(R) GrAssert(!(R) || (R)->getContext() == this)
|
||||
#define ASSERT_OWNED_RESOURCE(R) SkASSERT(!(R) || (R)->getContext() == this)
|
||||
|
||||
// Glorified typedef to avoid including GrDrawState.h in GrContext.h
|
||||
class GrContext::AutoRestoreEffects : public GrDrawState::AutoRestoreEffects {};
|
||||
@ -107,7 +107,7 @@ GrContext::GrContext() {
|
||||
}
|
||||
|
||||
bool GrContext::init(GrBackend backend, GrBackendContext backendContext) {
|
||||
GrAssert(NULL == fGpu);
|
||||
SkASSERT(NULL == fGpu);
|
||||
|
||||
fGpu = GrGpu::Create(backend, backendContext, this);
|
||||
if (NULL == fGpu) {
|
||||
@ -367,7 +367,7 @@ GrTexture* GrContext::createResizedTexture(const GrTextureDesc& desc,
|
||||
|
||||
SkDEBUGCODE(GrTexture* texture = )fGpu->createTexture(rtDesc, stretchedPixels.get(),
|
||||
stretchedRowBytes);
|
||||
GrAssert(NULL != texture);
|
||||
SkASSERT(NULL != texture);
|
||||
}
|
||||
|
||||
return texture;
|
||||
@ -418,11 +418,11 @@ static GrTexture* create_scratch_texture(GrGpu* gpu,
|
||||
|
||||
GrTexture* GrContext::lockAndRefScratchTexture(const GrTextureDesc& inDesc, ScratchTexMatch match) {
|
||||
|
||||
GrAssert((inDesc.fFlags & kRenderTarget_GrTextureFlagBit) ||
|
||||
SkASSERT((inDesc.fFlags & kRenderTarget_GrTextureFlagBit) ||
|
||||
!(inDesc.fFlags & kNoStencil_GrTextureFlagBit));
|
||||
|
||||
// Renderable A8 targets are not universally supported (e.g., not on ANGLE)
|
||||
GrAssert(this->isConfigRenderable(kAlpha_8_GrPixelConfig) ||
|
||||
SkASSERT(this->isConfigRenderable(kAlpha_8_GrPixelConfig) ||
|
||||
!(inDesc.fFlags & kRenderTarget_GrTextureFlagBit) ||
|
||||
(inDesc.fConfig != kAlpha_8_GrPixelConfig));
|
||||
|
||||
@ -487,11 +487,11 @@ void GrContext::addExistingTextureToCache(GrTexture* texture) {
|
||||
|
||||
// This texture should already have a cache entry since it was once
|
||||
// attached
|
||||
GrAssert(NULL != texture->getCacheEntry());
|
||||
SkASSERT(NULL != texture->getCacheEntry());
|
||||
|
||||
// Conceptually, the cache entry is going to assume responsibility
|
||||
// for the creation ref.
|
||||
GrAssert(texture->unique());
|
||||
SkASSERT(texture->unique());
|
||||
|
||||
// Since this texture came from an AutoScratchTexture it should
|
||||
// still be in the exclusive pile
|
||||
@ -509,7 +509,7 @@ void GrContext::addExistingTextureToCache(GrTexture* texture) {
|
||||
|
||||
void GrContext::unlockScratchTexture(GrTexture* texture) {
|
||||
ASSERT_OWNED_RESOURCE(texture);
|
||||
GrAssert(NULL != texture->getCacheEntry());
|
||||
SkASSERT(NULL != texture->getCacheEntry());
|
||||
|
||||
// If this is a scratch texture we detached it from the cache
|
||||
// while it was locked (to avoid two callers simultaneously getting
|
||||
@ -527,7 +527,7 @@ void GrContext::purgeCache() {
|
||||
}
|
||||
|
||||
bool GrContext::OverbudgetCB(void* data) {
|
||||
GrAssert(NULL != data);
|
||||
SkASSERT(NULL != data);
|
||||
|
||||
GrContext* context = reinterpret_cast<GrContext*>(data);
|
||||
|
||||
@ -1369,7 +1369,7 @@ bool GrContext::readRenderTargetPixels(GrRenderTarget* target,
|
||||
// can be invoked in this method
|
||||
GrDrawTarget::AutoGeometryAndStatePush agasp(fGpu, GrDrawTarget::kReset_ASRInit);
|
||||
GrDrawState* drawState = fGpu->drawState();
|
||||
GrAssert(effect);
|
||||
SkASSERT(effect);
|
||||
drawState->addColorEffect(effect);
|
||||
|
||||
drawState->setRenderTarget(texture->asRenderTarget());
|
||||
@ -1397,10 +1397,10 @@ bool GrContext::readRenderTargetPixels(GrRenderTarget* target,
|
||||
grconfig_to_config8888(dstConfig, unpremul, &dstC8888);
|
||||
|
||||
if (swapRAndB) {
|
||||
GrAssert(c8888IsValid); // we should only do r/b swap on 8888 configs
|
||||
SkASSERT(c8888IsValid); // we should only do r/b swap on 8888 configs
|
||||
srcC8888 = swap_config8888_red_and_blue(srcC8888);
|
||||
}
|
||||
GrAssert(c8888IsValid);
|
||||
SkASSERT(c8888IsValid);
|
||||
uint32_t* b32 = reinterpret_cast<uint32_t*>(buffer);
|
||||
SkConvertConfig8888Pixels(b32, rowBytes, dstC8888,
|
||||
b32, rowBytes, srcC8888,
|
||||
@ -1410,7 +1410,7 @@ bool GrContext::readRenderTargetPixels(GrRenderTarget* target,
|
||||
}
|
||||
|
||||
void GrContext::resolveRenderTarget(GrRenderTarget* target) {
|
||||
GrAssert(target);
|
||||
SkASSERT(target);
|
||||
ASSERT_OWNED_RESOURCE(target);
|
||||
// In the future we may track whether there are any pending draws to this
|
||||
// target. We don't today so we always perform a flush. We don't promise
|
||||
@ -1528,10 +1528,10 @@ bool GrContext::writeRenderTargetPixels(GrRenderTarget* target,
|
||||
SkCanvas::Config8888 srcConfig8888, dstConfig8888;
|
||||
GR_DEBUGCODE(bool success = )
|
||||
grconfig_to_config8888(srcConfig, true, &srcConfig8888);
|
||||
GrAssert(success);
|
||||
SkASSERT(success);
|
||||
GR_DEBUGCODE(success = )
|
||||
grconfig_to_config8888(srcConfig, false, &dstConfig8888);
|
||||
GrAssert(success);
|
||||
SkASSERT(success);
|
||||
const uint32_t* src = reinterpret_cast<const uint32_t*>(buffer);
|
||||
tmpPixels.reset(width * height);
|
||||
SkConvertConfig8888Pixels(tmpPixels.get(), 4 * width, dstConfig8888,
|
||||
@ -1562,7 +1562,7 @@ bool GrContext::writeRenderTargetPixels(GrRenderTarget* target,
|
||||
matrix.setTranslate(SkIntToScalar(left), SkIntToScalar(top));
|
||||
GrDrawTarget::AutoGeometryAndStatePush agasp(fGpu, GrDrawTarget::kReset_ASRInit, &matrix);
|
||||
GrDrawState* drawState = fGpu->drawState();
|
||||
GrAssert(effect);
|
||||
SkASSERT(effect);
|
||||
drawState->addColorEffect(effect);
|
||||
|
||||
drawState->setRenderTarget(target);
|
||||
@ -1577,7 +1577,7 @@ GrDrawTarget* GrContext::prepareToDraw(const GrPaint* paint,
|
||||
AutoRestoreEffects* are) {
|
||||
// All users of this draw state should be freeing up all effects when they're done.
|
||||
// Otherwise effects that own resources may keep those resources alive indefinitely.
|
||||
GrAssert(0 == fDrawState->numColorStages() && 0 == fDrawState->numCoverageStages());
|
||||
SkASSERT(0 == fDrawState->numColorStages() && 0 == fDrawState->numCoverageStages());
|
||||
|
||||
if (kNo_BufferedDraw == buffered && kYes_BufferedDraw == fLastDrawWasBuffered) {
|
||||
fDrawBuffer->flush();
|
||||
@ -1585,7 +1585,7 @@ GrDrawTarget* GrContext::prepareToDraw(const GrPaint* paint,
|
||||
}
|
||||
ASSERT_OWNED_RESOURCE(fRenderTarget.get());
|
||||
if (NULL != paint) {
|
||||
GrAssert(NULL != are);
|
||||
SkASSERT(NULL != are);
|
||||
are->set(fDrawState);
|
||||
fDrawState->setFromPaint(*paint, fViewMatrix, fRenderTarget.get());
|
||||
#if GR_DEBUG_PARTIAL_COVERAGE_CHECK
|
||||
@ -1603,14 +1603,14 @@ GrDrawTarget* GrContext::prepareToDraw(const GrPaint* paint,
|
||||
fLastDrawWasBuffered = kYes_BufferedDraw;
|
||||
target = fDrawBuffer;
|
||||
} else {
|
||||
GrAssert(kNo_BufferedDraw == buffered);
|
||||
SkASSERT(kNo_BufferedDraw == buffered);
|
||||
fLastDrawWasBuffered = kNo_BufferedDraw;
|
||||
target = fGpu;
|
||||
}
|
||||
fDrawState->setState(GrDrawState::kClip_StateBit, NULL != fClip &&
|
||||
!fClip->fClipStack->isWideOpen());
|
||||
target->setClip(fClip);
|
||||
GrAssert(fDrawState == target->drawState());
|
||||
SkASSERT(fDrawState == target->drawState());
|
||||
return target;
|
||||
}
|
||||
|
||||
@ -1664,10 +1664,9 @@ static inline intptr_t setOrClear(intptr_t bits, int shift, intptr_t pred) {
|
||||
}
|
||||
|
||||
void GrContext::setupDrawBuffer() {
|
||||
|
||||
GrAssert(NULL == fDrawBuffer);
|
||||
GrAssert(NULL == fDrawBufferVBAllocPool);
|
||||
GrAssert(NULL == fDrawBufferIBAllocPool);
|
||||
SkASSERT(NULL == fDrawBuffer);
|
||||
SkASSERT(NULL == fDrawBufferVBAllocPool);
|
||||
SkASSERT(NULL == fDrawBufferIBAllocPool);
|
||||
|
||||
fDrawBufferVBAllocPool =
|
||||
SkNEW_ARGS(GrVertexBufferAllocPool, (fGpu, false,
|
||||
|
@ -241,7 +241,7 @@ bool GrDefaultPathRenderer::createGeom(const SkPath& path,
|
||||
uint16_t subpathIdxStart = 0;
|
||||
|
||||
GrPoint* base = reinterpret_cast<GrPoint*>(arg->vertices());
|
||||
GrAssert(NULL != base);
|
||||
SkASSERT(NULL != base);
|
||||
GrPoint* vert = base;
|
||||
|
||||
GrPoint pts[4];
|
||||
@ -314,8 +314,8 @@ bool GrDefaultPathRenderer::createGeom(const SkPath& path,
|
||||
first = false;
|
||||
}
|
||||
FINISHED:
|
||||
GrAssert((vert - base) <= maxPts);
|
||||
GrAssert((idx - idxBase) <= maxIdxs);
|
||||
SkASSERT((vert - base) <= maxPts);
|
||||
SkASSERT((idx - idxBase) <= maxIdxs);
|
||||
|
||||
*vertexCnt = vert - base;
|
||||
*indexCnt = idx - idxBase;
|
||||
@ -348,12 +348,12 @@ bool GrDefaultPathRenderer::internalDrawPath(const SkPath& path,
|
||||
return false;
|
||||
}
|
||||
|
||||
GrAssert(NULL != target);
|
||||
SkASSERT(NULL != target);
|
||||
GrDrawTarget::AutoStateRestore asr(target, GrDrawTarget::kPreserve_ASRInit);
|
||||
GrDrawState* drawState = target->drawState();
|
||||
bool colorWritesWereDisabled = drawState->isColorWriteDisabled();
|
||||
// face culling doesn't make sense here
|
||||
GrAssert(GrDrawState::kBoth_DrawFace == drawState->getDrawFace());
|
||||
SkASSERT(GrDrawState::kBoth_DrawFace == drawState->getDrawFace());
|
||||
|
||||
int passCount = 0;
|
||||
const GrStencilSettings* passes[3];
|
||||
@ -441,7 +441,7 @@ bool GrDefaultPathRenderer::internalDrawPath(const SkPath& path,
|
||||
}
|
||||
break;
|
||||
default:
|
||||
GrAssert(!"Unknown path fFill!");
|
||||
SkASSERT(!"Unknown path fFill!");
|
||||
return false;
|
||||
}
|
||||
}
|
||||
@ -463,7 +463,7 @@ bool GrDefaultPathRenderer::internalDrawPath(const SkPath& path,
|
||||
SkRect bounds;
|
||||
GrDrawState::AutoViewMatrixRestore avmr;
|
||||
if (reverse) {
|
||||
GrAssert(NULL != drawState->getRenderTarget());
|
||||
SkASSERT(NULL != drawState->getRenderTarget());
|
||||
// draw over the dev bounds (which will be the whole dst surface for inv fill).
|
||||
bounds = devBounds;
|
||||
SkMatrix vmi;
|
||||
@ -515,7 +515,7 @@ bool GrDefaultPathRenderer::onDrawPath(const SkPath& path,
|
||||
void GrDefaultPathRenderer::onStencilPath(const SkPath& path,
|
||||
const SkStrokeRec& stroke,
|
||||
GrDrawTarget* target) {
|
||||
GrAssert(SkPath::kInverseEvenOdd_FillType != path.getFillType());
|
||||
GrAssert(SkPath::kInverseWinding_FillType != path.getFillType());
|
||||
SkASSERT(SkPath::kInverseEvenOdd_FillType != path.getFillType());
|
||||
SkASSERT(SkPath::kInverseWinding_FillType != path.getFillType());
|
||||
this->internalDrawPath(path, stroke, target, true);
|
||||
}
|
||||
|
@ -27,7 +27,7 @@ bool GrDrawState::setIdentityViewMatrix() {
|
||||
}
|
||||
|
||||
void GrDrawState::setFromPaint(const GrPaint& paint, const SkMatrix& vm, GrRenderTarget* rt) {
|
||||
GrAssert(0 == fBlockEffectRemovalCnt || 0 == this->numTotalStages());
|
||||
SkASSERT(0 == fBlockEffectRemovalCnt || 0 == this->numTotalStages());
|
||||
|
||||
fColorStages.reset();
|
||||
fCoverageStages.reset();
|
||||
@ -70,7 +70,7 @@ static size_t vertex_size(const GrVertexAttrib* attribs, int count) {
|
||||
#if GR_DEBUG
|
||||
uint32_t overlapCheck = 0;
|
||||
#endif
|
||||
GrAssert(count <= GrDrawState::kMaxVertexAttribCnt);
|
||||
SkASSERT(count <= GrDrawState::kMaxVertexAttribCnt);
|
||||
size_t size = 0;
|
||||
for (int index = 0; index < count; ++index) {
|
||||
size_t attribSize = GrVertexAttribTypeSize(attribs[index].fType);
|
||||
@ -79,7 +79,7 @@ static size_t vertex_size(const GrVertexAttrib* attribs, int count) {
|
||||
size_t dwordCount = attribSize >> 2;
|
||||
uint32_t mask = (1 << dwordCount)-1;
|
||||
size_t offsetShift = attribs[index].fOffset >> 2;
|
||||
GrAssert(!(overlapCheck & (mask << offsetShift)));
|
||||
SkASSERT(!(overlapCheck & (mask << offsetShift)));
|
||||
overlapCheck |= (mask << offsetShift);
|
||||
#endif
|
||||
}
|
||||
@ -93,7 +93,7 @@ size_t GrDrawState::getVertexSize() const {
|
||||
////////////////////////////////////////////////////////////////////////////////
|
||||
|
||||
void GrDrawState::setVertexAttribs(const GrVertexAttrib* attribs, int count) {
|
||||
GrAssert(count <= kMaxVertexAttribCnt);
|
||||
SkASSERT(count <= kMaxVertexAttribCnt);
|
||||
|
||||
fCommon.fVAPtr = attribs;
|
||||
fCommon.fVACount = count;
|
||||
@ -108,8 +108,8 @@ void GrDrawState::setVertexAttribs(const GrVertexAttrib* attribs, int count) {
|
||||
for (int i = 0; i < count; ++i) {
|
||||
if (attribs[i].fBinding < kGrFixedFunctionVertexAttribBindingCnt) {
|
||||
// The fixed function attribs can only be specified once
|
||||
GrAssert(-1 == fCommon.fFixedFunctionVertexAttribIndices[attribs[i].fBinding]);
|
||||
GrAssert(GrFixedFunctionVertexAttribVectorCount(attribs[i].fBinding) ==
|
||||
SkASSERT(-1 == fCommon.fFixedFunctionVertexAttribIndices[attribs[i].fBinding]);
|
||||
SkASSERT(GrFixedFunctionVertexAttribVectorCount(attribs[i].fBinding) ==
|
||||
GrVertexAttribTypeVectorCount(attribs[i].fType));
|
||||
fCommon.fFixedFunctionVertexAttribIndices[attribs[i].fBinding] = i;
|
||||
}
|
||||
@ -117,12 +117,12 @@ void GrDrawState::setVertexAttribs(const GrVertexAttrib* attribs, int count) {
|
||||
size_t dwordCount = GrVertexAttribTypeSize(attribs[i].fType) >> 2;
|
||||
uint32_t mask = (1 << dwordCount)-1;
|
||||
size_t offsetShift = attribs[i].fOffset >> 2;
|
||||
GrAssert(!(overlapCheck & (mask << offsetShift)));
|
||||
SkASSERT(!(overlapCheck & (mask << offsetShift)));
|
||||
overlapCheck |= (mask << offsetShift);
|
||||
#endif
|
||||
}
|
||||
// Positions must be specified.
|
||||
GrAssert(-1 != fCommon.fFixedFunctionVertexAttribIndices[kPosition_GrVertexAttribBinding]);
|
||||
SkASSERT(-1 != fCommon.fFixedFunctionVertexAttribIndices[kPosition_GrVertexAttribBinding]);
|
||||
}
|
||||
|
||||
////////////////////////////////////////////////////////////////////////////////
|
||||
@ -154,7 +154,7 @@ bool GrDrawState::validateVertexAttribs() const {
|
||||
int covIdx = s - fColorStages.count();
|
||||
const GrEffectStage& stage = covIdx < 0 ? fColorStages[s] : fCoverageStages[covIdx];
|
||||
const GrEffectRef* effect = stage.getEffect();
|
||||
GrAssert(NULL != effect);
|
||||
SkASSERT(NULL != effect);
|
||||
// make sure that any attribute indices have the correct binding type, that the attrib
|
||||
// type and effect's shader lang type are compatible, and that attributes shared by
|
||||
// multiple effects use the same shader lang type.
|
||||
@ -402,9 +402,9 @@ void GrDrawState::AutoViewMatrixRestore::restore() {
|
||||
if (NULL != fDrawState) {
|
||||
GR_DEBUGCODE(--fDrawState->fBlockEffectRemovalCnt;)
|
||||
fDrawState->fCommon.fViewMatrix = fViewMatrix;
|
||||
GrAssert(fDrawState->numColorStages() >= fNumColorStages);
|
||||
SkASSERT(fDrawState->numColorStages() >= fNumColorStages);
|
||||
int numCoverageStages = fSavedCoordChanges.count() - fNumColorStages;
|
||||
GrAssert(fDrawState->numCoverageStages() >= numCoverageStages);
|
||||
SkASSERT(fDrawState->numCoverageStages() >= numCoverageStages);
|
||||
|
||||
int i = 0;
|
||||
for (int s = 0; s < fNumColorStages; ++s, ++i) {
|
||||
@ -421,7 +421,7 @@ void GrDrawState::AutoViewMatrixRestore::set(GrDrawState* drawState,
|
||||
const SkMatrix& preconcatMatrix) {
|
||||
this->restore();
|
||||
|
||||
GrAssert(NULL == fDrawState);
|
||||
SkASSERT(NULL == fDrawState);
|
||||
if (NULL == drawState || preconcatMatrix.isIdentity()) {
|
||||
return;
|
||||
}
|
||||
|
@ -63,7 +63,7 @@ public:
|
||||
}
|
||||
}
|
||||
|
||||
virtual ~GrDrawState() { GrAssert(0 == fBlockEffectRemovalCnt); }
|
||||
virtual ~GrDrawState() { SkASSERT(0 == fBlockEffectRemovalCnt); }
|
||||
|
||||
/**
|
||||
* Resets to the default state. GrEffects will be removed from all stages.
|
||||
@ -152,7 +152,7 @@ public:
|
||||
class AutoVertexAttribRestore {
|
||||
public:
|
||||
AutoVertexAttribRestore(GrDrawState* drawState) {
|
||||
GrAssert(NULL != drawState);
|
||||
SkASSERT(NULL != drawState);
|
||||
fDrawState = drawState;
|
||||
fVAPtr = drawState->fCommon.fVAPtr;
|
||||
fVACount = drawState->fCommon.fVACount;
|
||||
@ -359,13 +359,13 @@ public:
|
||||
////
|
||||
|
||||
const GrEffectRef* addColorEffect(const GrEffectRef* effect, int attr0 = -1, int attr1 = -1) {
|
||||
GrAssert(NULL != effect);
|
||||
SkASSERT(NULL != effect);
|
||||
SkNEW_APPEND_TO_TARRAY(&fColorStages, GrEffectStage, (effect, attr0, attr1));
|
||||
return effect;
|
||||
}
|
||||
|
||||
const GrEffectRef* addCoverageEffect(const GrEffectRef* effect, int attr0 = -1, int attr1 = -1) {
|
||||
GrAssert(NULL != effect);
|
||||
SkASSERT(NULL != effect);
|
||||
SkNEW_APPEND_TO_TARRAY(&fCoverageStages, GrEffectStage, (effect, attr0, attr1));
|
||||
return effect;
|
||||
}
|
||||
@ -414,10 +414,10 @@ public:
|
||||
void set(GrDrawState* ds) {
|
||||
if (NULL != fDrawState) {
|
||||
int n = fDrawState->fColorStages.count() - fColorEffectCnt;
|
||||
GrAssert(n >= 0);
|
||||
SkASSERT(n >= 0);
|
||||
fDrawState->fColorStages.pop_back_n(n);
|
||||
n = fDrawState->fCoverageStages.count() - fCoverageEffectCnt;
|
||||
GrAssert(n >= 0);
|
||||
SkASSERT(n >= 0);
|
||||
fDrawState->fCoverageStages.pop_back_n(n);
|
||||
GR_DEBUGCODE(--fDrawState->fBlockEffectRemovalCnt;)
|
||||
}
|
||||
@ -682,7 +682,7 @@ public:
|
||||
this->restore();
|
||||
|
||||
if (NULL != ds) {
|
||||
GrAssert(NULL == fSavedTarget);
|
||||
SkASSERT(NULL == fSavedTarget);
|
||||
fSavedTarget = ds->getRenderTarget();
|
||||
SkSafeRef(fSavedTarget);
|
||||
ds->setRenderTarget(newTarget);
|
||||
@ -847,7 +847,7 @@ public:
|
||||
* @param face the face(s) to draw.
|
||||
*/
|
||||
void setDrawFace(DrawFace face) {
|
||||
GrAssert(kInvalid_DrawFace != face);
|
||||
SkASSERT(kInvalid_DrawFace != face);
|
||||
fCommon.fDrawFace = face;
|
||||
}
|
||||
|
||||
@ -884,7 +884,7 @@ public:
|
||||
bool operator !=(const GrDrawState& s) const { return !(*this == s); }
|
||||
|
||||
GrDrawState& operator= (const GrDrawState& s) {
|
||||
GrAssert(0 == fBlockEffectRemovalCnt || 0 == this->numTotalStages());
|
||||
SkASSERT(0 == fBlockEffectRemovalCnt || 0 == this->numTotalStages());
|
||||
this->setRenderTarget(s.fRenderTarget.get());
|
||||
fCommon = s.fCommon;
|
||||
fColorStages = s.fColorStages;
|
||||
@ -895,7 +895,7 @@ public:
|
||||
private:
|
||||
|
||||
void onReset(const SkMatrix* initialViewMatrix) {
|
||||
GrAssert(0 == fBlockEffectRemovalCnt || 0 == this->numTotalStages());
|
||||
SkASSERT(0 == fBlockEffectRemovalCnt || 0 == this->numTotalStages());
|
||||
fColorStages.reset();
|
||||
fCoverageStages.reset();
|
||||
|
||||
@ -955,7 +955,7 @@ private:
|
||||
fColorFilterMode == other.fColorFilterMode &&
|
||||
fColorFilterColor == other.fColorFilterColor &&
|
||||
fDrawFace == other.fDrawFace;
|
||||
GrAssert(!result || 0 == memcmp(fFixedFunctionVertexAttribIndices,
|
||||
SkASSERT(!result || 0 == memcmp(fFixedFunctionVertexAttribIndices,
|
||||
other.fFixedFunctionVertexAttribIndices,
|
||||
sizeof(fFixedFunctionVertexAttribIndices)));
|
||||
return result;
|
||||
@ -1006,7 +1006,7 @@ public:
|
||||
}
|
||||
|
||||
void restoreTo(GrDrawState* drawState) {
|
||||
GrAssert(fInitialized);
|
||||
SkASSERT(fInitialized);
|
||||
drawState->fCommon = fCommon;
|
||||
drawState->setRenderTarget(fRenderTarget);
|
||||
// reinflate color/cov stage arrays.
|
||||
|
@ -33,7 +33,7 @@ GrDrawTarget::DrawInfo& GrDrawTarget::DrawInfo::operator =(const DrawInfo& di) {
|
||||
fIndicesPerInstance = di.fIndicesPerInstance;
|
||||
|
||||
if (NULL != di.fDevBounds) {
|
||||
GrAssert(di.fDevBounds == &di.fDevBoundsStorage);
|
||||
SkASSERT(di.fDevBounds == &di.fDevBoundsStorage);
|
||||
fDevBoundsStorage = di.fDevBoundsStorage;
|
||||
fDevBounds = &fDevBoundsStorage;
|
||||
} else {
|
||||
@ -48,24 +48,24 @@ GrDrawTarget::DrawInfo& GrDrawTarget::DrawInfo::operator =(const DrawInfo& di) {
|
||||
#if GR_DEBUG
|
||||
bool GrDrawTarget::DrawInfo::isInstanced() const {
|
||||
if (fInstanceCount > 0) {
|
||||
GrAssert(0 == fIndexCount % fIndicesPerInstance);
|
||||
GrAssert(0 == fVertexCount % fVerticesPerInstance);
|
||||
GrAssert(fIndexCount / fIndicesPerInstance == fInstanceCount);
|
||||
GrAssert(fVertexCount / fVerticesPerInstance == fInstanceCount);
|
||||
SkASSERT(0 == fIndexCount % fIndicesPerInstance);
|
||||
SkASSERT(0 == fVertexCount % fVerticesPerInstance);
|
||||
SkASSERT(fIndexCount / fIndicesPerInstance == fInstanceCount);
|
||||
SkASSERT(fVertexCount / fVerticesPerInstance == fInstanceCount);
|
||||
// there is no way to specify a non-zero start index to drawIndexedInstances().
|
||||
GrAssert(0 == fStartIndex);
|
||||
SkASSERT(0 == fStartIndex);
|
||||
return true;
|
||||
} else {
|
||||
GrAssert(!fVerticesPerInstance);
|
||||
GrAssert(!fIndicesPerInstance);
|
||||
SkASSERT(!fVerticesPerInstance);
|
||||
SkASSERT(!fIndicesPerInstance);
|
||||
return false;
|
||||
}
|
||||
}
|
||||
#endif
|
||||
|
||||
void GrDrawTarget::DrawInfo::adjustInstanceCount(int instanceOffset) {
|
||||
GrAssert(this->isInstanced());
|
||||
GrAssert(instanceOffset + fInstanceCount >= 0);
|
||||
SkASSERT(this->isInstanced());
|
||||
SkASSERT(instanceOffset + fInstanceCount >= 0);
|
||||
fInstanceCount += instanceOffset;
|
||||
fVertexCount = fVerticesPerInstance * fInstanceCount;
|
||||
fIndexCount = fIndicesPerInstance * fInstanceCount;
|
||||
@ -73,13 +73,13 @@ void GrDrawTarget::DrawInfo::adjustInstanceCount(int instanceOffset) {
|
||||
|
||||
void GrDrawTarget::DrawInfo::adjustStartVertex(int vertexOffset) {
|
||||
fStartVertex += vertexOffset;
|
||||
GrAssert(fStartVertex >= 0);
|
||||
SkASSERT(fStartVertex >= 0);
|
||||
}
|
||||
|
||||
void GrDrawTarget::DrawInfo::adjustStartIndex(int indexOffset) {
|
||||
GrAssert(this->isIndexed());
|
||||
SkASSERT(this->isIndexed());
|
||||
fStartIndex += indexOffset;
|
||||
GrAssert(fStartIndex >= 0);
|
||||
SkASSERT(fStartIndex >= 0);
|
||||
}
|
||||
|
||||
////////////////////////////////////////////////////////////////////////////////
|
||||
@ -90,7 +90,7 @@ void GrDrawTarget::DrawInfo::adjustStartIndex(int indexOffset) {
|
||||
GrDrawTarget::GrDrawTarget(GrContext* context)
|
||||
: fClip(NULL)
|
||||
, fContext(context) {
|
||||
GrAssert(NULL != context);
|
||||
SkASSERT(NULL != context);
|
||||
|
||||
fDrawState = &fDefaultDrawState;
|
||||
// We assume that fDrawState always owns a ref to the object it points at.
|
||||
@ -107,10 +107,10 @@ GrDrawTarget::GrDrawTarget(GrContext* context)
|
||||
}
|
||||
|
||||
GrDrawTarget::~GrDrawTarget() {
|
||||
GrAssert(1 == fGeoSrcStateStack.count());
|
||||
SkASSERT(1 == fGeoSrcStateStack.count());
|
||||
SkDEBUGCODE(GeometrySrcState& geoSrc = fGeoSrcStateStack.back());
|
||||
GrAssert(kNone_GeometrySrcType == geoSrc.fIndexSrc);
|
||||
GrAssert(kNone_GeometrySrcType == geoSrc.fVertexSrc);
|
||||
SkASSERT(kNone_GeometrySrcType == geoSrc.fIndexSrc);
|
||||
SkASSERT(kNone_GeometrySrcType == geoSrc.fVertexSrc);
|
||||
fDrawState->unref();
|
||||
}
|
||||
|
||||
@ -134,7 +134,7 @@ const GrClipData* GrDrawTarget::getClip() const {
|
||||
}
|
||||
|
||||
void GrDrawTarget::setDrawState(GrDrawState* drawState) {
|
||||
GrAssert(NULL != fDrawState);
|
||||
SkASSERT(NULL != fDrawState);
|
||||
if (NULL == drawState) {
|
||||
drawState = &fDefaultDrawState;
|
||||
}
|
||||
@ -151,7 +151,7 @@ bool GrDrawTarget::reserveVertexSpace(size_t vertexSize,
|
||||
GeometrySrcState& geoSrc = fGeoSrcStateStack.back();
|
||||
bool acquired = false;
|
||||
if (vertexCount > 0) {
|
||||
GrAssert(NULL != vertices);
|
||||
SkASSERT(NULL != vertices);
|
||||
this->releasePreviousVertexSource();
|
||||
geoSrc.fVertexSrc = kNone_GeometrySrcType;
|
||||
|
||||
@ -174,7 +174,7 @@ bool GrDrawTarget::reserveIndexSpace(int indexCount,
|
||||
GeometrySrcState& geoSrc = fGeoSrcStateStack.back();
|
||||
bool acquired = false;
|
||||
if (indexCount > 0) {
|
||||
GrAssert(NULL != indices);
|
||||
SkASSERT(NULL != indices);
|
||||
this->releasePreviousIndexSource();
|
||||
geoSrc.fIndexSrc = kNone_GeometrySrcType;
|
||||
|
||||
@ -335,7 +335,7 @@ void GrDrawTarget::pushGeometrySource() {
|
||||
|
||||
void GrDrawTarget::popGeometrySource() {
|
||||
// if popping last element then pops are unbalanced with pushes
|
||||
GrAssert(fGeoSrcStateStack.count() > 1);
|
||||
SkASSERT(fGeoSrcStateStack.count() > 1);
|
||||
|
||||
this->geometrySourceWillPop(fGeoSrcStateStack.fromBack(1));
|
||||
this->releasePreviousVertexSource();
|
||||
@ -386,14 +386,14 @@ bool GrDrawTarget::checkDraw(GrPrimitiveType type, int startVertex,
|
||||
}
|
||||
}
|
||||
|
||||
GrAssert(NULL != drawState.getRenderTarget());
|
||||
SkASSERT(NULL != drawState.getRenderTarget());
|
||||
|
||||
for (int s = 0; s < drawState.numColorStages(); ++s) {
|
||||
const GrEffectRef& effect = *drawState.getColorStage(s).getEffect();
|
||||
int numTextures = effect->numTextures();
|
||||
for (int t = 0; t < numTextures; ++t) {
|
||||
GrTexture* texture = effect->texture(t);
|
||||
GrAssert(texture->asRenderTarget() != drawState.getRenderTarget());
|
||||
SkASSERT(texture->asRenderTarget() != drawState.getRenderTarget());
|
||||
}
|
||||
}
|
||||
for (int s = 0; s < drawState.numCoverageStages(); ++s) {
|
||||
@ -401,11 +401,11 @@ bool GrDrawTarget::checkDraw(GrPrimitiveType type, int startVertex,
|
||||
int numTextures = effect->numTextures();
|
||||
for (int t = 0; t < numTextures; ++t) {
|
||||
GrTexture* texture = effect->texture(t);
|
||||
GrAssert(texture->asRenderTarget() != drawState.getRenderTarget());
|
||||
SkASSERT(texture->asRenderTarget() != drawState.getRenderTarget());
|
||||
}
|
||||
}
|
||||
|
||||
GrAssert(drawState.validateVertexAttribs());
|
||||
SkASSERT(drawState.validateVertexAttribs());
|
||||
#endif
|
||||
if (NULL == drawState.getRenderTarget()) {
|
||||
return false;
|
||||
@ -517,10 +517,10 @@ void GrDrawTarget::drawNonIndexed(GrPrimitiveType type,
|
||||
|
||||
void GrDrawTarget::stencilPath(const GrPath* path, const SkStrokeRec& stroke, SkPath::FillType fill) {
|
||||
// TODO: extract portions of checkDraw that are relevant to path stenciling.
|
||||
GrAssert(NULL != path);
|
||||
GrAssert(this->caps()->pathStencilingSupport());
|
||||
GrAssert(!stroke.isHairlineStyle());
|
||||
GrAssert(!SkPath::IsInverseFillType(fill));
|
||||
SkASSERT(NULL != path);
|
||||
SkASSERT(this->caps()->pathStencilingSupport());
|
||||
SkASSERT(!stroke.isHairlineStyle());
|
||||
SkASSERT(!SkPath::IsInverseFillType(fill));
|
||||
this->onStencilPath(path, stroke, fill);
|
||||
}
|
||||
|
||||
@ -678,10 +678,10 @@ GrDrawTarget::AutoStateRestore::~AutoStateRestore() {
|
||||
}
|
||||
|
||||
void GrDrawTarget::AutoStateRestore::set(GrDrawTarget* target, ASRInit init, const SkMatrix* vm) {
|
||||
GrAssert(NULL == fDrawTarget);
|
||||
SkASSERT(NULL == fDrawTarget);
|
||||
fDrawTarget = target;
|
||||
fSavedState = target->drawState();
|
||||
GrAssert(fSavedState);
|
||||
SkASSERT(fSavedState);
|
||||
fSavedState->ref();
|
||||
if (kReset_ASRInit == init) {
|
||||
if (NULL == vm) {
|
||||
@ -691,7 +691,7 @@ void GrDrawTarget::AutoStateRestore::set(GrDrawTarget* target, ASRInit init, con
|
||||
SkNEW_IN_TLAZY(&fTempState, GrDrawState, (*vm));
|
||||
}
|
||||
} else {
|
||||
GrAssert(kPreserve_ASRInit == init);
|
||||
SkASSERT(kPreserve_ASRInit == init);
|
||||
if (NULL == vm) {
|
||||
fTempState.set(*fSavedState);
|
||||
} else {
|
||||
@ -702,16 +702,16 @@ void GrDrawTarget::AutoStateRestore::set(GrDrawTarget* target, ASRInit init, con
|
||||
}
|
||||
|
||||
bool GrDrawTarget::AutoStateRestore::setIdentity(GrDrawTarget* target, ASRInit init) {
|
||||
GrAssert(NULL == fDrawTarget);
|
||||
SkASSERT(NULL == fDrawTarget);
|
||||
fDrawTarget = target;
|
||||
fSavedState = target->drawState();
|
||||
GrAssert(fSavedState);
|
||||
SkASSERT(fSavedState);
|
||||
fSavedState->ref();
|
||||
if (kReset_ASRInit == init) {
|
||||
// calls the default cons
|
||||
fTempState.init();
|
||||
} else {
|
||||
GrAssert(kPreserve_ASRInit == init);
|
||||
SkASSERT(kPreserve_ASRInit == init);
|
||||
// calls the copy cons
|
||||
fTempState.set(*fSavedState);
|
||||
if (!fTempState.get()->setIdentityViewMatrix()) {
|
||||
@ -762,7 +762,7 @@ bool GrDrawTarget::AutoReleaseGeometry::set(GrDrawTarget* target,
|
||||
this->reset();
|
||||
}
|
||||
}
|
||||
GrAssert(success == (NULL != fTarget));
|
||||
SkASSERT(success == (NULL != fTarget));
|
||||
return success;
|
||||
}
|
||||
|
||||
@ -846,8 +846,8 @@ bool GrDrawTarget::copySurface(GrSurface* dst,
|
||||
GrSurface* src,
|
||||
const SkIRect& srcRect,
|
||||
const SkIPoint& dstPoint) {
|
||||
GrAssert(NULL != dst);
|
||||
GrAssert(NULL != src);
|
||||
SkASSERT(NULL != dst);
|
||||
SkASSERT(NULL != src);
|
||||
|
||||
SkIRect clippedSrcRect;
|
||||
SkIPoint clippedDstPoint;
|
||||
@ -858,12 +858,12 @@ bool GrDrawTarget::copySurface(GrSurface* dst,
|
||||
dstPoint,
|
||||
&clippedSrcRect,
|
||||
&clippedDstPoint)) {
|
||||
GrAssert(this->canCopySurface(dst, src, srcRect, dstPoint));
|
||||
SkASSERT(this->canCopySurface(dst, src, srcRect, dstPoint));
|
||||
return true;
|
||||
}
|
||||
|
||||
bool result = this->onCopySurface(dst, src, clippedSrcRect, clippedDstPoint);
|
||||
GrAssert(result == this->canCopySurface(dst, src, clippedSrcRect, clippedDstPoint));
|
||||
SkASSERT(result == this->canCopySurface(dst, src, clippedSrcRect, clippedDstPoint));
|
||||
return result;
|
||||
}
|
||||
|
||||
@ -871,8 +871,8 @@ bool GrDrawTarget::canCopySurface(GrSurface* dst,
|
||||
GrSurface* src,
|
||||
const SkIRect& srcRect,
|
||||
const SkIPoint& dstPoint) {
|
||||
GrAssert(NULL != dst);
|
||||
GrAssert(NULL != src);
|
||||
SkASSERT(NULL != dst);
|
||||
SkASSERT(NULL != src);
|
||||
|
||||
SkIRect clippedSrcRect;
|
||||
SkIPoint clippedDstPoint;
|
||||
@ -893,10 +893,10 @@ bool GrDrawTarget::onCanCopySurface(GrSurface* dst,
|
||||
const SkIRect& srcRect,
|
||||
const SkIPoint& dstPoint) {
|
||||
// Check that the read/write rects are contained within the src/dst bounds.
|
||||
GrAssert(!srcRect.isEmpty());
|
||||
GrAssert(SkIRect::MakeWH(src->width(), src->height()).contains(srcRect));
|
||||
GrAssert(dstPoint.fX >= 0 && dstPoint.fY >= 0);
|
||||
GrAssert(dstPoint.fX + srcRect.width() <= dst->width() &&
|
||||
SkASSERT(!srcRect.isEmpty());
|
||||
SkASSERT(SkIRect::MakeWH(src->width(), src->height()).contains(srcRect));
|
||||
SkASSERT(dstPoint.fX >= 0 && dstPoint.fY >= 0);
|
||||
SkASSERT(dstPoint.fX + srcRect.width() <= dst->width() &&
|
||||
dstPoint.fY + srcRect.height() <= dst->height());
|
||||
|
||||
return !dst->isSameAs(src) && NULL != dst->asRenderTarget() && NULL != src->asTexture();
|
||||
|
@ -539,8 +539,8 @@ public:
|
||||
int vertexCount,
|
||||
int indexCount);
|
||||
bool succeeded() const { return NULL != fTarget; }
|
||||
void* vertices() const { GrAssert(this->succeeded()); return fVertices; }
|
||||
void* indices() const { GrAssert(this->succeeded()); return fIndices; }
|
||||
void* vertices() const { SkASSERT(this->succeeded()); return fVertices; }
|
||||
void* indices() const { SkASSERT(this->succeeded()); return fIndices; }
|
||||
GrPoint* positions() const {
|
||||
return static_cast<GrPoint*>(this->vertices());
|
||||
}
|
||||
@ -584,7 +584,7 @@ public:
|
||||
public:
|
||||
AutoGeometryPush(GrDrawTarget* target)
|
||||
: fAttribRestore(target->drawState()) {
|
||||
GrAssert(NULL != target);
|
||||
SkASSERT(NULL != target);
|
||||
fTarget = target;
|
||||
target->pushGeometrySource();
|
||||
}
|
||||
@ -606,7 +606,7 @@ public:
|
||||
ASRInit init,
|
||||
const SkMatrix* viewMatrix = NULL)
|
||||
: fState(target, init, viewMatrix) {
|
||||
GrAssert(NULL != target);
|
||||
SkASSERT(NULL != target);
|
||||
fTarget = target;
|
||||
target->pushGeometrySource();
|
||||
if (kPreserve_ASRInit == init) {
|
||||
@ -719,7 +719,7 @@ protected:
|
||||
// it is preferable to call this rather than getGeomSrc()->fVertexSize because of the assert.
|
||||
size_t getVertexSize() const {
|
||||
// the vertex layout is only valid if a vertex source has been specified.
|
||||
GrAssert(this->getGeomSrc().fVertexSrc != kNone_GeometrySrcType);
|
||||
SkASSERT(this->getGeomSrc().fVertexSrc != kNone_GeometrySrcType);
|
||||
return this->getGeomSrc().fVertexSize;
|
||||
}
|
||||
|
||||
|
@ -63,7 +63,7 @@ int32_t GrBackendEffectFactory::fCurrEffectClassID = GrBackendEffectFactory::kIl
|
||||
SK_DEFINE_INST_COUNT(GrEffectRef)
|
||||
|
||||
GrEffectRef::~GrEffectRef() {
|
||||
GrAssert(this->unique());
|
||||
SkASSERT(this->unique());
|
||||
fEffect->EffectRefDestroyed();
|
||||
fEffect->unref();
|
||||
}
|
||||
@ -79,7 +79,7 @@ void GrEffectRef::operator delete(void* target) {
|
||||
///////////////////////////////////////////////////////////////////////////////
|
||||
|
||||
GrEffect::~GrEffect() {
|
||||
GrAssert(NULL == fEffectRef);
|
||||
SkASSERT(NULL == fEffectRef);
|
||||
}
|
||||
|
||||
const char* GrEffect::name() const {
|
||||
@ -91,7 +91,7 @@ void GrEffect::addTextureAccess(const GrTextureAccess* access) {
|
||||
}
|
||||
|
||||
void GrEffect::addVertexAttrib(GrSLType type) {
|
||||
GrAssert(fVertexAttribTypes.count() < kMaxVertexAttribs);
|
||||
SkASSERT(fVertexAttribTypes.count() < kMaxVertexAttribs);
|
||||
fVertexAttribTypes.push_back(type);
|
||||
}
|
||||
|
||||
|
@ -65,7 +65,7 @@ void GrGpu::abandonResources() {
|
||||
fResourceList.head()->abandon();
|
||||
}
|
||||
|
||||
GrAssert(NULL == fQuadIndexBuffer || !fQuadIndexBuffer->isValid());
|
||||
SkASSERT(NULL == fQuadIndexBuffer || !fQuadIndexBuffer->isValid());
|
||||
GrSafeSetNull(fQuadIndexBuffer);
|
||||
delete fVertexPool;
|
||||
fVertexPool = NULL;
|
||||
@ -81,7 +81,7 @@ void GrGpu::releaseResources() {
|
||||
fResourceList.head()->release();
|
||||
}
|
||||
|
||||
GrAssert(NULL == fQuadIndexBuffer || !fQuadIndexBuffer->isValid());
|
||||
SkASSERT(NULL == fQuadIndexBuffer || !fQuadIndexBuffer->isValid());
|
||||
GrSafeSetNull(fQuadIndexBuffer);
|
||||
delete fVertexPool;
|
||||
fVertexPool = NULL;
|
||||
@ -90,15 +90,15 @@ void GrGpu::releaseResources() {
|
||||
}
|
||||
|
||||
void GrGpu::insertResource(GrResource* resource) {
|
||||
GrAssert(NULL != resource);
|
||||
GrAssert(this == resource->getGpu());
|
||||
SkASSERT(NULL != resource);
|
||||
SkASSERT(this == resource->getGpu());
|
||||
|
||||
fResourceList.addToHead(resource);
|
||||
}
|
||||
|
||||
void GrGpu::removeResource(GrResource* resource) {
|
||||
GrAssert(NULL != resource);
|
||||
GrAssert(this == resource->getGpu());
|
||||
SkASSERT(NULL != resource);
|
||||
SkASSERT(this == resource->getGpu());
|
||||
|
||||
fResourceList.remove(resource);
|
||||
}
|
||||
@ -123,7 +123,7 @@ GrTexture* GrGpu::createTexture(const GrTextureDesc& desc,
|
||||
if (NULL != tex &&
|
||||
(kRenderTarget_GrTextureFlagBit & desc.fFlags) &&
|
||||
!(kNoStencil_GrTextureFlagBit & desc.fFlags)) {
|
||||
GrAssert(NULL != tex->asRenderTarget());
|
||||
SkASSERT(NULL != tex->asRenderTarget());
|
||||
// TODO: defer this and attach dynamically
|
||||
if (!this->attachStencilBufferToRenderTarget(tex->asRenderTarget())) {
|
||||
tex->unref();
|
||||
@ -134,7 +134,7 @@ GrTexture* GrGpu::createTexture(const GrTextureDesc& desc,
|
||||
}
|
||||
|
||||
bool GrGpu::attachStencilBufferToRenderTarget(GrRenderTarget* rt) {
|
||||
GrAssert(NULL == rt->getStencilBuffer());
|
||||
SkASSERT(NULL == rt->getStencilBuffer());
|
||||
GrStencilBuffer* sb =
|
||||
this->getContext()->findStencilBuffer(rt->width(),
|
||||
rt->height(),
|
||||
@ -198,7 +198,7 @@ GrIndexBuffer* GrGpu::createIndexBuffer(uint32_t size, bool dynamic) {
|
||||
}
|
||||
|
||||
GrPath* GrGpu::createPath(const SkPath& path) {
|
||||
GrAssert(this->caps()->pathStencilingSupport());
|
||||
SkASSERT(this->caps()->pathStencilingSupport());
|
||||
this->handleDirtyContext();
|
||||
return this->onCreatePath(path);
|
||||
}
|
||||
@ -211,7 +211,7 @@ void GrGpu::clear(const SkIRect* rect,
|
||||
art.set(this->drawState(), renderTarget);
|
||||
}
|
||||
if (NULL == this->getDrawState().getRenderTarget()) {
|
||||
GrAssert(0);
|
||||
SkASSERT(0);
|
||||
return;
|
||||
}
|
||||
this->handleDirtyContext();
|
||||
@ -242,7 +242,7 @@ bool GrGpu::writeTexturePixels(GrTexture* texture,
|
||||
}
|
||||
|
||||
void GrGpu::resolveRenderTarget(GrRenderTarget* target) {
|
||||
GrAssert(target);
|
||||
SkASSERT(target);
|
||||
this->handleDirtyContext();
|
||||
this->onResolveRenderTarget(target);
|
||||
}
|
||||
@ -331,7 +331,7 @@ void GrGpu::geometrySourceWillPush() {
|
||||
|
||||
void GrGpu::geometrySourceWillPop(const GeometrySrcState& restoredState) {
|
||||
// if popping last entry then pops are unbalanced with pushes
|
||||
GrAssert(fGeomPoolStateStack.count() > 1);
|
||||
SkASSERT(fGeomPoolStateStack.count() > 1);
|
||||
fGeomPoolStateStack.pop_back();
|
||||
}
|
||||
|
||||
@ -362,18 +362,18 @@ void GrGpu::onStencilPath(const GrPath* path, const SkStrokeRec&, SkPath::FillTy
|
||||
}
|
||||
|
||||
void GrGpu::finalizeReservedVertices() {
|
||||
GrAssert(NULL != fVertexPool);
|
||||
SkASSERT(NULL != fVertexPool);
|
||||
fVertexPool->unlock();
|
||||
}
|
||||
|
||||
void GrGpu::finalizeReservedIndices() {
|
||||
GrAssert(NULL != fIndexPool);
|
||||
SkASSERT(NULL != fIndexPool);
|
||||
fIndexPool->unlock();
|
||||
}
|
||||
|
||||
void GrGpu::prepareVertexPool() {
|
||||
if (NULL == fVertexPool) {
|
||||
GrAssert(0 == fVertexPoolUseCnt);
|
||||
SkASSERT(0 == fVertexPoolUseCnt);
|
||||
fVertexPool = SkNEW_ARGS(GrVertexBufferAllocPool, (this, true,
|
||||
VERTEX_POOL_VB_SIZE,
|
||||
VERTEX_POOL_VB_COUNT));
|
||||
@ -386,7 +386,7 @@ void GrGpu::prepareVertexPool() {
|
||||
|
||||
void GrGpu::prepareIndexPool() {
|
||||
if (NULL == fIndexPool) {
|
||||
GrAssert(0 == fIndexPoolUseCnt);
|
||||
SkASSERT(0 == fIndexPoolUseCnt);
|
||||
fIndexPool = SkNEW_ARGS(GrIndexBufferAllocPool, (this, true,
|
||||
INDEX_POOL_IB_SIZE,
|
||||
INDEX_POOL_IB_COUNT));
|
||||
@ -402,8 +402,8 @@ bool GrGpu::onReserveVertexSpace(size_t vertexSize,
|
||||
void** vertices) {
|
||||
GeometryPoolState& geomPoolState = fGeomPoolStateStack.back();
|
||||
|
||||
GrAssert(vertexCount > 0);
|
||||
GrAssert(NULL != vertices);
|
||||
SkASSERT(vertexCount > 0);
|
||||
SkASSERT(NULL != vertices);
|
||||
|
||||
this->prepareVertexPool();
|
||||
|
||||
@ -421,8 +421,8 @@ bool GrGpu::onReserveVertexSpace(size_t vertexSize,
|
||||
bool GrGpu::onReserveIndexSpace(int indexCount, void** indices) {
|
||||
GeometryPoolState& geomPoolState = fGeomPoolStateStack.back();
|
||||
|
||||
GrAssert(indexCount > 0);
|
||||
GrAssert(NULL != indices);
|
||||
SkASSERT(indexCount > 0);
|
||||
SkASSERT(NULL != indices);
|
||||
|
||||
this->prepareIndexPool();
|
||||
|
||||
@ -438,7 +438,7 @@ bool GrGpu::onReserveIndexSpace(int indexCount, void** indices) {
|
||||
|
||||
void GrGpu::releaseReservedVertexSpace() {
|
||||
const GeometrySrcState& geoSrc = this->getGeomSrc();
|
||||
GrAssert(kReserved_GeometrySrcType == geoSrc.fVertexSrc);
|
||||
SkASSERT(kReserved_GeometrySrcType == geoSrc.fVertexSrc);
|
||||
size_t bytes = geoSrc.fVertexCount * geoSrc.fVertexSize;
|
||||
fVertexPool->putBack(bytes);
|
||||
--fVertexPoolUseCnt;
|
||||
@ -446,7 +446,7 @@ void GrGpu::releaseReservedVertexSpace() {
|
||||
|
||||
void GrGpu::releaseReservedIndexSpace() {
|
||||
const GeometrySrcState& geoSrc = this->getGeomSrc();
|
||||
GrAssert(kReserved_GeometrySrcType == geoSrc.fIndexSrc);
|
||||
SkASSERT(kReserved_GeometrySrcType == geoSrc.fIndexSrc);
|
||||
size_t bytes = geoSrc.fIndexCount * sizeof(uint16_t);
|
||||
fIndexPool->putBack(bytes);
|
||||
--fIndexPoolUseCnt;
|
||||
@ -484,7 +484,7 @@ void GrGpu::onSetIndexSourceToArray(const void* indexArray, int indexCount) {
|
||||
void GrGpu::releaseVertexArray() {
|
||||
// if vertex source was array, we stowed data in the pool
|
||||
const GeometrySrcState& geoSrc = this->getGeomSrc();
|
||||
GrAssert(kArray_GeometrySrcType == geoSrc.fVertexSrc);
|
||||
SkASSERT(kArray_GeometrySrcType == geoSrc.fVertexSrc);
|
||||
size_t bytes = geoSrc.fVertexCount * geoSrc.fVertexSize;
|
||||
fVertexPool->putBack(bytes);
|
||||
--fVertexPoolUseCnt;
|
||||
@ -493,7 +493,7 @@ void GrGpu::releaseVertexArray() {
|
||||
void GrGpu::releaseIndexArray() {
|
||||
// if index source was array, we stowed data in the pool
|
||||
const GeometrySrcState& geoSrc = this->getGeomSrc();
|
||||
GrAssert(kArray_GeometrySrcType == geoSrc.fIndexSrc);
|
||||
SkASSERT(kArray_GeometrySrcType == geoSrc.fIndexSrc);
|
||||
size_t bytes = geoSrc.fIndexCount * sizeof(uint16_t);
|
||||
fIndexPool->putBack(bytes);
|
||||
--fIndexPoolUseCnt;
|
||||
|
@ -295,7 +295,7 @@ public:
|
||||
* Can the provided configuration act as a color render target?
|
||||
*/
|
||||
bool isConfigRenderable(GrPixelConfig config) const {
|
||||
GrAssert(kGrPixelConfigCnt > config);
|
||||
SkASSERT(kGrPixelConfigCnt > config);
|
||||
return fConfigRenderSupport[config];
|
||||
}
|
||||
|
||||
|
@ -33,8 +33,8 @@ GrInOrderDrawBuffer::GrInOrderDrawBuffer(GrGpu* gpu,
|
||||
fDstGpu->ref();
|
||||
fCaps.reset(SkRef(fDstGpu->caps()));
|
||||
|
||||
GrAssert(NULL != vertexPool);
|
||||
GrAssert(NULL != indexPool);
|
||||
SkASSERT(NULL != vertexPool);
|
||||
SkASSERT(NULL != indexPool);
|
||||
|
||||
GeometryPoolState& poolState = fGeoPoolStateStack.push_back();
|
||||
poolState.fUsedPoolVertexBytes = 0;
|
||||
@ -62,8 +62,8 @@ void get_vertex_bounds(const void* vertices,
|
||||
size_t vertexSize,
|
||||
int vertexCount,
|
||||
SkRect* bounds) {
|
||||
GrAssert(vertexSize >= sizeof(GrPoint));
|
||||
GrAssert(vertexCount > 0);
|
||||
SkASSERT(vertexSize >= sizeof(GrPoint));
|
||||
SkASSERT(vertexCount > 0);
|
||||
const GrPoint* point = static_cast<const GrPoint*>(vertices);
|
||||
bounds->fLeft = bounds->fRight = point->fX;
|
||||
bounds->fTop = bounds->fBottom = point->fY;
|
||||
@ -196,7 +196,7 @@ void GrInOrderDrawBuffer::onDrawRect(const SkRect& rect,
|
||||
this->drawIndexedInstances(kTriangles_GrPrimitiveType, 1, 4, 6, &devBounds);
|
||||
|
||||
// to ensure that stashing the drawState ptr is valid
|
||||
GrAssert(this->drawState() == drawState);
|
||||
SkASSERT(this->drawState() == drawState);
|
||||
}
|
||||
|
||||
bool GrInOrderDrawBuffer::quickInsideClip(const SkRect& devBounds) {
|
||||
@ -242,7 +242,7 @@ bool GrInOrderDrawBuffer::quickInsideClip(const SkRect& devBounds) {
|
||||
}
|
||||
|
||||
int GrInOrderDrawBuffer::concatInstancedDraw(const DrawInfo& info) {
|
||||
GrAssert(info.isInstanced());
|
||||
SkASSERT(info.isInstanced());
|
||||
|
||||
const GeometrySrcState& geomSrc = this->getGeomSrc();
|
||||
const GrDrawState& drawState = this->getDrawState();
|
||||
@ -278,7 +278,7 @@ int GrInOrderDrawBuffer::concatInstancedDraw(const DrawInfo& info) {
|
||||
return 0;
|
||||
}
|
||||
|
||||
GrAssert(poolState.fPoolStartVertex == draw->startVertex() + draw->vertexCount());
|
||||
SkASSERT(poolState.fPoolStartVertex == draw->startVertex() + draw->vertexCount());
|
||||
|
||||
// how many instances can be concat'ed onto draw given the size of the index buffer
|
||||
int instancesToConcat = this->indexCountInCurrentSource() / info.indicesPerInstance();
|
||||
@ -406,7 +406,7 @@ void GrInOrderDrawBuffer::clear(const SkIRect* rect, GrColor color, GrRenderTarg
|
||||
SkIRect r;
|
||||
if (NULL == renderTarget) {
|
||||
renderTarget = this->drawState()->getRenderTarget();
|
||||
GrAssert(NULL != renderTarget);
|
||||
SkASSERT(NULL != renderTarget);
|
||||
}
|
||||
if (NULL == rect) {
|
||||
// We could do something smart and remove previous draws and clears to
|
||||
@ -423,13 +423,13 @@ void GrInOrderDrawBuffer::clear(const SkIRect* rect, GrColor color, GrRenderTarg
|
||||
}
|
||||
|
||||
void GrInOrderDrawBuffer::reset() {
|
||||
GrAssert(1 == fGeoPoolStateStack.count());
|
||||
SkASSERT(1 == fGeoPoolStateStack.count());
|
||||
this->resetVertexSource();
|
||||
this->resetIndexSource();
|
||||
int numDraws = fDraws.count();
|
||||
for (int d = 0; d < numDraws; ++d) {
|
||||
// we always have a VB, but not always an IB
|
||||
GrAssert(NULL != fDraws[d].fVertexBuffer);
|
||||
SkASSERT(NULL != fDraws[d].fVertexBuffer);
|
||||
fDraws[d].fVertexBuffer->unref();
|
||||
GrSafeUnref(fDraws[d].fIndexBuffer);
|
||||
}
|
||||
@ -451,8 +451,8 @@ void GrInOrderDrawBuffer::flush() {
|
||||
return;
|
||||
}
|
||||
|
||||
GrAssert(kReserved_GeometrySrcType != this->getGeomSrc().fVertexSrc);
|
||||
GrAssert(kReserved_GeometrySrcType != this->getGeomSrc().fIndexSrc);
|
||||
SkASSERT(kReserved_GeometrySrcType != this->getGeomSrc().fVertexSrc);
|
||||
SkASSERT(kReserved_GeometrySrcType != this->getGeomSrc().fIndexSrc);
|
||||
|
||||
int numCmds = fCmds.count();
|
||||
if (0 == numCmds) {
|
||||
@ -527,12 +527,12 @@ void GrInOrderDrawBuffer::flush() {
|
||||
}
|
||||
}
|
||||
// we should have consumed all the states, clips, etc.
|
||||
GrAssert(fStates.count() == currState);
|
||||
GrAssert(fClips.count() == currClip);
|
||||
GrAssert(fClipOrigins.count() == currClip);
|
||||
GrAssert(fClears.count() == currClear);
|
||||
GrAssert(fDraws.count() == currDraw);
|
||||
GrAssert(fCopySurfaces.count() == currCopySurface);
|
||||
SkASSERT(fStates.count() == currState);
|
||||
SkASSERT(fClips.count() == currClip);
|
||||
SkASSERT(fClipOrigins.count() == currClip);
|
||||
SkASSERT(fClears.count() == currClear);
|
||||
SkASSERT(fDraws.count() == currDraw);
|
||||
SkASSERT(fCopySurfaces.count() == currCopySurface);
|
||||
|
||||
fDstGpu->setDrawState(prevDrawState);
|
||||
prevDrawState->unref();
|
||||
@ -637,9 +637,9 @@ bool GrInOrderDrawBuffer::onReserveVertexSpace(size_t vertexSize,
|
||||
int vertexCount,
|
||||
void** vertices) {
|
||||
GeometryPoolState& poolState = fGeoPoolStateStack.back();
|
||||
GrAssert(vertexCount > 0);
|
||||
GrAssert(NULL != vertices);
|
||||
GrAssert(0 == poolState.fUsedPoolVertexBytes);
|
||||
SkASSERT(vertexCount > 0);
|
||||
SkASSERT(NULL != vertices);
|
||||
SkASSERT(0 == poolState.fUsedPoolVertexBytes);
|
||||
|
||||
*vertices = fVertexPool.makeSpace(vertexSize,
|
||||
vertexCount,
|
||||
@ -650,9 +650,9 @@ bool GrInOrderDrawBuffer::onReserveVertexSpace(size_t vertexSize,
|
||||
|
||||
bool GrInOrderDrawBuffer::onReserveIndexSpace(int indexCount, void** indices) {
|
||||
GeometryPoolState& poolState = fGeoPoolStateStack.back();
|
||||
GrAssert(indexCount > 0);
|
||||
GrAssert(NULL != indices);
|
||||
GrAssert(0 == poolState.fUsedPoolIndexBytes);
|
||||
SkASSERT(indexCount > 0);
|
||||
SkASSERT(NULL != indices);
|
||||
SkASSERT(0 == poolState.fUsedPoolIndexBytes);
|
||||
|
||||
*indices = fIndexPool.makeSpace(indexCount,
|
||||
&poolState.fPoolIndexBuffer,
|
||||
@ -666,7 +666,7 @@ void GrInOrderDrawBuffer::releaseReservedVertexSpace() {
|
||||
|
||||
// If we get a release vertex space call then our current source should either be reserved
|
||||
// or array (which we copied into reserved space).
|
||||
GrAssert(kReserved_GeometrySrcType == geoSrc.fVertexSrc ||
|
||||
SkASSERT(kReserved_GeometrySrcType == geoSrc.fVertexSrc ||
|
||||
kArray_GeometrySrcType == geoSrc.fVertexSrc);
|
||||
|
||||
// When the caller reserved vertex buffer space we gave it back a pointer
|
||||
@ -687,7 +687,7 @@ void GrInOrderDrawBuffer::releaseReservedIndexSpace() {
|
||||
|
||||
// If we get a release index space call then our current source should either be reserved
|
||||
// or array (which we copied into reserved space).
|
||||
GrAssert(kReserved_GeometrySrcType == geoSrc.fIndexSrc ||
|
||||
SkASSERT(kReserved_GeometrySrcType == geoSrc.fIndexSrc ||
|
||||
kArray_GeometrySrcType == geoSrc.fIndexSrc);
|
||||
|
||||
// Similar to releaseReservedVertexSpace we return any unused portion at
|
||||
@ -703,7 +703,7 @@ void GrInOrderDrawBuffer::onSetVertexSourceToArray(const void* vertexArray,
|
||||
int vertexCount) {
|
||||
|
||||
GeometryPoolState& poolState = fGeoPoolStateStack.back();
|
||||
GrAssert(0 == poolState.fUsedPoolVertexBytes);
|
||||
SkASSERT(0 == poolState.fUsedPoolVertexBytes);
|
||||
#if GR_DEBUG
|
||||
bool success =
|
||||
#endif
|
||||
@ -718,7 +718,7 @@ void GrInOrderDrawBuffer::onSetVertexSourceToArray(const void* vertexArray,
|
||||
void GrInOrderDrawBuffer::onSetIndexSourceToArray(const void* indexArray,
|
||||
int indexCount) {
|
||||
GeometryPoolState& poolState = fGeoPoolStateStack.back();
|
||||
GrAssert(0 == poolState.fUsedPoolIndexBytes);
|
||||
SkASSERT(0 == poolState.fUsedPoolIndexBytes);
|
||||
#if GR_DEBUG
|
||||
bool success =
|
||||
#endif
|
||||
@ -755,7 +755,7 @@ void GrInOrderDrawBuffer::geometrySourceWillPush() {
|
||||
|
||||
void GrInOrderDrawBuffer::geometrySourceWillPop(
|
||||
const GeometrySrcState& restoredState) {
|
||||
GrAssert(fGeoPoolStateStack.count() > 1);
|
||||
SkASSERT(fGeoPoolStateStack.count() > 1);
|
||||
fGeoPoolStateStack.pop_back();
|
||||
GeometryPoolState& poolState = fGeoPoolStateStack.back();
|
||||
// we have to assume that any slack we had in our vertex/index data
|
||||
@ -777,7 +777,7 @@ bool GrInOrderDrawBuffer::needsNewState() const {
|
||||
}
|
||||
|
||||
bool GrInOrderDrawBuffer::needsNewClip() const {
|
||||
GrAssert(fClips.count() == fClipOrigins.count());
|
||||
SkASSERT(fClips.count() == fClipOrigins.count());
|
||||
if (this->getDrawState().isClipState()) {
|
||||
if (fClipSet &&
|
||||
(fClips.empty() ||
|
||||
|
@ -30,9 +30,9 @@ GrMemoryPool::GrMemoryPool(size_t preallocSize, size_t minAllocSize) {
|
||||
|
||||
GrMemoryPool::~GrMemoryPool() {
|
||||
VALIDATE;
|
||||
GrAssert(0 == fAllocationCnt);
|
||||
GrAssert(fHead == fTail);
|
||||
GrAssert(0 == fHead->fLiveCount);
|
||||
SkASSERT(0 == fAllocationCnt);
|
||||
SkASSERT(fHead == fTail);
|
||||
SkASSERT(0 == fHead->fLiveCount);
|
||||
DeleteBlock(fHead);
|
||||
};
|
||||
|
||||
@ -47,11 +47,11 @@ void* GrMemoryPool::allocate(size_t size) {
|
||||
|
||||
block->fPrev = fTail;
|
||||
block->fNext = NULL;
|
||||
GrAssert(NULL == fTail->fNext);
|
||||
SkASSERT(NULL == fTail->fNext);
|
||||
fTail->fNext = block;
|
||||
fTail = block;
|
||||
}
|
||||
GrAssert(fTail->fFreeSize >= size);
|
||||
SkASSERT(fTail->fFreeSize >= size);
|
||||
intptr_t ptr = fTail->fCurrPtr;
|
||||
// We stash a pointer to the block header, just before the allocated space,
|
||||
// so that we can decrement the live count on delete in constant time.
|
||||
@ -80,12 +80,12 @@ void GrMemoryPool::release(void* p) {
|
||||
} else {
|
||||
BlockHeader* prev = block->fPrev;
|
||||
BlockHeader* next = block->fNext;
|
||||
GrAssert(prev);
|
||||
SkASSERT(prev);
|
||||
prev->fNext = next;
|
||||
if (next) {
|
||||
next->fPrev = prev;
|
||||
} else {
|
||||
GrAssert(fTail == block);
|
||||
SkASSERT(fTail == block);
|
||||
fTail = prev;
|
||||
}
|
||||
DeleteBlock(block);
|
||||
@ -106,7 +106,7 @@ GrMemoryPool::BlockHeader* GrMemoryPool::CreateBlock(size_t size) {
|
||||
BlockHeader* block =
|
||||
reinterpret_cast<BlockHeader*>(GrMalloc(size + kHeaderSize));
|
||||
// we assume malloc gives us aligned memory
|
||||
GrAssert(!(reinterpret_cast<intptr_t>(block) % kAlignment));
|
||||
SkASSERT(!(reinterpret_cast<intptr_t>(block) % kAlignment));
|
||||
block->fLiveCount = 0;
|
||||
block->fFreeSize = size;
|
||||
block->fCurrPtr = reinterpret_cast<intptr_t>(block) + kHeaderSize;
|
||||
@ -122,13 +122,13 @@ void GrMemoryPool::validate() {
|
||||
#ifdef SK_DEBUG
|
||||
BlockHeader* block = fHead;
|
||||
BlockHeader* prev = NULL;
|
||||
GrAssert(block);
|
||||
SkASSERT(block);
|
||||
int allocCount = 0;
|
||||
do {
|
||||
allocCount += block->fLiveCount;
|
||||
GrAssert(prev == block->fPrev);
|
||||
SkASSERT(prev == block->fPrev);
|
||||
if (NULL != prev) {
|
||||
GrAssert(prev->fNext == block);
|
||||
SkASSERT(prev->fNext == block);
|
||||
}
|
||||
|
||||
intptr_t b = reinterpret_cast<intptr_t>(block);
|
||||
@ -137,25 +137,25 @@ void GrMemoryPool::validate() {
|
||||
size_t userSize = totalSize - kHeaderSize;
|
||||
intptr_t userStart = b + kHeaderSize;
|
||||
|
||||
GrAssert(!(b % kAlignment));
|
||||
GrAssert(!(totalSize % kAlignment));
|
||||
GrAssert(!(userSize % kAlignment));
|
||||
GrAssert(!(block->fCurrPtr % kAlignment));
|
||||
SkASSERT(!(b % kAlignment));
|
||||
SkASSERT(!(totalSize % kAlignment));
|
||||
SkASSERT(!(userSize % kAlignment));
|
||||
SkASSERT(!(block->fCurrPtr % kAlignment));
|
||||
if (fHead != block) {
|
||||
GrAssert(block->fLiveCount);
|
||||
GrAssert(userSize >= fMinAllocSize);
|
||||
SkASSERT(block->fLiveCount);
|
||||
SkASSERT(userSize >= fMinAllocSize);
|
||||
} else {
|
||||
GrAssert(userSize == fPreallocSize);
|
||||
SkASSERT(userSize == fPreallocSize);
|
||||
}
|
||||
if (!block->fLiveCount) {
|
||||
GrAssert(ptrOffset == kHeaderSize);
|
||||
GrAssert(userStart == block->fCurrPtr);
|
||||
SkASSERT(ptrOffset == kHeaderSize);
|
||||
SkASSERT(userStart == block->fCurrPtr);
|
||||
} else {
|
||||
GrAssert(block == *reinterpret_cast<BlockHeader**>(userStart));
|
||||
SkASSERT(block == *reinterpret_cast<BlockHeader**>(userStart));
|
||||
}
|
||||
prev = block;
|
||||
} while ((block = block->fNext));
|
||||
GrAssert(allocCount == fAllocationCnt);
|
||||
GrAssert(prev == fTail);
|
||||
SkASSERT(allocCount == fAllocationCnt);
|
||||
SkASSERT(prev == fTail);
|
||||
#endif
|
||||
}
|
||||
|
@ -346,7 +346,7 @@ void GrOvalRenderer::drawCircle(GrDrawTarget* target,
|
||||
}
|
||||
|
||||
drawState->setVertexAttribs<gCircleVertexAttribs>(SK_ARRAY_COUNT(gCircleVertexAttribs));
|
||||
GrAssert(sizeof(CircleVertex) == drawState->getVertexSize());
|
||||
SkASSERT(sizeof(CircleVertex) == drawState->getVertexSize());
|
||||
|
||||
GrDrawTarget::AutoReleaseGeometry geo(target, 4, 0);
|
||||
if (!geo.succeeded()) {
|
||||
@ -502,7 +502,7 @@ bool GrOvalRenderer::drawEllipse(GrDrawTarget* target,
|
||||
}
|
||||
|
||||
drawState->setVertexAttribs<gEllipseVertexAttribs>(SK_ARRAY_COUNT(gEllipseVertexAttribs));
|
||||
GrAssert(sizeof(EllipseVertex) == drawState->getVertexSize());
|
||||
SkASSERT(sizeof(EllipseVertex) == drawState->getVertexSize());
|
||||
|
||||
GrDrawTarget::AutoReleaseGeometry geo(target, 4, 0);
|
||||
if (!geo.succeeded()) {
|
||||
@ -662,7 +662,7 @@ bool GrOvalRenderer::drawSimpleRRect(GrDrawTarget* target, GrContext* context, b
|
||||
// if the corners are circles, use the circle renderer
|
||||
if ((!isStroked || scaledStroke.fX == scaledStroke.fY) && xRadius == yRadius) {
|
||||
drawState->setVertexAttribs<gCircleVertexAttribs>(SK_ARRAY_COUNT(gCircleVertexAttribs));
|
||||
GrAssert(sizeof(CircleVertex) == drawState->getVertexSize());
|
||||
SkASSERT(sizeof(CircleVertex) == drawState->getVertexSize());
|
||||
|
||||
GrDrawTarget::AutoReleaseGeometry geo(target, 16, 0);
|
||||
if (!geo.succeeded()) {
|
||||
@ -749,7 +749,7 @@ bool GrOvalRenderer::drawSimpleRRect(GrDrawTarget* target, GrContext* context, b
|
||||
// otherwise we use the ellipse renderer
|
||||
} else {
|
||||
drawState->setVertexAttribs<gEllipseVertexAttribs>(SK_ARRAY_COUNT(gEllipseVertexAttribs));
|
||||
GrAssert(sizeof(EllipseVertex) == drawState->getVertexSize());
|
||||
SkASSERT(sizeof(EllipseVertex) == drawState->getVertexSize());
|
||||
|
||||
SkScalar innerXRadius = 0.0f;
|
||||
SkScalar innerYRadius = 0.0f;
|
||||
|
@ -79,7 +79,7 @@ bool GrPaint::getOpaqueAndKnownColor(GrColor* solidColor,
|
||||
(*fColorStages[i].getEffect())->getConstantColorComponents(&color, &colorComps);
|
||||
}
|
||||
|
||||
GrAssert((NULL == solidColor) == (NULL == solidColorKnownComponents));
|
||||
SkASSERT((NULL == solidColor) == (NULL == solidColorKnownComponents));
|
||||
|
||||
GrBlendCoeff srcCoeff = fSrcBlendCoeff;
|
||||
GrBlendCoeff dstCoeff = fDstBlendCoeff;
|
||||
|
@ -84,7 +84,7 @@ public:
|
||||
StencilSupport getStencilSupport(const SkPath& path,
|
||||
const SkStrokeRec& stroke,
|
||||
const GrDrawTarget* target) const {
|
||||
GrAssert(!path.isInverseFillType());
|
||||
SkASSERT(!path.isInverseFillType());
|
||||
return this->onGetStencilSupport(path, stroke, target);
|
||||
}
|
||||
|
||||
@ -117,9 +117,9 @@ public:
|
||||
const SkStrokeRec& stroke,
|
||||
GrDrawTarget* target,
|
||||
bool antiAlias) {
|
||||
GrAssert(!path.isEmpty());
|
||||
GrAssert(this->canDrawPath(path, stroke, target, antiAlias));
|
||||
GrAssert(target->drawState()->getStencil().isDisabled() ||
|
||||
SkASSERT(!path.isEmpty());
|
||||
SkASSERT(this->canDrawPath(path, stroke, target, antiAlias));
|
||||
SkASSERT(target->drawState()->getStencil().isDisabled() ||
|
||||
kNoRestriction_StencilSupport == this->getStencilSupport(path, stroke, target));
|
||||
return this->onDrawPath(path, stroke, target, antiAlias);
|
||||
}
|
||||
@ -133,8 +133,8 @@ public:
|
||||
* @param target target that the path will be rendered to
|
||||
*/
|
||||
void stencilPath(const SkPath& path, const SkStrokeRec& stroke, GrDrawTarget* target) {
|
||||
GrAssert(!path.isEmpty());
|
||||
GrAssert(kNoSupport_StencilSupport != this->getStencilSupport(path, stroke, target));
|
||||
SkASSERT(!path.isEmpty());
|
||||
SkASSERT(kNoSupport_StencilSupport != this->getStencilSupport(path, stroke, target));
|
||||
this->onStencilPath(path, stroke, target);
|
||||
}
|
||||
|
||||
|
@ -78,7 +78,7 @@ GrPathRenderer* GrPathRendererChain::getPathRenderer(const SkPath& path,
|
||||
}
|
||||
|
||||
void GrPathRendererChain::init() {
|
||||
GrAssert(!fInit);
|
||||
SkASSERT(!fInit);
|
||||
GrGpu* gpu = fOwner->getGpu();
|
||||
bool twoSided = gpu->caps()->twoSidedStencilSupport();
|
||||
bool wrapOp = gpu->caps()->stencilWrapOpsSupport();
|
||||
|
@ -41,7 +41,7 @@ uint32_t GrPathUtils::quadraticPointCount(const GrPoint points[],
|
||||
if (tol < gMinCurveTol) {
|
||||
tol = gMinCurveTol;
|
||||
}
|
||||
GrAssert(tol > 0);
|
||||
SkASSERT(tol > 0);
|
||||
|
||||
SkScalar d = points[1].distanceToLineSegmentBetween(points[0], points[2]);
|
||||
if (d <= tol) {
|
||||
@ -93,7 +93,7 @@ uint32_t GrPathUtils::cubicPointCount(const GrPoint points[],
|
||||
if (tol < gMinCurveTol) {
|
||||
tol = gMinCurveTol;
|
||||
}
|
||||
GrAssert(tol > 0);
|
||||
SkASSERT(tol > 0);
|
||||
|
||||
SkScalar d = GrMax(
|
||||
points[1].distanceToLineSegmentBetweenSqd(points[0], points[3]),
|
||||
@ -149,7 +149,7 @@ int GrPathUtils::worstCasePointCount(const SkPath& path, int* subpaths,
|
||||
if (tol < gMinCurveTol) {
|
||||
tol = gMinCurveTol;
|
||||
}
|
||||
GrAssert(tol > 0);
|
||||
SkASSERT(tol > 0);
|
||||
|
||||
int pointCount = 0;
|
||||
*subpaths = 1;
|
||||
@ -251,8 +251,8 @@ void GrPathUtils::QuadUVMatrix::set(const GrPoint qPts[3]) {
|
||||
|
||||
// The matrix should not have perspective.
|
||||
SkDEBUGCODE(static const SkScalar gTOL = SkFloatToScalar(1.f / 100.f));
|
||||
GrAssert(SkScalarAbs(m.get(SkMatrix::kMPersp0)) < gTOL);
|
||||
GrAssert(SkScalarAbs(m.get(SkMatrix::kMPersp1)) < gTOL);
|
||||
SkASSERT(SkScalarAbs(m.get(SkMatrix::kMPersp0)) < gTOL);
|
||||
SkASSERT(SkScalarAbs(m.get(SkMatrix::kMPersp1)) < gTOL);
|
||||
|
||||
// It may not be normalized to have 1.0 in the bottom right
|
||||
float m33 = m.get(SkMatrix::kMPersp2);
|
||||
@ -296,7 +296,7 @@ bool is_point_within_cubic_tangents(const SkPoint& a,
|
||||
return false;
|
||||
}
|
||||
} else {
|
||||
GrAssert(SkPath::kCCW_Direction == dir);
|
||||
SkASSERT(SkPath::kCCW_Direction == dir);
|
||||
if (apXab < 0) {
|
||||
return false;
|
||||
}
|
||||
@ -309,7 +309,7 @@ bool is_point_within_cubic_tangents(const SkPoint& a,
|
||||
return false;
|
||||
}
|
||||
} else {
|
||||
GrAssert(SkPath::kCCW_Direction == dir);
|
||||
SkASSERT(SkPath::kCCW_Direction == dir);
|
||||
if (dpXdc > 0) {
|
||||
return false;
|
||||
}
|
||||
|
@ -53,14 +53,14 @@ public:
|
||||
}
|
||||
|
bool isBusy(int x, int y) const {
GrAssert((unsigned)x < (unsigned)fDim.fX);
GrAssert((unsigned)y < (unsigned)fDim.fY);
SkASSERT((unsigned)x < (unsigned)fDim.fX);
SkASSERT((unsigned)y < (unsigned)fDim.fY);
return fBusy[y * fDim.fX + x] != 0;
}

void freePlot(int x, int y) {
GrAssert((unsigned)x < (unsigned)fDim.fX);
GrAssert((unsigned)y < (unsigned)fDim.fY);
SkASSERT((unsigned)x < (unsigned)fDim.fX);
SkASSERT((unsigned)y < (unsigned)fDim.fY);
fBusy[y * fDim.fX + x] = false;
}
@@ -47,7 +47,7 @@ public:
Row fRows[16];

static int HeightToRowIndex(int height) {
GrAssert(height >= MIN_HEIGHT_POW2);
SkASSERT(height >= MIN_HEIGHT_POW2);
return 32 - SkCLZ(height - 1);
}
@@ -84,7 +84,7 @@ bool GrRectanizerPow2::addRect(int width, int height, GrIPoint16* loc) {
}

Row* row = &fRows[HeightToRowIndex(height)];
GrAssert(row->fRowHeight == 0 || row->fRowHeight == height);
SkASSERT(row->fRowHeight == 0 || row->fRowHeight == height);

if (0 == row->fRowHeight) {
if (!this->canAddStrip(height)) {
@@ -102,14 +102,14 @@ bool GrRectanizerPow2::addRect(int width, int height, GrIPoint16* loc) {
}
}

GrAssert(row->fRowHeight == height);
GrAssert(row->canAddWidth(width, this->width()));
SkASSERT(row->fRowHeight == height);
SkASSERT(row->canAddWidth(width, this->width()));
*loc = row->fLoc;
row->fLoc.fX += width;

GrAssert(row->fLoc.fX <= this->width());
GrAssert(row->fLoc.fY <= this->height());
GrAssert(fNextStripY <= this->height());
SkASSERT(row->fLoc.fX <= this->width());
SkASSERT(row->fLoc.fY <= this->height());
SkASSERT(fNextStripY <= this->height());
fAreaSoFar += area;
return true;
}
@@ -20,8 +20,8 @@ public:
class GrRectanizer {
public:
GrRectanizer(int width, int height) : fWidth(width), fHeight(height) {
GrAssert(width >= 0);
GrAssert(height >= 0);
SkASSERT(width >= 0);
SkASSERT(height >= 0);
}

virtual ~GrRectanizer() {}
@@ -47,7 +47,7 @@ public:
Row fRows[16];

static int HeightToRowIndex(int height) {
GrAssert(height >= MIN_HEIGHT_POW2);
SkASSERT(height >= MIN_HEIGHT_POW2);
return 32 - Gr_clz(height - 1);
}
@@ -84,7 +84,7 @@ bool GrRectanizerFIFO::addRect(int width, int height, GrIPoint16* loc) {
}

Row* row = &fRows[HeightToRowIndex(height)];
GrAssert(row->fRowHeight == 0 || row->fRowHeight == height);
SkASSERT(row->fRowHeight == 0 || row->fRowHeight == height);

if (0 == row->fRowHeight) {
if (!this->canAddStrip(height)) {
@@ -102,14 +102,14 @@ bool GrRectanizerFIFO::addRect(int width, int height, GrIPoint16* loc) {
}
}

GrAssert(row->fRowHeight == height);
GrAssert(row->canAddWidth(width, this->width()));
SkASSERT(row->fRowHeight == height);
SkASSERT(row->canAddWidth(width, this->width()));
*loc = row->fLoc;
row->fLoc.fX += width;

GrAssert(row->fLoc.fX <= this->width());
GrAssert(row->fLoc.fY <= this->height());
GrAssert(fNextStripY <= this->height());
SkASSERT(row->fLoc.fX <= this->width());
SkASSERT(row->fLoc.fY <= this->height());
SkASSERT(fNextStripY <= this->height());
fAreaSoFar += area;
return true;
}
@@ -196,12 +196,12 @@ public:
}
bool operator !=(const Iter& i) const { return !(*this == i); }
Iter& operator ++() {
GrAssert(*this != fTree->end());
SkASSERT(*this != fTree->end());
fN = SuccessorNode(fN);
return *this;
}
Iter& operator --() {
GrAssert(*this != fTree->begin());
SkASSERT(*this != fTree->begin());
if (NULL != fN) {
fN = PredecessorNode(fN);
} else {
@@ -378,7 +378,7 @@ typename GrRedBlackTree<T,C>::Iter GrRedBlackTree<T,C>::insert(const T& t) {
fRoot = x;
x->fColor = kBlack_Color;
x->fParent = NULL;
GrAssert(1 == fCount);
SkASSERT(1 == fCount);
return Iter(returnNode, this);
}
p->fChildren[pc] = x;
@@ -387,13 +387,13 @@ typename GrRedBlackTree<T,C>::Iter GrRedBlackTree<T,C>::insert(const T& t) {

do {
// assumptions at loop start.
GrAssert(NULL != x);
GrAssert(kRed_Color == x->fColor);
SkASSERT(NULL != x);
SkASSERT(kRed_Color == x->fColor);
// can't have a grandparent but no parent.
GrAssert(!(NULL != gp && NULL == p));
SkASSERT(!(NULL != gp && NULL == p));
// make sure pc and gpc are correct
GrAssert(NULL == p || p->fChildren[pc] == x);
GrAssert(NULL == gp || gp->fChildren[gpc] == p);
SkASSERT(NULL == p || p->fChildren[pc] == x);
SkASSERT(NULL == gp || gp->fChildren[gpc] == p);

// if x's parent is black then we didn't violate any of the
// red/black properties when we added x as red.
@@ -401,9 +401,9 @@ typename GrRedBlackTree<T,C>::Iter GrRedBlackTree<T,C>::insert(const T& t) {
return Iter(returnNode, this);
}
// gp must be valid because if p was the root then it is black
GrAssert(NULL != gp);
SkASSERT(NULL != gp);
// gp must be black since it's child, p, is red.
GrAssert(kBlack_Color == gp->fColor);
SkASSERT(kBlack_Color == gp->fColor);


// x and its parent are red, violating red-black property.
@@ -419,7 +419,7 @@ typename GrRedBlackTree<T,C>::Iter GrRedBlackTree<T,C>::insert(const T& t) {
p = x->fParent;
if (NULL == p) {
// x (prev gp) is the root, color it black and be done.
GrAssert(fRoot == x);
SkASSERT(fRoot == x);
x->fColor = kBlack_Color;
validate();
return Iter(returnNode, this);
@@ -436,10 +436,10 @@ typename GrRedBlackTree<T,C>::Iter GrRedBlackTree<T,C>::insert(const T& t) {
} while (true);
// Here p is red but u is black and we still have to resolve the fact
// that x and p are both red.
GrAssert(NULL == gp->fChildren[1-gpc] || kBlack_Color == gp->fChildren[1-gpc]->fColor);
GrAssert(kRed_Color == x->fColor);
GrAssert(kRed_Color == p->fColor);
GrAssert(kBlack_Color == gp->fColor);
SkASSERT(NULL == gp->fChildren[1-gpc] || kBlack_Color == gp->fChildren[1-gpc]->fColor);
SkASSERT(kRed_Color == x->fColor);
SkASSERT(kRed_Color == p->fColor);
SkASSERT(kBlack_Color == gp->fColor);

// make x be on the same side of p as p is of gp. If it isn't already
// the case then rotate x up to p and swap their labels.
@@ -462,7 +462,7 @@ typename GrRedBlackTree<T,C>::Iter GrRedBlackTree<T,C>::insert(const T& t) {
// gp's child, u, that is not affected we know to be black. gp's new
// child is p's previous child (x's pre-rotation sibling) which must be
// black since p is red.
GrAssert(NULL == p->fChildren[1-pc] ||
SkASSERT(NULL == p->fChildren[1-pc] ||
kBlack_Color == p->fChildren[1-pc]->fColor);
// Since gp's two children are black it can become red if p is made
// black. This leaves the black-height of both of p's new subtrees
@@ -491,7 +491,7 @@ void GrRedBlackTree<T,C>::rotateRight(Node* n) {
*/
Node* d = n->fParent;
Node* s = n->fChildren[kLeft_Child];
GrAssert(NULL != s);
SkASSERT(NULL != s);
Node* b = s->fChildren[kRight_Child];

if (NULL != d) {
@@ -499,7 +499,7 @@ void GrRedBlackTree<T,C>::rotateRight(Node* n) {
kRight_Child;
d->fChildren[c] = s;
} else {
GrAssert(fRoot == n);
SkASSERT(fRoot == n);
fRoot = s;
}
s->fParent = d;
@@ -523,7 +523,7 @@ void GrRedBlackTree<T,C>::rotateLeft(Node* n) {

Node* d = n->fParent;
Node* s = n->fChildren[kRight_Child];
GrAssert(NULL != s);
SkASSERT(NULL != s);
Node* b = s->fChildren[kLeft_Child];

if (NULL != d) {
@@ -531,7 +531,7 @@ void GrRedBlackTree<T,C>::rotateLeft(Node* n) {
kLeft_Child;
d->fChildren[c] = s;
} else {
GrAssert(fRoot == n);
SkASSERT(fRoot == n);
fRoot = s;
}
s->fParent = d;
@@ -552,7 +552,7 @@ void GrRedBlackTree<T,C>::rotateLeft(Node* n) {

template <typename T, typename C>
typename GrRedBlackTree<T,C>::Node* GrRedBlackTree<T,C>::SuccessorNode(Node* x) {
GrAssert(NULL != x);
SkASSERT(NULL != x);
if (NULL != x->fChildren[kRight_Child]) {
x = x->fChildren[kRight_Child];
while (NULL != x->fChildren[kLeft_Child]) {
@@ -568,7 +568,7 @@ typename GrRedBlackTree<T,C>::Node* GrRedBlackTree<T,C>::SuccessorNode(Node* x)

template <typename T, typename C>
typename GrRedBlackTree<T,C>::Node* GrRedBlackTree<T,C>::PredecessorNode(Node* x) {
GrAssert(NULL != x);
SkASSERT(NULL != x);
if (NULL != x->fChildren[kLeft_Child]) {
x = x->fChildren[kLeft_Child];
while (NULL != x->fChildren[kRight_Child]) {
@@ -584,7 +584,7 @@ typename GrRedBlackTree<T,C>::Node* GrRedBlackTree<T,C>::PredecessorNode(Node* x

template <typename T, typename C>
void GrRedBlackTree<T,C>::deleteAtNode(Node* x) {
GrAssert(NULL != x);
SkASSERT(NULL != x);
validate();
--fCount;
@@ -594,15 +594,15 @@ void GrRedBlackTree<T,C>::deleteAtNode(Node* x) {

if (hasLeft && hasRight) {
// first and last can't have two children.
GrAssert(fFirst != x);
GrAssert(fLast != x);
SkASSERT(fFirst != x);
SkASSERT(fLast != x);
// if x is an interior node then we find it's successor
// and swap them.
Node* s = x->fChildren[kRight_Child];
while (NULL != s->fChildren[kLeft_Child]) {
s = s->fChildren[kLeft_Child];
}
GrAssert(NULL != s);
SkASSERT(NULL != s);
// this might be expensive relative to swapping node ptrs around.
// depends on T.
x->fItem = s->fItem;
@@ -611,23 +611,23 @@ void GrRedBlackTree<T,C>::deleteAtNode(Node* x) {
} else if (NULL == x->fParent) {
// if x was the root we just replace it with its child and make
// the new root (if the tree is not empty) black.
GrAssert(fRoot == x);
SkASSERT(fRoot == x);
fRoot = x->fChildren[c];
if (NULL != fRoot) {
fRoot->fParent = NULL;
fRoot->fColor = kBlack_Color;
if (x == fLast) {
GrAssert(c == kLeft_Child);
SkASSERT(c == kLeft_Child);
fLast = fRoot;
} else if (x == fFirst) {
GrAssert(c == kRight_Child);
SkASSERT(c == kRight_Child);
fFirst = fRoot;
}
} else {
GrAssert(fFirst == fLast && x == fFirst);
SkASSERT(fFirst == fLast && x == fFirst);
fFirst = NULL;
fLast = NULL;
GrAssert(0 == fCount);
SkASSERT(0 == fCount);
}
delete x;
validate();
@@ -641,10 +641,10 @@ void GrRedBlackTree<T,C>::deleteAtNode(Node* x) {
if (NULL == x->fChildren[c]) {
if (fLast == x) {
fLast = p;
GrAssert(p == PredecessorNode(x));
SkASSERT(p == PredecessorNode(x));
} else if (fFirst == x) {
fFirst = p;
GrAssert(p == SuccessorNode(x));
SkASSERT(p == SuccessorNode(x));
}
// x has two implicit black children.
Color xcolor = x->fColor;
@@ -663,8 +663,8 @@ void GrRedBlackTree<T,C>::deleteAtNode(Node* x) {
//s cannot be an implicit black node because the original
// black-height at x was >= 2 and s's black-height must equal the
// initial black height of x.
GrAssert(NULL != s);
GrAssert(p == s->fParent);
SkASSERT(NULL != s);
SkASSERT(p == s->fParent);

// assigned in loop
Node* sl;
@@ -680,9 +680,9 @@ void GrRedBlackTree<T,C>::deleteAtNode(Node* x) {
// be real nodes.
// The x side of p has a black-height that is one less than the
// s side. It must be rebalanced.
GrAssert(NULL != s);
GrAssert(p == s->fParent);
GrAssert(NULL == x || x->fParent == p);
SkASSERT(NULL != s);
SkASSERT(p == s->fParent);
SkASSERT(NULL == x || x->fParent == p);

//sl and sr are s's children, which may be implicit.
sl = s->fChildren[kLeft_Child];
@@ -692,11 +692,11 @@ void GrRedBlackTree<T,C>::deleteAtNode(Node* x) {
// that x's new sibling is black
if (kRed_Color == s->fColor) {
// if s is red then it's parent must be black.
GrAssert(kBlack_Color == p->fColor);
SkASSERT(kBlack_Color == p->fColor);
// s's children must also be black since s is red. They can't
// be implicit since s is red and it's black-height is >= 2.
GrAssert(NULL != sl && kBlack_Color == sl->fColor);
GrAssert(NULL != sr && kBlack_Color == sr->fColor);
SkASSERT(NULL != sl && kBlack_Color == sl->fColor);
SkASSERT(NULL != sr && kBlack_Color == sr->fColor);
p->fColor = kRed_Color;
s->fColor = kBlack_Color;
if (kLeft_Child == pc) {
@@ -710,10 +710,10 @@ void GrRedBlackTree<T,C>::deleteAtNode(Node* x) {
sr = s->fChildren[kRight_Child];
}
// x and s are now both black.
GrAssert(kBlack_Color == s->fColor);
GrAssert(NULL == x || kBlack_Color == x->fColor);
GrAssert(p == s->fParent);
GrAssert(NULL == x || p == x->fParent);
SkASSERT(kBlack_Color == s->fColor);
SkASSERT(NULL == x || kBlack_Color == x->fColor);
SkASSERT(p == s->fParent);
SkASSERT(NULL == x || p == x->fParent);

// when x is deleted its subtree will have reduced black-height.
slRed = (NULL != sl && kRed_Color == sl->fColor);
@@ -733,7 +733,7 @@ void GrRedBlackTree<T,C>::deleteAtNode(Node* x) {
x = p;
p = x->fParent;
if (NULL == p) {
GrAssert(fRoot == x);
SkASSERT(fRoot == x);
validate();
return;
} else {
@@ -742,8 +742,8 @@ void GrRedBlackTree<T,C>::deleteAtNode(Node* x) {

}
s = p->fChildren[1-pc];
GrAssert(NULL != s);
GrAssert(p == s->fParent);
SkASSERT(NULL != s);
SkASSERT(p == s->fParent);
continue;
} else if (kRed_Color == p->fColor) {
// we can make p black and s red. This balance out p's
@@ -760,7 +760,7 @@ void GrRedBlackTree<T,C>::deleteAtNode(Node* x) {
// if we made it here one or both of sl and sr is red.
// s and x are black. We make sure that a red child is on
// the same side of s as s is of p.
GrAssert(slRed || srRed);
SkASSERT(slRed || srRed);
if (kLeft_Child == pc && !srRed) {
s->fColor = kRed_Color;
sl->fColor = kBlack_Color;
@@ -787,11 +787,11 @@ void GrRedBlackTree<T,C>::deleteAtNode(Node* x) {
s->fColor = p->fColor;
p->fColor = kBlack_Color;
if (kLeft_Child == pc) {
GrAssert(NULL != sr && kRed_Color == sr->fColor);
SkASSERT(NULL != sr && kRed_Color == sr->fColor);
sr->fColor = kBlack_Color;
rotateLeft(p);
} else {
GrAssert(NULL != sl && kRed_Color == sl->fColor);
SkASSERT(NULL != sl && kRed_Color == sl->fColor);
sl->fColor = kBlack_Color;
rotateRight(p);
}
@@ -802,28 +802,28 @@ void GrRedBlackTree<T,C>::deleteAtNode(Node* x) {
// child and c1 be its non-implicit child. c1 must be black because
// red nodes always have two black children. Then the two subtrees
// of x rooted at c0 and c1 will have different black-heights.
GrAssert(kBlack_Color == x->fColor);
SkASSERT(kBlack_Color == x->fColor);
// So we know x is black and has one implicit black child, c0. c1
// must be red, otherwise the subtree at c1 will have a different
// black-height than the subtree rooted at c0.
GrAssert(kRed_Color == x->fChildren[c]->fColor);
SkASSERT(kRed_Color == x->fChildren[c]->fColor);
// replace x with c1, making c1 black, preserves all red-black tree
// props.
Node* c1 = x->fChildren[c];
if (x == fFirst) {
GrAssert(c == kRight_Child);
SkASSERT(c == kRight_Child);
fFirst = c1;
while (NULL != fFirst->fChildren[kLeft_Child]) {
fFirst = fFirst->fChildren[kLeft_Child];
}
GrAssert(fFirst == SuccessorNode(x));
SkASSERT(fFirst == SuccessorNode(x));
} else if (x == fLast) {
GrAssert(c == kLeft_Child);
SkASSERT(c == kLeft_Child);
fLast = c1;
while (NULL != fLast->fChildren[kRight_Child]) {
fLast = fLast->fChildren[kRight_Child];
}
GrAssert(fLast == PredecessorNode(x));
SkASSERT(fLast == PredecessorNode(x));
}
c1->fParent = p;
p->fChildren[pc] = c1;
@@ -847,43 +847,43 @@ void GrRedBlackTree<T,C>::RecursiveDelete(Node* x) {
template <typename T, typename C>
void GrRedBlackTree<T,C>::validate() const {
if (fCount) {
GrAssert(NULL == fRoot->fParent);
GrAssert(NULL != fFirst);
GrAssert(NULL != fLast);
SkASSERT(NULL == fRoot->fParent);
SkASSERT(NULL != fFirst);
SkASSERT(NULL != fLast);

GrAssert(kBlack_Color == fRoot->fColor);
SkASSERT(kBlack_Color == fRoot->fColor);
if (1 == fCount) {
GrAssert(fFirst == fRoot);
GrAssert(fLast == fRoot);
GrAssert(0 == fRoot->fChildren[kLeft_Child]);
GrAssert(0 == fRoot->fChildren[kRight_Child]);
SkASSERT(fFirst == fRoot);
SkASSERT(fLast == fRoot);
SkASSERT(0 == fRoot->fChildren[kLeft_Child]);
SkASSERT(0 == fRoot->fChildren[kRight_Child]);
}
} else {
GrAssert(NULL == fRoot);
GrAssert(NULL == fFirst);
GrAssert(NULL == fLast);
SkASSERT(NULL == fRoot);
SkASSERT(NULL == fFirst);
SkASSERT(NULL == fLast);
}
#if DEEP_VALIDATE
int bh;
int count = checkNode(fRoot, &bh);
GrAssert(count == fCount);
SkASSERT(count == fCount);
#endif
}

template <typename T, typename C>
int GrRedBlackTree<T,C>::checkNode(Node* n, int* bh) const {
if (NULL != n) {
GrAssert(validateChildRelations(n, false));
SkASSERT(validateChildRelations(n, false));
if (kBlack_Color == n->fColor) {
*bh += 1;
}
GrAssert(!fComp(n->fItem, fFirst->fItem));
GrAssert(!fComp(fLast->fItem, n->fItem));
SkASSERT(!fComp(n->fItem, fFirst->fItem));
SkASSERT(!fComp(fLast->fItem, n->fItem));
int leftBh = *bh;
int rightBh = *bh;
int cl = checkNode(n->fChildren[kLeft_Child], &leftBh);
int cr = checkNode(n->fChildren[kRight_Child], &rightBh);
GrAssert(leftBh == rightBh);
SkASSERT(leftBh == rightBh);
*bh = leftBh;
return 1 + cl + cr;
}
@@ -957,7 +957,7 @@ void GrRedBlackTree<T,C>::UnitTest() {
for (int i = 0; i < 10000; ++i) {
int x = r.nextU()%100;
SkDEBUGCODE(Iter xi = ) tree.insert(x);
GrAssert(*xi == x);
SkASSERT(*xi == x);
++count[x];
}
@@ -965,11 +965,11 @@ void GrRedBlackTree<T,C>::UnitTest() {
++count[0];
tree.insert(99);
++count[99];
GrAssert(*tree.begin() == 0);
GrAssert(*tree.last() == 99);
GrAssert(--(++tree.begin()) == tree.begin());
GrAssert(--tree.end() == tree.last());
GrAssert(tree.count() == 10002);
SkASSERT(*tree.begin() == 0);
SkASSERT(*tree.last() == 99);
SkASSERT(--(++tree.begin()) == tree.begin());
SkASSERT(--tree.end() == tree.last());
SkASSERT(tree.count() == 10002);

int c = 0;
// check that we iterate through the correct number of
@@ -978,9 +978,9 @@ void GrRedBlackTree<T,C>::UnitTest() {
Iter b = a;
++b;
++c;
GrAssert(b == tree.end() || *a <= *b);
SkASSERT(b == tree.end() || *a <= *b);
}
GrAssert(c == tree.count());
SkASSERT(c == tree.count());

// check that the tree reports the correct number of each int
// and that we can iterate through them correctly both forward
@@ -988,14 +988,14 @@ void GrRedBlackTree<T,C>::UnitTest() {
for (int i = 0; i < 100; ++i) {
int c;
c = tree.countOf(i);
GrAssert(c == count[i]);
SkASSERT(c == count[i]);
c = 0;
Iter iter = tree.findFirst(i);
while (iter != tree.end() && *iter == i) {
++c;
++iter;
}
GrAssert(count[i] == c);
SkASSERT(count[i] == c);
c = 0;
iter = tree.findLast(i);
if (iter != tree.end()) {
@@ -1012,7 +1012,7 @@ void GrRedBlackTree<T,C>::UnitTest() {
}
} while (true);
}
GrAssert(c == count[i]);
SkASSERT(c == count[i]);
}
// remove all the ints between 25 and 74. Randomly chose to remove
// the first, last, or any entry for each.
@@ -1035,35 +1035,35 @@ void GrRedBlackTree<T,C>::UnitTest() {
}
tree.remove(iter);
}
GrAssert(0 == count[i]);
GrAssert(tree.findFirst(i) == tree.end());
GrAssert(tree.findLast(i) == tree.end());
GrAssert(tree.find(i) == tree.end());
SkASSERT(0 == count[i]);
SkASSERT(tree.findFirst(i) == tree.end());
SkASSERT(tree.findLast(i) == tree.end());
SkASSERT(tree.find(i) == tree.end());
}
// remove all of the 0 entries. (tests removing begin())
GrAssert(*tree.begin() == 0);
GrAssert(*(--tree.end()) == 99);
SkASSERT(*tree.begin() == 0);
SkASSERT(*(--tree.end()) == 99);
while (0 != tree.countOf(0)) {
--count[0];
tree.remove(tree.find(0));
}
GrAssert(0 == count[0]);
GrAssert(tree.findFirst(0) == tree.end());
GrAssert(tree.findLast(0) == tree.end());
GrAssert(tree.find(0) == tree.end());
GrAssert(0 < *tree.begin());
SkASSERT(0 == count[0]);
SkASSERT(tree.findFirst(0) == tree.end());
SkASSERT(tree.findLast(0) == tree.end());
SkASSERT(tree.find(0) == tree.end());
SkASSERT(0 < *tree.begin());

// remove all the 99 entries (tests removing last()).
while (0 != tree.countOf(99)) {
--count[99];
tree.remove(tree.find(99));
}
GrAssert(0 == count[99]);
GrAssert(tree.findFirst(99) == tree.end());
GrAssert(tree.findLast(99) == tree.end());
GrAssert(tree.find(99) == tree.end());
GrAssert(99 > *(--tree.end()));
GrAssert(tree.last() == --tree.end());
SkASSERT(0 == count[99]);
SkASSERT(tree.findFirst(99) == tree.end());
SkASSERT(tree.findLast(99) == tree.end());
SkASSERT(tree.find(99) == tree.end());
SkASSERT(99 > *(--tree.end()));
SkASSERT(tree.last() == --tree.end());

// Make sure iteration still goes through correct number of entries
// and is still sorted correctly.
@@ -1072,21 +1072,21 @@ void GrRedBlackTree<T,C>::UnitTest() {
Iter b = a;
++b;
++c;
GrAssert(b == tree.end() || *a <= *b);
SkASSERT(b == tree.end() || *a <= *b);
}
GrAssert(c == tree.count());
SkASSERT(c == tree.count());

// repeat check that correct number of each entry is in the tree
// and iterates correctly both forward and backward.
for (int i = 0; i < 100; ++i) {
GrAssert(tree.countOf(i) == count[i]);
SkASSERT(tree.countOf(i) == count[i]);
int c = 0;
Iter iter = tree.findFirst(i);
while (iter != tree.end() && *iter == i) {
++c;
++iter;
}
GrAssert(count[i] == c);
SkASSERT(count[i] == c);
c = 0;
iter = tree.findLast(i);
if (iter != tree.end()) {
@@ -1103,7 +1103,7 @@ void GrRedBlackTree<T,C>::UnitTest() {
}
} while (true);
}
GrAssert(count[i] == c);
SkASSERT(count[i] == c);
}

// remove all entries
@@ -26,8 +26,8 @@ GrResource::GrResource(GrGpu* gpu, bool isWrapped) {

GrResource::~GrResource() {
// subclass should have released this.
GrAssert(0 == fDeferredRefCount);
GrAssert(!this->isValid());
SkASSERT(0 == fDeferredRefCount);
SkASSERT(!this->isValid());
}

void GrResource::release() {
@@ -28,7 +28,7 @@ GrResourceKey::ResourceType GrResourceKey::GenerateResourceType() {
GrResourceEntry::GrResourceEntry(const GrResourceKey& key, GrResource* resource)
: fKey(key), fResource(resource) {
// we assume ownership of the resource, and will unref it when we die
GrAssert(resource);
SkASSERT(resource);
resource->ref();
}
@@ -39,8 +39,8 @@ GrResourceEntry::~GrResourceEntry() {

#if GR_DEBUG
void GrResourceEntry::validate() const {
GrAssert(fResource);
GrAssert(fResource->getCacheEntry() == this);
SkASSERT(fResource);
SkASSERT(fResource->getCacheEntry() == this);
fResource->validate();
}
#endif
@@ -126,7 +126,7 @@ void GrResourceCache::internalDetach(GrResourceEntry* entry,
#endif

} else {
GrAssert(kAccountFor_BudgetBehavior == behavior);
SkASSERT(kAccountFor_BudgetBehavior == behavior);

fEntryCount -= 1;
fEntryBytes -= entry->resource()->sizeInBytes();
@@ -142,7 +142,7 @@ void GrResourceCache::attachToHead(GrResourceEntry* entry,
fClientDetachedCount -= 1;
fClientDetachedBytes -= entry->resource()->sizeInBytes();
} else {
GrAssert(kAccountFor_BudgetBehavior == behavior);
SkASSERT(kAccountFor_BudgetBehavior == behavior);

fEntryCount += 1;
fEntryBytes += entry->resource()->sizeInBytes();
@@ -199,12 +199,12 @@ GrResource* GrResourceCache::find(const GrResourceKey& key, uint32_t ownershipFl
void GrResourceCache::addResource(const GrResourceKey& key,
GrResource* resource,
uint32_t ownershipFlags) {
GrAssert(NULL == resource->getCacheEntry());
SkASSERT(NULL == resource->getCacheEntry());
// we don't expect to create new resources during a purge. In theory
// this could cause purgeAsNeeded() into an infinite loop (e.g.
// each resource destroyed creates and locks 2 resources and
// unlocks 1 thereby causing a new purge).
GrAssert(!fPurging);
SkASSERT(!fPurging);
GrAutoResourceCacheValidate atcv(this);

GrResourceEntry* entry = SkNEW_ARGS(GrResourceEntry, (key, resource));
@@ -299,7 +299,7 @@ void GrResourceCache::purgeAsNeeded(int extraCount, size_t extraBytes) {
}

void GrResourceCache::deleteResource(GrResourceEntry* entry) {
GrAssert(1 == entry->fResource->getRefCnt());
SkASSERT(1 == entry->fResource->getRefCnt());

// remove from our cache
fCache.remove(entry->key(), entry);
@@ -361,15 +361,15 @@ void GrResourceCache::purgeAllUnlocked() {
this->purgeAsNeeded();

#if GR_DEBUG
GrAssert(fExclusiveList.countEntries() == fClientDetachedCount);
GrAssert(countBytes(fExclusiveList) == fClientDetachedBytes);
SkASSERT(fExclusiveList.countEntries() == fClientDetachedCount);
SkASSERT(countBytes(fExclusiveList) == fClientDetachedBytes);
if (!fCache.count()) {
// Items may have been detached from the cache (such as the backing
// texture for an SkGpuDevice). The above purge would not have removed
// them.
GrAssert(fEntryCount == fClientDetachedCount);
GrAssert(fEntryBytes == fClientDetachedBytes);
GrAssert(fList.isEmpty());
SkASSERT(fEntryCount == fClientDetachedCount);
SkASSERT(fEntryBytes == fClientDetachedBytes);
SkASSERT(fList.isEmpty());
}
#endif
@@ -401,11 +401,11 @@ static bool both_zero_or_nonzero(int count, size_t bytes) {
void GrResourceCache::validate() const {
fList.validate();
fExclusiveList.validate();
GrAssert(both_zero_or_nonzero(fEntryCount, fEntryBytes));
GrAssert(both_zero_or_nonzero(fClientDetachedCount, fClientDetachedBytes));
GrAssert(fClientDetachedBytes <= fEntryBytes);
GrAssert(fClientDetachedCount <= fEntryCount);
GrAssert((fEntryCount - fClientDetachedCount) == fCache.count());
SkASSERT(both_zero_or_nonzero(fEntryCount, fEntryBytes));
SkASSERT(both_zero_or_nonzero(fClientDetachedCount, fClientDetachedBytes));
SkASSERT(fClientDetachedBytes <= fEntryBytes);
SkASSERT(fClientDetachedCount <= fEntryCount);
SkASSERT((fEntryCount - fClientDetachedCount) == fCache.count());

fCache.validate();
@@ -426,20 +426,20 @@ void GrResourceCache::validate() const {
int count = 0;
for ( ; NULL != entry; entry = iter.next()) {
entry->validate();
GrAssert(fCache.find(entry->key()));
SkASSERT(fCache.find(entry->key()));
count += 1;
}
GrAssert(count == fEntryCount - fClientDetachedCount);
SkASSERT(count == fEntryCount - fClientDetachedCount);

size_t bytes = countBytes(fList);
GrAssert(bytes == fEntryBytes - fClientDetachedBytes);
SkASSERT(bytes == fEntryBytes - fClientDetachedBytes);

bytes = countBytes(fExclusiveList);
GrAssert(bytes == fClientDetachedBytes);
SkASSERT(bytes == fClientDetachedBytes);

GrAssert(fList.countEntries() == fEntryCount - fClientDetachedCount);
SkASSERT(fList.countEntries() == fEntryCount - fClientDetachedCount);

GrAssert(fExclusiveList.countEntries() == fClientDetachedCount);
SkASSERT(fExclusiveList.countEntries() == fClientDetachedCount);
}
#endif // GR_DEBUG
@@ -284,7 +284,7 @@ public:
bool operator == (const GrStencilSettings& s) const {
static const size_t gCompareSize = sizeof(GrStencilSettings) -
sizeof(fFlags);
GrAssert((const char*)&fFlags + sizeof(fFlags) ==
SkASSERT((const char*)&fFlags + sizeof(fFlags) ==
(const char*)this + sizeof(GrStencilSettings));
if (this->isDisabled() & s.isDisabled()) { // using & not &&
return true;
@@ -15,8 +15,8 @@
#include "SkStrokeRec.h"

GrPathRenderer* GrStencilAndCoverPathRenderer::Create(GrContext* context) {
GrAssert(NULL != context);
GrAssert(NULL != context->getGpu());
SkASSERT(NULL != context);
SkASSERT(NULL != context->getGpu());
if (context->getGpu()->caps()->pathStencilingSupport()) {
return SkNEW_ARGS(GrStencilAndCoverPathRenderer, (context->getGpu()));
} else {
@@ -25,7 +25,7 @@ GrPathRenderer* GrStencilAndCoverPathRenderer::Create(GrContext* context) {
}

GrStencilAndCoverPathRenderer::GrStencilAndCoverPathRenderer(GrGpu* gpu) {
GrAssert(gpu->caps()->pathStencilingSupport());
SkASSERT(gpu->caps()->pathStencilingSupport());
fGpu = gpu;
gpu->ref();
}
@@ -53,7 +53,7 @@ GrPathRenderer::StencilSupport GrStencilAndCoverPathRenderer::onGetStencilSuppor
void GrStencilAndCoverPathRenderer::onStencilPath(const SkPath& path,
const SkStrokeRec& stroke,
GrDrawTarget* target) {
GrAssert(!path.isInverseFillType());
SkASSERT(!path.isInverseFillType());
SkAutoTUnref<GrPath> p(fGpu->createPath(path));
target->stencilPath(p, stroke, path.getFillType());
}
@@ -62,11 +62,11 @@ bool GrStencilAndCoverPathRenderer::onDrawPath(const SkPath& path,
const SkStrokeRec& stroke,
GrDrawTarget* target,
bool antiAlias) {
GrAssert(!antiAlias);
GrAssert(!stroke.isHairlineStyle());
SkASSERT(!antiAlias);
SkASSERT(!stroke.isHairlineStyle());

GrDrawState* drawState = target->drawState();
GrAssert(drawState->getStencil().isDisabled());
SkASSERT(drawState->getStencil().isDisabled());

SkAutoTUnref<GrPath> p(fGpu->createPath(path));
@@ -15,7 +15,7 @@
SK_DEFINE_INST_COUNT(GrStencilBuffer)

void GrStencilBuffer::transferToCache() {
GrAssert(NULL == this->getCacheEntry());
SkASSERT(NULL == this->getCacheEntry());

this->getGpu()->getContext()->addStencilBuffer(this);
}
@@ -13,7 +13,7 @@

template <typename ELEM, typename KEY>
int GrTBSearch(const ELEM array[], int count, KEY target) {
GrAssert(count >= 0);
SkASSERT(count >= 0);
if (0 == count) {
// we should insert it at 0
return ~0;
@@ -108,7 +108,7 @@ int GrTHashTable<T, Key, kHashBits>::searchArray(const Key& key) const {
if (Key::EQ(*array[high], key)) {
// above search should have found the first occurrence if there
// are multiple.
GrAssert(0 == high || Key::LT(*array[high - 1], key));
SkASSERT(0 == high || Key::LT(*array[high - 1], key));
return high;
}
@@ -147,7 +147,7 @@ T* GrTHashTable<T, Key, kHashBits>::find(const Key& key, const FindFuncType& fin

// above search should have found the first occurrence if there
// are multiple.
GrAssert(0 == index || Key::LT(*array[index - 1], key));
SkASSERT(0 == index || Key::LT(*array[index - 1], key));

for ( ; index < count() && Key::EQ(*array[index], key); ++index) {
if (findFunc(fSorted[index])) {
@@ -184,14 +184,14 @@ void GrTHashTable<T, Key, kHashBits>::remove(const Key& key, const T* elem) {

// remove from our sorted array
index = this->searchArray(key);
GrAssert(index >= 0);
SkASSERT(index >= 0);
// if there are multiple matches searchArray will give us the first match
// march forward until we find elem.
while (elem != fSorted[index]) {
++index;
GrAssert(index < fSorted.count());
SkASSERT(index < fSorted.count());
}
GrAssert(elem == fSorted[index]);
SkASSERT(elem == fSorted[index]);
fSorted.remove(index);
}
@@ -230,7 +230,7 @@ template <typename T, typename Key, size_t kHashBits>
void GrTHashTable<T, Key, kHashBits>::validate() const {
int count = fSorted.count();
for (int i = 1; i < count; i++) {
GrAssert(Key::LT(*fSorted[i - 1], *fSorted[i]) ||
SkASSERT(Key::LT(*fSorted[i - 1], *fSorted[i]) ||
Key::EQ(*fSorted[i - 1], *fSorted[i]));
}
}
@@ -31,8 +31,8 @@ void GrTextContext::flushGlyphs() {

if (fCurrVertex > 0) {
// setup our sampler state for our text texture/atlas
GrAssert(GrIsALIGN4(fCurrVertex));
GrAssert(fCurrTexture);
SkASSERT(GrIsALIGN4(fCurrVertex));
SkASSERT(fCurrTexture);
GrTextureParams params(SkShader::kRepeat_TileMode, GrTextureParams::kNone_FilterMode);

// This effect could be stored with one of the cache objects (atlas?)
@@ -193,14 +193,14 @@ void GrTextContext::drawPackedGlyph(GrGlyph::PackedID packed,
}

HAS_ATLAS:
GrAssert(glyph->fAtlas);
SkASSERT(glyph->fAtlas);

// now promote them to fixed (TODO: Rethink using fixed pt).
width = SkIntToFixed(width);
height = SkIntToFixed(height);

GrTexture* texture = glyph->fAtlas->texture();
GrAssert(texture);
SkASSERT(texture);

if (fCurrTexture != texture || fCurrVertex + 4 > fMaxVertices) {
this->flushGlyphs();
@@ -237,7 +237,7 @@ HAS_ATLAS:
GrTCast<void**>(&fVertices),
NULL);
GrAlwaysAssert(success);
GrAssert(2*sizeof(GrPoint) == fDrawTarget->getDrawState().getVertexSize());
SkASSERT(2*sizeof(GrPoint) == fDrawTarget->getDrawState().getVertexSize());
}

GrFixed tx = SkIntToFixed(glyph->fAtlasLocation.fX);
@@ -50,7 +50,7 @@ GrTextStrike* GrFontCache::generateStrike(GrFontScaler* scaler,
if (fHead) {
fHead->fPrev = strike;
} else {
GrAssert(NULL == fTail);
SkASSERT(NULL == fTail);
fTail = strike;
}
strike->fPrev = NULL;
@@ -82,7 +82,7 @@ void GrFontCache::purgeExceptFor(GrTextStrike* preserveStrike) {
// keep purging if we won't free up any atlases with this strike.
purge = (NULL == strikeToPurge->fAtlas);
int index = fCache.slowFindIndex(strikeToPurge);
GrAssert(index >= 0);
SkASSERT(index >= 0);
fCache.removeAt(index, strikeToPurge->fFontScalerKey->getHash());
this->detachStrikeFromList(strikeToPurge);
delete strikeToPurge;
@@ -105,7 +105,7 @@ void GrFontCache::freeAtlasExceptFor(GrTextStrike* preserveStrike) {
if (strikeToPurge->removeUnusedAtlases()) {
if (NULL == strikeToPurge->fAtlas) {
int index = fCache.slowFindIndex(strikeToPurge);
GrAssert(index >= 0);
SkASSERT(index >= 0);
fCache.removeAt(index, strikeToPurge->fFontScalerKey->getHash());
this->detachStrikeFromList(strikeToPurge);
delete strikeToPurge;
@@ -119,12 +119,12 @@ void GrFontCache::freeAtlasExceptFor(GrTextStrike* preserveStrike) {
void GrFontCache::validate() const {
int count = fCache.count();
if (0 == count) {
GrAssert(!fHead);
GrAssert(!fTail);
SkASSERT(!fHead);
SkASSERT(!fTail);
} else if (1 == count) {
GrAssert(fHead == fTail);
SkASSERT(fHead == fTail);
} else {
GrAssert(fHead != fTail);
SkASSERT(fHead != fTail);
}

int count2 = 0;
@@ -133,7 +133,7 @@ void GrFontCache::validate() const {
count2 += 1;
strike = strike->fNext;
}
GrAssert(count == count2);
SkASSERT(count == count2);

count2 = 0;
strike = fTail;
@@ -141,7 +141,7 @@ void GrFontCache::validate() const {
count2 += 1;
strike = strike->fPrev;
}
GrAssert(count == count2);
SkASSERT(count == count2);
}
#endif
@@ -223,9 +223,9 @@ bool GrTextStrike::getGlyphAtlas(GrGlyph* glyph, GrFontScaler* scaler,
if ((++gCounter % 10) == 0) return false;
#endif

GrAssert(glyph);
GrAssert(scaler);
GrAssert(fCache.contains(glyph));
SkASSERT(glyph);
SkASSERT(scaler);
SkASSERT(fCache.contains(glyph));
if (glyph->fAtlas) {
glyph->fAtlas->setDrawToken(currentDrawToken);
return true;
@@ -32,18 +32,18 @@ private:

void GrFontCache::detachStrikeFromList(GrTextStrike* strike) {
if (strike->fPrev) {
GrAssert(fHead != strike);
SkASSERT(fHead != strike);
strike->fPrev->fNext = strike->fNext;
} else {
GrAssert(fHead == strike);
SkASSERT(fHead == strike);
fHead = strike->fNext;
}

if (strike->fNext) {
GrAssert(fTail != strike);
SkASSERT(fTail != strike);
strike->fNext->fPrev = strike->fPrev;
} else {
GrAssert(fTail == strike);
SkASSERT(fTail == strike);
fTail = strike->fPrev;
}
}
@@ -74,7 +74,7 @@ void GrTexture::writePixels(int left, int top, int width, int height,
}

void GrTexture::onRelease() {
GrAssert(!this->isSetFlag((GrTextureFlags) kReturnToCache_FlagBit));
SkASSERT(!this->isSetFlag((GrTextureFlags) kReturnToCache_FlagBit));
INHERITED::onRelease();
}
@@ -88,19 +88,19 @@ void GrTexture::onAbandon() {
void GrTexture::validateDesc() const {
if (NULL != this->asRenderTarget()) {
// This texture has a render target
GrAssert(0 != (fDesc.fFlags & kRenderTarget_GrTextureFlagBit));
SkASSERT(0 != (fDesc.fFlags & kRenderTarget_GrTextureFlagBit));

if (NULL != this->asRenderTarget()->getStencilBuffer()) {
GrAssert(0 != (fDesc.fFlags & kNoStencil_GrTextureFlagBit));
SkASSERT(0 != (fDesc.fFlags & kNoStencil_GrTextureFlagBit));
} else {
GrAssert(0 == (fDesc.fFlags & kNoStencil_GrTextureFlagBit));
SkASSERT(0 == (fDesc.fFlags & kNoStencil_GrTextureFlagBit));
}

GrAssert(fDesc.fSampleCnt == this->asRenderTarget()->numSamples());
SkASSERT(fDesc.fSampleCnt == this->asRenderTarget()->numSamples());
} else {
GrAssert(0 == (fDesc.fFlags & kRenderTarget_GrTextureFlagBit));
GrAssert(0 == (fDesc.fFlags & kNoStencil_GrTextureFlagBit));
GrAssert(0 == fDesc.fSampleCnt);
SkASSERT(0 == (fDesc.fFlags & kRenderTarget_GrTextureFlagBit));
SkASSERT(0 == (fDesc.fFlags & kNoStencil_GrTextureFlagBit));
SkASSERT(0 == fDesc.fSampleCnt);
}
}
@@ -173,8 +173,8 @@ GrResourceKey GrTexture::ComputeScratchKey(const GrTextureDesc& desc) {
// Instead of a client-provided key of the texture contents we create a key from the
// descriptor.
GR_STATIC_ASSERT(sizeof(idKey) >= 16);
GrAssert(desc.fHeight < (1 << 16));
GrAssert(desc.fWidth < (1 << 16));
SkASSERT(desc.fHeight < (1 << 16));
SkASSERT(desc.fWidth < (1 << 16));
idKey.fData32[0] = (desc.fWidth) | (desc.fHeight << 16);
idKey.fData32[1] = desc.fConfig | desc.fSampleCnt << 16;
idKey.fData32[2] = desc.fFlags;
@@ -42,8 +42,8 @@ GrTextureAccess::GrTextureAccess(GrTexture* texture,
void GrTextureAccess::reset(GrTexture* texture,
const char* swizzle,
const GrTextureParams& params) {
GrAssert(NULL != texture);
GrAssert(strlen(swizzle) >= 1 && strlen(swizzle) <= 4);
SkASSERT(NULL != texture);
SkASSERT(strlen(swizzle) >= 1 && strlen(swizzle) <= 4);

fParams = params;
fTexture.reset(SkRef(texture));
@@ -54,8 +54,8 @@ void GrTextureAccess::reset(GrTexture* texture,
const char* swizzle,
GrTextureParams::FilterMode filterMode,
SkShader::TileMode tileXAndY) {
GrAssert(NULL != texture);
GrAssert(strlen(swizzle) >= 1 && strlen(swizzle) <= 4);
SkASSERT(NULL != texture);
SkASSERT(strlen(swizzle) >= 1 && strlen(swizzle) <= 4);

fParams.reset(tileXAndY, filterMode);
fTexture.reset(SkRef(texture));
@@ -64,7 +64,7 @@ void GrTextureAccess::reset(GrTexture* texture,

void GrTextureAccess::reset(GrTexture* texture,
const GrTextureParams& params) {
GrAssert(NULL != texture);
SkASSERT(NULL != texture);
fTexture.reset(SkRef(texture));
fParams = params;
memcpy(fSwizzle, "rgba", 5);
@@ -74,7 +74,7 @@ void GrTextureAccess::reset(GrTexture* texture,
void GrTextureAccess::reset(GrTexture* texture,
GrTextureParams::FilterMode filterMode,
SkShader::TileMode tileXAndY) {
GrAssert(NULL != texture);
SkASSERT(NULL != texture);
fTexture.reset(SkRef(texture));
fParams.reset(tileXAndY, filterMode);
memcpy(fSwizzle, "rgba", 5);
@@ -72,7 +72,7 @@ public:
GrTexture** texture)
: fDevice(NULL)
, fTexture(NULL) {
GrAssert(NULL != texture);
SkASSERT(NULL != texture);
*texture = this->set(device, bitmap, params);
}
@@ -149,7 +149,7 @@ static SkBitmap make_bitmap(GrContext* context, GrRenderTarget* renderTarget) {
}

SkGpuDevice* SkGpuDevice::Create(GrSurface* surface) {
GrAssert(NULL != surface);
SkASSERT(NULL != surface);
if (NULL == surface->asRenderTarget() || NULL == surface->getContext()) {
return NULL;
}
@@ -181,7 +181,7 @@ void SkGpuDevice::initFromRenderTarget(GrContext* context,
fRenderTarget = NULL;
fNeedClear = false;

GrAssert(NULL != renderTarget);
SkASSERT(NULL != renderTarget);
fRenderTarget = renderTarget;
fRenderTarget->ref();
@@ -230,7 +230,7 @@ SkGpuDevice::SkGpuDevice(GrContext* context,
fRenderTarget = texture->asRenderTarget();
fRenderTarget->ref();

GrAssert(NULL != fRenderTarget);
SkASSERT(NULL != fRenderTarget);

// wrap the bitmap with a pixelref to expose our texture
SkGrPixelRef* pr = SkNEW_ARGS(SkGrPixelRef, (texture));
@@ -238,7 +238,7 @@ SkGpuDevice::SkGpuDevice(GrContext* context,
} else {
GrPrintf("--- failed to create gpu-offscreen [%d %d]\n",
width, height);
GrAssert(false);
SkASSERT(false);
}
}
@@ -398,7 +398,7 @@ static void check_bounds(const GrClipData& clipData,
}
}

GrAssert(devBound.contains(clipRegion.getBounds()));
SkASSERT(devBound.contains(clipRegion.getBounds()));
}
#endif
@@ -407,7 +407,7 @@ static void check_bounds(const GrClipData& clipData,
// call this every draw call, to ensure that the context reflects our state,
// and not the state from some other canvas/device
void SkGpuDevice::prepareDraw(const SkDraw& draw, bool forceIdentity) {
GrAssert(NULL != fClipData.fClipStack);
SkASSERT(NULL != fClipData.fClipStack);

fContext->setRenderTarget(fRenderTarget);
@@ -492,7 +492,7 @@ inline bool skPaint2GrPaintNoShader(SkGpuDevice* dev,
grPaint->setColor(GrColorPackRGBA(alpha, alpha, alpha, alpha));
// justAlpha is currently set to true only if there is a texture,
// so constantColor should not also be true.
GrAssert(!constantColor);
SkASSERT(!constantColor);
} else {
grPaint->setColor(SkColor2GrColor(skPaint.getColor()));
}
@@ -1262,7 +1262,7 @@ static bool may_color_bleed(const SkRect& srcRect,
const SkMatrix& m) {
// Only gets called if has_aligned_samples returned false.
// So we can assume that sampling is axis aligned but not texel aligned.
GrAssert(!has_aligned_samples(srcRect, transformedRect));
SkASSERT(!has_aligned_samples(srcRect, transformedRect));
SkRect innerSrcRect(srcRect), innerTransformedRect,
outerTransformedRect(transformedRect);
innerSrcRect.inset(SK_ScalarHalf, SK_ScalarHalf);
@@ -1382,7 +1382,7 @@ static bool filter_texture(SkDevice* device, GrContext* context,
GrTexture* texture, SkImageFilter* filter,
int w, int h, const SkMatrix& ctm, SkBitmap* result,
SkIPoint* offset) {
GrAssert(filter);
SkASSERT(filter);
SkDeviceImageFilterProxy proxy(device);

if (filter->canFilterImageGPU()) {
@@ -1798,7 +1798,7 @@ SkGpuDevice::SkGpuDevice(GrContext* context,
bool needClear)
: SkDevice(make_bitmap(context, texture->asRenderTarget())) {

GrAssert(texture && texture->asRenderTarget());
SkASSERT(texture && texture->asRenderTarget());
// This constructor is called from onCreateCompatibleDevice. It has locked the RT in the texture
// cache. We pass true for the third argument so that it will get unlocked.
this->initFromRenderTarget(context, texture->asRenderTarget(), true);
@@ -189,7 +189,7 @@ GrTexture* GrLockAndRefCachedBitmapTexture(GrContext* ctx,
}

void GrUnlockAndUnrefCachedBitmapTexture(GrTexture* texture) {
GrAssert(NULL != texture->getContext());
SkASSERT(NULL != texture->getContext());

texture->getContext()->unlockScratchTexture(texture);
texture->unref();
@@ -88,7 +88,7 @@ GrMaskFormat SkGrFontScaler::getMaskFormat() {
case SkMask::kLCD32_Format:
return kA888_GrMaskFormat;
default:
GrAssert(!"unsupported SkMask::Format");
SkASSERT(!"unsupported SkMask::Format");
return kA8_GrMaskFormat;
}
}
@@ -142,8 +142,8 @@ bool SkGrFontScaler::getPackedGlyphImage(GrGlyph::PackedID packed,
const SkGlyph& glyph = fStrike->getGlyphIDMetrics(GrGlyph::UnpackID(packed),
GrGlyph::UnpackFixedX(packed),
GrGlyph::UnpackFixedY(packed));
GrAssert(glyph.fWidth == width);
GrAssert(glyph.fHeight == height);
SkASSERT(glyph.fWidth == width);
SkASSERT(glyph.fHeight == height);
const void* src = fStrike->findImage(glyph);
if (NULL == src) {
return false;
@@ -39,7 +39,7 @@ public:
coordsType);
builder->fsCodeAppend(";\n");
if (GrConfigConversionEffect::kNone_PMConversion == fPMConversion) {
GrAssert(fSwapRedAndBlue);
SkASSERT(fSwapRedAndBlue);
builder->fsCodeAppendf("\t%s = %s.bgra;\n", outputColor, outputColor);
} else {
const char* swiz = fSwapRedAndBlue ? "bgr" : "rgb";
@@ -89,7 +89,7 @@ public:
drawEffect,
conv.coordsType(),
conv.texture(0));
GrAssert(!(matrixKey & key));
SkASSERT(!(matrixKey & key));
return matrixKey | key;
}
@@ -111,10 +111,10 @@ GrConfigConversionEffect::GrConfigConversionEffect(GrTexture* texture,
: GrSingleTextureEffect(texture, matrix)
, fSwapRedAndBlue(swapRedAndBlue)
, fPMConversion(pmConversion) {
GrAssert(kRGBA_8888_GrPixelConfig == texture->config() ||
SkASSERT(kRGBA_8888_GrPixelConfig == texture->config() ||
kBGRA_8888_GrPixelConfig == texture->config());
// Why did we pollute our texture cache instead of using a GrSingleTextureEffect?
GrAssert(swapRedAndBlue || kNone_PMConversion != pmConversion);
SkASSERT(swapRedAndBlue || kNone_PMConversion != pmConversion);
}

const GrBackendEffectFactory& GrConfigConversionEffect::getFactory() const {
@@ -109,7 +109,7 @@ void GrGLConvolutionEffect::setData(const GrGLUniformManager& uman,
const GrConvolutionEffect& conv = drawEffect.castEffect<GrConvolutionEffect>();
GrTexture& texture = *conv.texture(0);
// the code we generated was for a specific kernel radius
GrAssert(conv.radius() == fRadius);
SkASSERT(conv.radius() == fRadius);
float imageIncrement[2] = { 0 };
float ySign = texture.origin() != kTopLeft_GrSurfaceOrigin ? 1.0f : -1.0f;
switch (conv.direction()) {
@@ -162,8 +162,8 @@ GrConvolutionEffect::GrConvolutionEffect(GrTexture* texture,
bool useBounds,
float bounds[2])
: Gr1DKernelEffect(texture, direction, radius), fUseBounds(useBounds) {
GrAssert(radius <= kMaxKernelRadius);
GrAssert(NULL != kernel);
SkASSERT(radius <= kMaxKernelRadius);
SkASSERT(NULL != kernel);
int width = this->width();
for (int i = 0; i < width; i++) {
fKernel[i] = kernel[i];
@@ -178,7 +178,7 @@ GrConvolutionEffect::GrConvolutionEffect(GrTexture* texture,
bool useBounds,
float bounds[2])
: Gr1DKernelEffect(texture, direction, radius), fUseBounds(useBounds) {
GrAssert(radius <= kMaxKernelRadius);
SkASSERT(radius <= kMaxKernelRadius);
int width = this->width();

float sum = 0.0f;
@@ -34,8 +34,8 @@ public:
const char* fsCoordName;
GrSLType fsCoordSLType;
if (GrEffect::kCustom_CoordsType == ste.coordsType()) {
GrAssert(ste.getMatrix().isIdentity());
GrAssert(1 == ste.numVertexAttribs());
SkASSERT(ste.getMatrix().isIdentity());
SkASSERT(1 == ste.numVertexAttribs());
fsCoordSLType = kVec2f_GrSLType;
const char* vsVaryingName;
builder->addVarying(kVec2f_GrSLType, "textureCoords", &vsVaryingName, &fsCoordName);
@@ -70,7 +70,7 @@ public:
const GrDrawEffect& drawEffect) SK_OVERRIDE {
const GrSimpleTextureEffect& ste = drawEffect.castEffect<GrSimpleTextureEffect>();
if (GrEffect::kCustom_CoordsType == ste.coordsType()) {
GrAssert(ste.getMatrix().isIdentity());
SkASSERT(ste.getMatrix().isIdentity());
} else {
fEffectMatrix.get()->setData(uman, ste.getMatrix(), drawEffect, ste.texture(0));
}
@@ -26,7 +26,7 @@ public:
static GrEffectRef* Create(GrTexture* tex,
const SkMatrix& matrix,
CoordsType coordsType = kLocal_CoordsType) {
GrAssert(kLocal_CoordsType == coordsType || kPosition_CoordsType == coordsType);
SkASSERT(kLocal_CoordsType == coordsType || kPosition_CoordsType == coordsType);
AutoEffectUnref effect(SkNEW_ARGS(GrSimpleTextureEffect, (tex, matrix, GrTextureParams::kNone_FilterMode, coordsType)));
return CreateEffectRef(effect);
}
@@ -36,7 +36,7 @@ public:
const SkMatrix& matrix,
GrTextureParams::FilterMode filterMode,
CoordsType coordsType = kLocal_CoordsType) {
GrAssert(kLocal_CoordsType == coordsType || kPosition_CoordsType == coordsType);
SkASSERT(kLocal_CoordsType == coordsType || kPosition_CoordsType == coordsType);
AutoEffectUnref effect(
SkNEW_ARGS(GrSimpleTextureEffect, (tex, matrix, filterMode, coordsType)));
return CreateEffectRef(effect);
@@ -46,7 +46,7 @@ public:
const SkMatrix& matrix,
const GrTextureParams& p,
CoordsType coordsType = kLocal_CoordsType) {
GrAssert(kLocal_CoordsType == coordsType || kPosition_CoordsType == coordsType);
SkASSERT(kLocal_CoordsType == coordsType || kPosition_CoordsType == coordsType);
AutoEffectUnref effect(SkNEW_ARGS(GrSimpleTextureEffect, (tex, matrix, p, coordsType)));
return CreateEffectRef(effect);
}
@@ -77,7 +77,7 @@ private:
GrTextureParams::FilterMode filterMode,
CoordsType coordsType)
: GrSingleTextureEffect(texture, matrix, filterMode, coordsType) {
GrAssert(kLocal_CoordsType == coordsType || kPosition_CoordsType == coordsType);
SkASSERT(kLocal_CoordsType == coordsType || kPosition_CoordsType == coordsType);
}

GrSimpleTextureEffect(GrTexture* texture,
@@ -86,7 +86,7 @@ private:
CoordsType coordsType)
: GrSingleTextureEffect(texture, matrix, params, coordsType) {
if (kCustom_CoordsType == coordsType) {
GrAssert(matrix.isIdentity());
SkASSERT(matrix.isIdentity());
this->addVertexAttrib(kVec2f_GrSLType);
}
}
@@ -67,7 +67,7 @@ void GrGLTextureDomainEffect::emitCode(GrGLShaderBuilder* builder,
"clampCoord");
builder->fsCodeAppend(";\n");
} else {
GrAssert(GrTextureDomainEffect::kDecal_WrapMode == texDom.wrapMode());
SkASSERT(GrTextureDomainEffect::kDecal_WrapMode == texDom.wrapMode());

if (kImagination_GrGLVendor == builder->ctxInfo().vendor()) {
// On the NexusS and GalaxyNexus, the other path (with the 'any'
@@ -162,14 +162,14 @@ GrEffectRef* GrTextureDomainEffect::Create(GrTexture* texture,
// We don't currently handle domains that are empty or don't intersect the texture.
// It is OK if the domain rect is a line or point, but it should not be inverted. We do not
// handle rects that do not intersect the [0..1]x[0..1] rect.
GrAssert(domain.fLeft <= domain.fRight);
GrAssert(domain.fTop <= domain.fBottom);
SkASSERT(domain.fLeft <= domain.fRight);
SkASSERT(domain.fTop <= domain.fBottom);
clippedDomain.fLeft = SkMaxScalar(domain.fLeft, kFullRect.fLeft);
clippedDomain.fRight = SkMinScalar(domain.fRight, kFullRect.fRight);
clippedDomain.fTop = SkMaxScalar(domain.fTop, kFullRect.fTop);
clippedDomain.fBottom = SkMinScalar(domain.fBottom, kFullRect.fBottom);
GrAssert(clippedDomain.fLeft <= clippedDomain.fRight);
GrAssert(clippedDomain.fTop <= clippedDomain.fBottom);
SkASSERT(clippedDomain.fLeft <= clippedDomain.fRight);
SkASSERT(clippedDomain.fTop <= clippedDomain.fBottom);

AutoEffectUnref effect(SkNEW_ARGS(GrTextureDomainEffect, (texture,
matrix,
@@ -35,7 +35,7 @@ GrTextureStripAtlas::GetCache() {

// Remove the specified atlas from the cache
void GrTextureStripAtlas::CleanUp(const GrContext*, void* info) {
GrAssert(NULL != info);
SkASSERT(NULL != info);

AtlasEntry* entry = static_cast<AtlasEntry*>(info);
@@ -78,7 +78,7 @@ GrTextureStripAtlas::GrTextureStripAtlas(GrTextureStripAtlas::Desc desc)
, fRows(SkNEW_ARRAY(AtlasRow, fNumRows))
, fLRUFront(NULL)
, fLRUBack(NULL) {
GrAssert(fNumRows * fDesc.fRowHeight == fDesc.fHeight);
SkASSERT(fNumRows * fDesc.fRowHeight == fDesc.fHeight);
this->initLRU();
VALIDATE;
}
@@ -164,7 +164,7 @@ int GrTextureStripAtlas::lockRow(const SkBitmap& data) {
GrContext::kDontFlush_PixelOpsFlag);
}

GrAssert(rowNumber >= 0);
SkASSERT(rowNumber >= 0);
VALIDATE;
return rowNumber;
}
@@ -173,7 +173,7 @@ void GrTextureStripAtlas::unlockRow(int row) {
VALIDATE;
--fRows[row].fLocks;
--fLockedRows;
GrAssert(fRows[row].fLocks >= 0 && fLockedRows >= 0);
SkASSERT(fRows[row].fLocks >= 0 && fLockedRows >= 0);
if (0 == fRows[row].fLocks) {
this->appendLRU(fRows + row);
}
@@ -209,11 +209,11 @@ void GrTextureStripAtlas::lockTexture() {
this->initLRU();
fKeyTable.rewind();
}
GrAssert(NULL != fTexture);
SkASSERT(NULL != fTexture);
}

void GrTextureStripAtlas::unlockTexture() {
GrAssert(NULL != fTexture && 0 == fLockedRows);
SkASSERT(NULL != fTexture && 0 == fLockedRows);
fTexture->unref();
fTexture = NULL;
fDesc.fContext->purgeCache();
@@ -229,12 +229,12 @@ void GrTextureStripAtlas::initLRU() {
fRows[i].fPrev = NULL;
this->appendLRU(fRows + i);
}
GrAssert(NULL == fLRUFront || NULL == fLRUFront->fPrev);
GrAssert(NULL == fLRUBack || NULL == fLRUBack->fNext);
SkASSERT(NULL == fLRUFront || NULL == fLRUFront->fPrev);
SkASSERT(NULL == fLRUBack || NULL == fLRUBack->fNext);
}

void GrTextureStripAtlas::appendLRU(AtlasRow* row) {
GrAssert(NULL == row->fPrev && NULL == row->fNext);
SkASSERT(NULL == row->fPrev && NULL == row->fNext);
if (NULL == fLRUFront && NULL == fLRUBack) {
fLRUFront = row;
fLRUBack = row;
@@ -246,20 +246,20 @@ void GrTextureStripAtlas::appendLRU(AtlasRow* row) {
}

void GrTextureStripAtlas::removeFromLRU(AtlasRow* row) {
GrAssert(NULL != row);
SkASSERT(NULL != row);
if (NULL != row->fNext && NULL != row->fPrev) {
row->fPrev->fNext = row->fNext;
row->fNext->fPrev = row->fPrev;
} else {
if (NULL == row->fNext) {
GrAssert(row == fLRUBack);
SkASSERT(row == fLRUBack);
fLRUBack = row->fPrev;
if (fLRUBack) {
fLRUBack->fNext = NULL;
}
}
if (NULL == row->fPrev) {
GrAssert(row == fLRUFront);
SkASSERT(row == fLRUFront);
fLRUFront = row->fNext;
if (fLRUFront) {
fLRUFront->fPrev = NULL;
@@ -286,20 +286,20 @@ void GrTextureStripAtlas::validate() {
// Our key table should be sorted
uint32_t prev = 1 > fKeyTable.count() ? 0 : fKeyTable[0]->fKey;
for (int i = 1; i < fKeyTable.count(); ++i) {
GrAssert(prev < fKeyTable[i]->fKey);
GrAssert(fKeyTable[i]->fKey != kEmptyAtlasRowKey);
SkASSERT(prev < fKeyTable[i]->fKey);
SkASSERT(fKeyTable[i]->fKey != kEmptyAtlasRowKey);
prev = fKeyTable[i]->fKey;
}

int lruCount = 0;
// Validate LRU pointers, and count LRU entries
GrAssert(NULL == fLRUFront || NULL == fLRUFront->fPrev);
GrAssert(NULL == fLRUBack || NULL == fLRUBack->fNext);
SkASSERT(NULL == fLRUFront || NULL == fLRUFront->fPrev);
SkASSERT(NULL == fLRUBack || NULL == fLRUBack->fNext);
for (AtlasRow* r = fLRUFront; r != NULL; r = r->fNext) {
if (NULL == r->fNext) {
GrAssert(r == fLRUBack);
SkASSERT(r == fLRUBack);
} else {
GrAssert(r->fNext->fPrev == r);
SkASSERT(r->fNext->fPrev == r);
}
++lruCount;
}
@@ -319,30 +319,30 @@ void GrTextureStripAtlas::validate() {
break;
}
}
GrAssert(inLRU);
SkASSERT(inLRU);
} else {
// If we are locked, we should have a key
GrAssert(kEmptyAtlasRowKey != fRows[i].fKey);
SkASSERT(kEmptyAtlasRowKey != fRows[i].fKey);
}

// If we have a key != kEmptyAtlasRowKey, it should be in the key table
GrAssert(fRows[i].fKey == kEmptyAtlasRowKey || this->searchByKey(fRows[i].fKey) >= 0);
SkASSERT(fRows[i].fKey == kEmptyAtlasRowKey || this->searchByKey(fRows[i].fKey) >= 0);
}

// Our count of locks should equal the sum of row locks, unless we ran out of rows and flushed,
// in which case we'll have one more lock than recorded in the rows (to represent the pending
// lock of a row; which ensures we don't unlock the texture prematurely).
GrAssert(rowLocks == fLockedRows || rowLocks + 1 == fLockedRows);
SkASSERT(rowLocks == fLockedRows || rowLocks + 1 == fLockedRows);

// We should have one lru entry for each free row
GrAssert(freeRows == lruCount);
SkASSERT(freeRows == lruCount);

// If we have locked rows, we should have a locked texture, otherwise
// it should be unlocked
if (fLockedRows == 0) {
GrAssert(NULL == fTexture);
SkASSERT(NULL == fTexture);
} else {
GrAssert(NULL != fTexture);
SkASSERT(NULL != fTexture);
}
}
#endif
@ -44,7 +44,7 @@ void GrGLBufferImpl::release(GrGpuGL* gpu) {
|
||||
if (GR_GL_ARRAY_BUFFER == fBufferType) {
|
||||
gpu->notifyVertexBufferDelete(fDesc.fID);
|
||||
} else {
|
||||
GrAssert(GR_GL_ELEMENT_ARRAY_BUFFER == fBufferType);
|
||||
SkASSERT(GR_GL_ELEMENT_ARRAY_BUFFER == fBufferType);
|
||||
gpu->notifyIndexBufferDelete(fDesc.fID);
|
||||
}
|
||||
fDesc.fID = 0;
|
||||
@ -64,14 +64,14 @@ void GrGLBufferImpl::bind(GrGpuGL* gpu) const {
|
||||
if (GR_GL_ARRAY_BUFFER == fBufferType) {
|
||||
gpu->bindVertexBuffer(fDesc.fID);
|
||||
} else {
|
||||
GrAssert(GR_GL_ELEMENT_ARRAY_BUFFER == fBufferType);
|
||||
SkASSERT(GR_GL_ELEMENT_ARRAY_BUFFER == fBufferType);
|
||||
gpu->bindIndexBufferAndDefaultVertexArray(fDesc.fID);
|
||||
}
|
||||
}
|
||||
|
||||
void* GrGLBufferImpl::lock(GrGpuGL* gpu) {
|
||||
VALIDATE();
|
||||
GrAssert(!this->isLocked());
|
||||
SkASSERT(!this->isLocked());
|
||||
if (0 == fDesc.fID) {
|
||||
fLockPtr = fCPUData;
|
||||
} else if (gpu->caps()->bufferLockSupport()) {
|
||||
@ -90,9 +90,9 @@ void* GrGLBufferImpl::lock(GrGpuGL* gpu) {
|
||||
|
||||
void GrGLBufferImpl::unlock(GrGpuGL* gpu) {
|
||||
VALIDATE();
|
||||
GrAssert(this->isLocked());
|
||||
SkASSERT(this->isLocked());
|
||||
if (0 != fDesc.fID) {
|
||||
GrAssert(gpu->caps()->bufferLockSupport());
|
||||
SkASSERT(gpu->caps()->bufferLockSupport());
|
||||
this->bind(gpu);
|
||||
GL_CALL(gpu, UnmapBuffer(fBufferType));
|
||||
}
|
||||
@ -105,7 +105,7 @@ bool GrGLBufferImpl::isLocked() const {
|
||||
}
|
||||
|
||||
bool GrGLBufferImpl::updateData(GrGpuGL* gpu, const void* src, size_t srcSizeInBytes) {
|
||||
GrAssert(!this->isLocked());
|
||||
SkASSERT(!this->isLocked());
|
||||
VALIDATE();
|
||||
if (srcSizeInBytes > fDesc.fSizeInBytes) {
|
||||
return false;
|
||||
@ -157,9 +157,9 @@ bool GrGLBufferImpl::updateData(GrGpuGL* gpu, const void* src, size_t srcSizeInB
|
||||
}
|
||||
|
||||
void GrGLBufferImpl::validate() const {
|
||||
GrAssert(GR_GL_ARRAY_BUFFER == fBufferType || GR_GL_ELEMENT_ARRAY_BUFFER == fBufferType);
|
||||
SkASSERT(GR_GL_ARRAY_BUFFER == fBufferType || GR_GL_ELEMENT_ARRAY_BUFFER == fBufferType);
|
||||
// The following assert isn't valid when the buffer has been abandoned:
|
||||
// GrAssert((0 == fDesc.fID) == (NULL != fCPUData));
|
||||
GrAssert(0 != fDesc.fID || !fDesc.fIsWrapped);
|
||||
GrAssert(NULL == fCPUData || NULL == fLockPtr || fCPUData == fLockPtr);
|
||||
// SkASSERT((0 == fDesc.fID) == (NULL != fCPUData));
|
||||
SkASSERT(0 != fDesc.fID || !fDesc.fIsWrapped);
|
||||
SkASSERT(NULL == fCPUData || NULL == fLockPtr || fCPUData == fLockPtr);
|
||||
}
|
||||
|
@ -29,7 +29,7 @@ public:
|
||||
GrGLBufferImpl(GrGpuGL*, const Desc&, GrGLenum bufferType);
|
||||
~GrGLBufferImpl() {
|
||||
// either release or abandon should have been called by the owner of this object.
|
||||
GrAssert(0 == fDesc.fID);
|
||||
SkASSERT(0 == fDesc.fID);
|
||||
}
|
||||
|
||||
void abandon();
|
||||
|
@ -105,7 +105,7 @@ void GrGLCaps::init(const GrGLContextInfo& ctxInfo, const GrGLInterface* gli) {
|
||||
GR_GL_GetIntegerv(gli, GR_GL_MAX_FRAGMENT_UNIFORM_VECTORS,
|
||||
&fMaxFragmentUniformVectors);
|
||||
} else {
|
||||
GrAssert(kDesktop_GrGLBinding == binding);
|
||||
SkASSERT(kDesktop_GrGLBinding == binding);
|
||||
GrGLint max;
|
||||
GR_GL_GetIntegerv(gli, GR_GL_MAX_FRAGMENT_UNIFORM_COMPONENTS, &max);
|
||||
fMaxFragmentUniformVectors = max / 4;
|
||||
@ -130,7 +130,7 @@ void GrGLCaps::init(const GrGLContextInfo& ctxInfo, const GrGLInterface* gli) {
|
||||
fBGRAFormatSupport = true;
|
||||
fBGRAIsInternalFormat = true;
|
||||
}
|
||||
GrAssert(fBGRAFormatSupport ||
|
||||
SkASSERT(fBGRAFormatSupport ||
|
||||
kSkia8888_GrPixelConfig != kBGRA_8888_GrPixelConfig);
|
||||
}
|
||||
|
||||
@ -418,7 +418,7 @@ const GrGLCaps::MSAACoverageMode& GrGLCaps::getMSAACoverageMode(int desiredSampl
|
||||
if (0 == fMSAACoverageModes.count()) {
|
||||
return kNoneMode;
|
||||
} else {
|
||||
GrAssert(kNone_CoverageAAType != fCoverageAAType);
|
||||
SkASSERT(kNone_CoverageAAType != fCoverageAAType);
|
||||
int max = (fMSAACoverageModes.end() - 1)->fCoverageSampleCnt;
|
||||
desiredSampleCount = GrMin(desiredSampleCount, max);
|
||||
MSAACoverageMode desiredMode = {desiredSampleCount, 0};
|
||||
@ -429,7 +429,7 @@ const GrGLCaps::MSAACoverageMode& GrGLCaps::getMSAACoverageMode(int desiredSampl
|
||||
if (idx < 0) {
|
||||
idx = ~idx;
|
||||
}
|
||||
GrAssert(idx >= 0 && idx < fMSAACoverageModes.count());
|
||||
SkASSERT(idx >= 0 && idx < fMSAACoverageModes.count());
|
||||
return fMSAACoverageModes[idx];
|
||||
}
|
||||
}
|
||||
@ -487,7 +487,7 @@ void GrGLCaps::initStencilFormats(const GrGLContextInfo& ctxInfo) {
|
||||
fStencilFormats.push_back() = gS4;
|
||||
}
|
||||
}
|
||||
GrAssert(0 == fStencilVerifiedColorConfigs.count());
|
||||
SkASSERT(0 == fStencilVerifiedColorConfigs.count());
|
||||
fStencilVerifiedColorConfigs.push_back_n(fStencilFormats.count());
|
||||
}
|
||||
|
||||
@ -497,12 +497,12 @@ void GrGLCaps::markColorConfigAndStencilFormatAsVerified(
|
||||
#if !GR_GL_CHECK_FBO_STATUS_ONCE_PER_FORMAT
|
||||
return;
|
||||
#endif
|
||||
GrAssert((unsigned)config < (unsigned)kGrPixelConfigCnt);
|
||||
GrAssert(fStencilFormats.count() == fStencilVerifiedColorConfigs.count());
|
||||
SkASSERT((unsigned)config < (unsigned)kGrPixelConfigCnt);
|
||||
SkASSERT(fStencilFormats.count() == fStencilVerifiedColorConfigs.count());
|
||||
int count = fStencilFormats.count();
|
||||
// we expect a really small number of possible formats so linear search
|
||||
// should be OK
|
||||
GrAssert(count < 16);
|
||||
SkASSERT(count < 16);
|
||||
for (int i = 0; i < count; ++i) {
|
||||
if (format.fInternalFormat ==
|
||||
fStencilFormats[i].fInternalFormat) {
|
||||
@ -520,11 +520,11 @@ bool GrGLCaps::isColorConfigAndStencilFormatVerified(
|
||||
#if !GR_GL_CHECK_FBO_STATUS_ONCE_PER_FORMAT
|
||||
return false;
|
||||
#endif
|
||||
GrAssert((unsigned)config < (unsigned)kGrPixelConfigCnt);
|
||||
SkASSERT((unsigned)config < (unsigned)kGrPixelConfigCnt);
|
||||
int count = fStencilFormats.count();
|
||||
// we expect a really small number of possible formats so linear search
|
||||
// should be OK
|
||||
GrAssert(count < 16);
|
||||
SkASSERT(count < 16);
|
||||
for (int i = 0; i < count; ++i) {
|
||||
if (format.fInternalFormat ==
|
||||
fStencilFormats[i].fInternalFormat) {
|
||||
|
@ -23,7 +23,7 @@ public:
|
||||
|
||||
void allocate(GrGLsizeiptr size, const GrGLchar* dataPtr) {
|
||||
if (NULL != fDataPtr) {
|
||||
GrAssert(0 != fSize);
|
||||
SkASSERT(0 != fSize);
|
||||
SkDELETE_ARRAY(fDataPtr);
|
||||
}
|
||||
|
||||
@ -53,7 +53,7 @@ static GrGLuint gCurrElementArrayBuffer;
|
||||
|
||||
static GrBufferObj* look_up(GrGLuint id) {
|
||||
GrBufferObj* buffer = gBuffers[id];
|
||||
GrAssert(NULL != buffer && buffer->id() == id);
|
||||
SkASSERT(NULL != buffer && buffer->id() == id);
|
||||
return buffer;
|
||||
}
|
||||
|
||||
@ -84,7 +84,7 @@ static GrBufferObj* create_buffer() {
|
||||
}
|
||||
|
||||
static void delete_buffer(GrBufferObj* buffer) {
|
||||
GrAssert(gBuffers.count() > 0);
|
||||
SkASSERT(gBuffers.count() > 0);
|
||||
|
||||
GrGLuint id = buffer->id();
|
||||
SkDELETE(buffer);
|
||||
@ -200,12 +200,12 @@ GrGLvoid* GR_GL_FUNCTION_TYPE nullGLMapBuffer(GrGLenum target, GrGLenum access)
|
||||
|
||||
if (id > 0) {
|
||||
GrBufferObj* buffer = look_up(id);
|
||||
GrAssert(!buffer->mapped());
|
||||
SkASSERT(!buffer->mapped());
|
||||
buffer->setMapped(true);
|
||||
return buffer->dataPtr();
|
||||
}
|
||||
|
||||
GrAssert(false);
|
||||
SkASSERT(false);
|
||||
return NULL; // no buffer bound to target
|
||||
}
|
||||
|
||||
@ -221,7 +221,7 @@ GrGLboolean GR_GL_FUNCTION_TYPE nullGLUnmapBuffer(GrGLenum target) {
|
||||
}
|
||||
if (id > 0) {
|
||||
GrBufferObj* buffer = look_up(id);
|
||||
GrAssert(buffer->mapped());
|
||||
SkASSERT(buffer->mapped());
|
||||
buffer->setMapped(false);
|
||||
return GR_GL_TRUE;
|
||||
}
|
||||
|
@ -28,7 +28,7 @@ GrGLEffect::EffectKey GrGLEffect::GenTextureKey(const GrDrawEffect& drawEffect,
|
||||
for (int index = 0; index < numTextures; ++index) {
|
||||
const GrTextureAccess& access = (*drawEffect.effect())->textureAccess(index);
|
||||
EffectKey value = GrGLShaderBuilder::KeyForTextureAccess(access, caps) << index;
|
||||
GrAssert(0 == (value & key)); // keys for each access ought not to overlap
|
||||
SkASSERT(0 == (value & key)); // keys for each access ought not to overlap
|
||||
key |= value;
|
||||
}
|
||||
return key;
|
||||
@ -38,11 +38,11 @@ GrGLEffect::EffectKey GrGLEffect::GenAttribKey(const GrDrawEffect& drawEffect) {
|
||||
EffectKey key = 0;
|
||||
|
||||
int numAttributes = drawEffect.getVertexAttribIndexCount();
|
||||
GrAssert(numAttributes <= 2);
|
||||
SkASSERT(numAttributes <= 2);
|
||||
const int* attributeIndices = drawEffect.getVertexAttribIndices();
|
||||
for (int index = 0; index < numAttributes; ++index) {
|
||||
EffectKey value = attributeIndices[index] << 3*index;
|
||||
GrAssert(0 == (value & key)); // keys for each attribute ought not to overlap
|
||||
SkASSERT(0 == (value & key)); // keys for each attribute ought not to overlap
|
||||
key |= value;
|
||||
}
|
||||
|
||||
|
@ -101,11 +101,11 @@ GrSLType GrGLEffectMatrix::emitCode(GrGLShaderBuilder* builder,
|
||||
const GrGLShaderVar* coords;
|
||||
switch (fCoordsType) {
|
||||
case GrEffect::kLocal_CoordsType:
|
||||
GrAssert(!(kPositionCoords_Flag & key));
|
||||
SkASSERT(!(kPositionCoords_Flag & key));
|
||||
coords = &builder->localCoordsAttribute();
|
||||
break;
|
||||
case GrEffect::kPosition_CoordsType:
|
||||
GrAssert((kPositionCoords_Flag & key) || !builder->hasExplicitLocalCoords());
|
||||
SkASSERT((kPositionCoords_Flag & key) || !builder->hasExplicitLocalCoords());
|
||||
coords = &builder->positionAttribute();
|
||||
break;
|
||||
default:
|
||||
@ -115,16 +115,16 @@ GrSLType GrGLEffectMatrix::emitCode(GrGLShaderBuilder* builder,
|
||||
// varying = matrix * coords (logically)
|
||||
switch (fUniType) {
|
||||
case kVoid_GrSLType:
|
||||
GrAssert(kVec2f_GrSLType == varyingType);
|
||||
SkASSERT(kVec2f_GrSLType == varyingType);
|
||||
builder->vsCodeAppendf("\t%s = %s;\n", vsVaryingName, coords->c_str());
|
||||
break;
|
||||
case kVec2f_GrSLType:
|
||||
GrAssert(kVec2f_GrSLType == varyingType);
|
||||
SkASSERT(kVec2f_GrSLType == varyingType);
|
||||
builder->vsCodeAppendf("\t%s = %s + %s;\n",
|
||||
vsVaryingName, uniName, coords->c_str());
|
||||
break;
|
||||
case kMat33f_GrSLType: {
|
||||
GrAssert(kVec2f_GrSLType == varyingType || kVec3f_GrSLType == varyingType);
|
||||
SkASSERT(kVec2f_GrSLType == varyingType || kVec3f_GrSLType == varyingType);
|
||||
if (kVec2f_GrSLType == varyingType) {
|
||||
builder->vsCodeAppendf("\t%s = (%s * vec3(%s, 1)).xy;\n",
|
||||
vsVaryingName, uniName, coords->c_str());
|
||||
@ -189,19 +189,19 @@ void GrGLEffectMatrix::setData(const GrGLUniformManager& uniformManager,
|
||||
const SkMatrix& matrix,
|
||||
const GrDrawEffect& drawEffect,
|
||||
const GrTexture* texture) {
|
||||
GrAssert(fUni.isValid() != (kVoid_GrSLType == fUniType));
|
||||
SkASSERT(fUni.isValid() != (kVoid_GrSLType == fUniType));
|
||||
const SkMatrix& coordChangeMatrix = GrEffect::kLocal_CoordsType == fCoordsType ?
|
||||
drawEffect.getCoordChangeMatrix() :
|
||||
SkMatrix::I();
|
||||
switch (fUniType) {
|
||||
case kVoid_GrSLType:
|
||||
GrAssert(matrix.isIdentity());
|
||||
GrAssert(coordChangeMatrix.isIdentity());
|
||||
GrAssert(NULL == texture || kTopLeft_GrSurfaceOrigin == texture->origin());
|
||||
SkASSERT(matrix.isIdentity());
|
||||
SkASSERT(coordChangeMatrix.isIdentity());
|
||||
SkASSERT(NULL == texture || kTopLeft_GrSurfaceOrigin == texture->origin());
|
||||
return;
|
||||
case kVec2f_GrSLType: {
|
||||
GrAssert(SkMatrix::kTranslate_Mask == (matrix.getType() | coordChangeMatrix.getType()));
|
||||
GrAssert(NULL == texture || kTopLeft_GrSurfaceOrigin == texture->origin());
|
||||
SkASSERT(SkMatrix::kTranslate_Mask == (matrix.getType() | coordChangeMatrix.getType()));
|
||||
SkASSERT(NULL == texture || kTopLeft_GrSurfaceOrigin == texture->origin());
|
||||
SkScalar tx = matrix[SkMatrix::kMTransX] + (coordChangeMatrix)[SkMatrix::kMTransX];
|
||||
SkScalar ty = matrix[SkMatrix::kMTransY] + (coordChangeMatrix)[SkMatrix::kMTransY];
|
||||
if (fPrevMatrix.get(SkMatrix::kMTransX) != tx ||
|
||||
|
@ -60,7 +60,7 @@ public:
|
||||
|
||||
GrGLEffectMatrix(CoordsType coordsType)
|
||||
: fCoordsType(coordsType) {
|
||||
GrAssert(GrEffect::kLocal_CoordsType == coordsType ||
|
||||
SkASSERT(GrEffect::kLocal_CoordsType == coordsType ||
|
||||
GrEffect::kPosition_CoordsType == coordsType);
|
||||
fPrevMatrix = SkMatrix::InvalidMatrix();
|
||||
}
|
||||
|
@ -54,10 +54,10 @@ struct GrGLIRect {
|
||||
}
|
||||
fHeight = height;
|
||||
|
||||
GrAssert(fLeft >= 0);
|
||||
GrAssert(fWidth >= 0);
|
||||
GrAssert(fBottom >= 0);
|
||||
GrAssert(fHeight >= 0);
|
||||
SkASSERT(fLeft >= 0);
|
||||
SkASSERT(fWidth >= 0);
|
||||
SkASSERT(fBottom >= 0);
|
||||
SkASSERT(fHeight >= 0);
|
||||
}
|
||||
|
||||
bool contains(const GrGLIRect& glRect) const {
|
||||
|
@ -45,7 +45,7 @@ protected:
|
||||
|
||||
private:
|
||||
GrGpuGL* getGpuGL() const {
|
||||
GrAssert(this->isValid());
|
||||
SkASSERT(this->isValid());
|
||||
return (GrGpuGL*)(this->getGpu());
|
||||
}
|
||||
|
||||
|
@ -30,7 +30,7 @@ inline GrGLubyte verb_to_gl_path_cmd(const SkPath::Verb verb) {
|
||||
GR_STATIC_ASSERT(4 == SkPath::kCubic_Verb);
|
||||
GR_STATIC_ASSERT(5 == SkPath::kClose_Verb);
|
||||
|
||||
GrAssert(verb >= 0 && (size_t)verb < GR_ARRAY_COUNT(gTable));
|
||||
SkASSERT(verb >= 0 && (size_t)verb < GR_ARRAY_COUNT(gTable));
|
||||
return gTable[verb];
|
||||
}
|
||||
|
||||
@ -50,7 +50,7 @@ inline int num_pts(const SkPath::Verb verb) {
|
||||
GR_STATIC_ASSERT(4 == SkPath::kCubic_Verb);
|
||||
GR_STATIC_ASSERT(5 == SkPath::kClose_Verb);
|
||||
|
||||
GrAssert(verb >= 0 && (size_t)verb < GR_ARRAY_COUNT(gTable));
|
||||
SkASSERT(verb >= 0 && (size_t)verb < GR_ARRAY_COUNT(gTable));
|
||||
return gTable[verb];
|
||||
}
|
||||
#endif
|
||||
@ -84,7 +84,7 @@ GrGLPath::GrGLPath(GrGpuGL* gpu, const SkPath& path) : INHERITED(gpu, kIsWrapped
|
||||
pathCommands[i] = verb_to_gl_path_cmd(v);
|
||||
GR_DEBUGCODE(numPts += num_pts(v));
|
||||
}
|
||||
GrAssert(pathPoints.count() == numPts);
|
||||
SkASSERT(pathPoints.count() == numPts);
|
||||
|
||||
GL_CALL(PathCommands(fPathID,
|
||||
verbCnt, &pathCommands[0],
|
||||
|
@ -107,7 +107,7 @@ void GrGLProgram::overrideBlend(GrBlendCoeff* srcCoeff,
|
||||
break;
|
||||
case GrGLProgramDesc::kCombineWithDst_CoverageOutput:
|
||||
// We should only have set this if the blend was specified as (1, 0)
|
||||
GrAssert(kOne_GrBlendCoeff == *srcCoeff && kZero_GrBlendCoeff == *dstCoeff);
|
||||
SkASSERT(kOne_GrBlendCoeff == *srcCoeff && kZero_GrBlendCoeff == *dstCoeff);
|
||||
break;
|
||||
default:
|
||||
GrCrash("Unexpected coverage output");
|
||||
@ -282,7 +282,7 @@ void GrGLProgram::genGeometryShader(GrGLShaderBuilder* builder) const {
|
||||
#if GR_GL_EXPERIMENTAL_GS
|
||||
// TODO: The builder should add all this glue code.
|
||||
if (fDesc.getHeader().fExperimentalGS) {
|
||||
GrAssert(fContext.info().glslGeneration() >= k150_GrGLSLGeneration);
|
||||
SkASSERT(fContext.info().glslGeneration() >= k150_GrGLSLGeneration);
|
||||
builder->fGSHeader.append("layout(triangles) in;\n"
|
||||
"layout(triangle_strip, max_vertices = 6) out;\n");
|
||||
builder->gsCodeAppend("\tfor (int i = 0; i < 3; ++i) {\n"
|
||||
@ -290,7 +290,7 @@ void GrGLProgram::genGeometryShader(GrGLShaderBuilder* builder) const {
|
||||
if (fDesc.getHeader().fEmitsPointSize) {
|
||||
builder->gsCodeAppend("\t\tgl_PointSize = 1.0;\n");
|
||||
}
|
||||
GrAssert(builder->fGSInputs.count() == builder->fGSOutputs.count());
|
||||
SkASSERT(builder->fGSInputs.count() == builder->fGSOutputs.count());
|
||||
int count = builder->fGSInputs.count();
|
||||
for (int i = 0; i < count; ++i) {
|
||||
builder->gsCodeAppendf("\t\t%s = %s[i];\n",
|
||||
@ -364,7 +364,7 @@ GrGLuint compile_shader(const GrGLContext& gl,
|
||||
print_shader(stringCnt, strings, stringLengths);
|
||||
GrPrintf("\n%s", log.get());
|
||||
}
|
||||
GrAssert(!"Shader compilation failed!");
|
||||
SkASSERT(!"Shader compilation failed!");
|
||||
GR_GL_CALL(gli, DeleteShader(shader));
|
||||
return 0;
|
||||
}
|
||||
@ -379,7 +379,7 @@ GrGLuint compile_shader(const GrGLContext& gl, GrGLenum type, const SkString& sh
|
||||
}
|
||||
|
||||
void expand_known_value4f(SkString* string, GrSLConstantVec vec) {
|
||||
GrAssert(string->isEmpty() == (vec != kNone_GrSLConstantVec));
|
||||
SkASSERT(string->isEmpty() == (vec != kNone_GrSLConstantVec));
|
||||
switch (vec) {
|
||||
case kNone_GrSLConstantVec:
|
||||
break;
|
||||
@ -437,7 +437,7 @@ bool GrGLProgram::compileShaders(const GrGLShaderBuilder& builder) {
|
||||
|
||||
bool GrGLProgram::genProgram(const GrEffectStage* colorStages[],
|
||||
const GrEffectStage* coverageStages[]) {
|
||||
GrAssert(0 == fProgramID);
|
||||
SkASSERT(0 == fProgramID);
|
||||
|
||||
const GrGLProgramDesc::KeyHeader& header = fDesc.getHeader();
|
||||
|
||||
@ -742,7 +742,7 @@ bool GrGLProgram::bindOutputsAttribsAndLinkProgram(const GrGLShaderBuilder& buil
|
||||
(char*)log.get()));
|
||||
GrPrintf((char*)log.get());
|
||||
}
|
||||
GrAssert(!"Error linking program");
|
||||
SkASSERT(!"Error linking program");
|
||||
GL_CALL(DeleteProgram(fProgramID));
|
||||
fProgramID = 0;
|
||||
return false;
|
||||
@ -792,7 +792,7 @@ void GrGLProgram::setEffectData(GrGpuGL* gpu,
|
||||
|
||||
// Bind the texures for the effect.
|
||||
int numSamplers = effect.fSamplerUnis.count();
|
||||
GrAssert((*stage.getEffect())->numTextures() == numSamplers);
|
||||
SkASSERT((*stage.getEffect())->numTextures() == numSamplers);
|
||||
for (int s = 0; s < numSamplers; ++s) {
|
||||
UniformHandle handle = effect.fSamplerUnis[s];
|
||||
if (handle.isValid()) {
|
||||
@ -850,13 +850,13 @@ void GrGLProgram::setData(GrGpuGL* gpu,
|
||||
static GrTextureParams kParams; // the default is clamp, nearest filtering.
|
||||
gpu->bindTexture(fDstCopyTexUnit, kParams, texture);
|
||||
} else {
|
||||
GrAssert(!fUniformHandles.fDstCopyScaleUni.isValid());
|
||||
GrAssert(!fUniformHandles.fDstCopySamplerUni.isValid());
|
||||
SkASSERT(!fUniformHandles.fDstCopyScaleUni.isValid());
|
||||
SkASSERT(!fUniformHandles.fDstCopySamplerUni.isValid());
|
||||
}
|
||||
} else {
|
||||
GrAssert(!fUniformHandles.fDstCopyTopLeftUni.isValid());
|
||||
GrAssert(!fUniformHandles.fDstCopyScaleUni.isValid());
|
||||
GrAssert(!fUniformHandles.fDstCopySamplerUni.isValid());
|
||||
SkASSERT(!fUniformHandles.fDstCopyTopLeftUni.isValid());
|
||||
SkASSERT(!fUniformHandles.fDstCopyScaleUni.isValid());
|
||||
SkASSERT(!fUniformHandles.fDstCopySamplerUni.isValid());
|
||||
}
|
||||
|
||||
for (int e = 0; e < fColorEffects.count(); ++e) {
|
||||
@ -881,7 +881,7 @@ void GrGLProgram::setColor(const GrDrawState& drawState,
|
||||
if (!drawState.hasColorVertexAttribute()) {
|
||||
switch (header.fColorInput) {
|
||||
case GrGLProgramDesc::kAttribute_ColorInput:
|
||||
GrAssert(-1 != header.fColorAttributeIndex);
|
||||
SkASSERT(-1 != header.fColorAttributeIndex);
|
||||
if (sharedState->fConstAttribColor != color ||
|
||||
sharedState->fConstAttribColorIndex != header.fColorAttributeIndex) {
|
||||
// OpenGL ES only supports the float varieties of glVertexAttrib
|
||||
|
@ -46,7 +46,7 @@ void GrGLProgramDesc::Build(const GrDrawState& drawState,
|
||||
coverageStages->reset();
|
||||
|
||||
// This should already have been caught
|
||||
GrAssert(!(GrDrawState::kSkipDraw_BlendOptFlag & blendOpts));
|
||||
SkASSERT(!(GrDrawState::kSkipDraw_BlendOptFlag & blendOpts));
|
||||
|
||||
bool skipCoverage = SkToBool(blendOpts & GrDrawState::kEmitTransBlack_BlendOptFlag);
|
||||
|
||||
@ -137,13 +137,13 @@ void GrGLProgramDesc::Build(const GrDrawState& drawState,
|
||||
}
|
||||
|
||||
if (readsDst) {
|
||||
GrAssert(NULL != dstCopy || gpu->caps()->dstReadInShaderSupport());
|
||||
SkASSERT(NULL != dstCopy || gpu->caps()->dstReadInShaderSupport());
|
||||
const GrTexture* dstCopyTexture = NULL;
|
||||
if (NULL != dstCopy) {
|
||||
dstCopyTexture = dstCopy->texture();
|
||||
}
|
||||
header->fDstReadKey = GrGLShaderBuilder::KeyForDstRead(dstCopyTexture, gpu->glCaps());
|
||||
GrAssert(0 != header->fDstReadKey);
|
||||
SkASSERT(0 != header->fDstReadKey);
|
||||
} else {
|
||||
header->fDstReadKey = 0;
|
||||
}
|
||||
@ -164,7 +164,7 @@ void GrGLProgramDesc::Build(const GrDrawState& drawState,
|
||||
if (requiresColorAttrib) {
|
||||
header->fColorAttributeIndex = drawState.colorVertexAttributeIndex();
|
||||
} else if (GrGLProgramDesc::kAttribute_ColorInput == header->fColorInput) {
|
||||
GrAssert(availableAttributeIndex < GrDrawState::kMaxVertexAttribCnt);
|
||||
SkASSERT(availableAttributeIndex < GrDrawState::kMaxVertexAttribCnt);
|
||||
header->fColorAttributeIndex = availableAttributeIndex;
|
||||
availableAttributeIndex++;
|
||||
} else {
|
||||
@ -174,7 +174,7 @@ void GrGLProgramDesc::Build(const GrDrawState& drawState,
|
||||
if (requiresCoverageAttrib) {
|
||||
header->fCoverageAttributeIndex = drawState.coverageVertexAttributeIndex();
|
||||
} else if (GrGLProgramDesc::kAttribute_ColorInput == header->fCoverageInput) {
|
||||
GrAssert(availableAttributeIndex < GrDrawState::kMaxVertexAttribCnt);
|
||||
SkASSERT(availableAttributeIndex < GrDrawState::kMaxVertexAttribCnt);
|
||||
header->fCoverageAttributeIndex = availableAttributeIndex;
|
||||
} else {
|
||||
header->fCoverageAttributeIndex = -1;
|
||||
|
@ -30,7 +30,7 @@ public:
|
||||
|
||||
// Returns this as a uint32_t array to be used as a key in the program cache.
|
||||
const uint32_t* asKey() const {
|
||||
GrAssert(fInitialized);
|
||||
SkASSERT(fInitialized);
|
||||
return reinterpret_cast<const uint32_t*>(fKey.get());
|
||||
}
|
||||
|
||||
@ -71,12 +71,12 @@ public:
|
||||
GrGLProgramDesc* outDesc);
|
||||
|
||||
int numColorEffects() const {
|
||||
GrAssert(fInitialized);
|
||||
SkASSERT(fInitialized);
|
||||
return this->getHeader().fColorEffectCnt;
|
||||
}
|
||||
|
||||
int numCoverageEffects() const {
|
||||
GrAssert(fInitialized);
|
||||
SkASSERT(fInitialized);
|
||||
return this->getHeader().fCoverageEffectCnt;
|
||||
}
|
||||
|
||||
@ -85,7 +85,7 @@ public:
|
||||
GrGLProgramDesc& operator= (const GrGLProgramDesc& other);
|
||||
|
||||
bool operator== (const GrGLProgramDesc& other) const {
|
||||
GrAssert(fInitialized && other.fInitialized);
|
||||
SkASSERT(fInitialized && other.fInitialized);
|
||||
// The length is masked as a hint to the compiler that the address will be 4 byte aligned.
|
||||
return 0 == memcmp(this->asKey(), other.asKey(), this->keyLength() & ~0x3);
|
||||
}
|
||||
|
@ -52,15 +52,15 @@ GrGLRenderTarget::GrGLRenderTarget(GrGpuGL* gpu,
|
||||
viewport.fWidth, viewport.fHeight,
|
||||
desc.fConfig, desc.fSampleCnt,
|
||||
desc.fOrigin)) {
|
||||
GrAssert(NULL != texID);
|
||||
GrAssert(NULL != texture);
|
||||
SkASSERT(NULL != texID);
|
||||
SkASSERT(NULL != texture);
|
||||
// FBO 0 can't also be a texture, right?
|
||||
GrAssert(0 != desc.fRTFBOID);
|
||||
GrAssert(0 != desc.fTexFBOID);
|
||||
SkASSERT(0 != desc.fRTFBOID);
|
||||
SkASSERT(0 != desc.fTexFBOID);
|
||||
|
||||
// we assume this is true, TODO: get rid of viewport as a param.
|
||||
GrAssert(viewport.fWidth == texture->width());
|
||||
GrAssert(viewport.fHeight == texture->height());
|
||||
SkASSERT(viewport.fWidth == texture->width());
|
||||
SkASSERT(viewport.fHeight == texture->height());
|
||||
|
||||
this->init(desc, viewport, texID);
|
||||
}
|
||||
|
@ -13,7 +13,7 @@ GrGLSLGeneration GrGetGLSLGeneration(GrGLBinding binding, const GrGLInterface* g
|
||||
GrGLSLVersion ver = GrGLGetGLSLVersion(gl);
|
||||
switch (binding) {
|
||||
case kDesktop_GrGLBinding:
|
||||
GrAssert(ver >= GR_GLSL_VER(1,10));
|
||||
SkASSERT(ver >= GR_GLSL_VER(1,10));
|
||||
if (ver >= GR_GLSL_VER(1,50)) {
|
||||
return k150_GrGLSLGeneration;
|
||||
} else if (ver >= GR_GLSL_VER(1,40)) {
|
||||
@ -25,7 +25,7 @@ GrGLSLGeneration GrGetGLSLGeneration(GrGLBinding binding, const GrGLInterface* g
|
||||
}
|
||||
case kES_GrGLBinding:
|
||||
// version 1.00 of ES GLSL based on ver 1.20 of desktop GLSL
|
||||
GrAssert(ver >= GR_GL_VER(1,00));
|
||||
SkASSERT(ver >= GR_GL_VER(1,00));
|
||||
return k110_GrGLSLGeneration;
|
||||
default:
|
||||
GrCrash("Unknown GL Binding");
|
||||
@ -41,17 +41,17 @@ const char* GrGetGLSLVersionDecl(GrGLBinding binding, GrGLSLGeneration gen) {
|
||||
// 1.00 of the ES language.
|
||||
return "#version 100\n";
|
||||
} else {
|
||||
GrAssert(kDesktop_GrGLBinding == binding);
|
||||
SkASSERT(kDesktop_GrGLBinding == binding);
|
||||
return "#version 110\n";
|
||||
}
|
||||
case k130_GrGLSLGeneration:
|
||||
GrAssert(kDesktop_GrGLBinding == binding);
|
||||
SkASSERT(kDesktop_GrGLBinding == binding);
|
||||
return "#version 130\n";
|
||||
case k140_GrGLSLGeneration:
|
||||
GrAssert(kDesktop_GrGLBinding == binding);
|
||||
SkASSERT(kDesktop_GrGLBinding == binding);
|
||||
return "#version 140\n";
|
||||
case k150_GrGLSLGeneration:
|
||||
GrAssert(kDesktop_GrGLBinding == binding);
|
||||
SkASSERT(kDesktop_GrGLBinding == binding);
|
||||
return "#version 150\n";
|
||||
default:
|
||||
GrCrash("Unknown GL version.");
|
||||
@ -69,7 +69,7 @@ bool GrGLSLSetupFSColorOuput(GrGLSLGeneration gen, const char* nameIfDeclared, G
|
||||
|
||||
const char* GrGLSLVectorHomogCoord(int count) {
|
||||
static const char* HOMOGS[] = {"ERROR", "", ".y", ".z", ".w"};
|
||||
GrAssert(count >= 1 && count < (int)GR_ARRAY_COUNT(HOMOGS));
|
||||
SkASSERT(count >= 1 && count < (int)GR_ARRAY_COUNT(HOMOGS));
|
||||
return HOMOGS[count];
|
||||
}
|
||||
|
||||
@ -79,7 +79,7 @@ const char* GrGLSLVectorHomogCoord(GrSLType type) {
|
||||
|
||||
const char* GrGLSLVectorNonhomogCoords(int count) {
|
||||
static const char* NONHOMOGS[] = {"ERROR", "", ".x", ".xy", ".xyz"};
|
||||
GrAssert(count >= 1 && count < (int)GR_ARRAY_COUNT(NONHOMOGS));
|
||||
SkASSERT(count >= 1 && count < (int)GR_ARRAY_COUNT(NONHOMOGS));
|
||||
return NONHOMOGS[count];
|
||||
}
|
||||
|
||||
@ -105,15 +105,15 @@ GrSLConstantVec GrGLSLMulVarBy4f(SkString* outAppend,
|
||||
GrSLConstantVec mulFactorDefault) {
|
||||
bool haveFactor = NULL != mulFactor && '\0' != *mulFactor;
|
||||
|
||||
GrAssert(NULL != outAppend);
|
||||
GrAssert(NULL != vec4VarName);
|
||||
GrAssert(kNone_GrSLConstantVec != mulFactorDefault || haveFactor);
|
||||
SkASSERT(NULL != outAppend);
|
||||
SkASSERT(NULL != vec4VarName);
|
||||
SkASSERT(kNone_GrSLConstantVec != mulFactorDefault || haveFactor);
|
||||
|
||||
if (!haveFactor) {
|
||||
if (kOnes_GrSLConstantVec == mulFactorDefault) {
|
||||
return kNone_GrSLConstantVec;
|
||||
} else {
|
||||
GrAssert(kZeros_GrSLConstantVec == mulFactorDefault);
|
||||
SkASSERT(kZeros_GrSLConstantVec == mulFactorDefault);
|
||||
append_tabs(outAppend, tabCnt);
|
||||
outAppend->appendf("%s = vec4(0, 0, 0, 0);\n", vec4VarName);
|
||||
return kZeros_GrSLConstantVec;
|
||||
@ -130,12 +130,12 @@ GrSLConstantVec GrGLSLGetComponent4f(SkString* outAppend,
|
||||
GrSLConstantVec defaultExpr,
|
||||
bool omitIfConst) {
|
||||
if (NULL == expr || '\0' == *expr) {
|
||||
GrAssert(defaultExpr != kNone_GrSLConstantVec);
|
||||
SkASSERT(defaultExpr != kNone_GrSLConstantVec);
|
||||
if (!omitIfConst) {
|
||||
if (kOnes_GrSLConstantVec == defaultExpr) {
|
||||
outAppend->append("1.0");
|
||||
} else {
|
||||
GrAssert(kZeros_GrSLConstantVec == defaultExpr);
|
||||
SkASSERT(kZeros_GrSLConstantVec == defaultExpr);
|
||||
outAppend->append("0.0");
|
||||
}
|
||||
}
|
||||
|
@ -61,14 +61,14 @@ static inline int GrSLTypeToVecLength(GrSLType type) {
|
||||
static inline const char* GrGLSLOnesVecf(int count) {
|
||||
static const char* kONESVEC[] = {"ERROR", "1.0", "vec2(1,1)",
|
||||
"vec3(1,1,1)", "vec4(1,1,1,1)"};
|
||||
GrAssert(count >= 1 && count < (int)GR_ARRAY_COUNT(kONESVEC));
|
||||
SkASSERT(count >= 1 && count < (int)GR_ARRAY_COUNT(kONESVEC));
|
||||
return kONESVEC[count];
|
||||
}
|
||||
|
||||
static inline const char* GrGLSLZerosVecf(int count) {
|
||||
static const char* kZEROSVEC[] = {"ERROR", "0.0", "vec2(0,0)",
|
||||
"vec3(0,0,0)", "vec4(0,0,0,0)"};
|
||||
GrAssert(count >= 1 && count < (int)GR_ARRAY_COUNT(kZEROSVEC));
|
||||
SkASSERT(count >= 1 && count < (int)GR_ARRAY_COUNT(kZEROSVEC));
|
||||
return kZEROSVEC[count];
|
||||
}
|
||||
}
|
||||
|
@ -13,7 +13,7 @@
|
||||
namespace {
|
||||
template<int N>
|
||||
GrSLConstantVec return_const_vecf(GrSLConstantVec constVec, SkString* outAppend, bool omitAppend) {
|
||||
GrAssert(kNone_GrSLConstantVec != constVec);
|
||||
SkASSERT(kNone_GrSLConstantVec != constVec);
|
||||
if (!omitAppend) {
|
||||
if (kZeros_GrSLConstantVec == constVec) {
|
||||
outAppend->append(GrGLSLZerosVecf(N));
|
||||
@ -32,18 +32,18 @@ GrSLConstantVec GrGLSLModulatef(SkString* outAppend,
|
||||
GrSLConstantVec default0,
|
||||
GrSLConstantVec default1,
|
||||
bool omitIfConstVec) {
|
||||
GrAssert(N > 0 && N <= 4);
|
||||
GrAssert(NULL != outAppend);
|
||||
SkASSERT(N > 0 && N <= 4);
|
||||
SkASSERT(NULL != outAppend);
|
||||
|
||||
bool has0 = NULL != in0 && '\0' != *in0;
|
||||
bool has1 = NULL != in1 && '\0' != *in1;
|
||||
|
||||
GrAssert(has0 || kNone_GrSLConstantVec != default0);
|
||||
GrAssert(has1 || kNone_GrSLConstantVec != default1);
|
||||
SkASSERT(has0 || kNone_GrSLConstantVec != default0);
|
||||
SkASSERT(has1 || kNone_GrSLConstantVec != default1);
|
||||
|
||||
if (!has0 && !has1) {
|
||||
GrAssert(kZeros_GrSLConstantVec == default0 || kOnes_GrSLConstantVec == default0);
|
||||
GrAssert(kZeros_GrSLConstantVec == default1 || kOnes_GrSLConstantVec == default1);
|
||||
SkASSERT(kZeros_GrSLConstantVec == default0 || kOnes_GrSLConstantVec == default0);
|
||||
SkASSERT(kZeros_GrSLConstantVec == default1 || kOnes_GrSLConstantVec == default1);
|
||||
if (kZeros_GrSLConstantVec == default0 || kZeros_GrSLConstantVec == default1) {
|
||||
return return_const_vecf<N>(kZeros_GrSLConstantVec, outAppend, omitIfConstVec);
|
||||
} else {
|
||||
@ -51,7 +51,7 @@ GrSLConstantVec GrGLSLModulatef(SkString* outAppend,
|
||||
return return_const_vecf<N>(kOnes_GrSLConstantVec, outAppend, omitIfConstVec);
|
||||
}
|
||||
} else if (!has0) {
|
||||
GrAssert(kZeros_GrSLConstantVec == default0 || kOnes_GrSLConstantVec == default0);
|
||||
SkASSERT(kZeros_GrSLConstantVec == default0 || kOnes_GrSLConstantVec == default0);
|
||||
if (kZeros_GrSLConstantVec == default0) {
|
||||
return return_const_vecf<N>(kZeros_GrSLConstantVec, outAppend, omitIfConstVec);
|
||||
} else {
|
||||
@ -59,7 +59,7 @@ GrSLConstantVec GrGLSLModulatef(SkString* outAppend,
|
||||
return kNone_GrSLConstantVec;
|
||||
}
|
||||
} else if (!has1) {
|
||||
GrAssert(kZeros_GrSLConstantVec == default1 || kOnes_GrSLConstantVec == default1);
|
||||
SkASSERT(kZeros_GrSLConstantVec == default1 || kOnes_GrSLConstantVec == default1);
|
||||
if (kZeros_GrSLConstantVec == default1) {
|
||||
return return_const_vecf<N>(kZeros_GrSLConstantVec, outAppend, omitIfConstVec);
|
||||
} else {
|
||||
@ -79,15 +79,15 @@ GrSLConstantVec GrGLSLAddf(SkString* outAppend,
|
||||
GrSLConstantVec default0,
|
||||
GrSLConstantVec default1,
|
||||
bool omitIfConstVec) {
|
||||
GrAssert(N > 0 && N <= 4);
|
||||
GrAssert(NULL != outAppend);
|
||||
SkASSERT(N > 0 && N <= 4);
|
||||
SkASSERT(NULL != outAppend);
|
||||
|
||||
bool has0 = NULL != in0 && '\0' != *in0;
|
||||
bool has1 = NULL != in1 && '\0' != *in1;
|
||||
|
||||
if (!has0 && !has1) {
|
||||
GrAssert(kNone_GrSLConstantVec != default0);
|
||||
GrAssert(kNone_GrSLConstantVec != default1);
|
||||
SkASSERT(kNone_GrSLConstantVec != default0);
|
||||
SkASSERT(kNone_GrSLConstantVec != default1);
|
||||
int sum = (kOnes_GrSLConstantVec == default0) + (kOnes_GrSLConstantVec == default1);
|
||||
if (0 == sum) {
|
||||
return return_const_vecf<N>(kZeros_GrSLConstantVec, outAppend, omitIfConstVec);
|
||||
@ -95,12 +95,12 @@ GrSLConstantVec GrGLSLAddf(SkString* outAppend,
|
||||
outAppend->append(GrGLSLOnesVecf(N));
|
||||
return return_const_vecf<N>(kOnes_GrSLConstantVec, outAppend, omitIfConstVec);
|
||||
} else {
|
||||
GrAssert(2 == sum);
|
||||
SkASSERT(2 == sum);
|
||||
outAppend->appendf("%s(2)", GrGLSLFloatVectorTypeString(N));
|
||||
return kNone_GrSLConstantVec;
|
||||
}
|
||||
} else if (!has0) {
|
||||
GrAssert(kNone_GrSLConstantVec != default0);
|
||||
SkASSERT(kNone_GrSLConstantVec != default0);
|
||||
if (kZeros_GrSLConstantVec == default0) {
|
||||
outAppend->appendf("%s(%s)", GrGLSLFloatVectorTypeString(N), in1);
|
||||
} else {
|
||||
@ -111,7 +111,7 @@ GrSLConstantVec GrGLSLAddf(SkString* outAppend,
|
||||
}
|
||||
return kNone_GrSLConstantVec;
|
||||
} else if (!has1) {
|
||||
GrAssert(kNone_GrSLConstantVec != default1);
|
||||
SkASSERT(kNone_GrSLConstantVec != default1);
|
||||
if (kZeros_GrSLConstantVec == default1) {
|
||||
outAppend->appendf("%s(%s)", GrGLSLFloatVectorTypeString(N), in0);
|
||||
} else {
|
||||
@ -138,15 +138,15 @@ GrSLConstantVec GrGLSLSubtractf(SkString* outAppend,
|
||||
GrSLConstantVec default0,
|
||||
GrSLConstantVec default1,
|
||||
bool omitIfConstVec) {
|
||||
GrAssert(N > 0 && N <= 4);
|
||||
GrAssert(NULL != outAppend);
|
||||
SkASSERT(N > 0 && N <= 4);
|
||||
SkASSERT(NULL != outAppend);
|
||||
|
||||
bool has0 = NULL != in0 && '\0' != *in0;
|
||||
bool has1 = NULL != in1 && '\0' != *in1;
|
||||
|
||||
if (!has0 && !has1) {
|
||||
GrAssert(kNone_GrSLConstantVec != default0);
|
||||
GrAssert(kNone_GrSLConstantVec != default1);
|
||||
SkASSERT(kNone_GrSLConstantVec != default0);
|
||||
SkASSERT(kNone_GrSLConstantVec != default1);
|
||||
int diff = (kOnes_GrSLConstantVec == default0) - (kOnes_GrSLConstantVec == default1);
|
||||
if (-1 == diff) {
|
||||
outAppend->appendf("%s(-1)", GrGLSLFloatVectorTypeString(N));
|
||||
@ -154,11 +154,11 @@ GrSLConstantVec GrGLSLSubtractf(SkString* outAppend,
|
||||
} else if (0 == diff) {
|
||||
return return_const_vecf<N>(kZeros_GrSLConstantVec, outAppend, omitIfConstVec);
|
||||
} else {
|
||||
GrAssert(1 == diff);
|
||||
SkASSERT(1 == diff);
|
||||
return return_const_vecf<N>(kOnes_GrSLConstantVec, outAppend, omitIfConstVec);
|
||||
}
|
||||
} else if (!has0) {
|
||||
GrAssert(kNone_GrSLConstantVec != default0);
|
||||
SkASSERT(kNone_GrSLConstantVec != default0);
|
||||
if (kZeros_GrSLConstantVec == default0) {
|
||||
outAppend->appendf("-%s(%s)", GrGLSLFloatVectorTypeString(N), in1);
|
||||
} else {
|
||||
@ -169,7 +169,7 @@ GrSLConstantVec GrGLSLSubtractf(SkString* outAppend,
|
||||
}
|
||||
return kNone_GrSLConstantVec;
|
||||
} else if (!has1) {
|
||||
GrAssert(kNone_GrSLConstantVec != default1);
|
||||
SkASSERT(kNone_GrSLConstantVec != default1);
|
||||
if (kZeros_GrSLConstantVec == default1) {
|
||||
outAppend->appendf("%s(%s)", GrGLSLFloatVectorTypeString(N), in0);
|
||||
} else {
|
||||
|
@ -29,7 +29,7 @@ inline const char* sample_function_name(GrSLType type, GrGLSLGeneration glslGen)
|
||||
if (kVec2f_GrSLType == type) {
|
||||
return glslGen >= k130_GrGLSLGeneration ? "texture" : "texture2D";
|
||||
} else {
|
||||
GrAssert(kVec3f_GrSLType == type);
|
||||
SkASSERT(kVec3f_GrSLType == type);
|
||||
return glslGen >= k130_GrGLSLGeneration ? "textureProj" : "texture2DProj";
|
||||
}
|
||||
}
|
||||
@ -293,7 +293,7 @@ void GrGLShaderBuilder::appendTextureLookup(SkString* out,
|
||||
const GrGLShaderBuilder::TextureSampler& sampler,
|
||||
const char* coordName,
|
||||
GrSLType varyingType) const {
|
||||
GrAssert(NULL != coordName);
|
||||
SkASSERT(NULL != coordName);
|
||||
|
||||
out->appendf("%s(%s, %s)",
|
||||
sample_function_name(varyingType, fCtxInfo.glslGeneration()),
|
||||
@ -306,7 +306,7 @@ void GrGLShaderBuilder::appendTextureLookup(ShaderType type,
|
||||
const GrGLShaderBuilder::TextureSampler& sampler,
|
||||
const char* coordName,
|
||||
GrSLType varyingType) {
|
||||
GrAssert(kFragment_ShaderType == type);
|
||||
SkASSERT(kFragment_ShaderType == type);
|
||||
this->appendTextureLookup(&fFSCode, sampler, coordName, varyingType);
|
||||
}
|
||||
|
||||
@ -316,7 +316,7 @@ void GrGLShaderBuilder::appendTextureLookupAndModulate(
|
||||
const GrGLShaderBuilder::TextureSampler& sampler,
|
||||
const char* coordName,
|
||||
GrSLType varyingType) {
|
||||
GrAssert(kFragment_ShaderType == type);
|
||||
SkASSERT(kFragment_ShaderType == type);
|
||||
SkString lookup;
|
||||
this->appendTextureLookup(&lookup, sampler, coordName, varyingType);
|
||||
GrGLSLModulatef<4>(&fFSCode, modulation, lookup.c_str());
|
||||
@ -339,7 +339,7 @@ GrGLShaderBuilder::DstReadKey GrGLShaderBuilder::KeyForDstRead(const GrTexture*
|
||||
if (GrGLCaps::kNone_FBFetchType != caps.fbFetchType()) {
|
||||
return key;
|
||||
}
|
||||
GrAssert(NULL != dstCopy);
|
||||
SkASSERT(NULL != dstCopy);
|
||||
if (!caps.textureSwizzleSupport() && GrPixelConfigIsAlphaOnly(dstCopy->config())) {
|
||||
// The fact that the config is alpha-only must be considered when generating code.
|
||||
key |= kUseAlphaConfig_DstReadKeyBit;
|
||||
@ -347,7 +347,7 @@ GrGLShaderBuilder::DstReadKey GrGLShaderBuilder::KeyForDstRead(const GrTexture*
|
||||
if (kTopLeft_GrSurfaceOrigin == dstCopy->origin()) {
|
||||
key |= kTopLeftOrigin_DstReadKeyBit;
|
||||
}
|
||||
GrAssert(static_cast<DstReadKey>(key) == key);
|
||||
SkASSERT(static_cast<DstReadKey>(key) == key);
|
||||
return static_cast<DstReadKey>(key);
|
||||
}
|
||||
|
||||
@ -382,10 +382,10 @@ GrGLUniformManager::UniformHandle GrGLShaderBuilder::addUniformArray(uint32_t vi
|
||||
const char* name,
|
||||
int count,
|
||||
const char** outName) {
|
||||
GrAssert(name && strlen(name));
|
||||
SkASSERT(name && strlen(name));
|
||||
SkDEBUGCODE(static const uint32_t kVisibilityMask = kVertex_ShaderType | kFragment_ShaderType);
|
||||
GrAssert(0 == (~kVisibilityMask & visibility));
|
||||
GrAssert(0 != visibility);
|
||||
SkASSERT(0 == (~kVisibilityMask & visibility));
|
||||
SkASSERT(0 != visibility);
|
||||
|
||||
BuilderUniform& uni = fUniforms.push_back();
|
||||
UniformHandle h = GrGLUniformManager::UniformHandle::CreateFromUniformIndex(fUniforms.count() - 1);
|
||||
@ -393,7 +393,7 @@ GrGLUniformManager::UniformHandle GrGLShaderBuilder::addUniformArray(uint32_t vi
|
||||
fUniformManager.appendUniform(type, count);
|
||||
// We expect the uniform manager to initially have no uniforms and that all uniforms are added
|
||||
// by this function. Therefore, the handles should match.
|
||||
GrAssert(h2 == h);
|
||||
SkASSERT(h2 == h);
|
||||
uni.fVariable.setType(type);
|
||||
uni.fVariable.setTypeModifier(GrGLShaderVar::kUniform_TypeModifier);
|
||||
this->nameVariable(uni.fVariable.accessName(), 'u', name);
|
||||
@ -421,7 +421,7 @@ bool GrGLShaderBuilder::addAttribute(GrSLType type,
|
||||
const GrGLShaderVar& attr = fVSAttrs[i];
|
||||
// if attribute already added, don't add it again
|
||||
if (attr.getName().equals(name)) {
|
||||
GrAssert(attr.getType() == type);
|
||||
SkASSERT(attr.getType() == type);
|
||||
return false;
|
||||
}
|
||||
}
|
||||
@ -505,7 +505,7 @@ const char* GrGLShaderBuilder::fragmentPosition() {
|
||||
// temporarily change the stage index because we're inserting non-stage code.
|
||||
CodeStage::AutoStageRestore csar(&fCodeStage, NULL);
|
||||
|
||||
GrAssert(!fRTHeightUniform.isValid());
|
||||
SkASSERT(!fRTHeightUniform.isValid());
|
||||
const char* rtHeightName;
|
||||
|
||||
fRTHeightUniform = this->addUniform(kFragment_ShaderType,
|
||||
@ -517,7 +517,7 @@ const char* GrGLShaderBuilder::fragmentPosition() {
|
||||
kCoordName, rtHeightName);
|
||||
fSetupFragPosition = true;
|
||||
}
|
||||
GrAssert(fRTHeightUniform.isValid());
|
||||
SkASSERT(fRTHeightUniform.isValid());
|
||||
return kCoordName;
|
||||
}
|
||||
}
|
||||
@ -530,7 +530,7 @@ void GrGLShaderBuilder::emitFunction(ShaderType shader,
|
||||
const GrGLShaderVar* args,
|
||||
const char* body,
|
||||
SkString* outName) {
|
||||
GrAssert(kFragment_ShaderType == shader);
|
||||
SkASSERT(kFragment_ShaderType == shader);
|
||||
fFSFunctions.append(GrGLSLTypeString(returnType));
|
||||
this->nameVariable(outName, '\0', name);
|
||||
fFSFunctions.appendf(" %s", outName->c_str());
|
||||
@ -623,7 +623,7 @@ void GrGLShaderBuilder::getShader(ShaderType type, SkString* shaderStr) const {
|
||||
this->appendUniformDecls(kFragment_ShaderType, shaderStr);
|
||||
this->appendDecls(fFSInputs, shaderStr);
|
||||
// We shouldn't have declared outputs on 1.10
|
||||
GrAssert(k110_GrGLSLGeneration != fCtxInfo.glslGeneration() || fFSOutputs.empty());
|
||||
SkASSERT(k110_GrGLSLGeneration != fCtxInfo.glslGeneration() || fFSOutputs.empty());
|
||||
this->appendDecls(fFSOutputs, shaderStr);
|
||||
shaderStr->append(fFSFunctions);
|
||||
shaderStr->append("void main() {\n");
|
||||
@ -651,7 +651,7 @@ void GrGLShaderBuilder::emitEffects(
|
||||
SkString outColor;
|
||||
|
||||
for (int e = 0; e < effectCnt; ++e) {
|
||||
GrAssert(NULL != effectStages[e] && NULL != effectStages[e]->getEffect());
|
||||
SkASSERT(NULL != effectStages[e] && NULL != effectStages[e]->getEffect());
|
||||
const GrEffectStage& stage = *effectStages[e];
|
||||
const GrEffectRef& effect = *stage.getEffect();
|
||||
|
||||
|
@ -42,8 +42,8 @@ public:
|
||||
TextureSampler(const TextureSampler& other) { *this = other; }
|
||||
|
||||
TextureSampler& operator= (const TextureSampler& other) {
|
||||
GrAssert(0 == fConfigComponentMask);
|
||||
GrAssert(!fSamplerUniform.isValid());
|
||||
SkASSERT(0 == fConfigComponentMask);
|
||||
SkASSERT(!fSamplerUniform.isValid());
|
||||
|
||||
fConfigComponentMask = other.fConfigComponentMask;
|
||||
fSamplerUniform = other.fSamplerUniform;
|
||||
@ -64,24 +64,24 @@ public:
|
||||
uint32_t configComponentMask,
|
||||
const char* swizzle,
|
||||
int idx) {
|
||||
GrAssert(!this->isInitialized());
|
||||
GrAssert(0 != configComponentMask);
|
||||
GrAssert(!fSamplerUniform.isValid());
|
||||
SkASSERT(!this->isInitialized());
|
||||
SkASSERT(0 != configComponentMask);
|
||||
SkASSERT(!fSamplerUniform.isValid());
|
||||
|
||||
GrAssert(NULL != builder);
|
||||
SkASSERT(NULL != builder);
|
||||
SkString name;
|
||||
name.printf("Sampler%d", idx);
|
||||
fSamplerUniform = builder->addUniform(GrGLShaderBuilder::kFragment_ShaderType,
|
||||
kSampler2D_GrSLType,
|
||||
name.c_str());
|
||||
GrAssert(fSamplerUniform.isValid());
|
||||
SkASSERT(fSamplerUniform.isValid());
|
||||
|
||||
fConfigComponentMask = configComponentMask;
|
||||
memcpy(fSwizzle, swizzle, 4);
|
||||
}
|
||||
|
||||
void init(GrGLShaderBuilder* builder, const GrTextureAccess* access, int idx) {
|
||||
GrAssert(NULL != access);
|
||||
SkASSERT(NULL != access);
|
||||
this->init(builder,
|
||||
GrPixelConfigComponentMask(access->getTexture()->config()),
|
||||
access->getSwizzle(),
|
||||
@ -375,7 +375,7 @@ private:
|
||||
class AutoStageRestore : GrNoncopyable {
|
||||
public:
|
||||
AutoStageRestore(CodeStage* codeStage, const GrEffectStage* newStage) {
|
||||
GrAssert(NULL != codeStage);
|
||||
SkASSERT(NULL != codeStage);
|
||||
fSavedIndex = codeStage->fCurrentIndex;
|
||||
fSavedEffectStage = codeStage->fEffectStage;
|
||||
|
||||
@ -398,7 +398,7 @@ private:
|
||||
const GrEffectStage* fSavedEffectStage;
|
||||
};
|
||||
private:
|
||||
void validate() const { GrAssert((NULL == fEffectStage) == (-1 == fCurrentIndex)); }
|
||||
void validate() const { SkASSERT((NULL == fEffectStage) == (-1 == fCurrentIndex)); }
|
||||
int fNextIndex;
|
||||
int fCurrentIndex;
|
||||
const GrEffectStage* fEffectStage;
|
||||
|
@ -69,7 +69,7 @@ public:
|
||||
|
||||
GrGLShaderVar(const char* name, GrSLType type, int arrayCount = kNonArray,
|
||||
Precision precision = kDefault_Precision) {
|
||||
GrAssert(kVoid_GrSLType != type);
|
||||
SkASSERT(kVoid_GrSLType != type);
|
||||
fType = type;
|
||||
fTypeModifier = kNone_TypeModifier;
|
||||
fCount = arrayCount;
|
||||
@ -87,7 +87,7 @@ public:
|
||||
, fPrecision(var.fPrecision)
|
||||
, fOrigin(var.fOrigin)
|
||||
, fUseUniformFloatArrays(var.fUseUniformFloatArrays) {
|
||||
GrAssert(kVoid_GrSLType != var.fType);
|
||||
SkASSERT(kVoid_GrSLType != var.fType);
|
||||
}
|
||||
|
||||
/**
|
||||
@ -107,7 +107,7 @@ public:
|
||||
Precision precision = kDefault_Precision,
|
||||
Origin origin = kDefault_Origin,
|
||||
bool useUniformFloatArrays = USE_UNIFORM_FLOAT_ARRAYS) {
|
||||
GrAssert(kVoid_GrSLType != type);
|
||||
SkASSERT(kVoid_GrSLType != type);
|
||||
fType = type;
|
||||
fTypeModifier = typeModifier;
|
||||
fName = name;
|
||||
@ -126,7 +126,7 @@ public:
|
||||
Precision precision = kDefault_Precision,
|
||||
Origin origin = kDefault_Origin,
|
||||
bool useUniformFloatArrays = USE_UNIFORM_FLOAT_ARRAYS) {
|
||||
GrAssert(kVoid_GrSLType != type);
|
||||
SkASSERT(kVoid_GrSLType != type);
|
||||
fType = type;
|
||||
fTypeModifier = typeModifier;
|
||||
fName = name;
|
||||
@ -146,7 +146,7 @@ public:
|
||||
Precision precision = kDefault_Precision,
|
||||
Origin origin = kDefault_Origin,
|
||||
bool useUniformFloatArrays = USE_UNIFORM_FLOAT_ARRAYS) {
|
||||
GrAssert(kVoid_GrSLType != type);
|
||||
SkASSERT(kVoid_GrSLType != type);
|
||||
fType = type;
|
||||
fTypeModifier = typeModifier;
|
||||
fName = name;
|
||||
@ -166,7 +166,7 @@ public:
|
||||
Precision precision = kDefault_Precision,
|
||||
Origin origin = kDefault_Origin,
|
||||
bool useUniformFloatArrays = USE_UNIFORM_FLOAT_ARRAYS) {
|
||||
GrAssert(kVoid_GrSLType != type);
|
||||
SkASSERT(kVoid_GrSLType != type);
|
||||
fType = type;
|
||||
fTypeModifier = typeModifier;
|
||||
fName = name;
|
||||
@ -275,7 +275,7 @@ public:
|
||||
GrGLSLTypeString(effectiveType),
|
||||
this->getName().c_str());
|
||||
} else {
|
||||
GrAssert(this->getArrayCount() > 0);
|
||||
SkASSERT(this->getArrayCount() > 0);
|
||||
out->appendf("%s %s[%d]",
|
||||
GrGLSLTypeString(effectiveType),
|
||||
this->getName().c_str(),
|
||||
|
@ -18,7 +18,7 @@ void GrGLTexture::init(GrGpuGL* gpu,
|
||||
const Desc& textureDesc,
|
||||
const GrGLRenderTarget::Desc* rtDesc) {
|
||||
|
||||
GrAssert(0 != textureDesc.fTextureID);
|
||||
SkASSERT(0 != textureDesc.fTextureID);
|
||||
|
||||
fTexParams.invalidate();
|
||||
fTexParamsTimestamp = GrGpu::kExpiredTimestamp;
|
||||
|
@ -11,13 +11,13 @@
|
||||
#include "SkMatrix.h"
|
||||
|
||||
#define ASSERT_ARRAY_UPLOAD_IN_BOUNDS(UNI, OFFSET, COUNT) \
|
||||
GrAssert(offset + arrayCount <= uni.fArrayCount || \
|
||||
SkASSERT(offset + arrayCount <= uni.fArrayCount || \
|
||||
(0 == offset && 1 == arrayCount && GrGLShaderVar::kNonArray == uni.fArrayCount))
|
||||
|
||||
GrGLUniformManager::UniformHandle GrGLUniformManager::appendUniform(GrSLType type, int arrayCount) {
|
||||
int idx = fUniforms.count();
|
||||
Uniform& uni = fUniforms.push_back();
|
||||
GrAssert(GrGLShaderVar::kNonArray == arrayCount || arrayCount > 0);
|
||||
SkASSERT(GrGLShaderVar::kNonArray == arrayCount || arrayCount > 0);
|
||||
uni.fArrayCount = arrayCount;
|
||||
uni.fType = type;
|
||||
uni.fVSLocation = kUnusedUniform;
|
||||
@ -27,12 +27,12 @@ GrGLUniformManager::UniformHandle GrGLUniformManager::appendUniform(GrSLType typ
|
||||
|
||||
void GrGLUniformManager::setSampler(UniformHandle u, GrGLint texUnit) const {
|
||||
const Uniform& uni = fUniforms[u.toUniformIndex()];
|
||||
GrAssert(uni.fType == kSampler2D_GrSLType);
|
||||
GrAssert(GrGLShaderVar::kNonArray == uni.fArrayCount);
|
||||
SkASSERT(uni.fType == kSampler2D_GrSLType);
|
||||
SkASSERT(GrGLShaderVar::kNonArray == uni.fArrayCount);
|
||||
// FIXME: We still insert a single sampler uniform for every stage. If the shader does not
|
||||
// reference the sampler then the compiler may have optimized it out. Uncomment this assert
|
||||
// once stages insert their own samplers.
|
||||
// GrAssert(kUnusedUniform != uni.fFSLocation || kUnusedUniform != uni.fVSLocation);
|
||||
// SkASSERT(kUnusedUniform != uni.fFSLocation || kUnusedUniform != uni.fVSLocation);
|
||||
if (kUnusedUniform != uni.fFSLocation) {
|
||||
GR_GL_CALL(fContext.interface(), Uniform1i(uni.fFSLocation, texUnit));
|
||||
}
|
||||
@ -43,9 +43,9 @@ void GrGLUniformManager::setSampler(UniformHandle u, GrGLint texUnit) const {
|
||||
|
||||
void GrGLUniformManager::set1f(UniformHandle u, GrGLfloat v0) const {
|
||||
const Uniform& uni = fUniforms[u.toUniformIndex()];
|
||||
GrAssert(uni.fType == kFloat_GrSLType);
|
||||
GrAssert(GrGLShaderVar::kNonArray == uni.fArrayCount);
|
||||
GrAssert(kUnusedUniform != uni.fFSLocation || kUnusedUniform != uni.fVSLocation);
|
||||
SkASSERT(uni.fType == kFloat_GrSLType);
|
||||
SkASSERT(GrGLShaderVar::kNonArray == uni.fArrayCount);
|
||||
SkASSERT(kUnusedUniform != uni.fFSLocation || kUnusedUniform != uni.fVSLocation);
|
||||
if (kUnusedUniform != uni.fFSLocation) {
|
||||
GR_GL_CALL(fContext.interface(), Uniform1f(uni.fFSLocation, v0));
|
||||
}
|
||||
@ -59,13 +59,13 @@ void GrGLUniformManager::set1fv(UniformHandle u,
|
||||
int arrayCount,
|
||||
const GrGLfloat v[]) const {
|
||||
const Uniform& uni = fUniforms[u.toUniformIndex()];
|
||||
GrAssert(uni.fType == kFloat_GrSLType);
|
||||
GrAssert(arrayCount > 0);
|
||||
SkASSERT(uni.fType == kFloat_GrSLType);
|
||||
SkASSERT(arrayCount > 0);
|
||||
ASSERT_ARRAY_UPLOAD_IN_BOUNDS(uni, offset, arrayCount);
|
||||
// This assert fires in some instances of the two-pt gradient for its VSParams.
|
||||
// Once the uniform manager is responsible for inserting the duplicate uniform
|
||||
// arrays in VS and FS driver bug workaround, this can be enabled.
|
||||
//GrAssert(kUnusedUniform != uni.fFSLocation || kUnusedUniform != uni.fVSLocation);
|
||||
//SkASSERT(kUnusedUniform != uni.fFSLocation || kUnusedUniform != uni.fVSLocation);
|
||||
if (kUnusedUniform != uni.fFSLocation) {
|
||||
GR_GL_CALL(fContext.interface(), Uniform1fv(uni.fFSLocation + offset, arrayCount, v));
|
||||
}
|
||||
@ -76,9 +76,9 @@ void GrGLUniformManager::set1fv(UniformHandle u,
|
||||
|
||||
void GrGLUniformManager::set2f(UniformHandle u, GrGLfloat v0, GrGLfloat v1) const {
|
||||
const Uniform& uni = fUniforms[u.toUniformIndex()];
|
||||
GrAssert(uni.fType == kVec2f_GrSLType);
|
||||
GrAssert(GrGLShaderVar::kNonArray == uni.fArrayCount);
|
||||
GrAssert(kUnusedUniform != uni.fFSLocation || kUnusedUniform != uni.fVSLocation);
|
||||
SkASSERT(uni.fType == kVec2f_GrSLType);
|
||||
SkASSERT(GrGLShaderVar::kNonArray == uni.fArrayCount);
|
||||
SkASSERT(kUnusedUniform != uni.fFSLocation || kUnusedUniform != uni.fVSLocation);
|
||||
if (kUnusedUniform != uni.fFSLocation) {
|
||||
GR_GL_CALL(fContext.interface(), Uniform2f(uni.fFSLocation, v0, v1));
|
||||
}
|
||||
@ -92,10 +92,10 @@ void GrGLUniformManager::set2fv(UniformHandle u,
|
||||
int arrayCount,
|
||||
const GrGLfloat v[]) const {
|
||||
const Uniform& uni = fUniforms[u.toUniformIndex()];
|
||||
GrAssert(uni.fType == kVec2f_GrSLType);
|
||||
GrAssert(arrayCount > 0);
|
||||
SkASSERT(uni.fType == kVec2f_GrSLType);
|
||||
SkASSERT(arrayCount > 0);
|
||||
ASSERT_ARRAY_UPLOAD_IN_BOUNDS(uni, offset, arrayCount);
|
||||
GrAssert(kUnusedUniform != uni.fFSLocation || kUnusedUniform != uni.fVSLocation);
SkASSERT(kUnusedUniform != uni.fFSLocation || kUnusedUniform != uni.fVSLocation);
if (kUnusedUniform != uni.fFSLocation) {
GR_GL_CALL(fContext.interface(), Uniform2fv(uni.fFSLocation + offset, arrayCount, v));
}
@ -106,9 +106,9 @@ void GrGLUniformManager::set2fv(UniformHandle u,

void GrGLUniformManager::set3f(UniformHandle u, GrGLfloat v0, GrGLfloat v1, GrGLfloat v2) const {
const Uniform& uni = fUniforms[u.toUniformIndex()];
GrAssert(uni.fType == kVec3f_GrSLType);
GrAssert(GrGLShaderVar::kNonArray == uni.fArrayCount);
GrAssert(kUnusedUniform != uni.fFSLocation || kUnusedUniform != uni.fVSLocation);
SkASSERT(uni.fType == kVec3f_GrSLType);
SkASSERT(GrGLShaderVar::kNonArray == uni.fArrayCount);
SkASSERT(kUnusedUniform != uni.fFSLocation || kUnusedUniform != uni.fVSLocation);
if (kUnusedUniform != uni.fFSLocation) {
GR_GL_CALL(fContext.interface(), Uniform3f(uni.fFSLocation, v0, v1, v2));
}
@ -122,10 +122,10 @@ void GrGLUniformManager::set3fv(UniformHandle u,
int arrayCount,
const GrGLfloat v[]) const {
const Uniform& uni = fUniforms[u.toUniformIndex()];
GrAssert(uni.fType == kVec3f_GrSLType);
GrAssert(arrayCount > 0);
SkASSERT(uni.fType == kVec3f_GrSLType);
SkASSERT(arrayCount > 0);
ASSERT_ARRAY_UPLOAD_IN_BOUNDS(uni, offset, arrayCount);
GrAssert(kUnusedUniform != uni.fFSLocation || kUnusedUniform != uni.fVSLocation);
SkASSERT(kUnusedUniform != uni.fFSLocation || kUnusedUniform != uni.fVSLocation);
if (kUnusedUniform != uni.fFSLocation) {
GR_GL_CALL(fContext.interface(), Uniform3fv(uni.fFSLocation + offset, arrayCount, v));
}
@ -140,9 +140,9 @@ void GrGLUniformManager::set4f(UniformHandle u,
GrGLfloat v2,
GrGLfloat v3) const {
const Uniform& uni = fUniforms[u.toUniformIndex()];
GrAssert(uni.fType == kVec4f_GrSLType);
GrAssert(GrGLShaderVar::kNonArray == uni.fArrayCount);
GrAssert(kUnusedUniform != uni.fFSLocation || kUnusedUniform != uni.fVSLocation);
SkASSERT(uni.fType == kVec4f_GrSLType);
SkASSERT(GrGLShaderVar::kNonArray == uni.fArrayCount);
SkASSERT(kUnusedUniform != uni.fFSLocation || kUnusedUniform != uni.fVSLocation);
if (kUnusedUniform != uni.fFSLocation) {
GR_GL_CALL(fContext.interface(), Uniform4f(uni.fFSLocation, v0, v1, v2, v3));
}
@ -156,9 +156,9 @@ void GrGLUniformManager::set4fv(UniformHandle u,
int arrayCount,
const GrGLfloat v[]) const {
const Uniform& uni = fUniforms[u.toUniformIndex()];
GrAssert(uni.fType == kVec4f_GrSLType);
GrAssert(arrayCount > 0);
GrAssert(kUnusedUniform != uni.fFSLocation || kUnusedUniform != uni.fVSLocation);
SkASSERT(uni.fType == kVec4f_GrSLType);
SkASSERT(arrayCount > 0);
SkASSERT(kUnusedUniform != uni.fFSLocation || kUnusedUniform != uni.fVSLocation);
if (kUnusedUniform != uni.fFSLocation) {
GR_GL_CALL(fContext.interface(), Uniform4fv(uni.fFSLocation + offset, arrayCount, v));
}
@ -169,10 +169,10 @@ void GrGLUniformManager::set4fv(UniformHandle u,

void GrGLUniformManager::setMatrix3f(UniformHandle u, const GrGLfloat matrix[]) const {
const Uniform& uni = fUniforms[u.toUniformIndex()];
GrAssert(uni.fType == kMat33f_GrSLType);
GrAssert(GrGLShaderVar::kNonArray == uni.fArrayCount);
SkASSERT(uni.fType == kMat33f_GrSLType);
SkASSERT(GrGLShaderVar::kNonArray == uni.fArrayCount);
// TODO: Re-enable this assert once texture matrices aren't forced on all effects
// GrAssert(kUnusedUniform != uni.fFSLocation || kUnusedUniform != uni.fVSLocation);
// SkASSERT(kUnusedUniform != uni.fFSLocation || kUnusedUniform != uni.fVSLocation);
if (kUnusedUniform != uni.fFSLocation) {
GR_GL_CALL(fContext.interface(), UniformMatrix3fv(uni.fFSLocation, 1, false, matrix));
}
@ -183,9 +183,9 @@ void GrGLUniformManager::setMatrix3f(UniformHandle u, const GrGLfloat matrix[])

void GrGLUniformManager::setMatrix4f(UniformHandle u, const GrGLfloat matrix[]) const {
const Uniform& uni = fUniforms[u.toUniformIndex()];
GrAssert(uni.fType == kMat44f_GrSLType);
GrAssert(GrGLShaderVar::kNonArray == uni.fArrayCount);
GrAssert(kUnusedUniform != uni.fFSLocation || kUnusedUniform != uni.fVSLocation);
SkASSERT(uni.fType == kMat44f_GrSLType);
SkASSERT(GrGLShaderVar::kNonArray == uni.fArrayCount);
SkASSERT(kUnusedUniform != uni.fFSLocation || kUnusedUniform != uni.fVSLocation);
if (kUnusedUniform != uni.fFSLocation) {
GR_GL_CALL(fContext.interface(), UniformMatrix4fv(uni.fFSLocation, 1, false, matrix));
}
@ -199,10 +199,10 @@ void GrGLUniformManager::setMatrix3fv(UniformHandle u,
int arrayCount,
const GrGLfloat matrices[]) const {
const Uniform& uni = fUniforms[u.toUniformIndex()];
GrAssert(uni.fType == kMat33f_GrSLType);
GrAssert(arrayCount > 0);
SkASSERT(uni.fType == kMat33f_GrSLType);
SkASSERT(arrayCount > 0);
ASSERT_ARRAY_UPLOAD_IN_BOUNDS(uni, offset, arrayCount);
GrAssert(kUnusedUniform != uni.fFSLocation || kUnusedUniform != uni.fVSLocation);
SkASSERT(kUnusedUniform != uni.fFSLocation || kUnusedUniform != uni.fVSLocation);
if (kUnusedUniform != uni.fFSLocation) {
GR_GL_CALL(fContext.interface(),
UniformMatrix3fv(uni.fFSLocation + offset, arrayCount, false, matrices));
@ -218,10 +218,10 @@ void GrGLUniformManager::setMatrix4fv(UniformHandle u,
int arrayCount,
const GrGLfloat matrices[]) const {
const Uniform& uni = fUniforms[u.toUniformIndex()];
GrAssert(uni.fType == kMat44f_GrSLType);
GrAssert(arrayCount > 0);
SkASSERT(uni.fType == kMat44f_GrSLType);
SkASSERT(arrayCount > 0);
ASSERT_ARRAY_UPLOAD_IN_BOUNDS(uni, offset, arrayCount);
GrAssert(kUnusedUniform != uni.fFSLocation || kUnusedUniform != uni.fVSLocation);
SkASSERT(kUnusedUniform != uni.fFSLocation || kUnusedUniform != uni.fVSLocation);
if (kUnusedUniform != uni.fFSLocation) {
GR_GL_CALL(fContext.interface(),
UniformMatrix4fv(uni.fFSLocation + offset, arrayCount, false, matrices));
@ -250,11 +250,11 @@ void GrGLUniformManager::setSkMatrix(UniformHandle u, const SkMatrix& matrix) co

void GrGLUniformManager::getUniformLocations(GrGLuint programID, const BuilderUniformArray& uniforms) {
GrAssert(uniforms.count() == fUniforms.count());
SkASSERT(uniforms.count() == fUniforms.count());
int count = fUniforms.count();
for (int i = 0; i < count; ++i) {
GrAssert(uniforms[i].fVariable.getType() == fUniforms[i].fType);
GrAssert(uniforms[i].fVariable.getArrayCount() == fUniforms[i].fArrayCount);
SkASSERT(uniforms[i].fVariable.getType() == fUniforms[i].fType);
SkASSERT(uniforms[i].fVariable.getArrayCount() == fUniforms[i].fArrayCount);
GrGLint location;
// TODO: Move the Xoom uniform array in both FS and VS bug workaround here.
GR_GL_CALL_RET(fContext.interface(), location,

@ -37,10 +37,10 @@ public:
private:
UniformHandle(int value)
: fValue(~value) {
GrAssert(isValid());
SkASSERT(isValid());
}

int toUniformIndex() const { GrAssert(isValid()); return ~fValue; }
int toUniformIndex() const { SkASSERT(isValid()); return ~fValue; }

int fValue;
friend class GrGLUniformManager; // For accessing toUniformIndex().

@ -93,7 +93,7 @@ bool get_gl_version_for_mesa(int mesaMajorVersion, int* major, int* minor) {

GrGLBinding GrGLGetBindingInUseFromString(const char* versionString) {
if (NULL == versionString) {
GrAssert(!"NULL GL version string.");
SkASSERT(!"NULL GL version string.");
return kNone_GrGLBinding;
}

@ -129,7 +129,7 @@ bool GrGLIsMesaFromVersionString(const char* versionString) {

GrGLVersion GrGLGetVersionFromString(const char* versionString) {
if (NULL == versionString) {
GrAssert(!"NULL GL version string.");
SkASSERT(!"NULL GL version string.");
return 0;
}

@ -168,7 +168,7 @@ GrGLVersion GrGLGetVersionFromString(const char* versionString) {

GrGLSLVersion GrGLGetGLSLVersionFromString(const char* versionString) {
if (NULL == versionString) {
GrAssert(!"NULL GLSL version string.");
SkASSERT(!"NULL GLSL version string.");
return 0;
}

@ -19,7 +19,7 @@ void GrGLAttribArrayState::set(const GrGpuGL* gpu,
GrGLboolean normalized,
GrGLsizei stride,
GrGLvoid* offset) {
GrAssert(index >= 0 && index < fAttribArrayStates.count());
SkASSERT(index >= 0 && index < fAttribArrayStates.count());
AttribArrayState* array = &fAttribArrayStates[index];
if (!array->fEnableIsValid || !array->fEnabled) {
GR_GL_CALL(gpu->glInterface(), EnableVertexAttribArray(index));
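For context on why this substitution is mechanical and behavior-preserving: both GrAssert and SkASSERT are debug-only assertions, so every call site above keeps its semantics. Below is a minimal, hypothetical sketch of that debug-only assertion pattern, not Skia's actual definition (as an assumption, the real SkASSERT lives in SkTypes.h and is gated by SK_DEBUG); the SKETCH_ASSERT name and the failure message are invented for illustration.

```cpp
// Sketch of a debug-only assertion macro in the style of SkASSERT.
// SKETCH_ASSERT and the message format are hypothetical; SK_DEBUG is used here
// only as the build flag Skia is assumed to define for debug builds.
#include <cstdio>
#include <cstdlib>

#if defined(SK_DEBUG)
    #define SKETCH_ASSERT(cond)                                      \
        do {                                                         \
            if (!(cond)) {                                           \
                std::fprintf(stderr, "assert failed: %s, %s:%d\n",   \
                             #cond, __FILE__, __LINE__);             \
                std::abort();                                        \
            }                                                        \
        } while (false)
#else
    // In release builds the condition is not evaluated at all.
    #define SKETCH_ASSERT(cond) do {} while (false)
#endif

int main() {
    int arrayCount = 3;
    // Mirrors the uniform-manager checks in the diff above.
    SKETCH_ASSERT(arrayCount > 0);
    return 0;
}
```

Because the release branch drops the condition entirely, expressions passed to such a macro must be free of side effects, which is the constraint the assertion call sites in this diff already follow.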