Pull out shader-specific caps into GrShaderCaps and GrGLSLCaps
BUG=skia:
Review URL: https://codereview.chromium.org/1116713002
commit e9c0fc616d (parent 23c5f51371)
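At a glance, the refactor moves the shader-related queries (derivatives, geometry shaders, path rendering, dst reads in shader, dual-source blending, float precision) off GrDrawTargetCaps onto a new GrShaderCaps object reachable via shaderCaps(), with GrGLSLCaps as the GL-specific subclass. A minimal caller-side sketch of the new access pattern (hypothetical variable names, not part of the patch):

    // 'target' stands in for any GrDrawTarget*; the pattern mirrors the call sites changed below.
    const GrDrawTargetCaps* caps = target->caps();
    if (caps->shaderCaps()->shaderDerivativeSupport()) {
        // safe to generate shader code that uses dFdx()/dFdy()
    }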
@@ -11,8 +11,7 @@
#include "GrProcessor.h"

class GrCoordTransform;
class GrGLCaps;
typedef GrGLCaps GrGLSLCaps;
class GrGLSLCaps;
class GrGLFragmentProcessor;
class GrProcessorKeyBuilder;

@@ -14,9 +14,8 @@
#include "GrTypes.h"
#include "SkXfermode.h"

class GrDrawTargetCaps;
class GrGLCaps;
typedef GrGLCaps GrGLSLCaps;
class GrShaderCaps;
class GrGLSLCaps;
class GrGLXferProcessor;
class GrProcOptInfo;

@@ -687,7 +687,7 @@ GrGeometryProcessor* QuadEdgeEffect::TestCreate(SkRandom* random,
const GrDrawTargetCaps& caps,
GrTexture*[]) {
// Doesn't work without derivative instructions.
return caps.shaderDerivativeSupport() ?
return caps.shaderCaps()->shaderDerivativeSupport() ?
QuadEdgeEffect::Create(GrRandomColor(random),
GrTest::TestMatrix(random)) : NULL;
}
@@ -700,7 +700,7 @@ bool GrAAConvexPathRenderer::canDrawPath(const GrDrawTarget* target,
const SkPath& path,
const GrStrokeInfo& stroke,
bool antiAlias) const {
return (target->caps()->shaderDerivativeSupport() && antiAlias &&
return (target->caps()->shaderCaps()->shaderDerivativeSupport() && antiAlias &&
stroke.isFillStyle() && !path.isInverseFillType() && path.isConvex());
}

@@ -92,8 +92,8 @@ bool GrAADistanceFieldPathRenderer::canDrawPath(const GrDrawTarget* target,

// TODO: Support inverse fill
// TODO: Support strokes
if (!target->caps()->shaderDerivativeSupport() || !antiAlias || path.isInverseFillType()
|| path.isVolatile() || !stroke.isFillStyle()) {
if (!target->caps()->shaderCaps()->shaderDerivativeSupport() || !antiAlias
|| path.isInverseFillType() || path.isVolatile() || !stroke.isFillStyle()) {
return false;
}

@@ -649,7 +649,7 @@ bool GrAAHairLinePathRenderer::canDrawPath(const GrDrawTarget* target,
}

if (SkPath::kLine_SegmentMask == path.getSegmentMasks() ||
target->caps()->shaderDerivativeSupport()) {
target->caps()->shaderCaps()->shaderDerivativeSupport()) {
return true;
}
return false;

@@ -447,7 +447,7 @@ inline bool GrAtlasTextContext::canDrawAsDistanceFields(const SkPaint& skPaint,
// rasterizers and mask filters modify alpha, which doesn't
// translate well to distance
if (skPaint.getRasterizer() || skPaint.getMaskFilter() ||
!fContext->getTextTarget()->caps()->shaderDerivativeSupport()) {
!fContext->getTextTarget()->caps()->shaderCaps()->shaderDerivativeSupport()) {
return false;
}

@@ -226,7 +226,7 @@ GrTextContext* GrContext::createTextContext(GrRenderTarget* renderTarget,
const SkDeviceProperties&
leakyProperties,
bool enableDistanceFieldFonts) {
if (fGpu->caps()->pathRenderingSupport() && renderTarget->isMultisampled()) {
if (fGpu->caps()->shaderCaps()->pathRenderingSupport() && renderTarget->isMultisampled()) {
GrStencilAttachment* sb = renderTarget->renderTargetPriv().attachStencilAttachment();
if (sb) {
return GrStencilAndCoverTextContext::Create(this, gpuDevice, leakyProperties);
@@ -1888,7 +1888,7 @@ int GrContext::getRecommendedSampleCount(GrPixelConfig config,
return 0;
}
int chosenSampleCount = 0;
if (fGpu->caps()->pathRenderingSupport()) {
if (fGpu->caps()->shaderCaps()->pathRenderingSupport()) {
if (dpi >= 250.0f) {
chosenSampleCount = 4;
} else {

@@ -27,10 +27,10 @@ void GrCoordTransform::reset(GrCoordSet sourceCoords, const SkMatrix& m, const G
int subPixelThresh = filter > GrTextureParams::kNone_FilterMode ? 4 : 1;
fPrecision = kDefault_GrSLPrecision;
if (texture->getContext()) {
const GrDrawTargetCaps* caps = texture->getContext()->getGpu()->caps();
const GrShaderCaps* caps = texture->getContext()->getGpu()->caps()->shaderCaps();
if (caps->floatPrecisionVaries()) {
int maxD = SkTMax(texture->width(), texture->height());
const GrDrawTargetCaps::PrecisionInfo* info;
const GrShaderCaps::PrecisionInfo* info;
info = &caps->getFloatShaderPrecisionInfo(kFragment_GrShaderType, fPrecision);
do {
SkASSERT(info->supported());

@@ -213,7 +213,7 @@ void GrDrawTarget::stencilPath(GrPipelineBuilder* pipelineBuilder,
GrPathRendering::FillType fill) {
// TODO: extract portions of checkDraw that are relevant to path stenciling.
SkASSERT(path);
SkASSERT(this->caps()->pathRenderingSupport());
SkASSERT(this->caps()->shaderCaps()->pathRenderingSupport());
SkASSERT(pipelineBuilder);

// Setup clip
@@ -239,7 +239,7 @@ void GrDrawTarget::drawPath(GrPipelineBuilder* pipelineBuilder,
GrPathRendering::FillType fill) {
// TODO: extract portions of checkDraw that are relevant to path rendering.
SkASSERT(path);
SkASSERT(this->caps()->pathRenderingSupport());
SkASSERT(this->caps()->shaderCaps()->pathRenderingSupport());
SkASSERT(pipelineBuilder);

SkRect devBounds = path->getBounds();
@@ -277,7 +277,7 @@ void GrDrawTarget::drawPaths(GrPipelineBuilder* pipelineBuilder,
PathTransformType transformType,
int count,
GrPathRendering::FillType fill) {
SkASSERT(this->caps()->pathRenderingSupport());
SkASSERT(this->caps()->shaderCaps()->pathRenderingSupport());
SkASSERT(pathRange);
SkASSERT(indices);
SkASSERT(0 == reinterpret_cast<long>(indices) % GrPathRange::PathIndexSizeInBytes(indexType));
@@ -541,16 +541,92 @@ GrDrawTarget::PipelineInfo::PipelineInfo(GrPipelineBuilder* pipelineBuilder,

///////////////////////////////////////////////////////////////////////////////

void GrShaderCaps::reset() {
fShaderDerivativeSupport = false;
fGeometryShaderSupport = false;
fPathRenderingSupport = false;
fDstReadInShaderSupport = false;
fDualSourceBlendingSupport = false;

fShaderPrecisionVaries = false;
}

GrShaderCaps& GrShaderCaps::operator=(const GrShaderCaps& other) {
fShaderDerivativeSupport = other.fShaderDerivativeSupport;
fGeometryShaderSupport = other.fGeometryShaderSupport;
fPathRenderingSupport = other.fPathRenderingSupport;
fDstReadInShaderSupport = other.fDstReadInShaderSupport;
fDualSourceBlendingSupport = other.fDualSourceBlendingSupport;

fShaderPrecisionVaries = other.fShaderPrecisionVaries;
for (int s = 0; s < kGrShaderTypeCount; ++s) {
for (int p = 0; p < kGrSLPrecisionCount; ++p) {
fFloatPrecisions[s][p] = other.fFloatPrecisions[s][p];
}
}
return *this;
}

static const char* shader_type_to_string(GrShaderType type) {
switch (type) {
case kVertex_GrShaderType:
return "vertex";
case kGeometry_GrShaderType:
return "geometry";
case kFragment_GrShaderType:
return "fragment";
}
return "";
}

static const char* precision_to_string(GrSLPrecision p) {
switch (p) {
case kLow_GrSLPrecision:
return "low";
case kMedium_GrSLPrecision:
return "medium";
case kHigh_GrSLPrecision:
return "high";
}
return "";
}

SkString GrShaderCaps::dump() const {
SkString r;
static const char* gNY[] = { "NO", "YES" };
r.appendf("Shader Derivative Support : %s\n", gNY[fShaderDerivativeSupport]);
r.appendf("Geometry Shader Support : %s\n", gNY[fGeometryShaderSupport]);
r.appendf("Path Rendering Support : %s\n", gNY[fPathRenderingSupport]);
r.appendf("Dst Read In Shader Support : %s\n", gNY[fDstReadInShaderSupport]);
r.appendf("Dual Source Blending Support : %s\n", gNY[fDualSourceBlendingSupport]);

r.appendf("Shader Float Precisions (varies: %s):\n", gNY[fShaderPrecisionVaries]);

for (int s = 0; s < kGrShaderTypeCount; ++s) {
GrShaderType shaderType = static_cast<GrShaderType>(s);
r.appendf("\t%s:\n", shader_type_to_string(shaderType));
for (int p = 0; p < kGrSLPrecisionCount; ++p) {
if (fFloatPrecisions[s][p].supported()) {
GrSLPrecision precision = static_cast<GrSLPrecision>(p);
r.appendf("\t\t%s: log_low: %d log_high: %d bits: %d\n",
precision_to_string(precision),
fFloatPrecisions[s][p].fLogRangeLow,
fFloatPrecisions[s][p].fLogRangeHigh,
fFloatPrecisions[s][p].fBits);
}
}
}

return r;
}

///////////////////////////////////////////////////////////////////////////////

void GrDrawTargetCaps::reset() {
fMipMapSupport = false;
fNPOTTextureTileSupport = false;
fTwoSidedStencilSupport = false;
fStencilWrapOpsSupport = false;
fShaderDerivativeSupport = false;
fGeometryShaderSupport = false;
fDualSourceBlendingSupport = false;
fPathRenderingSupport = false;
fDstReadInShaderSupport = false;
fDiscardRenderTargetSupport = false;
fReuseScratchTextures = true;
fGpuTracingSupport = false;
@@ -566,8 +642,6 @@ void GrDrawTargetCaps::reset() {
fMaxTextureSize = 0;
fMaxSampleCount = 0;

fShaderPrecisionVaries = false;

memset(fConfigRenderSupport, 0, sizeof(fConfigRenderSupport));
memset(fConfigTextureSupport, 0, sizeof(fConfigTextureSupport));
}
@@ -577,11 +651,6 @@ GrDrawTargetCaps& GrDrawTargetCaps::operator=(const GrDrawTargetCaps& other) {
fNPOTTextureTileSupport = other.fNPOTTextureTileSupport;
fTwoSidedStencilSupport = other.fTwoSidedStencilSupport;
fStencilWrapOpsSupport = other.fStencilWrapOpsSupport;
fShaderDerivativeSupport = other.fShaderDerivativeSupport;
fGeometryShaderSupport = other.fGeometryShaderSupport;
fDualSourceBlendingSupport = other.fDualSourceBlendingSupport;
fPathRenderingSupport = other.fPathRenderingSupport;
fDstReadInShaderSupport = other.fDstReadInShaderSupport;
fDiscardRenderTargetSupport = other.fDiscardRenderTargetSupport;
fReuseScratchTextures = other.fReuseScratchTextures;
fGpuTracingSupport = other.fGpuTracingSupport;
@@ -600,12 +669,6 @@ GrDrawTargetCaps& GrDrawTargetCaps::operator=(const GrDrawTargetCaps& other) {
memcpy(fConfigRenderSupport, other.fConfigRenderSupport, sizeof(fConfigRenderSupport));
memcpy(fConfigTextureSupport, other.fConfigTextureSupport, sizeof(fConfigTextureSupport));

fShaderPrecisionVaries = other.fShaderPrecisionVaries;
for (int s = 0; s < kGrShaderTypeCount; ++s) {
for (int p = 0; p < kGrSLPrecisionCount; ++p) {
fFloatPrecisions[s][p] = other.fFloatPrecisions[s][p];
}
}
return *this;
}

@@ -629,30 +692,6 @@ static SkString map_flags_to_string(uint32_t flags) {
return str;
}

static const char* shader_type_to_string(GrShaderType type) {
switch (type) {
case kVertex_GrShaderType:
return "vertex";
case kGeometry_GrShaderType:
return "geometry";
case kFragment_GrShaderType:
return "fragment";
}
return "";
}

static const char* precision_to_string(GrSLPrecision p) {
switch (p) {
case kLow_GrSLPrecision:
return "low";
case kMedium_GrSLPrecision:
return "medium";
case kHigh_GrSLPrecision:
return "high";
}
return "";
}

SkString GrDrawTargetCaps::dump() const {
SkString r;
static const char* gNY[] = {"NO", "YES"};
@@ -660,11 +699,6 @@ SkString GrDrawTargetCaps::dump() const {
r.appendf("NPOT Texture Tile Support : %s\n", gNY[fNPOTTextureTileSupport]);
r.appendf("Two Sided Stencil Support : %s\n", gNY[fTwoSidedStencilSupport]);
r.appendf("Stencil Wrap Ops Support : %s\n", gNY[fStencilWrapOpsSupport]);
r.appendf("Shader Derivative Support : %s\n", gNY[fShaderDerivativeSupport]);
r.appendf("Geometry Shader Support : %s\n", gNY[fGeometryShaderSupport]);
r.appendf("Dual Source Blending Support : %s\n", gNY[fDualSourceBlendingSupport]);
r.appendf("Path Rendering Support : %s\n", gNY[fPathRenderingSupport]);
r.appendf("Dst Read In Shader Support : %s\n", gNY[fDstReadInShaderSupport]);
r.appendf("Discard Render Target Support : %s\n", gNY[fDiscardRenderTargetSupport]);
r.appendf("Reuse Scratch Textures : %s\n", gNY[fReuseScratchTextures]);
r.appendf("Gpu Tracing Support : %s\n", gNY[fGpuTracingSupport]);
@@ -730,23 +764,6 @@ SkString GrDrawTargetCaps::dump() const {
gNY[fConfigTextureSupport[i]]);
}

r.appendf("Shader Float Precisions (varies: %s):\n", gNY[fShaderPrecisionVaries]);

for (int s = 0; s < kGrShaderTypeCount; ++s) {
GrShaderType shaderType = static_cast<GrShaderType>(s);
r.appendf("\t%s:\n", shader_type_to_string(shaderType));
for (int p = 0; p < kGrSLPrecisionCount; ++p) {
if (fFloatPrecisions[s][p].supported()) {
GrSLPrecision precision = static_cast<GrSLPrecision>(p);
r.appendf("\t\t%s: log_low: %d log_high: %d bits: %d\n",
precision_to_string(precision),
fFloatPrecisions[s][p].fLogRangeLow,
fFloatPrecisions[s][p].fLogRangeHigh,
fFloatPrecisions[s][p].fBits);
}
}
}

return r;
}

@@ -14,12 +14,9 @@
#include "SkRefCnt.h"
#include "SkString.h"

/**
* Represents the draw target capabilities.
*/
class GrDrawTargetCaps : public SkRefCnt {
class GrShaderCaps : public SkRefCnt {
public:
SK_DECLARE_INST_COUNT(GrDrawTargetCaps)
SK_DECLARE_INST_COUNT(GrShaderCaps)

/** Info about shader variable precision within a given shader stage. That is, this info
is relevant to a float (or vecNf) variable declared with a GrSLPrecision
@@ -44,19 +41,76 @@ public:
int fLogRangeLow;
/** floor(log2(|max_value|)) */
int fLogRangeHigh;
/** Number of bits of precision. As defined in OpenGL (with names modified to reflect this
/** Number of bits of precision. As defined in OpenGL (with names modified to reflect this
struct) :
"""
If the smallest representable value greater than 1 is 1 + e, then fBits will
contain floor(log2(e)), and every value in the range [2^fLogRangeLow,
2^fLogRangeHigh] can be represented to at least one part in 2^fBits.
"""
If the smallest representable value greater than 1 is 1 + e, then fBits will
contain floor(log2(e)), and every value in the range [2^fLogRangeLow,
2^fLogRangeHigh] can be represented to at least one part in 2^fBits.
"""
*/
int fBits;
};

GrShaderCaps() {
this->reset();
}
virtual ~GrShaderCaps() {}
GrShaderCaps(const GrShaderCaps& other) : INHERITED() {
*this = other;
}
GrShaderCaps& operator= (const GrShaderCaps&);

virtual void reset();
virtual SkString dump() const;

bool shaderDerivativeSupport() const { return fShaderDerivativeSupport; }
bool geometryShaderSupport() const { return fGeometryShaderSupport; }
bool pathRenderingSupport() const { return fPathRenderingSupport; }
bool dstReadInShaderSupport() const { return fDstReadInShaderSupport; }
bool dualSourceBlendingSupport() const { return fDualSourceBlendingSupport; }

/**
* Get the precision info for a variable of type kFloat_GrSLType, kVec2f_GrSLType, etc in a
* given shader type. If the shader type is not supported or the precision level is not
* supported in that shader type then the returned struct will report false when supported() is
* called.
*/
const PrecisionInfo& getFloatShaderPrecisionInfo(GrShaderType shaderType,
GrSLPrecision precision) const {
return fFloatPrecisions[shaderType][precision];
};

/**
* Is there any difference between the float shader variable precision types? If this is true
* then unless the shader type is not supported, any call to getFloatShaderPrecisionInfo() would
* report the same info for all precisions in all shader types.
*/
bool floatPrecisionVaries() const { return fShaderPrecisionVaries; }

protected:
bool fShaderDerivativeSupport : 1;
bool fGeometryShaderSupport : 1;
bool fPathRenderingSupport : 1;
bool fDstReadInShaderSupport : 1;
bool fDualSourceBlendingSupport : 1;

bool fShaderPrecisionVaries;
PrecisionInfo fFloatPrecisions[kGrShaderTypeCount][kGrSLPrecisionCount];

private:
typedef SkRefCnt INHERITED;
};

/**
* Represents the draw target capabilities.
*/
class GrDrawTargetCaps : public SkRefCnt {
public:
SK_DECLARE_INST_COUNT(GrDrawTargetCaps)

GrDrawTargetCaps() {
fShaderCaps.reset(NULL);
this->reset();
}
GrDrawTargetCaps(const GrDrawTargetCaps& other) : INHERITED() {
@@ -67,17 +121,14 @@ public:
virtual void reset();
virtual SkString dump() const;

GrShaderCaps* shaderCaps() const { return fShaderCaps; }

bool npotTextureTileSupport() const { return fNPOTTextureTileSupport; }
/** To avoid as-yet-unnecessary complexity we don't allow any partial support of MIP Maps (e.g.
only for POT textures) */
bool mipMapSupport() const { return fMipMapSupport; }
bool twoSidedStencilSupport() const { return fTwoSidedStencilSupport; }
bool stencilWrapOpsSupport() const { return fStencilWrapOpsSupport; }
bool shaderDerivativeSupport() const { return fShaderDerivativeSupport; }
bool geometryShaderSupport() const { return fGeometryShaderSupport; }
bool dualSourceBlendingSupport() const { return fDualSourceBlendingSupport; }
bool pathRenderingSupport() const { return fPathRenderingSupport; }
bool dstReadInShaderSupport() const { return fDstReadInShaderSupport; }
bool discardRenderTargetSupport() const { return fDiscardRenderTargetSupport; }
#if GR_FORCE_GPU_TRACE_DEBUGGING
bool gpuTracingSupport() const { return true; }
@@ -125,34 +176,13 @@ public:
return fConfigTextureSupport[config];
}

/**
* Get the precision info for a variable of type kFloat_GrSLType, kVec2f_GrSLType, etc in a
* given shader type. If the shader type is not supported or the precision level is not
* supported in that shader type then the returned struct will report false when supported() is
* called.
*/
const PrecisionInfo& getFloatShaderPrecisionInfo(GrShaderType shaderType,
GrSLPrecision precision) const {
return fFloatPrecisions[shaderType][precision];
};

/**
* Is there any difference between the float shader variable precision types? If this is true
* then unless the shader type is not supported, any call to getFloatShaderPrecisionInfo() would
* report the same info for all precisions in all shader types.
*/
bool floatPrecisionVaries() const { return fShaderPrecisionVaries; }

protected:
SkAutoTUnref<GrShaderCaps> fShaderCaps;

bool fNPOTTextureTileSupport : 1;
bool fMipMapSupport : 1;
bool fTwoSidedStencilSupport : 1;
bool fStencilWrapOpsSupport : 1;
bool fShaderDerivativeSupport : 1;
bool fGeometryShaderSupport : 1;
bool fDualSourceBlendingSupport : 1;
bool fPathRenderingSupport : 1;
bool fDstReadInShaderSupport : 1;
bool fDiscardRenderTargetSupport : 1;
bool fReuseScratchTextures : 1;
bool fGpuTracingSupport : 1;
@@ -172,9 +202,6 @@ protected:
bool fConfigRenderSupport[kGrPixelConfigCnt][2];
bool fConfigTextureSupport[kGrPixelConfigCnt];

bool fShaderPrecisionVaries;
PrecisionInfo fFloatPrecisions[kGrShaderTypeCount][kGrSLPrecisionCount];

private:
typedef SkRefCnt INHERITED;
};

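A hedged usage sketch for the PrecisionInfo API declared above (hypothetical pointer name, not taken from the patch):

    // Query fragment-shader float precision through the relocated GrShaderCaps interface.
    const GrShaderCaps* shaderCaps = drawTargetCaps->shaderCaps();
    if (shaderCaps->floatPrecisionVaries()) {
        const GrShaderCaps::PrecisionInfo& info =
            shaderCaps->getFloatShaderPrecisionInfo(kFragment_GrShaderType, kMedium_GrSLPrecision);
        if (info.supported()) {
            // info.fBits, info.fLogRangeLow and info.fLogRangeHigh describe mediump floats.
        }
    }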
@@ -669,7 +669,7 @@ bool GrOvalRenderer::drawOval(GrDrawTarget* target,
if (SkScalarNearlyEqual(oval.width(), oval.height()) && circle_stays_circle(viewMatrix)) {
this->drawCircle(target, pipelineBuilder, color, viewMatrix, useCoverageAA, oval, stroke);
// if we have shader derivative support, render as device-independent
} else if (target->caps()->shaderDerivativeSupport()) {
} else if (target->caps()->shaderCaps()->shaderDerivativeSupport()) {
return this->drawDIEllipse(target, pipelineBuilder, color, viewMatrix, useCoverageAA, oval,
stroke);
// otherwise axis-aligned ellipses only

@@ -67,8 +67,7 @@ private:
};

class GrIndexBufferAllocPool;
class GrGLCaps;
typedef GrGLCaps GrGLSLCaps;
class GrGLSLCaps;
class GrGLPrimitiveProcessor;
class GrVertexBufferAllocPool;

@@ -35,7 +35,7 @@ static GrPathRendering::FillType convert_skpath_filltype(SkPath::FillType fill)
GrPathRenderer* GrStencilAndCoverPathRenderer::Create(GrContext* context) {
SkASSERT(context);
SkASSERT(context->getGpu());
if (context->getGpu()->caps()->pathRenderingSupport()) {
if (context->getGpu()->caps()->shaderCaps()->pathRenderingSupport()) {
return SkNEW_ARGS(GrStencilAndCoverPathRenderer, (context->getGpu()));
} else {
return NULL;
@@ -43,7 +43,7 @@ GrPathRenderer* GrStencilAndCoverPathRenderer::Create(GrContext* context) {
}

GrStencilAndCoverPathRenderer::GrStencilAndCoverPathRenderer(GrGpu* gpu) {
SkASSERT(gpu->caps()->pathRenderingSupport());
SkASSERT(gpu->caps()->shaderCaps()->pathRenderingSupport());
fGpu = gpu;
gpu->ref();
}

@@ -40,7 +40,7 @@ GrXferProcessor* GrXPFactory::createXferProcessor(const GrProcOptInfo& colorPOI,
const GrDrawTargetCaps& caps) const {
#ifdef SK_DEBUG
if (this->willReadDstColor(caps, colorPOI, coveragePOI)) {
if (!caps.dstReadInShaderSupport()) {
if (!caps.shaderCaps()->dstReadInShaderSupport()) {
SkASSERT(dstCopy && dstCopy->texture());
} else {
SkASSERT(!dstCopy || !dstCopy->texture());
@@ -54,6 +54,7 @@ GrXferProcessor* GrXPFactory::createXferProcessor(const GrProcOptInfo& colorPOI,

bool GrXPFactory::willNeedDstCopy(const GrDrawTargetCaps& caps, const GrProcOptInfo& colorPOI,
const GrProcOptInfo& coveragePOI) const {
return (this->willReadDstColor(caps, colorPOI, coveragePOI) && !caps.dstReadInShaderSupport());
return (this->willReadDstColor(caps, colorPOI, coveragePOI)
&& !caps.shaderCaps()->dstReadInShaderSupport());
}

@@ -66,14 +66,14 @@ public:
uint8_t coverage = 0xff) {
switch (edgeType) {
case kFillAA_GrProcessorEdgeType:
if (!caps.shaderDerivativeSupport()) {
if (!caps.shaderCaps()->shaderDerivativeSupport()) {
return NULL;
}
return SkNEW_ARGS(GrConicEffect, (color, viewMatrix, coverage,
kFillAA_GrProcessorEdgeType,
localMatrix));
case kHairlineAA_GrProcessorEdgeType:
if (!caps.shaderDerivativeSupport()) {
if (!caps.shaderCaps()->shaderDerivativeSupport()) {
return NULL;
}
return SkNEW_ARGS(GrConicEffect, (color, viewMatrix, coverage,
@@ -151,14 +151,14 @@ public:
uint8_t coverage = 0xff) {
switch (edgeType) {
case kFillAA_GrProcessorEdgeType:
if (!caps.shaderDerivativeSupport()) {
if (!caps.shaderCaps()->shaderDerivativeSupport()) {
return NULL;
}
return SkNEW_ARGS(GrQuadEffect, (color, viewMatrix, coverage,
kFillAA_GrProcessorEdgeType,
localMatrix));
case kHairlineAA_GrProcessorEdgeType:
if (!caps.shaderDerivativeSupport()) {
if (!caps.shaderCaps()->shaderDerivativeSupport()) {
return NULL;
}
return SkNEW_ARGS(GrQuadEffect, (color, viewMatrix, coverage,
@@ -236,12 +236,12 @@ public:
const GrDrawTargetCaps& caps) {
switch (edgeType) {
case kFillAA_GrProcessorEdgeType:
if (!caps.shaderDerivativeSupport()) {
if (!caps.shaderCaps()->shaderDerivativeSupport()) {
return NULL;
}
return SkNEW_ARGS(GrCubicEffect, (color, viewMatrix, kFillAA_GrProcessorEdgeType));
case kHairlineAA_GrProcessorEdgeType:
if (!caps.shaderDerivativeSupport()) {
if (!caps.shaderCaps()->shaderDerivativeSupport()) {
return NULL;
}
return SkNEW_ARGS(GrCubicEffect, (color, viewMatrix,

@@ -351,7 +351,7 @@ void PorterDuffXferProcessor::calcOutputTypes(GrXferProcessor::OptFlags optFlags
// blending if we have any effective coverage stages OR the geometry processor doesn't emits
// solid coverage.
if (!(optFlags & kSetCoverageDrawing_OptFlag) && !hasSolidCoverage) {
if (caps.dualSourceBlendingSupport()) {
if (caps.shaderCaps()->dualSourceBlendingSupport()) {
if (kZero_GrBlendCoeff == fDstBlend) {
// write the coverage value to second color
fSecondaryOutputType = kCoverage_SecondaryOutputType;
@@ -668,7 +668,7 @@ bool GrPorterDuffXPFactory::willReadDstColor(const GrDrawTargetCaps& caps,
const GrProcOptInfo& colorPOI,
const GrProcOptInfo& coveragePOI) const {
// We can always blend correctly if we have dual source blending.
if (caps.dualSourceBlendingSupport()) {
if (caps.shaderCaps()->dualSourceBlendingSupport()) {
return false;
}

@@ -47,14 +47,12 @@ void GrGLCaps::reset() {
fUseNonVBOVertexAndIndexDynamicData = false;
fIsCoreProfile = false;
fFullClearIsFree = false;
fDropsTileOnZeroDivide = false;
fFBFetchSupport = false;
fFBFetchNeedsCustomOutput = false;
fFBFetchColorName = NULL;
fFBFetchExtensionString = NULL;
fFBMixedSamplesSupport = false;

fReadPixelsSupportedCache.reset();

fShaderCaps.reset(SkNEW(GrGLSLCaps));

}

GrGLCaps::GrGLCaps(const GrGLCaps& caps) : GrDrawTargetCaps() {
@@ -91,13 +89,11 @@ GrGLCaps& GrGLCaps::operator= (const GrGLCaps& caps) {
fUseNonVBOVertexAndIndexDynamicData = caps.fUseNonVBOVertexAndIndexDynamicData;
fIsCoreProfile = caps.fIsCoreProfile;
fFullClearIsFree = caps.fFullClearIsFree;
fDropsTileOnZeroDivide = caps.fDropsTileOnZeroDivide;
fFBFetchSupport = caps.fFBFetchSupport;
fFBFetchNeedsCustomOutput = caps.fFBFetchNeedsCustomOutput;
fFBFetchColorName = caps.fFBFetchColorName;
fFBFetchExtensionString = caps.fFBFetchExtensionString;
fFBMixedSamplesSupport = caps.fFBMixedSamplesSupport;

*(reinterpret_cast<GrGLSLCaps*>(fShaderCaps.get())) =
*(reinterpret_cast<GrGLSLCaps*>(caps.fShaderCaps.get()));

return *this;
}

@@ -253,30 +249,6 @@ bool GrGLCaps::init(const GrGLContextInfo& ctxInfo, const GrGLInterface* gli) {
fES2CompatibilitySupport = true;
}

if (kGLES_GrGLStandard == standard) {
if (ctxInfo.hasExtension("GL_EXT_shader_framebuffer_fetch")) {
fFBFetchNeedsCustomOutput = (version >= GR_GL_VER(3, 0));
fFBFetchSupport = true;
fFBFetchColorName = "gl_LastFragData[0]";
fFBFetchExtensionString = "GL_EXT_shader_framebuffer_fetch";
} else if (ctxInfo.hasExtension("GL_NV_shader_framebuffer_fetch")) {
// Actually, we haven't seen an ES3.0 device with this extension yet, so we don't know
fFBFetchNeedsCustomOutput = false;
fFBFetchSupport = true;
fFBFetchColorName = "gl_LastFragData[0]";
fFBFetchExtensionString = "GL_NV_shader_framebuffer_fetch";
} else if (ctxInfo.hasExtension("GL_ARM_shader_framebuffer_fetch")) {
// The arm extension also requires an additional flag which we will set onResetContext
fFBFetchNeedsCustomOutput = false;
fFBFetchSupport = true;
fFBFetchColorName = "gl_LastFragColorARM";
fFBFetchExtensionString = "GL_ARM_shader_framebuffer_fetch";
}
}

// Adreno GPUs have a tendency to drop tiles when there is a divide-by-zero in a shader
fDropsTileOnZeroDivide = kQualcomm_GrGLVendor == ctxInfo.vendor();

this->initFSAASupport(ctxInfo, gli);
this->initStencilFormats(ctxInfo);

@@ -344,46 +316,14 @@ bool GrGLCaps::init(const GrGLContextInfo& ctxInfo, const GrGLInterface* gli) {
// attachment, hence this min:
fMaxRenderTargetSize = SkTMin(fMaxTextureSize, fMaxRenderTargetSize);

fPathRenderingSupport = ctxInfo.hasExtension("GL_NV_path_rendering");

if (fPathRenderingSupport) {
if (kGL_GrGLStandard == standard) {
// We only support v1.3+ of GL_NV_path_rendering which allows us to
// set individual fragment inputs with ProgramPathFragmentInputGen. The API
// additions are detected by checking the existence of the function.
fPathRenderingSupport = ctxInfo.hasExtension("GL_EXT_direct_state_access") &&
((ctxInfo.version() >= GR_GL_VER(4,3) ||
ctxInfo.hasExtension("GL_ARB_program_interface_query")) &&
gli->fFunctions.fProgramPathFragmentInputGen);
} else {
fPathRenderingSupport = ctxInfo.version() >= GR_GL_VER(3,1);
}
}

fFBMixedSamplesSupport = ctxInfo.hasExtension("GL_NV_framebuffer_mixed_samples");

fGpuTracingSupport = ctxInfo.hasExtension("GL_EXT_debug_marker");

// For now these two are equivalent but we could have dst read in shader via some other method
fDstReadInShaderSupport = fFBFetchSupport;

// Disable scratch texture reuse on Mali and Adreno devices
fReuseScratchTextures = kARM_GrGLVendor != ctxInfo.vendor() &&
kQualcomm_GrGLVendor != ctxInfo.vendor();

// Enable supported shader-related caps
if (kGL_GrGLStandard == standard) {
fDualSourceBlendingSupport = ctxInfo.version() >= GR_GL_VER(3,3) ||
ctxInfo.hasExtension("GL_ARB_blend_func_extended");
fShaderDerivativeSupport = true;
// we don't support GL_ARB_geometry_shader4, just GL 3.2+ GS
fGeometryShaderSupport = ctxInfo.version() >= GR_GL_VER(3,2) &&
ctxInfo.glslGeneration() >= k150_GrGLSLGeneration;
} else {
fShaderDerivativeSupport = ctxInfo.version() >= GR_GL_VER(3, 0) ||
ctxInfo.hasExtension("GL_OES_standard_derivatives");
}

if (GrGLCaps::kES_IMG_MsToTexture_MSFBOType == fMSFBOType) {
GR_GL_GetIntegerv(gli, GR_GL_MAX_SAMPLES_IMG, &fMaxSampleCount);
} else if (GrGLCaps::kNone_MSFBOType != fMSFBOType) {
@@ -412,7 +352,7 @@ bool GrGLCaps::init(const GrGLContextInfo& ctxInfo, const GrGLInterface* gli) {
this->initConfigTexturableTable(ctxInfo, gli);
this->initConfigRenderableTable(ctxInfo);

this->initShaderPrecisionTable(ctxInfo, gli);
reinterpret_cast<GrGLSLCaps*>(fShaderCaps.get())->init(ctxInfo, gli);

return true;
}
@@ -835,85 +775,6 @@ void GrGLCaps::initStencilFormats(const GrGLContextInfo& ctxInfo) {
fStencilVerifiedColorConfigs.push_back_n(fStencilFormats.count());
}

static GrGLenum precision_to_gl_float_type(GrSLPrecision p) {
switch (p) {
case kLow_GrSLPrecision:
return GR_GL_LOW_FLOAT;
case kMedium_GrSLPrecision:
return GR_GL_MEDIUM_FLOAT;
case kHigh_GrSLPrecision:
return GR_GL_HIGH_FLOAT;
}
SkFAIL("Unknown precision.");
return -1;
}

static GrGLenum shader_type_to_gl_shader(GrShaderType type) {
switch (type) {
case kVertex_GrShaderType:
return GR_GL_VERTEX_SHADER;
case kGeometry_GrShaderType:
return GR_GL_GEOMETRY_SHADER;
case kFragment_GrShaderType:
return GR_GL_FRAGMENT_SHADER;
}
SkFAIL("Unknown shader type.");
return -1;
}

void GrGLCaps::initShaderPrecisionTable(const GrGLContextInfo& ctxInfo, const GrGLInterface* intf) {
if (kGLES_GrGLStandard == ctxInfo.standard() || ctxInfo.version() >= GR_GL_VER(4,1) ||
ctxInfo.hasExtension("GL_ARB_ES2_compatibility")) {
for (int s = 0; s < kGrShaderTypeCount; ++s) {
if (kGeometry_GrShaderType != s) {
GrShaderType shaderType = static_cast<GrShaderType>(s);
GrGLenum glShader = shader_type_to_gl_shader(shaderType);
PrecisionInfo* first = NULL;
fShaderPrecisionVaries = false;
for (int p = 0; p < kGrSLPrecisionCount; ++p) {
GrSLPrecision precision = static_cast<GrSLPrecision>(p);
GrGLenum glPrecision = precision_to_gl_float_type(precision);
GrGLint range[2];
GrGLint bits;
GR_GL_GetShaderPrecisionFormat(intf, glShader, glPrecision, range, &bits);
if (bits) {
fFloatPrecisions[s][p].fLogRangeLow = range[0];
fFloatPrecisions[s][p].fLogRangeHigh = range[1];
fFloatPrecisions[s][p].fBits = bits;
if (!first) {
first = &fFloatPrecisions[s][p];
} else if (!fShaderPrecisionVaries) {
fShaderPrecisionVaries = (*first != fFloatPrecisions[s][p]);
}
}
}
}
}
} else {
// We're on a desktop GL that doesn't have precision info. Assume they're all 32bit float.
fShaderPrecisionVaries = false;
for (int s = 0; s < kGrShaderTypeCount; ++s) {
if (kGeometry_GrShaderType != s) {
for (int p = 0; p < kGrSLPrecisionCount; ++p) {
fFloatPrecisions[s][p].fLogRangeLow = 127;
fFloatPrecisions[s][p].fLogRangeHigh = 127;
fFloatPrecisions[s][p].fBits = 23;
}
}
}
}
// GetShaderPrecisionFormat doesn't accept GL_GEOMETRY_SHADER as a shader type. Assume they're
// the same as the vertex shader. Only fragment shaders were ever allowed to omit support for
// highp. GS was added after GetShaderPrecisionFormat was added to the list of features that
// are recommended against.
if (fGeometryShaderSupport) {
for (int p = 0; p < kGrSLPrecisionCount; ++p) {
fFloatPrecisions[kGeometry_GrShaderType][p] = fFloatPrecisions[kVertex_GrShaderType][p];
}
}
}

void GrGLCaps::markColorConfigAndStencilFormatAsVerified(
GrPixelConfig config,
const GrGLStencilAttachment::Format& format) {
@@ -1013,7 +874,6 @@ SkString GrGLCaps::dump() const {

r.appendf("Core Profile: %s\n", (fIsCoreProfile ? "YES" : "NO"));
r.appendf("MSAA Type: %s\n", kMSFBOExtStr[fMSFBOType]);
r.appendf("FB Fetch Support: %s\n", (fFBFetchSupport ? "YES" : "NO"));
r.appendf("Invalidate FB Type: %s\n", kInvalidateFBTypeStr[fInvalidateFBType]);
r.appendf("Map Buffer Type: %s\n", kMapBufferTypeStr[fMapBufferType]);
r.appendf("Max FS Uniform Vectors: %d\n", fMaxFragmentUniformVectors);
@@ -1038,6 +898,214 @@ SkString GrGLCaps::dump() const {
r.appendf("Use non-VBO for dynamic data: %s\n",
(fUseNonVBOVertexAndIndexDynamicData ? "YES" : "NO"));
r.appendf("Full screen clear is free: %s\n", (fFullClearIsFree ? "YES" : "NO"));
return r;
}

////////////////////////////////////////////////////////////////////////////////////////////

GrGLSLCaps::GrGLSLCaps() {
this->reset();
}

void GrGLSLCaps::reset() {
INHERITED::reset();

fDropsTileOnZeroDivide = false;
fFBFetchSupport = false;
fFBFetchNeedsCustomOutput = false;
fFBFetchColorName = NULL;
fFBFetchExtensionString = NULL;
}

GrGLSLCaps::GrGLSLCaps(const GrGLSLCaps& caps) : GrShaderCaps() {
*this = caps;
}

GrGLSLCaps& GrGLSLCaps::operator= (const GrGLSLCaps& caps) {
INHERITED::operator=(caps);
fDropsTileOnZeroDivide = caps.fDropsTileOnZeroDivide;
fFBFetchSupport = caps.fFBFetchSupport;
fFBFetchNeedsCustomOutput = caps.fFBFetchNeedsCustomOutput;
fFBFetchColorName = caps.fFBFetchColorName;
fFBFetchExtensionString = caps.fFBFetchExtensionString;

return *this;
}

bool GrGLSLCaps::init(const GrGLContextInfo& ctxInfo, const GrGLInterface* gli) {
this->reset();
if (!ctxInfo.isInitialized()) {
return false;
}

GrGLStandard standard = ctxInfo.standard();
GrGLVersion version = ctxInfo.version();

/**************************************************************************
* Caps specific to GrGLSLCaps
**************************************************************************/

if (kGLES_GrGLStandard == standard) {
if (ctxInfo.hasExtension("GL_EXT_shader_framebuffer_fetch")) {
fFBFetchNeedsCustomOutput = (version >= GR_GL_VER(3, 0));
fFBFetchSupport = true;
fFBFetchColorName = "gl_LastFragData[0]";
fFBFetchExtensionString = "GL_EXT_shader_framebuffer_fetch";
}
else if (ctxInfo.hasExtension("GL_NV_shader_framebuffer_fetch")) {
// Actually, we haven't seen an ES3.0 device with this extension yet, so we don't know
fFBFetchNeedsCustomOutput = false;
fFBFetchSupport = true;
fFBFetchColorName = "gl_LastFragData[0]";
fFBFetchExtensionString = "GL_NV_shader_framebuffer_fetch";
}
else if (ctxInfo.hasExtension("GL_ARM_shader_framebuffer_fetch")) {
// The arm extension also requires an additional flag which we will set onResetContext
fFBFetchNeedsCustomOutput = false;
fFBFetchSupport = true;
fFBFetchColorName = "gl_LastFragColorARM";
fFBFetchExtensionString = "GL_ARM_shader_framebuffer_fetch";
}
}

// Adreno GPUs have a tendency to drop tiles when there is a divide-by-zero in a shader
fDropsTileOnZeroDivide = kQualcomm_GrGLVendor == ctxInfo.vendor();

/**************************************************************************
* GrShaderCaps fields
**************************************************************************/

fPathRenderingSupport = ctxInfo.hasExtension("GL_NV_path_rendering");

if (fPathRenderingSupport) {
if (kGL_GrGLStandard == standard) {
// We only support v1.3+ of GL_NV_path_rendering which allows us to
// set individual fragment inputs with ProgramPathFragmentInputGen. The API
// additions are detected by checking the existence of the function.
fPathRenderingSupport = ctxInfo.hasExtension("GL_EXT_direct_state_access") &&
((ctxInfo.version() >= GR_GL_VER(4, 3) ||
ctxInfo.hasExtension("GL_ARB_program_interface_query")) &&
gli->fFunctions.fProgramPathFragmentInputGen);
}
else {
fPathRenderingSupport = ctxInfo.version() >= GR_GL_VER(3, 1);
}
}

// For now these two are equivalent but we could have dst read in shader via some other method
fDstReadInShaderSupport = fFBFetchSupport;

// Enable supported shader-related caps
if (kGL_GrGLStandard == standard) {
fDualSourceBlendingSupport = ctxInfo.version() >= GR_GL_VER(3, 3) ||
ctxInfo.hasExtension("GL_ARB_blend_func_extended");
fShaderDerivativeSupport = true;
// we don't support GL_ARB_geometry_shader4, just GL 3.2+ GS
fGeometryShaderSupport = ctxInfo.version() >= GR_GL_VER(3, 2) &&
ctxInfo.glslGeneration() >= k150_GrGLSLGeneration;
}
else {
fShaderDerivativeSupport = ctxInfo.version() >= GR_GL_VER(3, 0) ||
ctxInfo.hasExtension("GL_OES_standard_derivatives");
}

this->initShaderPrecisionTable(ctxInfo, gli);

return true;
}

SkString GrGLSLCaps::dump() const {
SkString r = INHERITED::dump();

r.appendf("--- GLSL-Specific ---\n");

r.appendf("FB Fetch Support: %s\n", (fFBFetchSupport ? "YES" : "NO"));
r.appendf("Drops tile on zero divide: %s\n", (fDropsTileOnZeroDivide ? "YES" : "NO"));
return r;
}

static GrGLenum precision_to_gl_float_type(GrSLPrecision p) {
switch (p) {
case kLow_GrSLPrecision:
return GR_GL_LOW_FLOAT;
case kMedium_GrSLPrecision:
return GR_GL_MEDIUM_FLOAT;
case kHigh_GrSLPrecision:
return GR_GL_HIGH_FLOAT;
}
SkFAIL("Unknown precision.");
return -1;
}

static GrGLenum shader_type_to_gl_shader(GrShaderType type) {
switch (type) {
case kVertex_GrShaderType:
return GR_GL_VERTEX_SHADER;
case kGeometry_GrShaderType:
return GR_GL_GEOMETRY_SHADER;
case kFragment_GrShaderType:
return GR_GL_FRAGMENT_SHADER;
}
SkFAIL("Unknown shader type.");
return -1;
}

void GrGLSLCaps::initShaderPrecisionTable(const GrGLContextInfo& ctxInfo,
const GrGLInterface* intf) {
if (kGLES_GrGLStandard == ctxInfo.standard() || ctxInfo.version() >= GR_GL_VER(4, 1) ||
ctxInfo.hasExtension("GL_ARB_ES2_compatibility")) {
for (int s = 0; s < kGrShaderTypeCount; ++s) {
if (kGeometry_GrShaderType != s) {
GrShaderType shaderType = static_cast<GrShaderType>(s);
GrGLenum glShader = shader_type_to_gl_shader(shaderType);
PrecisionInfo* first = NULL;
fShaderPrecisionVaries = false;
for (int p = 0; p < kGrSLPrecisionCount; ++p) {
GrSLPrecision precision = static_cast<GrSLPrecision>(p);
GrGLenum glPrecision = precision_to_gl_float_type(precision);
GrGLint range[2];
GrGLint bits;
GR_GL_GetShaderPrecisionFormat(intf, glShader, glPrecision, range, &bits);
if (bits) {
fFloatPrecisions[s][p].fLogRangeLow = range[0];
fFloatPrecisions[s][p].fLogRangeHigh = range[1];
fFloatPrecisions[s][p].fBits = bits;
if (!first) {
first = &fFloatPrecisions[s][p];
}
else if (!fShaderPrecisionVaries) {
fShaderPrecisionVaries = (*first != fFloatPrecisions[s][p]);
}
}
}
}
}
}
else {
// We're on a desktop GL that doesn't have precision info. Assume they're all 32bit float.
fShaderPrecisionVaries = false;
for (int s = 0; s < kGrShaderTypeCount; ++s) {
if (kGeometry_GrShaderType != s) {
for (int p = 0; p < kGrSLPrecisionCount; ++p) {
fFloatPrecisions[s][p].fLogRangeLow = 127;
fFloatPrecisions[s][p].fLogRangeHigh = 127;
fFloatPrecisions[s][p].fBits = 23;
}
}
}
}
// GetShaderPrecisionFormat doesn't accept GL_GEOMETRY_SHADER as a shader type. Assume they're
// the same as the vertex shader. Only fragment shaders were ever allowed to omit support for
// highp. GS was added after GetShaderPrecisionFormat was added to the list of features that
// are recommended against.
if (fGeometryShaderSupport) {
for (int p = 0; p < kGrSLPrecisionCount; ++p) {
fFloatPrecisions[kGeometry_GrShaderType][p] = fFloatPrecisions[kVertex_GrShaderType][p];
}
}
}

@@ -16,6 +16,7 @@
#include "SkTArray.h"

class GrGLContextInfo;
class GrGLSLCaps;

/**
* Stores some capabilities of a GL context. Most are determined by the GL
@@ -165,19 +166,6 @@ public:
kES_EXT_MsToTexture_MSFBOType == fMSFBOType;
}

/**
* Some helper functions for encapsulating various extensions to read FB Buffer on openglES
*
* TODO(joshualitt) On desktop opengl 4.2+ we can achieve something similar to this effect
*/
bool fbFetchSupport() const { return fFBFetchSupport; }

bool fbFetchNeedsCustomOutput() const { return fFBFetchNeedsCustomOutput; }

const char* fbFetchColorName() const { return fFBFetchColorName; }

const char* fbFetchExtensionString() const { return fFBFetchExtensionString; }

bool fbMixedSamplesSupport() const { return fFBMixedSamplesSupport; }

InvalidateFBType invalidateFBType() const { return fInvalidateFBType; }
@@ -265,8 +253,6 @@ public:

bool fullClearIsFree() const { return fFullClearIsFree; }

bool dropsTileOnZeroDivide() const { return fDropsTileOnZeroDivide; }

/**
* Returns a string containing the caps info.
*/
@@ -285,6 +271,8 @@ public:

LATCAlias latcAlias() const { return fLATCAlias; }

GrGLSLCaps* glslCaps() const { return reinterpret_cast<GrGLSLCaps*>(fShaderCaps.get()); }

private:
/**
* Maintains a bit per GrPixelConfig. It is used to avoid redundantly
@@ -329,9 +317,6 @@ private:
void initConfigRenderableTable(const GrGLContextInfo&);
void initConfigTexturableTable(const GrGLContextInfo&, const GrGLInterface*);

// Must be called after fGeometryShaderSupport is initialized.
void initShaderPrecisionTable(const GrGLContextInfo&, const GrGLInterface*);

bool doReadPixelsSupported(const GrGLInterface* intf, GrGLenum format, GrGLenum type) const;

// tracks configs that have been verified to pass the FBO completeness when
@@ -371,14 +356,8 @@ private:
bool fUseNonVBOVertexAndIndexDynamicData : 1;
bool fIsCoreProfile : 1;
bool fFullClearIsFree : 1;
bool fDropsTileOnZeroDivide : 1;
bool fFBFetchSupport : 1;
bool fFBFetchNeedsCustomOutput : 1;
bool fFBMixedSamplesSupport : 1;

const char* fFBFetchColorName;
const char* fFBFetchExtensionString;

struct ReadPixelsSupportedFormat {
GrGLenum fFormat;
GrGLenum fType;
@@ -395,6 +374,66 @@ private:
typedef GrDrawTargetCaps INHERITED;
};

typedef GrGLCaps GrGLSLCaps;

class GrGLSLCaps : public GrShaderCaps {
public:
SK_DECLARE_INST_COUNT(GrGLSLCaps)

/**
* Creates a GrGLSLCaps that advertises no support for any extensions,
* formats, etc. Call init to initialize from a GrGLContextInfo.
*/
GrGLSLCaps();
~GrGLSLCaps() override {}

GrGLSLCaps(const GrGLSLCaps& caps);

GrGLSLCaps& operator = (const GrGLSLCaps& caps);

/**
* Resets the caps such that nothing is supported.
*/
void reset() override;

/**
* Initializes the GrGLSLCaps to the set of features supported in the current
* OpenGL context accessible via ctxInfo.
*/
bool init(const GrGLContextInfo& ctxInfo, const GrGLInterface* glInterface);

/**
* Some helper functions for encapsulating various extensions to read FB Buffer on openglES
*
* TODO(joshualitt) On desktop opengl 4.2+ we can achieve something similar to this effect
*/
bool fbFetchSupport() const { return fFBFetchSupport; }

bool fbFetchNeedsCustomOutput() const { return fFBFetchNeedsCustomOutput; }

const char* fbFetchColorName() const { return fFBFetchColorName; }

const char* fbFetchExtensionString() const { return fFBFetchExtensionString; }

bool dropsTileOnZeroDivide() const { return fDropsTileOnZeroDivide; }

/**
* Returns a string containing the caps info.
*/
SkString dump() const override;

private:
// Must be called after fGeometryShaderSupport is initialized.
void initShaderPrecisionTable(const GrGLContextInfo&, const GrGLInterface*);

bool fDropsTileOnZeroDivide : 1;
bool fFBFetchSupport : 1;
bool fFBFetchNeedsCustomOutput : 1;

const char* fFBFetchColorName;
const char* fFBFetchExtensionString;

typedef GrShaderCaps INHERITED;
};

#endif

@@ -155,7 +155,7 @@ GrGLGpu::GrGLGpu(const GrGLContext& ctx, GrContext* context)
fTempDstFBOID = 0;
fStencilClearFBOID = 0;

if (this->glCaps().pathRenderingSupport()) {
if (this->glCaps().shaderCaps()->pathRenderingSupport()) {
fPathRendering.reset(new GrGLPathRendering(this));
}
}
@@ -188,7 +188,7 @@ void GrGLGpu::contextAbandoned() {
fTempSrcFBOID = 0;
fTempDstFBOID = 0;
fStencilClearFBOID = 0;
if (this->glCaps().pathRenderingSupport()) {
if (this->glCaps().shaderCaps()->pathRenderingSupport()) {
this->glPathRendering()->abandonGpuResources();
}
}
@@ -335,7 +335,7 @@ void GrGLGpu::onResetContext(uint32_t resetBits) {
}

if (resetBits & kPathRendering_GrGLBackendState) {
if (this->caps()->pathRenderingSupport()) {
if (this->caps()->shaderCaps()->pathRenderingSupport()) {
this->glPathRendering()->resetContext();
}
}

@@ -46,7 +46,7 @@ public:
const GrGLCaps& glCaps() const { return *fGLContext.caps(); }

GrGLPathRendering* glPathRendering() {
SkASSERT(glCaps().pathRenderingSupport());
SkASSERT(glCaps().shaderCaps()->pathRenderingSupport());
return static_cast<GrGLPathRendering*>(pathRendering());
}

@@ -197,7 +197,7 @@ void GrGLPathRendering::drawPaths(const GrPathRange* pathRange,
const void* indices, PathIndexType indexType,
const float transformValues[], PathTransformType transformType,
int count, const GrStencilSettings& stencilSettings) {
SkASSERT(fGpu->caps()->pathRenderingSupport());
SkASSERT(fGpu->caps()->shaderCaps()->pathRenderingSupport());

GrGLuint baseID = static_cast<const GrGLPathRange*>(pathRange)->basePathID();

@@ -260,7 +260,7 @@ void GrGLPathRendering::setProjectionMatrix(const SkMatrix& matrix,
const SkISize& renderTargetSize,
GrSurfaceOrigin renderTargetOrigin) {

SkASSERT(fGpu->glCaps().pathRenderingSupport());
SkASSERT(fGpu->glCaps().shaderCaps()->pathRenderingSupport());

if (renderTargetOrigin == fHWProjectionMatrixState.fRenderTargetOrigin &&
renderTargetSize == fHWProjectionMatrixState.fRenderTargetSize &&
@@ -105,7 +105,7 @@ bool GrGLProgramDescBuilder::Build(GrProgramDesc* desc,

GrProcessorKeyBuilder b(&glDesc->key());

primProc.getGLProcessorKey(batchTracker, gpu->glCaps(), &b);
primProc.getGLProcessorKey(batchTracker, *gpu->glCaps().glslCaps(), &b);
//**** use glslCaps here?
if (!get_meta_key(primProc, gpu->glCaps(), 0, &b)) {
glDesc->key().reset();
@@ -115,7 +115,7 @@ bool GrGLProgramDescBuilder::Build(GrProgramDesc* desc,
for (int s = 0; s < pipeline.numFragmentStages(); ++s) {
const GrPendingFragmentStage& fps = pipeline.getFragmentStage(s);
const GrFragmentProcessor& fp = *fps.processor();
fp.getGLProcessorKey(gpu->glCaps(), &b);
fp.getGLProcessorKey(*gpu->glCaps().glslCaps(), &b);
//**** use glslCaps here?
if (!get_meta_key(fp, gpu->glCaps(), primProc.getTransformKey(fp.coordTransforms()), &b)) {
glDesc->key().reset();
@@ -124,7 +124,7 @@ bool GrGLProgramDescBuilder::Build(GrProgramDesc* desc,
}

const GrXferProcessor& xp = *pipeline.getXferProcessor();
xp.getGLProcessorKey(gpu->glCaps(), &b);
xp.getGLProcessorKey(*gpu->glCaps().glslCaps(), &b);
//**** use glslCaps here?
if (!get_meta_key(xp, gpu->glCaps(), 0, &b)) {
glDesc->key().reset();
@@ -39,7 +39,7 @@ static void append_default_precision_qualifier(GrSLPrecision p,
GrGLFragmentShaderBuilder::DstReadKey
GrGLFragmentShaderBuilder::KeyForDstRead(const GrTexture* dstCopy, const GrGLCaps& caps) {
uint32_t key = kYesDstRead_DstReadKeyBit;
if (caps.fbFetchSupport()) {
if (caps.glslCaps()->fbFetchSupport()) {
return key;
}
SkASSERT(dstCopy);
@@ -79,7 +79,7 @@ bool GrGLFragmentShaderBuilder::enableFeature(GLSLFeature feature) {
switch (feature) {
case kStandardDerivatives_GLSLFeature: {
GrGLGpu* gpu = fProgramBuilder->gpu();
if (!gpu->glCaps().shaderDerivativeSupport()) {
if (!gpu->glCaps().shaderCaps()->shaderDerivativeSupport()) {
return false;
}
if (kGLES_GrGLStandard == gpu->glStandard() &&
@@ -166,13 +166,13 @@ const char* GrGLFragmentShaderBuilder::dstColor() {
fHasReadDstColor = true;

GrGLGpu* gpu = fProgramBuilder->gpu();
if (gpu->glCaps().fbFetchSupport()) {
if (gpu->glCaps().glslCaps()->fbFetchSupport()) {
this->addFeature(1 << (GrGLFragmentShaderBuilder::kLastGLSLPrivateFeature + 1),
gpu->glCaps().fbFetchExtensionString());
gpu->glCaps().glslCaps()->fbFetchExtensionString());

// Some versions of this extension string require declaring custom color output on ES 3.0+
const char* fbFetchColorName = gpu->glCaps().fbFetchColorName();
if (gpu->glCaps().fbFetchNeedsCustomOutput()) {
const char* fbFetchColorName = gpu->glCaps().glslCaps()->fbFetchColorName();
if (gpu->glCaps().glslCaps()->fbFetchNeedsCustomOutput()) {
this->enableCustomOutput();
fOutputs[fCustomColorOutputIndex].setTypeModifier(GrShaderVar::kInOut_TypeModifier);
fbFetchColorName = declared_color_output_name();
@@ -75,7 +75,7 @@ GrGLProgram* GrGLProgramBuilder::CreateProgram(const DrawArgs& args, GrGLGpu* gp
GrGLProgramBuilder* GrGLProgramBuilder::CreateProgramBuilder(const DrawArgs& args,
GrGLGpu* gpu) {
if (args.fPrimitiveProcessor->isPathRendering()) {
SkASSERT(gpu->glCaps().pathRenderingSupport() &&
SkASSERT(gpu->glCaps().shaderCaps()->pathRenderingSupport() &&
!args.fPrimitiveProcessor->willUseGeoShader() &&
args.fPrimitiveProcessor->numAttribs() == 0);
return SkNEW_ARGS(GrGLNvprProgramBuilder, (gpu, args));
@@ -302,7 +302,7 @@ void GrGLProgramBuilder::emitAndInstallProc(const GrPrimitiveProcessor& gp,
fGeometryProcessor = SkNEW(GrGLInstalledGeoProc);

const GrBatchTracker& bt = this->batchTracker();
fGeometryProcessor->fGLProc.reset(gp.createGLInstance(bt, fGpu->glCaps()));
fGeometryProcessor->fGLProc.reset(gp.createGLInstance(bt, *fGpu->glCaps().glslCaps()));

SkSTArray<4, GrGLProcessor::TextureSampler> samplers(gp.numTextures());
this->emitSamplers(gp, &samplers, fGeometryProcessor);

@@ -254,7 +254,8 @@ bool GrDrawTarget::programUnitTest(int maxStages) {
pipelineBuilder.setClip(clip);

// if path rendering we have to setup a couple of things like the draw type
bool usePathRendering = gpu->glCaps().pathRenderingSupport() && random.nextBool();
bool usePathRendering = gpu->glCaps().shaderCaps()->pathRenderingSupport() &&
random.nextBool();

// twiddle drawstate knobs randomly
bool hasGeometryProcessor = !usePathRendering;
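To summarize the GL side of the split, GLSL-only queries (framebuffer fetch, drops-tile-on-zero-divide) now live on GrGLSLCaps and are reached through GrGLCaps::glslCaps(), as the program-builder hunks above show. A minimal sketch, assuming a GrGLGpu* named gpu:

    if (gpu->glCaps().glslCaps()->fbFetchSupport()) {
        // Read the destination color in the fragment shader via the extension's built-in.
        const char* fbFetchColor = gpu->glCaps().glslCaps()->fbFetchColorName();
        // e.g. "gl_LastFragData[0]" or "gl_LastFragColorARM", depending on the extension.
    }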