Check for xfer barriers in GrBatch, auto-issue barriers in GrGpu

Review URL: https://codereview.chromium.org/1287973003
This commit is contained in:
bsalomon 2015-08-12 11:14:50 -07:00 committed by Commit bot
parent 6028a84765
commit cb02b38b2c
44 changed files with 170 additions and 168 deletions

View File

@ -25,9 +25,12 @@ class GrProcOptInfo;
* required after a pixel has been written, before it can be safely read again.
*/
enum GrXferBarrierType {
kTexture_GrXferBarrierType, //<! Required when a shader reads and renders to the same texture.
kBlend_GrXferBarrierType, //<! Required by certain blend extensions.
kNone_GrXferBarrierType = 0, //<! No barrier is required
kTexture_GrXferBarrierType, //<! Required when a shader reads and renders to the same texture.
kBlend_GrXferBarrierType, //<! Required by certain blend extensions.
};
/** Should be able to treat kNone as false in boolean expressions */
GR_STATIC_ASSERT(SkToBool(kNone_GrXferBarrierType) == false);
/**
* GrXferProcessor is responsible for implementing the xfer mode that blends the src color and dst
@ -148,9 +151,7 @@ public:
* Returns the type of Xfer barrier (if any) that this XP requires on the given rt, or
* kNone_GrXferBarrierType if no barrier is needed.
*/
bool willNeedXferBarrier(const GrRenderTarget* rt,
const GrCaps& caps,
GrXferBarrierType* outBarrierType) const;
GrXferBarrierType xferBarrierType(const GrRenderTarget* rt, const GrCaps& caps) const;
struct BlendInfo {
void reset() {
@ -256,13 +257,12 @@ private:
GrProcessorKeyBuilder* b) const = 0;
/**
* If not using a texture barrier, retrieves whether the subclass will require a different type
* of barrier.
* Determines the type of barrier (if any) required by the subclass. Note that the possibility
* that a kTexture type barrier is required is handled by the base class and need not be
* considered by subclass overrides of this function.
*/
virtual bool onWillNeedXferBarrier(const GrRenderTarget*,
const GrCaps&,
GrXferBarrierType* outBarrierType SK_UNUSED) const {
return false;
virtual GrXferBarrierType onXferBarrier(const GrRenderTarget*, const GrCaps&) const {
return kNone_GrXferBarrierType;
}
/**

View File

@ -940,8 +940,9 @@ private:
geometry.fViewMatrix.mapRect(&fBounds);
}
bool onCombineIfPossible(GrBatch* t) override {
if (!this->pipeline()->isEqual(*t->pipeline())) {
bool onCombineIfPossible(GrBatch* t, const GrCaps& caps) override {
if (!GrPipeline::CanCombine(*this->pipeline(), this->bounds(), *t->pipeline(), t->bounds(),
caps)) {
return false;
}

View File

@ -492,8 +492,9 @@ private:
const SkMatrix& viewMatrix() const { return fBatch.fViewMatrix; }
bool usesLocalCoords() const { return fBatch.fUsesLocalCoords; }
bool onCombineIfPossible(GrBatch* t) override {
if (!this->pipeline()->isEqual(*t->pipeline())) {
bool onCombineIfPossible(GrBatch* t, const GrCaps& caps) override {
if (!GrPipeline::CanCombine(*this->pipeline(), this->bounds(), *t->pipeline(), t->bounds(),
caps)) {
return false;
}

View File

@ -731,8 +731,9 @@ private:
fBounds.outset(0.5f, 0.5f);
}
bool onCombineIfPossible(GrBatch* t) override {
if (!this->pipeline()->isEqual(*t->pipeline())) {
bool onCombineIfPossible(GrBatch* t, const GrCaps& caps) override {
if (!GrPipeline::CanCombine(*this->pipeline(), this->bounds(), *t->pipeline(), t->bounds(),
caps)) {
return false;
}

View File

@ -264,8 +264,9 @@ private:
geometry.fViewMatrix.mapRect(&fBounds);
}
bool onCombineIfPossible(GrBatch* t) override {
if (!this->pipeline()->isEqual(*t->pipeline())) {
bool onCombineIfPossible(GrBatch* t, const GrCaps& caps) override {
if (!GrPipeline::CanCombine(*this->pipeline(), this->bounds(), *t->pipeline(), t->bounds(),
caps)) {
return false;
}

View File

@ -1893,8 +1893,9 @@ private:
bool usesLocalCoords() const { return fBatch.fUsesLocalCoords; }
int numGlyphs() const { return fBatch.fNumGlyphs; }
bool onCombineIfPossible(GrBatch* t) override {
if (!this->pipeline()->isEqual(*t->pipeline())) {
bool onCombineIfPossible(GrBatch* t, const GrCaps& caps) override {
if (!GrPipeline::CanCombine(*this->pipeline(), this->bounds(), *t->pipeline(), t->bounds(),
caps)) {
return false;
}

View File

@ -27,8 +27,7 @@ GrBufferedDrawTarget::~GrBufferedDrawTarget() {
}
void GrBufferedDrawTarget::onDrawBatch(GrBatch* batch) {
fCommands->recordXferBarrierIfNecessary(*batch->pipeline(), *this->caps());
fCommands->recordDrawBatch(batch);
fCommands->recordDrawBatch(batch, *this->caps());
}
void GrBufferedDrawTarget::onStencilPath(const GrPipelineBuilder& pipelineBuilder,
@ -131,12 +130,11 @@ GrBufferedDrawTarget::createStateForPathDraw(const GrPrimitiveProcessor* primPro
fPrevState->fPrimitiveProcessor->canMakeEqual(fPrevState->fBatchTracker,
*state->fPrimitiveProcessor,
state->fBatchTracker) &&
fPrevState->getPipeline()->isEqual(*state->getPipeline())) {
GrPipeline::AreEqual(*fPrevState->getPipeline(), *state->getPipeline(), false)) {
this->unallocState(state);
} else {
fPrevState.reset(state);
}
fCommands->recordXferBarrierIfNecessary(*fPrevState->getPipeline(), *this->caps());
return fPrevState;
}

View File

@ -65,20 +65,3 @@ GrTargetCommands::Cmd* GrCommandBuilder::recordCopySurface(GrSurface* dst,
GrBATCH_INFO("Recording copysurface %d\n", cs->uniqueID());
return cs;
}
GrTargetCommands::Cmd*
GrCommandBuilder::recordXferBarrierIfNecessary(const GrPipeline& pipeline,
const GrCaps& caps) {
const GrXferProcessor& xp = *pipeline.getXferProcessor();
GrRenderTarget* rt = pipeline.getRenderTarget();
GrXferBarrierType barrierType;
if (!xp.willNeedXferBarrier(rt, caps, &barrierType)) {
return NULL;
}
XferBarrier* xb = GrNEW_APPEND_TO_RECORDER(*this->cmdBuffer(), XferBarrier, (rt));
xb->fBarrierType = barrierType;
GrBATCH_INFO("Recording xfer barrier %d\n", xb->uniqueID());
return xb;
}

View File

@ -28,7 +28,7 @@ public:
bool insideClip,
GrRenderTarget* renderTarget);
virtual Cmd* recordDiscard(GrRenderTarget*);
virtual Cmd* recordDrawBatch(GrBatch*) = 0;
virtual Cmd* recordDrawBatch(GrBatch*, const GrCaps&) = 0;
virtual Cmd* recordStencilPath(const GrPipelineBuilder&,
const GrPathProcessor*,
const GrPath*,
@ -56,7 +56,6 @@ public:
GrSurface* src,
const SkIRect& srcRect,
const SkIPoint& dstPoint);
virtual Cmd* recordXferBarrierIfNecessary(const GrPipeline&, const GrCaps&);
protected:
typedef GrTargetCommands::DrawBatch DrawBatch;
@ -66,7 +65,6 @@ protected:
typedef GrTargetCommands::Clear Clear;
typedef GrTargetCommands::ClearStencilClip ClearStencilClip;
typedef GrTargetCommands::CopySurface CopySurface;
typedef GrTargetCommands::XferBarrier XferBarrier;
GrCommandBuilder(GrGpu* gpu) : fCommands(gpu) {}

View File

@ -389,8 +389,9 @@ private:
this->setBounds(devBounds);
}
bool onCombineIfPossible(GrBatch* t) override {
if (!this->pipeline()->isEqual(*t->pipeline())) {
bool onCombineIfPossible(GrBatch* t, const GrCaps& caps) override {
if (!GrPipeline::CanCombine(*this->pipeline(), this->bounds(), *t->pipeline(), t->bounds(),
caps)) {
return false;
}

View File

@ -14,6 +14,7 @@
#include "GrGpuResourcePriv.h"
#include "GrIndexBuffer.h"
#include "GrPathRendering.h"
#include "GrPipeline.h"
#include "GrResourceCache.h"
#include "GrRenderTargetPriv.h"
#include "GrStencilAttachment.h"
@ -381,6 +382,10 @@ void GrGpu::removeGpuTraceMarker(const GrGpuTraceMarker* marker) {
void GrGpu::draw(const DrawArgs& args, const GrVertices& vertices) {
this->handleDirtyContext();
if (GrXferBarrierType barrierType = args.fPipeline->xferBarrierType(*this->caps())) {
this->xferBarrier(args.fPipeline->getRenderTarget(), barrierType);
}
GrVertices::Iterator iter;
const GrNonInstancedVertices* verts = iter.init(vertices);
do {

View File

@ -299,9 +299,6 @@ public:
const SkIRect& srcRect,
const SkIPoint& dstPoint) = 0;
// Called before certain draws in order to guarantee coherent results from dst reads.
virtual void xferBarrier(GrRenderTarget*, GrXferBarrierType) = 0;
struct DrawArgs {
DrawArgs(const GrPrimitiveProcessor* primProc,
const GrPipeline* pipeline,
@ -432,6 +429,9 @@ private:
// assumed 3D context state and dirty any state cache.
virtual void onResetContext(uint32_t resetBits) = 0;
// Called before certain draws in order to guarantee coherent results from dst reads.
virtual void xferBarrier(GrRenderTarget*, GrXferBarrierType) = 0;
// overridden by backend-specific derived class to create objects.
// Texture size and sample size will have already been validated in base class before
// onCreateTexture/CompressedTexture are called.

View File

@ -26,14 +26,6 @@ GrImmediateDrawTarget::~GrImmediateDrawTarget() {
}
void GrImmediateDrawTarget::onDrawBatch(GrBatch* batch) {
const GrXferProcessor& xp = *batch->pipeline()->getXferProcessor();
GrRenderTarget* rt = batch->pipeline()->getRenderTarget();
GrXferBarrierType barrierType;
if (xp.willNeedXferBarrier(rt, *this->caps(), &barrierType)) {
this->getGpu()->xferBarrier(rt, barrierType);
}
fBatchTarget.resetNumberOfDraws();
batch->generateGeometry(&fBatchTarget);

View File

@ -25,12 +25,13 @@ static bool path_fill_type_is_winding(const GrStencilSettings& pathStencilSettin
return isWinding;
}
GrTargetCommands::Cmd* GrInOrderCommandBuilder::recordDrawBatch(GrBatch* batch) {
GrTargetCommands::Cmd* GrInOrderCommandBuilder::recordDrawBatch(GrBatch* batch,
const GrCaps& caps) {
GrBATCH_INFO("In-Recording (%s, %u)\n", batch->name(), batch->uniqueID());
if (!this->cmdBuffer()->empty() &&
Cmd::kDrawBatch_CmdType == this->cmdBuffer()->back().type()) {
DrawBatch* previous = static_cast<DrawBatch*>(&this->cmdBuffer()->back());
if (previous->batch()->combineIfPossible(batch)) {
if (previous->batch()->combineIfPossible(batch, caps)) {
GrBATCH_INFO("\tBatching with (%s, %u)\n",
previous->fBatch->name(), previous->fBatch->uniqueID());
return NULL;

View File

@ -17,7 +17,7 @@ public:
GrInOrderCommandBuilder(GrGpu* gpu) : INHERITED(gpu) { }
Cmd* recordDrawBatch(GrBatch*) override;
Cmd* recordDrawBatch(GrBatch*, const GrCaps&) override;
Cmd* recordStencilPath(const GrPipelineBuilder&,
const GrPathProcessor*,
const GrPath*,

View File

@ -733,8 +733,9 @@ private:
this->setBounds(geometry.fDevBounds);
}
bool onCombineIfPossible(GrBatch* t) override {
if (!this->pipeline()->isEqual(*t->pipeline())) {
bool onCombineIfPossible(GrBatch* t, const GrCaps& caps) override {
if (!GrPipeline::CanCombine(*this->pipeline(), this->bounds(), *t->pipeline(), t->bounds(),
caps)) {
return false;
}
@ -956,8 +957,9 @@ private:
this->setBounds(geometry.fDevBounds);
}
bool onCombineIfPossible(GrBatch* t) override {
if (!this->pipeline()->isEqual(*t->pipeline())) {
bool onCombineIfPossible(GrBatch* t, const GrCaps& caps) override {
if (!GrPipeline::CanCombine(*this->pipeline(), this->bounds(), *t->pipeline(), t->bounds(),
caps)) {
return false;
}
@ -1214,8 +1216,9 @@ private:
this->setBounds(bounds);
}
bool onCombineIfPossible(GrBatch* t) override {
if (!this->pipeline()->isEqual(*t->pipeline())) {
bool onCombineIfPossible(GrBatch* t, const GrCaps& caps) override {
if (!GrPipeline::CanCombine(*this->pipeline(), this->bounds(), *t->pipeline(), t->bounds(),
caps)) {
return false;
}
@ -1591,8 +1594,9 @@ private:
this->setBounds(geometry.fDevBounds);
}
bool onCombineIfPossible(GrBatch* t) override {
if (!this->pipeline()->isEqual(*t->pipeline())) {
bool onCombineIfPossible(GrBatch* t, const GrCaps& caps) override {
if (!GrPipeline::CanCombine(*this->pipeline(), this->bounds(), *t->pipeline(), t->bounds(),
caps)) {
return false;
}
@ -1777,8 +1781,9 @@ private:
this->setBounds(geometry.fDevBounds);
}
bool onCombineIfPossible(GrBatch* t) override {
if (!this->pipeline()->isEqual(*t->pipeline())) {
bool onCombineIfPossible(GrBatch* t, const GrCaps& caps) override {
if (!GrPipeline::CanCombine(*this->pipeline(), this->bounds(), *t->pipeline(), t->bounds(),
caps)) {
return false;
}

View File

@ -11,6 +11,7 @@
#include "SkPath.h"
#include "GrGpu.h"
#include "GrPathRange.h"
#include "GrPipeline.h"
class SkDescriptor;
class SkTypeface;
@ -166,6 +167,9 @@ public:
void drawPath(const DrawPathArgs& args, const GrPath* path) {
fGpu->handleDirtyContext();
if (GrXferBarrierType barrierType = args.fPipeline->xferBarrierType(*fGpu->caps())) {
fGpu->xferBarrier(args.fPipeline->getRenderTarget(), barrierType);
}
this->onDrawPath(args, path);
}
@ -173,10 +177,14 @@ public:
PathIndexType indexType, const float transformValues[],
PathTransformType transformType, int count) {
fGpu->handleDirtyContext();
if (GrXferBarrierType barrierType = args.fPipeline->xferBarrierType(*fGpu->caps())) {
fGpu->xferBarrier(args.fPipeline->getRenderTarget(), barrierType);
}
pathRange->willDrawPaths(indices, indexType, count);
this->onDrawPaths(args, pathRange, indices, indexType, transformValues, transformType,
count);
}
protected:
GrPathRendering(GrGpu* gpu)
: fGpu(gpu) {

View File

@ -150,28 +150,26 @@ void GrPipeline::adjustProgramFromOptimizations(const GrPipelineBuilder& pipelin
////////////////////////////////////////////////////////////////////////////////
bool GrPipeline::isEqual(const GrPipeline& that, bool ignoreCoordTransforms) const {
// If we point to the same pipeline, then we are necessarily equal
if (this == &that) {
return true;
}
bool GrPipeline::AreEqual(const GrPipeline& a, const GrPipeline& b,
bool ignoreCoordTransforms) {
SkASSERT(&a != &b);
if (this->getRenderTarget() != that.getRenderTarget() ||
this->fFragmentStages.count() != that.fFragmentStages.count() ||
this->fNumColorStages != that.fNumColorStages ||
this->fScissorState != that.fScissorState ||
this->fFlags != that.fFlags ||
this->fStencilSettings != that.fStencilSettings ||
this->fDrawFace != that.fDrawFace) {
if (a.getRenderTarget() != b.getRenderTarget() ||
a.fFragmentStages.count() != b.fFragmentStages.count() ||
a.fNumColorStages != b.fNumColorStages ||
a.fScissorState != b.fScissorState ||
a.fFlags != b.fFlags ||
a.fStencilSettings != b.fStencilSettings ||
a.fDrawFace != b.fDrawFace) {
return false;
}
if (!this->getXferProcessor()->isEqual(*that.getXferProcessor())) {
if (!a.getXferProcessor()->isEqual(*b.getXferProcessor())) {
return false;
}
for (int i = 0; i < this->numFragmentStages(); i++) {
if (!this->getFragmentStage(i).processor()->isEqual(*that.getFragmentStage(i).processor(),
for (int i = 0; i < a.numFragmentStages(); i++) {
if (!a.getFragmentStage(i).processor()->isEqual(*b.getFragmentStage(i).processor(),
ignoreCoordTransforms)) {
return false;
}

View File

@ -29,6 +29,9 @@ class GrPipelineBuilder;
*/
class GrPipeline : public GrNonAtomicRef {
public:
///////////////////////////////////////////////////////////////////////////
/// @name Creation
struct CreateArgs {
const GrPipelineBuilder* fPipelineBuilder;
const GrCaps* fCaps;
@ -41,13 +44,39 @@ public:
/** Creates a pipeline into a pre-allocated buffer */
static GrPipeline* CreateAt(void* memory, const CreateArgs&, GrPipelineOptimizations*);
/*
/// @}
///////////////////////////////////////////////////////////////////////////
/// @name Comparisons
/**
* Returns true if these pipelines are equivalent. Coord transforms may be applied either on
* the GPU or the CPU. When we apply them on the CPU then the matrices need not agree in order
* to combine draws. Therefore we take a param that indicates whether coord transforms should be
* compared.
*/
bool isEqual(const GrPipeline& that, bool ignoreCoordTransforms = false) const;
static bool AreEqual(const GrPipeline& a, const GrPipeline& b, bool ignoreCoordTransforms);
/**
* Allows a GrBatch subclass to determine whether two GrBatches can combine. This is a stricter
* test than AreEqual because it also considers blend barriers when the two batches' bounds
* overlap.
*/
static bool CanCombine(const GrPipeline& a, const SkRect& aBounds,
const GrPipeline& b, const SkRect& bBounds,
const GrCaps& caps,
bool ignoreCoordTransforms = false) {
if (!AreEqual(a, b, ignoreCoordTransforms)) {
return false;
}
if (a.xferBarrierType(caps)) {
return aBounds.fRight <= bBounds.fLeft ||
aBounds.fBottom <= bBounds.fTop ||
bBounds.fRight <= aBounds.fLeft ||
bBounds.fBottom <= aBounds.fTop;
}
return true;
}
/// @}
@ -90,6 +119,10 @@ public:
bool isHWAntialiasState() const { return SkToBool(fFlags & kHWAA_Flag); }
bool snapVerticesToPixelCenters() const { return SkToBool(fFlags & kSnapVertices_Flag); }
GrXferBarrierType xferBarrierType(const GrCaps& caps) const {
return fXferProcessor->xferBarrierType(fRenderTarget.get(), caps);
}
/**
* Gets whether the target is drawing clockwise, counterclockwise,
* or both faces.

View File

@ -15,7 +15,8 @@ static bool intersect(const Left& a, const Right& b) {
a.fTop < b.fBottom && b.fTop < a.fBottom;
}
GrTargetCommands::Cmd* GrReorderCommandBuilder::recordDrawBatch(GrBatch* batch) {
GrTargetCommands::Cmd* GrReorderCommandBuilder::recordDrawBatch(GrBatch* batch,
const GrCaps& caps) {
// Check if there is a Batch Draw we can batch with by linearly searching back until we either
// 1) check every draw
// 2) intersect with something
@ -58,7 +59,7 @@ GrTargetCommands::Cmd* GrReorderCommandBuilder::recordDrawBatch(GrBatch* batch)
break;
}
// We cannot continue to search backwards if the render target changes
if (previous->batch()->combineIfPossible(batch)) {
if (previous->batch()->combineIfPossible(batch, caps)) {
GrBATCH_INFO("\t\tCombining with (%s, B%u)\n",
previous->fBatch->name(), previous->fBatch->uniqueID());
return NULL;

View File

@ -17,7 +17,7 @@ public:
GrReorderCommandBuilder(GrGpu* gpu) : INHERITED(gpu) {}
Cmd* recordDrawBatch(GrBatch*) override;
Cmd* recordDrawBatch(GrBatch*, const GrCaps&) override;
Cmd* recordStencilPath(const GrPipelineBuilder&,
const GrPathProcessor*,
const GrPath*,

View File

@ -75,7 +75,7 @@ void GrTargetCommands::DrawPaths::execute(GrGpu* gpu) {
fTransformType, fCount);
}
void GrTargetCommands::DrawBatch::execute(GrGpu*) {
void GrTargetCommands::DrawBatch::execute(GrGpu* gpu) {
fBatchTarget->flushNext(fBatch->numberOfDraws());
}
@ -94,7 +94,3 @@ void GrTargetCommands::ClearStencilClip::execute(GrGpu* gpu) {
void GrTargetCommands::CopySurface::execute(GrGpu* gpu) {
gpu->copySurface(this->dst(), this->src(), fSrcRect, fDstPoint);
}
void GrTargetCommands::XferBarrier::execute(GrGpu* gpu) {
gpu->xferBarrier(fRenderTarget.get(), fBarrierType);
}

View File

@ -22,9 +22,7 @@
class GrBufferedDrawTarget;
// TODO: Convert all commands into GrBatch and remove this class. Xferbarrier will just become a
// batch blocker (when there is overlap) and the xp is responsible for issuing any barrier calls
// on the backend.
// TODO: Convert all commands into GrBatch and remove this class.
class GrTargetCommands : ::SkNoncopyable {
public:
GrTargetCommands(GrGpu* gpu)
@ -42,7 +40,6 @@ public:
kDrawPath_CmdType = 5,
kDrawPaths_CmdType = 6,
kDrawBatch_CmdType = 7,
kXferBarrier_CmdType = 8,
};
Cmd(CmdType type)
@ -83,8 +80,6 @@ private:
typedef GrGpu::DrawArgs DrawArgs;
void recordXferBarrierIfNecessary(const GrPipeline&, GrBufferedDrawTarget*);
// TODO: This can be just a pipeline once paths are in batch, and it should live elsewhere
struct StateForPathDraw : public SkNVRefCnt<StateForPathDraw> {
// TODO get rid of the prim proc parameter when we use batch everywhere
@ -251,20 +246,6 @@ private:
GrBatchTarget* fBatchTarget;
};
struct XferBarrier : public Cmd {
XferBarrier(GrRenderTarget* rt)
: Cmd(kXferBarrier_CmdType)
, fRenderTarget(rt) {
}
void execute(GrGpu*) override;
GrXferBarrierType fBarrierType;
private:
GrPendingIOResource<GrRenderTarget, kWrite_GrIOType> fRenderTarget;
};
static const int kCmdBufferInitialSizeInBytes = 8 * 1024;
typedef void* TCmdAlign; // This wouldn't be enough align if a command used long double.

View File

@ -1576,9 +1576,7 @@ public:
batchTarget->draw(vertices);
}
bool onCombineIfPossible(GrBatch*) override {
return false;
}
bool onCombineIfPossible(GrBatch*, const GrCaps&) override { return false; }
private:
TessellatingPathBatch(const GrColor& color,

View File

@ -175,11 +175,11 @@ public:
return false;
}
void xferBarrier(GrRenderTarget*, GrXferBarrierType) override {}
private:
void onResetContext(uint32_t resetBits) override {}
void xferBarrier(GrRenderTarget*, GrXferBarrierType) override {}
GrTexture* onCreateTexture(const GrSurfaceDesc& desc, GrGpuResource::LifeCycle lifeCycle,
const void* srcData, size_t rowBytes) override {
return NULL;

View File

@ -93,17 +93,15 @@ void GrXferProcessor::getGLProcessorKey(const GrGLSLCaps& caps, GrProcessorKeyBu
this->onGetGLProcessorKey(caps, b);
}
bool GrXferProcessor::willNeedXferBarrier(const GrRenderTarget* rt,
const GrCaps& caps,
GrXferBarrierType* outBarrierType) const {
GrXferBarrierType GrXferProcessor::xferBarrierType(const GrRenderTarget* rt,
const GrCaps& caps) const {
SkASSERT(rt);
if (static_cast<const GrSurface*>(rt) == this->getDstTexture()) {
// Texture barriers are required when a shader reads and renders to the same texture.
SkASSERT(rt);
SkASSERT(caps.textureBarrierSupport());
*outBarrierType = kTexture_GrXferBarrierType;
return true;
return kTexture_GrXferBarrierType;
}
return this->onWillNeedXferBarrier(rt, caps, outBarrierType);
return this->onXferBarrier(rt, caps);
}
#ifdef SK_DEBUG

View File

@ -130,7 +130,6 @@ public:
args.fDevRect,
canTweakAlphaForCoverage);
}
helper.issueDraw(batchTarget);
}
@ -176,8 +175,9 @@ private:
const SkMatrix& viewMatrix() const { return fGeoData[0].fViewMatrix; }
bool coverageIgnored() const { return fBatch.fCoverageIgnored; }
bool onCombineIfPossible(GrBatch* t) override {
if (!this->pipeline()->isEqual(*t->pipeline())) {
bool onCombineIfPossible(GrBatch* t, const GrCaps& caps) {
if (!GrPipeline::CanCombine(*this->pipeline(), this->bounds(), *t->pipeline(), t->bounds(),
caps)) {
return false;
}

View File

@ -15,12 +15,10 @@ class SkMatrix;
struct SkRect;
namespace GrAAFillRectBatch {
GrBatch* Create(GrColor color,
const SkMatrix& viewMatrix,
const SkRect& rect,
const SkRect& devRect);
};
#endif

View File

@ -203,8 +203,9 @@ const GrIndexBuffer* GrAAStrokeRectBatch::GetIndexBuffer(GrResourceProvider* res
}
}
bool GrAAStrokeRectBatch::onCombineIfPossible(GrBatch* t) {
if (!this->pipeline()->isEqual(*t->pipeline())) {
bool GrAAStrokeRectBatch::onCombineIfPossible(GrBatch* t, const GrCaps& caps) {
if (!GrPipeline::CanCombine(*this->pipeline(), this->bounds(), *t->pipeline(), t->bounds(),
caps)) {
return false;
}

View File

@ -78,7 +78,7 @@ private:
bool miterStroke() const { return fBatch.fMiterStroke; }
bool coverageIgnored() const { return fBatch.fCoverageIgnored; }
bool onCombineIfPossible(GrBatch* t) override;
bool onCombineIfPossible(GrBatch* t, const GrCaps&) override;
void generateAAStrokeRectGeometry(void* vertices,
size_t offset,

View File

@ -54,16 +54,14 @@ public:
virtual void getInvariantOutputColor(GrInitInvariantOutput* out) const = 0;
virtual void getInvariantOutputCoverage(GrInitInvariantOutput* out) const = 0;
bool combineIfPossible(GrBatch* that) {
bool combineIfPossible(GrBatch* that, const GrCaps& caps) {
if (this->classID() != that->classID()) {
return false;
}
return this->onCombineIfPossible(that);
return this->onCombineIfPossible(that, caps);
}
virtual bool onCombineIfPossible(GrBatch*) = 0;
virtual void generateGeometry(GrBatchTarget*) = 0;
const SkRect& bounds() const { return fBounds; }
@ -162,6 +160,8 @@ protected:
SkRect fBounds;
private:
virtual bool onCombineIfPossible(GrBatch*, const GrCaps& caps) = 0;
/*
* initBatchTracker is a hook for the some additional overrides / optimization possibilities
* from the GrXferProcessor.

View File

@ -156,8 +156,9 @@ GrDrawAtlasBatch::GrDrawAtlasBatch(const Geometry& geometry, const SkMatrix& vie
this->setBounds(bounds);
}
bool GrDrawAtlasBatch::onCombineIfPossible(GrBatch* t) {
if (!this->pipeline()->isEqual(*t->pipeline())) {
bool GrDrawAtlasBatch::onCombineIfPossible(GrBatch* t, const GrCaps& caps) {
if (!GrPipeline::CanCombine(*this->pipeline(), this->bounds(), *t->pipeline(), t->bounds(),
caps)) {
return false;
}

View File

@ -56,7 +56,7 @@ private:
int quadCount() const { return fQuadCount; }
bool coverageIgnored() const { return fCoverageIgnored; }
bool onCombineIfPossible(GrBatch* t) override;
bool onCombineIfPossible(GrBatch* t, const GrCaps&) override;
SkSTArray<1, Geometry, true> fGeoData;
SkMatrix fViewMatrix;

View File

@ -183,8 +183,9 @@ void GrDrawVerticesBatch::generateGeometry(GrBatchTarget* batchTarget) {
batchTarget->draw(vertices);
}
bool GrDrawVerticesBatch::onCombineIfPossible(GrBatch* t) {
if (!this->pipeline()->isEqual(*t->pipeline())) {
bool GrDrawVerticesBatch::onCombineIfPossible(GrBatch* t, const GrCaps& caps) {
if (!GrPipeline::CanCombine(*this->pipeline(), this->bounds(), *t->pipeline(), t->bounds(),
caps)) {
return false;
}

View File

@ -75,7 +75,7 @@ private:
int indexCount() const { return fBatch.fIndexCount; }
bool coverageIgnored() const { return fBatch.fCoverageIgnored; }
bool onCombineIfPossible(GrBatch* t) override;
bool onCombineIfPossible(GrBatch* t, const GrCaps&) override;
struct BatchTracker {
GrPrimitiveType fPrimitiveType;

View File

@ -80,8 +80,9 @@ void GrRectBatch::generateGeometry(GrBatchTarget* batchTarget) {
helper.issueDraw(batchTarget);
}
bool GrRectBatch::onCombineIfPossible(GrBatch* t) {
if (!this->pipeline()->isEqual(*t->pipeline())) {
bool GrRectBatch::onCombineIfPossible(GrBatch* t, const GrCaps& caps) {
if (!GrPipeline::CanCombine(*this->pipeline(), this->bounds(), *t->pipeline(), t->bounds(),
caps)) {
return false;
}

View File

@ -66,7 +66,7 @@ private:
bool hasLocalMatrix() const { return fGeoData[0].fHasLocalMatrix; }
bool coverageIgnored() const { return fBatch.fCoverageIgnored; }
bool onCombineIfPossible(GrBatch* t) override;
bool onCombineIfPossible(GrBatch* t, const GrCaps&) override;
const GrGeometryProcessor* createRectGP();

View File

@ -50,10 +50,11 @@ private:
bool hairline() const { return fBatch.fHairline; }
bool coverageIgnored() const { return fBatch.fCoverageIgnored; }
bool onCombineIfPossible(GrBatch* t) override {
//if (!this->pipeline()->isEqual(*t->pipeline())) {
// return false;
//}
bool onCombineIfPossible(GrBatch* t, const GrCaps&) override {
// if (!GrPipeline::CanCombine(*this->pipeline(), this->bounds(), *t->pipeline(),
// t->bounds(), caps)) {
// return false;
// }
// GrStrokeRectBatch* that = t->cast<StrokeRectBatch>();
// NonAA stroke rects cannot batch right now

View File

@ -66,7 +66,7 @@ private:
virtual Geometry* geoData(int index) = 0;
virtual const Geometry* geoData(int index) const = 0;
bool onCombineIfPossible(GrBatch* t) override {
bool onCombineIfPossible(GrBatch* t, const GrCaps&) override {
return false;
}

View File

@ -552,9 +552,7 @@ private:
void onGetGLProcessorKey(const GrGLSLCaps& caps, GrProcessorKeyBuilder* b) const override;
bool onWillNeedXferBarrier(const GrRenderTarget* rt,
const GrCaps& caps,
GrXferBarrierType* outBarrierType) const override;
GrXferBarrierType onXferBarrier(const GrRenderTarget*, const GrCaps&) const override;
void onGetBlendInfo(BlendInfo*) const override;
@ -757,14 +755,11 @@ GrXferProcessor::OptFlags CustomXP::onGetOptimizations(const GrProcOptInfo& colo
return flags;
}
bool CustomXP::onWillNeedXferBarrier(const GrRenderTarget* rt,
const GrCaps& caps,
GrXferBarrierType* outBarrierType) const {
GrXferBarrierType CustomXP::onXferBarrier(const GrRenderTarget* rt, const GrCaps& caps) const {
if (this->hasHWBlendEquation() && !caps.advancedCoherentBlendEquationSupport()) {
*outBarrierType = kBlend_GrXferBarrierType;
return true;
return kBlend_GrXferBarrierType;
}
return false;
return kNone_GrXferBarrierType;
}
void CustomXP::onGetBlendInfo(BlendInfo* blendInfo) const {

View File

@ -617,8 +617,9 @@ private:
combinedMatrix.mapRect(&fBounds);
}
bool onCombineIfPossible(GrBatch* t) override {
if (!this->pipeline()->isEqual(*t->pipeline())) {
bool onCombineIfPossible(GrBatch* t, const GrCaps& caps) override {
if (!GrPipeline::CanCombine(*this->pipeline(), this->bounds(), *t->pipeline(), t->bounds(),
caps)) {
return false;
}

View File

@ -3062,6 +3062,7 @@ bool GrGLGpu::copySurfaceAsBlitFramebuffer(GrSurface* dst,
}
void GrGLGpu::xferBarrier(GrRenderTarget* rt, GrXferBarrierType type) {
SkASSERT(type);
switch (type) {
case kTexture_GrXferBarrierType: {
GrGLRenderTarget* glrt = static_cast<GrGLRenderTarget*>(rt);
@ -3080,6 +3081,7 @@ void GrGLGpu::xferBarrier(GrRenderTarget* rt, GrXferBarrierType type) {
this->caps()->blendEquationSupport());
GL_CALL(BlendBarrier());
return;
default: break; // placate compiler warnings that kNone not handled
}
}

View File

@ -96,8 +96,6 @@ public:
const SkIRect& srcRect,
const SkIPoint& dstPoint) override;
void xferBarrier(GrRenderTarget*, GrXferBarrierType) override;
void buildProgramDesc(GrProgramDesc*,
const GrPrimitiveProcessor&,
const GrPipeline&,
@ -118,6 +116,8 @@ private:
// GrGpu overrides
void onResetContext(uint32_t resetBits) override;
void xferBarrier(GrRenderTarget*, GrXferBarrierType) override;
GrTexture* onCreateTexture(const GrSurfaceDesc& desc, GrGpuResource::LifeCycle lifeCycle,
const void* srcData, size_t rowBytes) override;
GrTexture* onCreateCompressedTexture(const GrSurfaceDesc& desc,

View File

@ -910,7 +910,7 @@ static void test_lcd_coverage(skiatest::Reporter* reporter, const GrCaps& caps)
const char* name() const override { return "Test LCD Text Batch"; }
void initBatchTracker(const GrPipelineOptimizations&) override {}
bool onCombineIfPossible(GrBatch*) override { return false; }
bool onCombineIfPossible(GrBatch*, const GrCaps&) override { return false; }
void generateGeometry(GrBatchTarget*) override {}
} testLCDCoverageBatch;