Check for xfer barriers in GrBatch, auto-issue barriers in GrGpu

Review URL: https://codereview.chromium.org/1287973003
bsalomon 2015-08-12 11:14:50 -07:00 committed by Commit bot
parent 6028a84765
commit cb02b38b2c
44 changed files with 170 additions and 168 deletions
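In short: GrXferProcessor now reports a GrXferBarrierType (with a new kNone value that converts to false), GrPipeline exposes it through xferBarrierType(), GrGpu and GrPathRendering issue the barrier automatically right before a draw, the command builders stop recording explicit XferBarrier commands, and GrBatch::onCombineIfPossible() gains a GrCaps parameter so batches refuse to merge overlapping draws that need a barrier. A condensed sketch of the new flow, assembled from the hunks below (it adds nothing beyond what the diff itself introduces; the out-of-class formatting here is only for readability):

    // GrPipeline.h hunk: the pipeline forwards the barrier query to its xfer processor.
    GrXferBarrierType GrPipeline::xferBarrierType(const GrCaps& caps) const {
        return fXferProcessor->xferBarrierType(fRenderTarget.get(), caps);
    }

    // GrGpu.cpp hunk: the barrier is issued automatically before every draw;
    // kNone_GrXferBarrierType == 0, so the returned type serves directly as the condition.
    void GrGpu::draw(const DrawArgs& args, const GrVertices& vertices) {
        this->handleDirtyContext();
        if (GrXferBarrierType barrierType = args.fPipeline->xferBarrierType(*this->caps())) {
            this->xferBarrier(args.fPipeline->getRenderTarget(), barrierType);
        }
        // ... vertex iteration unchanged ...
    }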

View File

@@ -25,9 +25,12 @@ class GrProcOptInfo;
  * required after a pixel has been written, before it can be safely read again.
  */
 enum GrXferBarrierType {
-    kTexture_GrXferBarrierType, //<! Required when a shader reads and renders to the same texture.
-    kBlend_GrXferBarrierType,   //<! Required by certain blend extensions.
+    kNone_GrXferBarrierType = 0, //<! No barrier is required
+    kTexture_GrXferBarrierType,  //<! Required when a shader reads and renders to the same texture.
+    kBlend_GrXferBarrierType,    //<! Required by certain blend extensions.
 };
+/** Should be able to treat kNone as false in boolean expressions */
+GR_STATIC_ASSERT(SkToBool(kNone_GrXferBarrierType) == false);
 
 /**
  * GrXferProcessor is responsible for implementing the xfer mode that blends the src color and dst

@@ -148,9 +151,7 @@ public:
      * Returns whether this XP will require an Xfer barrier on the given rt. If true, outBarrierType
      * is updated to contain the type of barrier needed.
      */
-    bool willNeedXferBarrier(const GrRenderTarget* rt,
-                             const GrCaps& caps,
-                             GrXferBarrierType* outBarrierType) const;
+    GrXferBarrierType xferBarrierType(const GrRenderTarget* rt, const GrCaps& caps) const;
 
     struct BlendInfo {
         void reset() {

@@ -256,13 +257,12 @@ private:
                                       GrProcessorKeyBuilder* b) const = 0;
 
     /**
-     * If not using a texture barrier, retrieves whether the subclass will require a different type
-     * of barrier.
+     * Determines the type of barrier (if any) required by the subclass. Note that the possibility
+     * that a kTexture type barrier is required is handled by the base class and need not be
+     * considered by subclass overrides of this function.
      */
-    virtual bool onWillNeedXferBarrier(const GrRenderTarget*,
-                                       const GrCaps&,
-                                       GrXferBarrierType* outBarrierType SK_UNUSED) const {
-        return false;
+    virtual GrXferBarrierType onXferBarrier(const GrRenderTarget*, const GrCaps&) const {
+        return kNone_GrXferBarrierType;
     }
 
     /**
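The kNone = 0 value plus the static assert above exist so call sites can test the returned barrier type directly; the GrGpu and GrPathRendering hunks below rely on exactly this idiom. A minimal sketch (local names are illustrative, the calls are the ones introduced in this CL):

    // A pipeline asks its XP whether the upcoming draw needs a barrier; kNone (0)
    // reads as false, so no separate boolean is threaded through.
    if (GrXferBarrierType barrierType = pipeline.xferBarrierType(caps)) {
        gpu->xferBarrier(pipeline.getRenderTarget(), barrierType);
    }

Subclasses now override onXferBarrier() only for non-texture barriers (for example the blend barrier in GrCustomXfermode.cpp further down); the read-from-dst-texture case is handled once in GrXferProcessor::xferBarrierType().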

View File

@@ -940,8 +940,9 @@ private:
         geometry.fViewMatrix.mapRect(&fBounds);
     }
 
-    bool onCombineIfPossible(GrBatch* t) override {
-        if (!this->pipeline()->isEqual(*t->pipeline())) {
+    bool onCombineIfPossible(GrBatch* t, const GrCaps& caps) override {
+        if (!GrPipeline::CanCombine(*this->pipeline(), this->bounds(), *t->pipeline(), t->bounds(),
+                                    caps)) {
             return false;
         }

View File

@@ -492,8 +492,9 @@ private:
     const SkMatrix& viewMatrix() const { return fBatch.fViewMatrix; }
     bool usesLocalCoords() const { return fBatch.fUsesLocalCoords; }
 
-    bool onCombineIfPossible(GrBatch* t) override {
-        if (!this->pipeline()->isEqual(*t->pipeline())) {
+    bool onCombineIfPossible(GrBatch* t, const GrCaps& caps) override {
+        if (!GrPipeline::CanCombine(*this->pipeline(), this->bounds(), *t->pipeline(), t->bounds(),
+                                    caps)) {
             return false;
         }

View File

@@ -731,8 +731,9 @@ private:
         fBounds.outset(0.5f, 0.5f);
     }
 
-    bool onCombineIfPossible(GrBatch* t) override {
-        if (!this->pipeline()->isEqual(*t->pipeline())) {
+    bool onCombineIfPossible(GrBatch* t, const GrCaps& caps) override {
+        if (!GrPipeline::CanCombine(*this->pipeline(), this->bounds(), *t->pipeline(), t->bounds(),
+                                    caps)) {
             return false;
         }

View File

@@ -264,8 +264,9 @@ private:
         geometry.fViewMatrix.mapRect(&fBounds);
     }
 
-    bool onCombineIfPossible(GrBatch* t) override {
-        if (!this->pipeline()->isEqual(*t->pipeline())) {
+    bool onCombineIfPossible(GrBatch* t, const GrCaps& caps) override {
+        if (!GrPipeline::CanCombine(*this->pipeline(), this->bounds(), *t->pipeline(), t->bounds(),
+                                    caps)) {
             return false;
         }

View File

@@ -1893,8 +1893,9 @@ private:
     bool usesLocalCoords() const { return fBatch.fUsesLocalCoords; }
     int numGlyphs() const { return fBatch.fNumGlyphs; }
 
-    bool onCombineIfPossible(GrBatch* t) override {
-        if (!this->pipeline()->isEqual(*t->pipeline())) {
+    bool onCombineIfPossible(GrBatch* t, const GrCaps& caps) override {
+        if (!GrPipeline::CanCombine(*this->pipeline(), this->bounds(), *t->pipeline(), t->bounds(),
+                                    caps)) {
             return false;
         }

View File

@@ -27,8 +27,7 @@ GrBufferedDrawTarget::~GrBufferedDrawTarget() {
 }
 
 void GrBufferedDrawTarget::onDrawBatch(GrBatch* batch) {
-    fCommands->recordXferBarrierIfNecessary(*batch->pipeline(), *this->caps());
-    fCommands->recordDrawBatch(batch);
+    fCommands->recordDrawBatch(batch, *this->caps());
 }
 
 void GrBufferedDrawTarget::onStencilPath(const GrPipelineBuilder& pipelineBuilder,

@@ -131,12 +130,11 @@ GrBufferedDrawTarget::createStateForPathDraw(const GrPrimitiveProcessor* primPro
         fPrevState->fPrimitiveProcessor->canMakeEqual(fPrevState->fBatchTracker,
                                                       *state->fPrimitiveProcessor,
                                                       state->fBatchTracker) &&
-        fPrevState->getPipeline()->isEqual(*state->getPipeline())) {
+        GrPipeline::AreEqual(*fPrevState->getPipeline(), *state->getPipeline(), false)) {
         this->unallocState(state);
     } else {
         fPrevState.reset(state);
     }
 
-    fCommands->recordXferBarrierIfNecessary(*fPrevState->getPipeline(), *this->caps());
     return fPrevState;
 }

View File

@@ -65,20 +65,3 @@ GrTargetCommands::Cmd* GrCommandBuilder::recordCopySurface(GrSurface* dst,
     GrBATCH_INFO("Recording copysurface %d\n", cs->uniqueID());
     return cs;
 }
-
-GrTargetCommands::Cmd*
-GrCommandBuilder::recordXferBarrierIfNecessary(const GrPipeline& pipeline,
-                                               const GrCaps& caps) {
-    const GrXferProcessor& xp = *pipeline.getXferProcessor();
-    GrRenderTarget* rt = pipeline.getRenderTarget();
-
-    GrXferBarrierType barrierType;
-    if (!xp.willNeedXferBarrier(rt, caps, &barrierType)) {
-        return NULL;
-    }
-
-    XferBarrier* xb = GrNEW_APPEND_TO_RECORDER(*this->cmdBuffer(), XferBarrier, (rt));
-    xb->fBarrierType = barrierType;
-
-    GrBATCH_INFO("Recording xfer barrier %d\n", xb->uniqueID());
-    return xb;
-}

View File

@@ -28,7 +28,7 @@ public:
                                bool insideClip,
                                GrRenderTarget* renderTarget);
     virtual Cmd* recordDiscard(GrRenderTarget*);
-    virtual Cmd* recordDrawBatch(GrBatch*) = 0;
+    virtual Cmd* recordDrawBatch(GrBatch*, const GrCaps&) = 0;
     virtual Cmd* recordStencilPath(const GrPipelineBuilder&,
                                    const GrPathProcessor*,
                                    const GrPath*,

@@ -56,7 +56,6 @@ public:
                                     GrSurface* src,
                                     const SkIRect& srcRect,
                                     const SkIPoint& dstPoint);
-    virtual Cmd* recordXferBarrierIfNecessary(const GrPipeline&, const GrCaps&);
 
 protected:
     typedef GrTargetCommands::DrawBatch DrawBatch;

@@ -66,7 +65,6 @@ protected:
     typedef GrTargetCommands::Clear Clear;
     typedef GrTargetCommands::ClearStencilClip ClearStencilClip;
     typedef GrTargetCommands::CopySurface CopySurface;
-    typedef GrTargetCommands::XferBarrier XferBarrier;
 
     GrCommandBuilder(GrGpu* gpu) : fCommands(gpu) {}

View File

@@ -389,8 +389,9 @@ private:
         this->setBounds(devBounds);
     }
 
-    bool onCombineIfPossible(GrBatch* t) override {
-        if (!this->pipeline()->isEqual(*t->pipeline())) {
+    bool onCombineIfPossible(GrBatch* t, const GrCaps& caps) override {
+        if (!GrPipeline::CanCombine(*this->pipeline(), this->bounds(), *t->pipeline(), t->bounds(),
+                                    caps)) {
             return false;
         }

View File

@@ -14,6 +14,7 @@
 #include "GrGpuResourcePriv.h"
 #include "GrIndexBuffer.h"
 #include "GrPathRendering.h"
+#include "GrPipeline.h"
 #include "GrResourceCache.h"
 #include "GrRenderTargetPriv.h"
 #include "GrStencilAttachment.h"

@@ -381,6 +382,10 @@ void GrGpu::removeGpuTraceMarker(const GrGpuTraceMarker* marker) {
 
 void GrGpu::draw(const DrawArgs& args, const GrVertices& vertices) {
     this->handleDirtyContext();
+    if (GrXferBarrierType barrierType = args.fPipeline->xferBarrierType(*this->caps())) {
+        this->xferBarrier(args.fPipeline->getRenderTarget(), barrierType);
+    }
+
     GrVertices::Iterator iter;
     const GrNonInstancedVertices* verts = iter.init(vertices);
     do {

View File

@@ -299,9 +299,6 @@ public:
                              const SkIRect& srcRect,
                              const SkIPoint& dstPoint) = 0;
 
-    // Called before certain draws in order to guarantee coherent results from dst reads.
-    virtual void xferBarrier(GrRenderTarget*, GrXferBarrierType) = 0;
-
     struct DrawArgs {
         DrawArgs(const GrPrimitiveProcessor* primProc,
                  const GrPipeline* pipeline,

@@ -432,6 +429,9 @@ private:
     // assumed 3D context state and dirty any state cache.
     virtual void onResetContext(uint32_t resetBits) = 0;
 
+    // Called before certain draws in order to guarantee coherent results from dst reads.
+    virtual void xferBarrier(GrRenderTarget*, GrXferBarrierType) = 0;
+
     // overridden by backend-specific derived class to create objects.
     // Texture size and sample size will have already been validated in base class before
     // onCreateTexture/CompressedTexture are called.

View File

@@ -26,14 +26,6 @@ GrImmediateDrawTarget::~GrImmediateDrawTarget() {
 }
 
 void GrImmediateDrawTarget::onDrawBatch(GrBatch* batch) {
-    const GrXferProcessor& xp = *batch->pipeline()->getXferProcessor();
-    GrRenderTarget* rt = batch->pipeline()->getRenderTarget();
-
-    GrXferBarrierType barrierType;
-    if (xp.willNeedXferBarrier(rt, *this->caps(), &barrierType)) {
-        this->getGpu()->xferBarrier(rt, barrierType);
-    }
-
     fBatchTarget.resetNumberOfDraws();
     batch->generateGeometry(&fBatchTarget);

View File

@@ -25,12 +25,13 @@ static bool path_fill_type_is_winding(const GrStencilSettings& pathStencilSettin
     return isWinding;
 }
 
-GrTargetCommands::Cmd* GrInOrderCommandBuilder::recordDrawBatch(GrBatch* batch) {
+GrTargetCommands::Cmd* GrInOrderCommandBuilder::recordDrawBatch(GrBatch* batch,
+                                                                const GrCaps& caps) {
     GrBATCH_INFO("In-Recording (%s, %u)\n", batch->name(), batch->uniqueID());
     if (!this->cmdBuffer()->empty() &&
         Cmd::kDrawBatch_CmdType == this->cmdBuffer()->back().type()) {
         DrawBatch* previous = static_cast<DrawBatch*>(&this->cmdBuffer()->back());
-        if (previous->batch()->combineIfPossible(batch)) {
+        if (previous->batch()->combineIfPossible(batch, caps)) {
             GrBATCH_INFO("\tBatching with (%s, %u)\n",
                          previous->fBatch->name(), previous->fBatch->uniqueID());
             return NULL;

View File

@@ -17,7 +17,7 @@ public:
     GrInOrderCommandBuilder(GrGpu* gpu) : INHERITED(gpu) { }
 
-    Cmd* recordDrawBatch(GrBatch*) override;
+    Cmd* recordDrawBatch(GrBatch*, const GrCaps&) override;
     Cmd* recordStencilPath(const GrPipelineBuilder&,
                            const GrPathProcessor*,
                            const GrPath*,

View File

@@ -733,8 +733,9 @@ private:
         this->setBounds(geometry.fDevBounds);
     }
 
-    bool onCombineIfPossible(GrBatch* t) override {
-        if (!this->pipeline()->isEqual(*t->pipeline())) {
+    bool onCombineIfPossible(GrBatch* t, const GrCaps& caps) override {
+        if (!GrPipeline::CanCombine(*this->pipeline(), this->bounds(), *t->pipeline(), t->bounds(),
+                                    caps)) {
             return false;
         }

@@ -956,8 +957,9 @@ private:
         this->setBounds(geometry.fDevBounds);
     }
 
-    bool onCombineIfPossible(GrBatch* t) override {
-        if (!this->pipeline()->isEqual(*t->pipeline())) {
+    bool onCombineIfPossible(GrBatch* t, const GrCaps& caps) override {
+        if (!GrPipeline::CanCombine(*this->pipeline(), this->bounds(), *t->pipeline(), t->bounds(),
+                                    caps)) {
             return false;
         }

@@ -1214,8 +1216,9 @@ private:
         this->setBounds(bounds);
     }
 
-    bool onCombineIfPossible(GrBatch* t) override {
-        if (!this->pipeline()->isEqual(*t->pipeline())) {
+    bool onCombineIfPossible(GrBatch* t, const GrCaps& caps) override {
+        if (!GrPipeline::CanCombine(*this->pipeline(), this->bounds(), *t->pipeline(), t->bounds(),
+                                    caps)) {
             return false;
         }

@@ -1591,8 +1594,9 @@ private:
         this->setBounds(geometry.fDevBounds);
     }
 
-    bool onCombineIfPossible(GrBatch* t) override {
-        if (!this->pipeline()->isEqual(*t->pipeline())) {
+    bool onCombineIfPossible(GrBatch* t, const GrCaps& caps) override {
+        if (!GrPipeline::CanCombine(*this->pipeline(), this->bounds(), *t->pipeline(), t->bounds(),
+                                    caps)) {
             return false;
         }

@@ -1777,8 +1781,9 @@ private:
         this->setBounds(geometry.fDevBounds);
     }
 
-    bool onCombineIfPossible(GrBatch* t) override {
-        if (!this->pipeline()->isEqual(*t->pipeline())) {
+    bool onCombineIfPossible(GrBatch* t, const GrCaps& caps) override {
+        if (!GrPipeline::CanCombine(*this->pipeline(), this->bounds(), *t->pipeline(), t->bounds(),
+                                    caps)) {
             return false;
         }

View File

@@ -11,6 +11,7 @@
 #include "SkPath.h"
 #include "GrGpu.h"
 #include "GrPathRange.h"
+#include "GrPipeline.h"
 
 class SkDescriptor;
 class SkTypeface;

@@ -166,6 +167,9 @@ public:
     void drawPath(const DrawPathArgs& args, const GrPath* path) {
         fGpu->handleDirtyContext();
+        if (GrXferBarrierType barrierType = args.fPipeline->xferBarrierType(*fGpu->caps())) {
+            fGpu->xferBarrier(args.fPipeline->getRenderTarget(), barrierType);
+        }
         this->onDrawPath(args, path);
     }

@@ -173,10 +177,14 @@ public:
                    PathIndexType indexType, const float transformValues[],
                    PathTransformType transformType, int count) {
         fGpu->handleDirtyContext();
+        if (GrXferBarrierType barrierType = args.fPipeline->xferBarrierType(*fGpu->caps())) {
+            fGpu->xferBarrier(args.fPipeline->getRenderTarget(), barrierType);
+        }
+
         pathRange->willDrawPaths(indices, indexType, count);
         this->onDrawPaths(args, pathRange, indices, indexType, transformValues, transformType,
                           count);
     }
 
 protected:
     GrPathRendering(GrGpu* gpu)
         : fGpu(gpu) {

View File

@@ -150,28 +150,26 @@ void GrPipeline::adjustProgramFromOptimizations(const GrPipelineBuilder& pipelin
 
 ////////////////////////////////////////////////////////////////////////////////
 
-bool GrPipeline::isEqual(const GrPipeline& that, bool ignoreCoordTransforms) const {
-    // If we point to the same pipeline, then we are necessarily equal
-    if (this == &that) {
-        return true;
-    }
+bool GrPipeline::AreEqual(const GrPipeline& a, const GrPipeline& b,
+                          bool ignoreCoordTransforms) {
+    SkASSERT(&a != &b);
 
-    if (this->getRenderTarget() != that.getRenderTarget() ||
-        this->fFragmentStages.count() != that.fFragmentStages.count() ||
-        this->fNumColorStages != that.fNumColorStages ||
-        this->fScissorState != that.fScissorState ||
-        this->fFlags != that.fFlags ||
-        this->fStencilSettings != that.fStencilSettings ||
-        this->fDrawFace != that.fDrawFace) {
+    if (a.getRenderTarget() != b.getRenderTarget() ||
+        a.fFragmentStages.count() != b.fFragmentStages.count() ||
+        a.fNumColorStages != b.fNumColorStages ||
+        a.fScissorState != b.fScissorState ||
+        a.fFlags != b.fFlags ||
+        a.fStencilSettings != b.fStencilSettings ||
+        a.fDrawFace != b.fDrawFace) {
         return false;
     }
 
-    if (!this->getXferProcessor()->isEqual(*that.getXferProcessor())) {
+    if (!a.getXferProcessor()->isEqual(*b.getXferProcessor())) {
         return false;
     }
 
-    for (int i = 0; i < this->numFragmentStages(); i++) {
-        if (!this->getFragmentStage(i).processor()->isEqual(*that.getFragmentStage(i).processor(),
-                                                            ignoreCoordTransforms)) {
+    for (int i = 0; i < a.numFragmentStages(); i++) {
+        if (!a.getFragmentStage(i).processor()->isEqual(*b.getFragmentStage(i).processor(),
+                                                        ignoreCoordTransforms)) {
             return false;
         }

View File

@@ -29,6 +29,9 @@ class GrPipelineBuilder;
  */
 class GrPipeline : public GrNonAtomicRef {
 public:
+    ///////////////////////////////////////////////////////////////////////////
+    /// @name Creation
+
     struct CreateArgs {
         const GrPipelineBuilder* fPipelineBuilder;
         const GrCaps* fCaps;

@@ -41,13 +44,39 @@ public:
     /** Creates a pipeline into a pre-allocated buffer */
     static GrPipeline* CreateAt(void* memory, const CreateArgs&, GrPipelineOptimizations*);
 
-    /*
+    /// @}
+
+    ///////////////////////////////////////////////////////////////////////////
+    /// @name Comparisons
+
+    /**
      * Returns true if these pipelines are equivalent. Coord transforms may be applied either on
      * the GPU or the CPU. When we apply them on the CPU then the matrices need not agree in order
      * to combine draws. Therefore we take a param that indicates whether coord transforms should be
      * compared."
      */
-    bool isEqual(const GrPipeline& that, bool ignoreCoordTransforms = false) const;
+    static bool AreEqual(const GrPipeline& a, const GrPipeline& b, bool ignoreCoordTransforms);
+
+    /**
+     * Allows a GrBatch subclass to determine whether two GrBatches can combine. This is a stricter
+     * test than isEqual because it also considers blend barriers when the two batches' bounds
+     * overlap
+     */
+    static bool CanCombine(const GrPipeline& a, const SkRect& aBounds,
+                           const GrPipeline& b, const SkRect& bBounds,
+                           const GrCaps& caps,
+                           bool ignoreCoordTransforms = false) {
+        if (!AreEqual(a, b, ignoreCoordTransforms)) {
+            return false;
+        }
+        if (a.xferBarrierType(caps)) {
+            return aBounds.fRight <= bBounds.fLeft ||
+                   aBounds.fBottom <= bBounds.fTop ||
+                   bBounds.fRight <= aBounds.fLeft ||
+                   bBounds.fBottom <= aBounds.fTop;
+        }
+        return true;
+    }
 
     /// @}

@@ -90,6 +119,10 @@ public:
     bool isHWAntialiasState() const { return SkToBool(fFlags & kHWAA_Flag); }
     bool snapVerticesToPixelCenters() const { return SkToBool(fFlags & kSnapVertices_Flag); }
 
+    GrXferBarrierType xferBarrierType(const GrCaps& caps) const {
+        return fXferProcessor->xferBarrierType(fRenderTarget.get(), caps);
+    }
+
     /**
      * Gets whether the target is drawing clockwise, counterclockwise,
      * or both faces.
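Every onCombineIfPossible() override touched in this CL funnels its pipeline comparison through CanCombine(), so two overlapping draws whose pipeline needs a barrier are never merged into one batch. For reference, the shape repeated throughout the batch files below (condensed, not additional code in the CL):

    // Representative onCombineIfPossible override; each real implementation follows the
    // pipeline test with its own batch-specific merging checks.
    bool onCombineIfPossible(GrBatch* t, const GrCaps& caps) override {
        if (!GrPipeline::CanCombine(*this->pipeline(), this->bounds(),
                                    *t->pipeline(), t->bounds(), caps)) {
            return false;
        }
        // ... batch-specific checks ...
    }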

View File

@@ -15,7 +15,8 @@ static bool intersect(const Left& a, const Right& b) {
            a.fTop < b.fBottom && b.fTop < a.fBottom;
 }
 
-GrTargetCommands::Cmd* GrReorderCommandBuilder::recordDrawBatch(GrBatch* batch) {
+GrTargetCommands::Cmd* GrReorderCommandBuilder::recordDrawBatch(GrBatch* batch,
+                                                                const GrCaps& caps) {
     // Check if there is a Batch Draw we can batch with by linearly searching back until we either
     // 1) check every draw
     // 2) intersect with something

@@ -58,7 +59,7 @@ GrTargetCommands::Cmd* GrReorderCommandBuilder::recordDrawBatch(GrBatch* batch)
                 break;
             }
             // We cannot continue to search backwards if the render target changes
-            if (previous->batch()->combineIfPossible(batch)) {
+            if (previous->batch()->combineIfPossible(batch, caps)) {
                 GrBATCH_INFO("\t\tCombining with (%s, B%u)\n",
                              previous->fBatch->name(), previous->fBatch->uniqueID());
                 return NULL;

View File

@@ -17,7 +17,7 @@ public:
     GrReorderCommandBuilder(GrGpu* gpu) : INHERITED(gpu) {}
 
-    Cmd* recordDrawBatch(GrBatch*) override;
+    Cmd* recordDrawBatch(GrBatch*, const GrCaps&) override;
     Cmd* recordStencilPath(const GrPipelineBuilder&,
                            const GrPathProcessor*,
                            const GrPath*,

View File

@@ -75,7 +75,7 @@ void GrTargetCommands::DrawPaths::execute(GrGpu* gpu) {
                              fTransformType, fCount);
 }
 
-void GrTargetCommands::DrawBatch::execute(GrGpu*) {
+void GrTargetCommands::DrawBatch::execute(GrGpu* gpu) {
     fBatchTarget->flushNext(fBatch->numberOfDraws());
 }

@@ -94,7 +94,3 @@ void GrTargetCommands::ClearStencilClip::execute(GrGpu* gpu) {
 void GrTargetCommands::CopySurface::execute(GrGpu* gpu) {
     gpu->copySurface(this->dst(), this->src(), fSrcRect, fDstPoint);
 }
-
-void GrTargetCommands::XferBarrier::execute(GrGpu* gpu) {
-    gpu->xferBarrier(fRenderTarget.get(), fBarrierType);
-}

View File

@@ -22,9 +22,7 @@
 class GrBufferedDrawTarget;
 
-// TODO: Convert all commands into GrBatch and remove this class. Xferbarrier will just become a
-// batch blocker (when there is overlap) and the xp is responsible for issuing any barrier calls
-// on the backend.
+// TODO: Convert all commands into GrBatch and remove this class.
 class GrTargetCommands : ::SkNoncopyable {
 public:
     GrTargetCommands(GrGpu* gpu)

@@ -42,7 +40,6 @@ public:
         kDrawPath_CmdType = 5,
         kDrawPaths_CmdType = 6,
         kDrawBatch_CmdType = 7,
-        kXferBarrier_CmdType = 8,
     };
 
     Cmd(CmdType type)

@@ -83,8 +80,6 @@ private:
     typedef GrGpu::DrawArgs DrawArgs;
 
-    void recordXferBarrierIfNecessary(const GrPipeline&, GrBufferedDrawTarget*);
-
     // TODO: This can be just a pipeline once paths are in batch, and it should live elsewhere
     struct StateForPathDraw : public SkNVRefCnt<StateForPathDraw> {
         // TODO get rid of the prim proc parameter when we use batch everywhere

@@ -251,20 +246,6 @@ private:
         GrBatchTarget* fBatchTarget;
     };
 
-    struct XferBarrier : public Cmd {
-        XferBarrier(GrRenderTarget* rt)
-            : Cmd(kXferBarrier_CmdType)
-            , fRenderTarget(rt) {
-        }
-
-        void execute(GrGpu*) override;
-
-        GrXferBarrierType fBarrierType;
-
-    private:
-        GrPendingIOResource<GrRenderTarget, kWrite_GrIOType> fRenderTarget;
-    };
-
     static const int kCmdBufferInitialSizeInBytes = 8 * 1024;
 
     typedef void* TCmdAlign; // This wouldn't be enough align if a command used long double.

View File

@@ -1576,9 +1576,7 @@ public:
         batchTarget->draw(vertices);
     }
 
-    bool onCombineIfPossible(GrBatch*) override {
-        return false;
-    }
+    bool onCombineIfPossible(GrBatch*, const GrCaps&) override { return false; }
 
 private:
     TessellatingPathBatch(const GrColor& color,

View File

@@ -175,11 +175,11 @@ public:
         return false;
     }
 
-    void xferBarrier(GrRenderTarget*, GrXferBarrierType) override {}
-
 private:
     void onResetContext(uint32_t resetBits) override {}
 
+    void xferBarrier(GrRenderTarget*, GrXferBarrierType) override {}
+
     GrTexture* onCreateTexture(const GrSurfaceDesc& desc, GrGpuResource::LifeCycle lifeCycle,
                                const void* srcData, size_t rowBytes) override {
         return NULL;

View File

@@ -93,17 +93,15 @@ void GrXferProcessor::getGLProcessorKey(const GrGLSLCaps& caps, GrProcessorKeyBu
     this->onGetGLProcessorKey(caps, b);
 }
 
-bool GrXferProcessor::willNeedXferBarrier(const GrRenderTarget* rt,
-                                          const GrCaps& caps,
-                                          GrXferBarrierType* outBarrierType) const {
+GrXferBarrierType GrXferProcessor::xferBarrierType(const GrRenderTarget* rt,
+                                                   const GrCaps& caps) const {
+    SkASSERT(rt);
     if (static_cast<const GrSurface*>(rt) == this->getDstTexture()) {
         // Texture barriers are required when a shader reads and renders to the same texture.
-        SkASSERT(rt);
         SkASSERT(caps.textureBarrierSupport());
-        *outBarrierType = kTexture_GrXferBarrierType;
-        return true;
+        return kTexture_GrXferBarrierType;
     }
-    return this->onWillNeedXferBarrier(rt, caps, outBarrierType);
+    return this->onXferBarrier(rt, caps);
 }
 
 #ifdef SK_DEBUG

View File

@@ -130,7 +130,6 @@ public:
                                           args.fDevRect,
                                           canTweakAlphaForCoverage);
         }
-
         helper.issueDraw(batchTarget);
     }

@@ -176,8 +175,9 @@ private:
     const SkMatrix& viewMatrix() const { return fGeoData[0].fViewMatrix; }
     bool coverageIgnored() const { return fBatch.fCoverageIgnored; }
 
-    bool onCombineIfPossible(GrBatch* t) override {
-        if (!this->pipeline()->isEqual(*t->pipeline())) {
+    bool onCombineIfPossible(GrBatch* t, const GrCaps& caps) {
+        if (!GrPipeline::CanCombine(*this->pipeline(), this->bounds(), *t->pipeline(), t->bounds(),
+                                    caps)) {
             return false;
         }

View File

@@ -15,12 +15,10 @@ class SkMatrix;
 struct SkRect;
 
 namespace GrAAFillRectBatch {
 
 GrBatch* Create(GrColor color,
                 const SkMatrix& viewMatrix,
                 const SkRect& rect,
                 const SkRect& devRect);
 
 };
 
 #endif

View File

@@ -203,8 +203,9 @@ const GrIndexBuffer* GrAAStrokeRectBatch::GetIndexBuffer(GrResourceProvider* res
     }
 }
 
-bool GrAAStrokeRectBatch::onCombineIfPossible(GrBatch* t) {
-    if (!this->pipeline()->isEqual(*t->pipeline())) {
+bool GrAAStrokeRectBatch::onCombineIfPossible(GrBatch* t, const GrCaps& caps) {
+    if (!GrPipeline::CanCombine(*this->pipeline(), this->bounds(), *t->pipeline(), t->bounds(),
+                                caps)) {
         return false;
     }

View File

@@ -78,7 +78,7 @@ private:
     bool miterStroke() const { return fBatch.fMiterStroke; }
     bool coverageIgnored() const { return fBatch.fCoverageIgnored; }
 
-    bool onCombineIfPossible(GrBatch* t) override;
+    bool onCombineIfPossible(GrBatch* t, const GrCaps&) override;
 
     void generateAAStrokeRectGeometry(void* vertices,
                                       size_t offset,

View File

@@ -54,16 +54,14 @@ public:
     virtual void getInvariantOutputColor(GrInitInvariantOutput* out) const = 0;
     virtual void getInvariantOutputCoverage(GrInitInvariantOutput* out) const = 0;
 
-    bool combineIfPossible(GrBatch* that) {
+    bool combineIfPossible(GrBatch* that, const GrCaps& caps) {
         if (this->classID() != that->classID()) {
             return false;
         }
 
-        return this->onCombineIfPossible(that);
+        return this->onCombineIfPossible(that, caps);
     }
 
-    virtual bool onCombineIfPossible(GrBatch*) = 0;
-
     virtual void generateGeometry(GrBatchTarget*) = 0;
 
     const SkRect& bounds() const { return fBounds; }

@@ -162,6 +160,8 @@ protected:
     SkRect fBounds;
 
 private:
+    virtual bool onCombineIfPossible(GrBatch*, const GrCaps& caps) = 0;
+
     /*
      * initBatchTracker is a hook for the some additional overrides / optimization possibilities
      * from the GrXferProcessor.

View File

@@ -156,8 +156,9 @@ GrDrawAtlasBatch::GrDrawAtlasBatch(const Geometry& geometry, const SkMatrix& vie
     this->setBounds(bounds);
 }
 
-bool GrDrawAtlasBatch::onCombineIfPossible(GrBatch* t) {
-    if (!this->pipeline()->isEqual(*t->pipeline())) {
+bool GrDrawAtlasBatch::onCombineIfPossible(GrBatch* t, const GrCaps& caps) {
+    if (!GrPipeline::CanCombine(*this->pipeline(), this->bounds(), *t->pipeline(), t->bounds(),
+                                caps)) {
         return false;
     }

View File

@@ -56,7 +56,7 @@ private:
     int quadCount() const { return fQuadCount; }
     bool coverageIgnored() const { return fCoverageIgnored; }
 
-    bool onCombineIfPossible(GrBatch* t) override;
+    bool onCombineIfPossible(GrBatch* t, const GrCaps&) override;
 
     SkSTArray<1, Geometry, true> fGeoData;
     SkMatrix fViewMatrix;

View File

@@ -183,8 +183,9 @@ void GrDrawVerticesBatch::generateGeometry(GrBatchTarget* batchTarget) {
     batchTarget->draw(vertices);
 }
 
-bool GrDrawVerticesBatch::onCombineIfPossible(GrBatch* t) {
-    if (!this->pipeline()->isEqual(*t->pipeline())) {
+bool GrDrawVerticesBatch::onCombineIfPossible(GrBatch* t, const GrCaps& caps) {
+    if (!GrPipeline::CanCombine(*this->pipeline(), this->bounds(), *t->pipeline(), t->bounds(),
+                                caps)) {
         return false;
     }

View File

@@ -75,7 +75,7 @@ private:
     int indexCount() const { return fBatch.fIndexCount; }
     bool coverageIgnored() const { return fBatch.fCoverageIgnored; }
 
-    bool onCombineIfPossible(GrBatch* t) override;
+    bool onCombineIfPossible(GrBatch* t, const GrCaps&) override;
 
     struct BatchTracker {
         GrPrimitiveType fPrimitiveType;

View File

@@ -80,8 +80,9 @@ void GrRectBatch::generateGeometry(GrBatchTarget* batchTarget) {
     helper.issueDraw(batchTarget);
 }
 
-bool GrRectBatch::onCombineIfPossible(GrBatch* t) {
-    if (!this->pipeline()->isEqual(*t->pipeline())) {
+bool GrRectBatch::onCombineIfPossible(GrBatch* t, const GrCaps& caps) {
+    if (!GrPipeline::CanCombine(*this->pipeline(), this->bounds(), *t->pipeline(), t->bounds(),
+                                caps)) {
         return false;
     }

View File

@@ -66,7 +66,7 @@ private:
     bool hasLocalMatrix() const { return fGeoData[0].fHasLocalMatrix; }
    bool coverageIgnored() const { return fBatch.fCoverageIgnored; }
 
-    bool onCombineIfPossible(GrBatch* t) override;
+    bool onCombineIfPossible(GrBatch* t, const GrCaps&) override;
 
     const GrGeometryProcessor* createRectGP();

View File

@@ -50,10 +50,11 @@ private:
     bool hairline() const { return fBatch.fHairline; }
     bool coverageIgnored() const { return fBatch.fCoverageIgnored; }
 
-    bool onCombineIfPossible(GrBatch* t) override {
-        //if (!this->pipeline()->isEqual(*t->pipeline())) {
-        //    return false;
-        //}
+    bool onCombineIfPossible(GrBatch* t, const GrCaps&) override {
+        // if (!GrPipeline::CanCombine(*this->pipeline(), this->bounds(), *t->pipeline(),
+        //                             t->bounds(), caps)) {
+        //     return false;
+        // }
         // GrStrokeRectBatch* that = t->cast<StrokeRectBatch>();
 
         // NonAA stroke rects cannot batch right now

View File

@@ -66,7 +66,7 @@ private:
     virtual Geometry* geoData(int index) = 0;
     virtual const Geometry* geoData(int index) const = 0;
 
-    bool onCombineIfPossible(GrBatch* t) override {
+    bool onCombineIfPossible(GrBatch* t, const GrCaps&) override {
         return false;
     }

View File

@@ -552,9 +552,7 @@ private:
     void onGetGLProcessorKey(const GrGLSLCaps& caps, GrProcessorKeyBuilder* b) const override;
 
-    bool onWillNeedXferBarrier(const GrRenderTarget* rt,
-                               const GrCaps& caps,
-                               GrXferBarrierType* outBarrierType) const override;
+    GrXferBarrierType onXferBarrier(const GrRenderTarget*, const GrCaps&) const override;
 
     void onGetBlendInfo(BlendInfo*) const override;

@@ -757,14 +755,11 @@ GrXferProcessor::OptFlags CustomXP::onGetOptimizations(const GrProcOptInfo& colo
     return flags;
 }
 
-bool CustomXP::onWillNeedXferBarrier(const GrRenderTarget* rt,
-                                     const GrCaps& caps,
-                                     GrXferBarrierType* outBarrierType) const {
+GrXferBarrierType CustomXP::onXferBarrier(const GrRenderTarget* rt, const GrCaps& caps) const {
     if (this->hasHWBlendEquation() && !caps.advancedCoherentBlendEquationSupport()) {
-        *outBarrierType = kBlend_GrXferBarrierType;
-        return true;
+        return kBlend_GrXferBarrierType;
     }
-    return false;
+    return kNone_GrXferBarrierType;
 }
 
 void CustomXP::onGetBlendInfo(BlendInfo* blendInfo) const {

View File

@@ -617,8 +617,9 @@ private:
         combinedMatrix.mapRect(&fBounds);
     }
 
-    bool onCombineIfPossible(GrBatch* t) override {
-        if (!this->pipeline()->isEqual(*t->pipeline())) {
+    bool onCombineIfPossible(GrBatch* t, const GrCaps& caps) override {
+        if (!GrPipeline::CanCombine(*this->pipeline(), this->bounds(), *t->pipeline(), t->bounds(),
+                                    caps)) {
             return false;
         }

View File

@@ -3062,6 +3062,7 @@ bool GrGLGpu::copySurfaceAsBlitFramebuffer(GrSurface* dst,
 }
 
 void GrGLGpu::xferBarrier(GrRenderTarget* rt, GrXferBarrierType type) {
+    SkASSERT(type);
     switch (type) {
         case kTexture_GrXferBarrierType: {
             GrGLRenderTarget* glrt = static_cast<GrGLRenderTarget*>(rt);

@@ -3080,6 +3081,7 @@ void GrGLGpu::xferBarrier(GrRenderTarget* rt, GrXferBarrierType type) {
                      this->caps()->blendEquationSupport());
             GL_CALL(BlendBarrier());
             return;
+        default: break; // placate compiler warnings that kNone not handled
     }
 }

View File

@@ -96,8 +96,6 @@ public:
                        const SkIRect& srcRect,
                        const SkIPoint& dstPoint) override;
 
-    void xferBarrier(GrRenderTarget*, GrXferBarrierType) override;
-
     void buildProgramDesc(GrProgramDesc*,
                           const GrPrimitiveProcessor&,
                           const GrPipeline&,

@@ -118,6 +116,8 @@ private:
     // GrGpu overrides
     void onResetContext(uint32_t resetBits) override;
 
+    void xferBarrier(GrRenderTarget*, GrXferBarrierType) override;
+
     GrTexture* onCreateTexture(const GrSurfaceDesc& desc, GrGpuResource::LifeCycle lifeCycle,
                                const void* srcData, size_t rowBytes) override;
     GrTexture* onCreateCompressedTexture(const GrSurfaceDesc& desc,

View File

@@ -910,7 +910,7 @@ static void test_lcd_coverage(skiatest::Reporter* reporter, const GrCaps& caps)
         const char* name() const override { return "Test LCD Text Batch"; }
 
         void initBatchTracker(const GrPipelineOptimizations&) override {}
-        bool onCombineIfPossible(GrBatch*) override { return false; }
+        bool onCombineIfPossible(GrBatch*, const GrCaps&) override { return false; }
         void generateGeometry(GrBatchTarget*) override {}
     } testLCDCoverageBatch;