Move state management to GrInOrderDrawBuffer
BUG=skia:
Review URL: https://codereview.chromium.org/1120143002
parent 8566539f98
commit 5d6bb6f795
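At a high level, the diff below makes each recorded draw command hold a reference to the pipeline state it needs (a new ref-counted GrTargetCommands::State), so commands no longer rely on a separate SetState command being replayed ahead of them, and Cmd::execute loses its SetState* parameter. A very rough stand-alone sketch of that ownership shape, using simplified stand-in types rather than the real Skia classes:

    #include <cstdio>
    #include <memory>
    #include <vector>

    struct Gpu {                                   // stand-in for GrGpu
        void draw(int pipelineKey) { std::printf("draw with pipeline %d\n", pipelineKey); }
    };

    struct State { int pipelineKey; };             // stand-in for GrTargetCommands::State

    struct Cmd {
        virtual ~Cmd() {}
        virtual void execute(Gpu*) = 0;            // was execute(GrGpu*, const SetState*)
    };

    struct DrawBatch : Cmd {
        explicit DrawBatch(std::shared_ptr<State> s) : state(std::move(s)) {}
        void execute(Gpu* gpu) override { gpu->draw(state->pipelineKey); }
        std::shared_ptr<State> state;              // the command keeps its state alive
    };

    int main() {
        Gpu gpu;
        auto state = std::make_shared<State>(State{7});
        std::vector<std::unique_ptr<Cmd>> cmdBuffer;
        cmdBuffer.push_back(std::unique_ptr<Cmd>(new DrawBatch(state)));
        cmdBuffer.push_back(std::unique_ptr<Cmd>(new DrawBatch(state)));  // shares the state
        for (auto& c : cmdBuffer) { c->execute(&gpu); }                   // no SetState replay
    }

The hunks below apply the same idea to the real DrawBatch, DrawPath and DrawPaths commands, which all gain an fState member.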
@@ -61,7 +61,6 @@ private:
* The only thing(aside from a memcopy) required to flush a BitmapTextBlob is to ensure that
* the GrAtlas will not evict anything the Blob needs.
*/
// TODO Pack these bytes
struct BitmapTextBlob : public SkRefCnt {
SK_DECLARE_INTERNAL_LLIST_INTERFACE(BitmapTextBlob);

@@ -17,6 +17,7 @@ GrInOrderDrawBuffer::GrInOrderDrawBuffer(GrContext* context,
, fCommands(context->getGpu(), vertexPool, indexPool)
, fPathIndexBuffer(kPathIdxBufferMinReserve * sizeof(char)/4)
, fPathTransformBuffer(kPathXformBufferMinReserve * sizeof(float)/4)
, fPipelineBuffer(kPipelineBufferMinReserve)
, fDrawID(0) {

SkASSERT(vertexPool);

@@ -300,7 +301,12 @@ void GrInOrderDrawBuffer::onDrawRect(GrPipelineBuilder* pipelineBuilder,

void GrInOrderDrawBuffer::onDrawBatch(GrBatch* batch,
const PipelineInfo& pipelineInfo) {
GrTargetCommands::Cmd* cmd = fCommands.recordDrawBatch(this, batch, pipelineInfo);
State* state = this->setupPipelineAndShouldDraw(batch, pipelineInfo);
if (!state) {
return;
}

GrTargetCommands::Cmd* cmd = fCommands.recordDrawBatch(state, batch);
this->recordTraceMarkersIfNecessary(cmd);
}

@@ -309,7 +315,7 @@ void GrInOrderDrawBuffer::onStencilPath(const GrPipelineBuilder& pipelineBuilder
const GrPath* path,
const GrScissorState& scissorState,
const GrStencilSettings& stencilSettings) {
GrTargetCommands::Cmd* cmd = fCommands.recordStencilPath(this, pipelineBuilder,
GrTargetCommands::Cmd* cmd = fCommands.recordStencilPath(pipelineBuilder,
pathProc, path, scissorState,
stencilSettings);
this->recordTraceMarkersIfNecessary(cmd);

@@ -319,9 +325,11 @@ void GrInOrderDrawBuffer::onDrawPath(const GrPathProcessor* pathProc,
const GrPath* path,
const GrStencilSettings& stencilSettings,
const PipelineInfo& pipelineInfo) {
GrTargetCommands::Cmd* cmd = fCommands.recordDrawPath(this, pathProc,
path, stencilSettings,
pipelineInfo);
State* state = this->setupPipelineAndShouldDraw(pathProc, pipelineInfo);
if (!state) {
return;
}
GrTargetCommands::Cmd* cmd = fCommands.recordDrawPath(state, pathProc, path, stencilSettings);
this->recordTraceMarkersIfNecessary(cmd);
}

@@ -334,7 +342,11 @@ void GrInOrderDrawBuffer::onDrawPaths(const GrPathProcessor* pathProc,
int count,
const GrStencilSettings& stencilSettings,
const PipelineInfo& pipelineInfo) {
GrTargetCommands::Cmd* cmd = fCommands.recordDrawPaths(this, pathProc, pathRange,
State* state = this->setupPipelineAndShouldDraw(pathProc, pipelineInfo);
if (!state) {
return;
}
GrTargetCommands::Cmd* cmd = fCommands.recordDrawPaths(state, this, pathProc, pathRange,
indices, indexType, transformValues,
transformType, count,
stencilSettings, pipelineInfo);

@@ -343,16 +355,14 @@ void GrInOrderDrawBuffer::onDrawPaths(const GrPathProcessor* pathProc,

void GrInOrderDrawBuffer::onClear(const SkIRect* rect, GrColor color,
bool canIgnoreRect, GrRenderTarget* renderTarget) {
GrTargetCommands::Cmd* cmd = fCommands.recordClear(this, rect, color,
canIgnoreRect, renderTarget);
GrTargetCommands::Cmd* cmd = fCommands.recordClear(rect, color, canIgnoreRect, renderTarget);
this->recordTraceMarkersIfNecessary(cmd);
}

void GrInOrderDrawBuffer::clearStencilClip(const SkIRect& rect,
bool insideClip,
GrRenderTarget* renderTarget) {
GrTargetCommands::Cmd* cmd = fCommands.recordClearStencilClip(this, rect,
insideClip, renderTarget);
GrTargetCommands::Cmd* cmd = fCommands.recordClearStencilClip(rect, insideClip, renderTarget);
this->recordTraceMarkersIfNecessary(cmd);
}

@@ -361,7 +371,7 @@ void GrInOrderDrawBuffer::discard(GrRenderTarget* renderTarget) {
return;
}

GrTargetCommands::Cmd* cmd = fCommands.recordDiscard(this, renderTarget);
GrTargetCommands::Cmd* cmd = fCommands.recordDiscard(renderTarget);
this->recordTraceMarkersIfNecessary(cmd);
}

@@ -370,6 +380,15 @@ void GrInOrderDrawBuffer::onReset() {
fPathIndexBuffer.rewind();
fPathTransformBuffer.rewind();
fGpuCmdMarkers.reset();

fPrevState.reset(NULL);
// Note, fPrevState points into fPipelineBuffer's allocation, so we have to reset first.
// Furthermore, we have to reset fCommands before fPipelineBuffer too.
if (fDrawID % kPipelineBufferHighWaterMark) {
fPipelineBuffer.rewind();
} else {
fPipelineBuffer.reset();
}
}

void GrInOrderDrawBuffer::onFlush() {

@@ -400,3 +419,55 @@ void GrInOrderDrawBuffer::recordTraceMarkersIfNecessary(GrTargetCommands::Cmd* c
}
}
}

GrTargetCommands::State*
GrInOrderDrawBuffer::setupPipelineAndShouldDraw(const GrPrimitiveProcessor* primProc,
const GrDrawTarget::PipelineInfo& pipelineInfo) {
State* state = this->allocState();
this->setupPipeline(pipelineInfo, state->pipelineLocation());

if (state->getPipeline()->mustSkip()) {
this->unallocState(state);
return NULL;
}

state->fPrimitiveProcessor->initBatchTracker(&state->fBatchTracker,
state->getPipeline()->getInitBatchTracker());

if (fPrevState && fPrevState->fPrimitiveProcessor.get() &&
fPrevState->fPrimitiveProcessor->canMakeEqual(fPrevState->fBatchTracker,
*state->fPrimitiveProcessor,
state->fBatchTracker) &&
fPrevState->getPipeline()->isEqual(*state->getPipeline())) {
this->unallocState(state);
} else {
fPrevState.reset(state);
}

fCommands.recordXferBarrierIfNecessary(*fPrevState->getPipeline(), this);
return fPrevState;
}

GrTargetCommands::State*
GrInOrderDrawBuffer::setupPipelineAndShouldDraw(GrBatch* batch,
const GrDrawTarget::PipelineInfo& pipelineInfo) {
State* state = this->allocState();
this->setupPipeline(pipelineInfo, state->pipelineLocation());

if (state->getPipeline()->mustSkip()) {
this->unallocState(state);
return NULL;
}

batch->initBatchTracker(state->getPipeline()->getInitBatchTracker());

if (fPrevState && !fPrevState->fPrimitiveProcessor.get() &&
fPrevState->getPipeline()->isEqual(*state->getPipeline())) {
this->unallocState(state);
} else {
fPrevState.reset(state);
}

fCommands.recordXferBarrierIfNecessary(*fPrevState->getPipeline(), this);
return fPrevState;
}
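Stripped of the Skia-specific pieces, both overloads above follow the same two-step guard: build a candidate State, drop it (and draw nothing) if its pipeline says the draw can be skipped, and otherwise either reuse the previously recorded State when it is equivalent or promote the candidate to be the new previous State. A simplified sketch of that control flow, with stand-in types rather than the real GrPipeline and State classes:

    #include <cstdio>
    #include <memory>

    struct Pipeline {                               // stand-in for GrPipeline
        bool mustSkip;
        int  key;
        bool isEqual(const Pipeline& that) const { return key == that.key; }
    };

    struct State { Pipeline pipeline; };            // stand-in for GrTargetCommands::State

    struct Recorder {
        std::shared_ptr<State> prevState;           // plays the role of fPrevState

        // Returns the State a draw should use, or nullptr if there is nothing to draw.
        std::shared_ptr<State> setupPipelineAndShouldDraw(const Pipeline& p) {
            auto state = std::make_shared<State>(State{p});   // like allocState()
            if (state->pipeline.mustSkip) {
                return nullptr;                               // like unallocState() + NULL
            }
            if (prevState && prevState->pipeline.isEqual(state->pipeline)) {
                return prevState;                             // reuse: candidate is dropped
            }
            prevState = state;                                // candidate becomes current
            return prevState;
        }
    };

    int main() {
        Recorder r;
        auto a = r.setupPipelineAndShouldDraw({false, 1});
        auto b = r.setupPipelineAndShouldDraw({false, 1});    // equivalent: same State reused
        auto c = r.setupPipelineAndShouldDraw({true, 2});     // skipped entirely
        std::printf("reused=%d skipped=%d\n", int(a == b), int(c == nullptr));
    }

The real code additionally distinguishes the primitive-processor and batch overloads (canMakeEqual plus batch tracker checks versus a pipeline-only comparison), which the sketch collapses into a single key comparison.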
@@ -72,6 +72,17 @@ protected:

private:
friend class GrTargetCommands;
typedef GrTargetCommands::State State;

State* allocState(const GrPrimitiveProcessor* primProc = NULL) {
void* allocation = fPipelineBuffer.alloc(sizeof(State), SkChunkAlloc::kThrow_AllocFailType);
return SkNEW_PLACEMENT_ARGS(allocation, State, (primProc));
}

void unallocState(State* state) {
state->unref();
fPipelineBuffer.unalloc(state);
}

void onReset() override;
void onFlush() override;
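The allocState()/unallocState() helpers above pair placement new into fPipelineBuffer with an explicit tear-down and unalloc instead of ordinary heap new/delete (the real helper unrefs the State, which runs its destructor once the count hits zero; the sketch calls the destructor directly). A small self-contained sketch of that pairing; the fixed-size arena is a stand-in for SkChunkAlloc, and in this sketch only the most recent allocation can be handed back, which is enough for the allocate-then-immediately-discard path taken when mustSkip() fires:

    #include <cassert>
    #include <cstddef>
    #include <cstdio>
    #include <new>

    struct State { int pipelineKey = 0; };         // trivial stand-in type

    class Arena {                                  // stand-in for SkChunkAlloc
    public:
        void* alloc(std::size_t n) {
            assert(fUsed + n <= sizeof(fStorage));
            void* p = fStorage + fUsed;
            fUsed += n;
            return p;
        }
        // Hand back the most recent allocation only (enough for this pattern).
        void unalloc(void* p, std::size_t n) {
            if (static_cast<char*>(p) == fStorage + fUsed - n) { fUsed -= n; }
        }
    private:
        alignas(std::max_align_t) char fStorage[1024];
        std::size_t fUsed = 0;
    };

    int main() {
        Arena pipelineBuffer;

        // allocState(): carve out raw bytes, then construct the object in place.
        State* state = new (pipelineBuffer.alloc(sizeof(State))) State();
        state->pipelineKey = 7;
        std::printf("state lives in the arena, key=%d\n", state->pipelineKey);

        // unallocState(): run the destructor explicitly, then return the bytes.
        state->~State();
        pipelineBuffer.unalloc(state, sizeof(State));
    }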
@@ -84,7 +95,6 @@ private:
const SkRect& rect,
const SkRect* localRect,
const SkMatrix* localMatrix) override;

void onStencilPath(const GrPipelineBuilder&,
const GrPathProcessor*,
const GrPath*,

@@ -122,17 +132,29 @@ private:
}
bool isIssued(uint32_t drawID) override { return drawID != fDrawID; }

State* SK_WARN_UNUSED_RESULT setupPipelineAndShouldDraw(const GrPrimitiveProcessor*,
const GrDrawTarget::PipelineInfo&);
State* SK_WARN_UNUSED_RESULT setupPipelineAndShouldDraw(GrBatch*,
const GrDrawTarget::PipelineInfo&);

// TODO: Use a single allocator for commands and records
enum {
kPathIdxBufferMinReserve = 2 * 64, // 64 uint16_t's
kPathXformBufferMinReserve = 2 * 64, // 64 two-float transforms
kPipelineBufferMinReserve = 32 * sizeof(State),
};

// every 100 flushes we should reset our fPipelineBuffer to prevent us from holding at a
// highwater mark
static const int kPipelineBufferHighWaterMark = 100;

GrTargetCommands fCommands;
SkTArray<GrTraceMarkerSet, false> fGpuCmdMarkers;
SkChunkAlloc fPathIndexBuffer;
SkChunkAlloc fPathTransformBuffer;
SkChunkAlloc fPipelineBuffer;
uint32_t fDrawID;
SkAutoTUnref<State> fPrevState;

typedef GrClipTarget INHERITED;
};
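kPipelineBufferHighWaterMark above, together with the onReset() logic shown earlier, amounts to a small memory-retention policy: most resets only rewind fPipelineBuffer so its grown capacity is reused on the next flush, but every 100th flush the buffer is fully reset so one unusually heavy frame cannot pin that memory indefinitely. A minimal stand-alone sketch of the policy, using a stand-in arena rather than SkChunkAlloc:

    #include <algorithm>
    #include <cstddef>
    #include <cstdio>

    class Arena {                                  // stand-in for SkChunkAlloc
    public:
        void alloc(std::size_t n)  { fUsed += n; fCapacity = std::max(fCapacity, fUsed); }
        void rewind()              { fUsed = 0; }                 // keep grown capacity
        void reset()               { fUsed = 0; fCapacity = 0; }  // free everything
        std::size_t capacity() const { return fCapacity; }
    private:
        std::size_t fUsed = 0;
        std::size_t fCapacity = 0;
    };

    int main() {
        const int kHighWaterMark = 100;            // mirrors kPipelineBufferHighWaterMark
        Arena pipelineBuffer;
        for (int drawID = 1; drawID <= 250; ++drawID) {
            pipelineBuffer.alloc(64);              // pretend this flush recorded some state
            if (drawID % kHighWaterMark) {
                pipelineBuffer.rewind();           // cheap: capacity stays for the next flush
            } else {
                pipelineBuffer.reset();            // every 100th flush: release the memory
                std::printf("full reset at flush %d, capacity now %zu\n",
                            drawID, pipelineBuffer.capacity());
            }
        }
    }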
@@ -26,27 +26,19 @@ static bool path_fill_type_is_winding(const GrStencilSettings& pathStencilSettin
return isWinding;
}

GrTargetCommands::Cmd* GrTargetCommands::recordDrawBatch(
GrInOrderDrawBuffer* iodb,
GrBatch* batch,
const GrDrawTarget::PipelineInfo& pipelineInfo) {
if (!this->setupPipelineAndShouldDraw(iodb, batch, pipelineInfo)) {
return NULL;
}

GrTargetCommands::Cmd* GrTargetCommands::recordDrawBatch(State* state, GrBatch* batch) {
// Check if there is a Batch Draw we can batch with
if (Cmd::kDrawBatch_CmdType == fCmdBuffer.back().type()) {
if (!fCmdBuffer.empty() && Cmd::kDrawBatch_CmdType == fCmdBuffer.back().type()) {
DrawBatch* previous = static_cast<DrawBatch*>(&fCmdBuffer.back());
if (previous->fBatch->combineIfPossible(batch)) {
if (previous->fState == state && previous->fBatch->combineIfPossible(batch)) {
return NULL;
}
}

return GrNEW_APPEND_TO_RECORDER(fCmdBuffer, DrawBatch, (batch, &fBatchTarget));
return GrNEW_APPEND_TO_RECORDER(fCmdBuffer, DrawBatch, (state, batch, &fBatchTarget));
}
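The merge check above only ever looks at the most recently recorded command: it must be the same kind of draw, it must now also share the same State, and only then is the batch asked whether it can absorb the new work (the added !fCmdBuffer.empty() test guards the very first recording). A simplified stand-alone sketch of that tail-merging shape, with stand-in types:

    #include <cstdio>
    #include <memory>
    #include <utility>
    #include <vector>

    struct State { int key; };                      // stand-in for GrTargetCommands::State

    struct DrawBatchCmd {
        std::shared_ptr<State> state;
        int instanceCount;
        // Stand-in for GrBatch::combineIfPossible: merge only while a pretend
        // per-batch budget allows it.
        bool combineIfPossible(const DrawBatchCmd& next) {
            if (instanceCount + next.instanceCount > 8) { return false; }
            instanceCount += next.instanceCount;
            return true;
        }
    };

    void recordDrawBatch(std::vector<DrawBatchCmd>& cmdBuffer, DrawBatchCmd cmd) {
        // Only the last recorded command is a merge candidate, and only when it
        // uses the same State as the incoming draw.
        if (!cmdBuffer.empty() && cmdBuffer.back().state == cmd.state &&
            cmdBuffer.back().combineIfPossible(cmd)) {
            return;                                 // merged: nothing new is recorded
        }
        cmdBuffer.push_back(std::move(cmd));
    }

    int main() {
        std::vector<DrawBatchCmd> cmdBuffer;
        auto s = std::make_shared<State>(State{1});
        recordDrawBatch(cmdBuffer, {s, 4});
        recordDrawBatch(cmdBuffer, {s, 2});         // folds into the previous command
        recordDrawBatch(cmdBuffer, {s, 5});         // over budget: recorded separately
        std::printf("commands=%zu, first has %d instances\n",
                    cmdBuffer.size(), cmdBuffer.front().instanceCount);
    }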
GrTargetCommands::Cmd* GrTargetCommands::recordStencilPath(
GrInOrderDrawBuffer* iodb,
const GrPipelineBuilder& pipelineBuilder,
const GrPathProcessor* pathProc,
const GrPath* path,

@@ -63,21 +55,17 @@ GrTargetCommands::Cmd* GrTargetCommands::recordStencilPath(
}

GrTargetCommands::Cmd* GrTargetCommands::recordDrawPath(
GrInOrderDrawBuffer* iodb,
State* state,
const GrPathProcessor* pathProc,
const GrPath* path,
const GrStencilSettings& stencilSettings,
const GrDrawTarget::PipelineInfo& pipelineInfo) {
// TODO: Only compare the subset of GrPipelineBuilder relevant to path covering?
if (!this->setupPipelineAndShouldDraw(iodb, pathProc, pipelineInfo)) {
return NULL;
}
DrawPath* dp = GrNEW_APPEND_TO_RECORDER(fCmdBuffer, DrawPath, (path));
const GrStencilSettings& stencilSettings) {
DrawPath* dp = GrNEW_APPEND_TO_RECORDER(fCmdBuffer, DrawPath, (state, path));
dp->fStencilSettings = stencilSettings;
return dp;
}

GrTargetCommands::Cmd* GrTargetCommands::recordDrawPaths(
State* state,
GrInOrderDrawBuffer* iodb,
const GrPathProcessor* pathProc,
const GrPathRange* pathRange,

@@ -92,10 +80,6 @@ GrTargetCommands::Cmd* GrTargetCommands::recordDrawPaths(
SkASSERT(indexValues);
SkASSERT(transformValues);

if (!this->setupPipelineAndShouldDraw(iodb, pathProc, pipelineInfo)) {
return NULL;
}

char* savedIndices;
float* savedTransforms;

@@ -103,7 +87,7 @@ GrTargetCommands::Cmd* GrTargetCommands::recordDrawPaths(
transformValues, transformType,
count, &savedIndices, &savedTransforms);

if (Cmd::kDrawPaths_CmdType == fCmdBuffer.back().type()) {
if (!fCmdBuffer.empty() && Cmd::kDrawPaths_CmdType == fCmdBuffer.back().type()) {
// The previous command was also DrawPaths. Try to collapse this call into the one
// before. Note that stenciling all the paths at once, then covering, may not be
// equivalent to two separate draw calls if there is overlap. Blending won't work,

@@ -117,7 +101,8 @@ GrTargetCommands::Cmd* GrTargetCommands::recordDrawPaths(
transformType == previous->fTransformType &&
stencilSettings == previous->fStencilSettings &&
path_fill_type_is_winding(stencilSettings) &&
!pipelineInfo.willBlendWithDst(pathProc)) {
!pipelineInfo.willBlendWithDst(pathProc) &&
previous->fState == state) {
const int indexBytes = GrPathRange::PathIndexSizeInBytes(indexType);
const int xformSize = GrPathRendering::PathTransformSize(transformType);
if (&previous->fIndices[previous->fCount*indexBytes] == savedIndices &&

@@ -130,7 +115,7 @@ GrTargetCommands::Cmd* GrTargetCommands::recordDrawPaths(
}
}

DrawPaths* dp = GrNEW_APPEND_TO_RECORDER(fCmdBuffer, DrawPaths, (pathRange));
DrawPaths* dp = GrNEW_APPEND_TO_RECORDER(fCmdBuffer, DrawPaths, (state, pathRange));
dp->fIndices = savedIndices;
dp->fIndexType = indexType;
dp->fTransforms = savedTransforms;

@@ -140,8 +125,7 @@ GrTargetCommands::Cmd* GrTargetCommands::recordDrawPaths(
return dp;
}

GrTargetCommands::Cmd* GrTargetCommands::recordClear(GrInOrderDrawBuffer* iodb,
const SkIRect* rect,
GrTargetCommands::Cmd* GrTargetCommands::recordClear(const SkIRect* rect,
GrColor color,
bool canIgnoreRect,
GrRenderTarget* renderTarget) {

@@ -163,8 +147,7 @@ GrTargetCommands::Cmd* GrTargetCommands::recordClear(GrInOrderDrawBuffer* iodb,
return clr;
}

GrTargetCommands::Cmd* GrTargetCommands::recordClearStencilClip(GrInOrderDrawBuffer* iodb,
const SkIRect& rect,
GrTargetCommands::Cmd* GrTargetCommands::recordClearStencilClip(const SkIRect& rect,
bool insideClip,
GrRenderTarget* renderTarget) {
SkASSERT(renderTarget);

@@ -175,8 +158,7 @@ GrTargetCommands::Cmd* GrTargetCommands::recordClearStencilClip(GrInOrderDrawBuf
return clr;
}

GrTargetCommands::Cmd* GrTargetCommands::recordDiscard(GrInOrderDrawBuffer* iodb,
GrRenderTarget* renderTarget) {
GrTargetCommands::Cmd* GrTargetCommands::recordDiscard(GrRenderTarget* renderTarget) {
SkASSERT(renderTarget);

Clear* clr = GrNEW_APPEND_TO_RECORDER(fCmdBuffer, Clear, (renderTarget));

@@ -186,7 +168,6 @@ GrTargetCommands::Cmd* GrTargetCommands::recordDiscard(GrInOrderDrawBuf
void GrTargetCommands::reset() {
fCmdBuffer.reset();
fPrevState = NULL;
}

void GrTargetCommands::flush(GrInOrderDrawBuffer* iodb) {

@@ -194,10 +175,6 @@ void GrTargetCommands::flush(GrInOrderDrawBuffer* iodb) {
return;
}

// Updated every time we find a set state cmd to reflect the current state in the playback
// stream.
SetState* currentState = NULL;

GrGpu* gpu = iodb->getGpu();

// Loop over all batches and generate geometry

@@ -206,13 +183,8 @@ void GrTargetCommands::flush(GrInOrderDrawBuffer* iodb) {
if (Cmd::kDrawBatch_CmdType == genIter->type()) {
DrawBatch* db = reinterpret_cast<DrawBatch*>(genIter.get());
fBatchTarget.resetNumberOfDraws();
db->execute(NULL, currentState);
db->fBatch->generateGeometry(&fBatchTarget, db->fState->getPipeline());
db->fBatch->setNumberOfDraws(fBatchTarget.numberOfDraws());
} else if (Cmd::kSetState_CmdType == genIter->type()) {
SetState* ss = reinterpret_cast<SetState*>(genIter.get());

ss->execute(gpu, currentState);
currentState = ss;
}
}

@@ -231,29 +203,7 @@ void GrTargetCommands::flush(GrInOrderDrawBuffer* iodb) {
gpu->addGpuTraceMarker(&newMarker);
}

if (Cmd::kDrawBatch_CmdType == iter->type()) {
DrawBatch* db = reinterpret_cast<DrawBatch*>(iter.get());
fBatchTarget.flushNext(db->fBatch->numberOfDraws());

if (iter->isTraced()) {
gpu->removeGpuTraceMarker(&newMarker);
}
continue;
}

if (Cmd::kSetState_CmdType == iter->type()) {
// TODO this is just until NVPR is in batch
SetState* ss = reinterpret_cast<SetState*>(iter.get());

if (ss->fPrimitiveProcessor) {
ss->execute(gpu, currentState);
}
currentState = ss;

} else {
iter->execute(gpu, currentState);
}

iter->execute(gpu);
if (iter->isTraced()) {
gpu->removeGpuTraceMarker(&newMarker);
}

@@ -262,7 +212,7 @@ void GrTargetCommands::flush(GrInOrderDrawBuffer* iodb) {
fBatchTarget.postFlush();
}

void GrTargetCommands::StencilPath::execute(GrGpu* gpu, const SetState*) {
void GrTargetCommands::StencilPath::execute(GrGpu* gpu) {
GrGpu::StencilPathState state;
state.fRenderTarget = fRenderTarget.get();
state.fScissor = &fScissor;

@@ -273,37 +223,36 @@ void GrTargetCommands::StencilPath::execute(GrGpu* gpu, const SetState*) {
gpu->stencilPath(this->path(), state);
}

void GrTargetCommands::DrawPath::execute(GrGpu* gpu, const SetState* state) {
SkASSERT(state);
DrawArgs args(state->fPrimitiveProcessor.get(), state->getPipeline(), &state->fDesc,
&state->fBatchTracker);
void GrTargetCommands::DrawPath::execute(GrGpu* gpu) {
if (!fState->fCompiled) {
gpu->buildProgramDesc(&fState->fDesc, *fState->fPrimitiveProcessor, *fState->getPipeline(),
fState->fBatchTracker);
fState->fCompiled = true;
}
DrawArgs args(fState->fPrimitiveProcessor.get(), fState->getPipeline(),
&fState->fDesc, &fState->fBatchTracker);
gpu->drawPath(args, this->path(), fStencilSettings);
}
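Because several recorded commands can now share one State, the fCompiled flag used here (and in DrawPaths::execute just below) makes the comparatively expensive program-descriptor build happen at most once per State, the first time any command that uses it executes. A minimal sketch of that lazy, cached build, with stand-ins rather than the real GrProgramDesc and GrGpu API:

    #include <cstdio>
    #include <memory>
    #include <string>

    struct State {
        int pipelineKey = 0;
        std::string desc;                           // stand-in for GrProgramDesc
        bool compiled = false;                      // plays the role of fCompiled
    };

    // Pretend this is the expensive step (gpu->buildProgramDesc in the diff).
    static void buildProgramDesc(State* s) {
        s->desc = "program-for-pipeline-" + std::to_string(s->pipelineKey);
        std::printf("built %s\n", s->desc.c_str());
    }

    static void executeDrawPath(State* s, int pathId) {
        if (!s->compiled) {                         // build lazily, once per shared State
            buildProgramDesc(s);
            s->compiled = true;
        }
        std::printf("draw path %d with %s\n", pathId, s->desc.c_str());
    }

    int main() {
        auto state = std::make_shared<State>();
        state->pipelineKey = 3;
        executeDrawPath(state.get(), 0);            // first use builds the descriptor
        executeDrawPath(state.get(), 1);            // later uses just reuse it
    }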
void GrTargetCommands::DrawPaths::execute(GrGpu* gpu, const SetState* state) {
SkASSERT(state);
DrawArgs args(state->fPrimitiveProcessor.get(), state->getPipeline(), &state->fDesc,
&state->fBatchTracker);
void GrTargetCommands::DrawPaths::execute(GrGpu* gpu) {
if (!fState->fCompiled) {
gpu->buildProgramDesc(&fState->fDesc, *fState->fPrimitiveProcessor, *fState->getPipeline(),
fState->fBatchTracker);
fState->fCompiled = true;
}
DrawArgs args(fState->fPrimitiveProcessor.get(), fState->getPipeline(),
&fState->fDesc, &fState->fBatchTracker);
gpu->drawPaths(args, this->pathRange(),
fIndices, fIndexType,
fTransforms, fTransformType,
fCount, fStencilSettings);
}

void GrTargetCommands::DrawBatch::execute(GrGpu*, const SetState* state) {
SkASSERT(state);
fBatch->generateGeometry(fBatchTarget, state->getPipeline());
void GrTargetCommands::DrawBatch::execute(GrGpu*) {
fBatchTarget->flushNext(fBatch->numberOfDraws());
}

void GrTargetCommands::SetState::execute(GrGpu* gpu, const SetState*) {
// TODO sometimes we have a prim proc, othertimes we have a GrBatch. Eventually we
// will only have GrBatch and we can delete this
if (fPrimitiveProcessor) {
gpu->buildProgramDesc(&fDesc, *fPrimitiveProcessor, *getPipeline(), fBatchTracker);
}
}

void GrTargetCommands::Clear::execute(GrGpu* gpu, const SetState*) {
void GrTargetCommands::Clear::execute(GrGpu* gpu) {
if (GrColor_ILLEGAL == fColor) {
gpu->discard(this->renderTarget());
} else {

@@ -311,15 +260,15 @@ void GrTargetCommands::Clear::execute(GrGpu* gpu, const SetState*) {
}
}

void GrTargetCommands::ClearStencilClip::execute(GrGpu* gpu, const SetState*) {
void GrTargetCommands::ClearStencilClip::execute(GrGpu* gpu) {
gpu->clearStencilClip(fRect, fInsideClip, this->renderTarget());
}

void GrTargetCommands::CopySurface::execute(GrGpu* gpu, const SetState*) {
void GrTargetCommands::CopySurface::execute(GrGpu* gpu) {
gpu->copySurface(this->dst(), this->src(), fSrcRect, fDstPoint);
}

void GrTargetCommands::XferBarrier::execute(GrGpu* gpu, const SetState* state) {
void GrTargetCommands::XferBarrier::execute(GrGpu* gpu) {
gpu->xferBarrier(fBarrierType);
}

@@ -333,65 +282,10 @@ GrTargetCommands::Cmd* GrTargetCommands::recordCopySurface(GrSurface* dst,
return cs;
}

bool GrTargetCommands::setupPipelineAndShouldDraw(GrInOrderDrawBuffer* iodb,
const GrPrimitiveProcessor* primProc,
const GrDrawTarget::PipelineInfo& pipelineInfo) {
SetState* ss = GrNEW_APPEND_TO_RECORDER(fCmdBuffer, SetState, (primProc));
iodb->setupPipeline(pipelineInfo, ss->pipelineLocation());

if (ss->getPipeline()->mustSkip()) {
fCmdBuffer.pop_back();
return false;
}

ss->fPrimitiveProcessor->initBatchTracker(&ss->fBatchTracker,
ss->getPipeline()->getInitBatchTracker());

if (fPrevState && fPrevState->fPrimitiveProcessor.get() &&
fPrevState->fPrimitiveProcessor->canMakeEqual(fPrevState->fBatchTracker,
*ss->fPrimitiveProcessor,
ss->fBatchTracker) &&
fPrevState->getPipeline()->isEqual(*ss->getPipeline())) {
fCmdBuffer.pop_back();
} else {
fPrevState = ss;
iodb->recordTraceMarkersIfNecessary(ss);
}

this->recordXferBarrierIfNecessary(iodb, pipelineInfo);
return true;
}

bool GrTargetCommands::setupPipelineAndShouldDraw(GrInOrderDrawBuffer* iodb,
GrBatch* batch,
const GrDrawTarget::PipelineInfo& pipelineInfo) {
SetState* ss = GrNEW_APPEND_TO_RECORDER(fCmdBuffer, SetState, ());
iodb->setupPipeline(pipelineInfo, ss->pipelineLocation());

if (ss->getPipeline()->mustSkip()) {
fCmdBuffer.pop_back();
return false;
}

batch->initBatchTracker(ss->getPipeline()->getInitBatchTracker());

if (fPrevState && !fPrevState->fPrimitiveProcessor.get() &&
fPrevState->getPipeline()->isEqual(*ss->getPipeline())) {
fCmdBuffer.pop_back();
} else {
fPrevState = ss;
iodb->recordTraceMarkersIfNecessary(ss);
}

this->recordXferBarrierIfNecessary(iodb, pipelineInfo);
return true;
}

void GrTargetCommands::recordXferBarrierIfNecessary(GrInOrderDrawBuffer* iodb,
const GrDrawTarget::PipelineInfo& info) {
SkASSERT(fPrevState);
const GrXferProcessor& xp = *fPrevState->getXferProcessor();
GrRenderTarget* rt = fPrevState->getRenderTarget();
void GrTargetCommands::recordXferBarrierIfNecessary(const GrPipeline& pipeline,
GrInOrderDrawBuffer* iodb) {
const GrXferProcessor& xp = *pipeline.getXferProcessor();
GrRenderTarget* rt = pipeline.getRenderTarget();

GrXferBarrierType barrierType;
if (!xp.willNeedXferBarrier(rt, *iodb->caps(), &barrierType)) {

@@ -24,6 +24,7 @@ class GrVertexBufferAllocPool;
class GrIndexBufferAllocPool;

class GrTargetCommands : ::SkNoncopyable {
struct State;
struct SetState;

public:

@@ -31,7 +32,6 @@ public:
GrVertexBufferAllocPool* vertexPool,
GrIndexBufferAllocPool* indexPool)
: fCmdBuffer(kCmdBufferInitialSizeInBytes)
, fPrevState(NULL)
, fBatchTarget(gpu, vertexPool, indexPool) {
}

@@ -51,7 +51,7 @@ public:
Cmd(CmdType type) : fMarkerID(-1), fType(type) {}
virtual ~Cmd() {}

virtual void execute(GrGpu*, const SetState*) = 0;
virtual void execute(GrGpu*) = 0;

CmdType type() const { return fType; }

@@ -68,32 +68,23 @@ public:
void reset();
void flush(GrInOrderDrawBuffer*);

Cmd* recordClearStencilClip(GrInOrderDrawBuffer*,
const SkIRect& rect,
Cmd* recordClearStencilClip(const SkIRect& rect,
bool insideClip,
GrRenderTarget* renderTarget);

Cmd* recordDiscard(GrInOrderDrawBuffer*, GrRenderTarget*);

Cmd* recordDraw(GrInOrderDrawBuffer*,
const GrGeometryProcessor*,
const GrDrawTarget::DrawInfo&,
const GrDrawTarget::PipelineInfo&);
Cmd* recordDrawBatch(GrInOrderDrawBuffer*,
GrBatch*,
const GrDrawTarget::PipelineInfo&);
Cmd* recordStencilPath(GrInOrderDrawBuffer*,
const GrPipelineBuilder&,
Cmd* recordDiscard(GrRenderTarget*);
Cmd* recordDrawBatch(State*, GrBatch*);
Cmd* recordStencilPath(const GrPipelineBuilder&,
const GrPathProcessor*,
const GrPath*,
const GrScissorState&,
const GrStencilSettings&);
Cmd* recordDrawPath(GrInOrderDrawBuffer*,
Cmd* recordDrawPath(State*,
const GrPathProcessor*,
const GrPath*,
const GrStencilSettings&,
const GrDrawTarget::PipelineInfo&);
Cmd* recordDrawPaths(GrInOrderDrawBuffer*,
const GrStencilSettings&);
Cmd* recordDrawPaths(State*,
GrInOrderDrawBuffer*,
const GrPathProcessor*,
const GrPathRange*,
const void*,

@@ -103,8 +94,7 @@ public:
int,
const GrStencilSettings&,
const GrDrawTarget::PipelineInfo&);
Cmd* recordClear(GrInOrderDrawBuffer*,
const SkIRect* rect,
Cmd* recordClear(const SkIRect* rect,
GrColor,
bool canIgnoreRect,
GrRenderTarget*);

@@ -118,18 +108,51 @@ private:

typedef GrGpu::DrawArgs DrawArgs;

// Attempts to concat instances from info onto the previous draw. info must represent an
// instanced draw. The caller must have already recorded a new draw state and clip if necessary.
int concatInstancedDraw(GrInOrderDrawBuffer*, const GrDrawTarget::DrawInfo&);
void recordXferBarrierIfNecessary(const GrPipeline&, GrInOrderDrawBuffer*);

bool SK_WARN_UNUSED_RESULT setupPipelineAndShouldDraw(GrInOrderDrawBuffer*,
const GrPrimitiveProcessor*,
const GrDrawTarget::PipelineInfo&);
bool SK_WARN_UNUSED_RESULT setupPipelineAndShouldDraw(GrInOrderDrawBuffer*,
GrBatch*,
const GrDrawTarget::PipelineInfo&);
// TODO: This can be just a pipeline once paths are in batch, and it should live elsewhere
struct State : public SkRefCnt {
// TODO get rid of the prim proc parameter when we use batch everywhere
State(const GrPrimitiveProcessor* primProc = NULL)
: fPrimitiveProcessor(primProc)
, fCompiled(false) {}

void recordXferBarrierIfNecessary(GrInOrderDrawBuffer*, const GrDrawTarget::PipelineInfo&);
~State() { reinterpret_cast<GrPipeline*>(fPipeline.get())->~GrPipeline(); }

// This function is only for getting the location in memory where we will create our
// pipeline object.
GrPipeline* pipelineLocation() { return reinterpret_cast<GrPipeline*>(fPipeline.get()); }

const GrPipeline* getPipeline() const {
return reinterpret_cast<const GrPipeline*>(fPipeline.get());
}
GrRenderTarget* getRenderTarget() const {
return this->getPipeline()->getRenderTarget();
}
const GrXferProcessor* getXferProcessor() const {
return this->getPipeline()->getXferProcessor();
}

void operator delete(void* p) {
//SkDebugf("destruction\n");
}
void* operator new(size_t) {
SkFAIL("All States are created by placement new.");
return sk_malloc_throw(0);
}

void* operator new(size_t, void* p) { return p; }
void operator delete(void* target, void* placement) {
::operator delete(target, placement);
}

typedef GrPendingProgramElement<const GrPrimitiveProcessor> ProgramPrimitiveProcessor;
ProgramPrimitiveProcessor fPrimitiveProcessor;
SkAlignedSStorage<sizeof(GrPipeline)> fPipeline;
GrProgramDesc fDesc;
GrBatchTracker fBatchTracker;
bool fCompiled;
};

struct StencilPath : public Cmd {
StencilPath(const GrPath* path, GrRenderTarget* rt)

@@ -139,7 +162,7 @@ private:

const GrPath* path() const { return fPath.get(); }

void execute(GrGpu*, const SetState*) override;
void execute(GrGpu*) override;

SkMatrix fViewMatrix;
bool fUseHWAA;

@@ -151,25 +174,32 @@ private:
};

struct DrawPath : public Cmd {
DrawPath(const GrPath* path) : Cmd(kDrawPath_CmdType), fPath(path) {}
DrawPath(State* state, const GrPath* path)
: Cmd(kDrawPath_CmdType)
, fState(SkRef(state))
, fPath(path) {}

const GrPath* path() const { return fPath.get(); }

void execute(GrGpu*, const SetState*) override;
void execute(GrGpu*) override;

SkAutoTUnref<State> fState;
GrStencilSettings fStencilSettings;

private:
GrPendingIOResource<const GrPath, kRead_GrIOType> fPath;
};

struct DrawPaths : public Cmd {
DrawPaths(const GrPathRange* pathRange) : Cmd(kDrawPaths_CmdType), fPathRange(pathRange) {}
DrawPaths(State* state, const GrPathRange* pathRange)
: Cmd(kDrawPaths_CmdType)
, fState(SkRef(state))
, fPathRange(pathRange) {}

const GrPathRange* pathRange() const { return fPathRange.get(); }

void execute(GrGpu*, const SetState*) override;
void execute(GrGpu*) override;

SkAutoTUnref<State> fState;
char* fIndices;
GrDrawTarget::PathIndexType fIndexType;
float* fTransforms;

@@ -187,7 +217,7 @@ private:

GrRenderTarget* renderTarget() const { return fRenderTarget.get(); }

void execute(GrGpu*, const SetState*) override;
void execute(GrGpu*) override;

SkIRect fRect;
GrColor fColor;

@@ -203,7 +233,7 @@ private:

GrRenderTarget* renderTarget() const { return fRenderTarget.get(); }

void execute(GrGpu*, const SetState*) override;
void execute(GrGpu*) override;

SkIRect fRect;
bool fInsideClip;

@@ -222,7 +252,7 @@ private:
GrSurface* dst() const { return fDst.get(); }
GrSurface* src() const { return fSrc.get(); }

void execute(GrGpu*, const SetState*) override;
void execute(GrGpu*) override;

SkIPoint fDstPoint;
SkIRect fSrcRect;

@@ -232,49 +262,18 @@ private:
GrPendingIOResource<GrSurface, kRead_GrIOType> fSrc;
};

// TODO: rename to SetPipeline once pp, batch tracker, and desc are removed
struct SetState : public Cmd {
// TODO get rid of the prim proc parameter when we use batch everywhere
SetState(const GrPrimitiveProcessor* primProc = NULL)
: Cmd(kSetState_CmdType)
, fPrimitiveProcessor(primProc) {}

~SetState() { reinterpret_cast<GrPipeline*>(fPipeline.get())->~GrPipeline(); }

// This function is only for getting the location in memory where we will create our
// pipeline object.
GrPipeline* pipelineLocation() { return reinterpret_cast<GrPipeline*>(fPipeline.get()); }

const GrPipeline* getPipeline() const {
return reinterpret_cast<const GrPipeline*>(fPipeline.get());
}
GrRenderTarget* getRenderTarget() const {
return this->getPipeline()->getRenderTarget();
}
const GrXferProcessor* getXferProcessor() const {
return this->getPipeline()->getXferProcessor();
}

void execute(GrGpu*, const SetState*) override;

typedef GrPendingProgramElement<const GrPrimitiveProcessor> ProgramPrimitiveProcessor;
ProgramPrimitiveProcessor fPrimitiveProcessor;
SkAlignedSStorage<sizeof(GrPipeline)> fPipeline;
GrProgramDesc fDesc;
GrBatchTracker fBatchTracker;
};

struct DrawBatch : public Cmd {
DrawBatch(GrBatch* batch, GrBatchTarget* batchTarget)
DrawBatch(State* state, GrBatch* batch, GrBatchTarget* batchTarget)
: Cmd(kDrawBatch_CmdType)
, fState(SkRef(state))
, fBatch(SkRef(batch))
, fBatchTarget(batchTarget) {
SkASSERT(!batch->isUsed());
}

void execute(GrGpu*, const SetState*) override;
void execute(GrGpu*) override;

// TODO it wouldn't be too hard to let batches allocate in the cmd buffer
SkAutoTUnref<State> fState;
SkAutoTUnref<GrBatch> fBatch;

private:

@@ -284,19 +283,18 @@ private:
struct XferBarrier : public Cmd {
XferBarrier() : Cmd(kXferBarrier_CmdType) {}

void execute(GrGpu*, const SetState*) override;
void execute(GrGpu*) override;

GrXferBarrierType fBarrierType;
};

static const int kCmdBufferInitialSizeInBytes = 8 * 1024;
static const int kCmdBufferInitialSizeInBytes = 8 * 1024;

typedef void* TCmdAlign; // This wouldn't be enough align if a command used long double.
typedef GrTRecorder<Cmd, TCmdAlign> CmdBuffer;
typedef void* TCmdAlign; // This wouldn't be enough align if a command used long double.
typedef GrTRecorder<Cmd, TCmdAlign> CmdBuffer;

CmdBuffer fCmdBuffer;
SetState* fPrevState;
GrBatchTarget fBatchTarget;
CmdBuffer fCmdBuffer;
GrBatchTarget fBatchTarget;
};

#endif