Split up opLists

Split into:
   https://skia-review.googlesource.com/c/11793/ (Remove lastProxy capability from GrSurface)

Change-Id: I903ba30e17de4aab8cb0d2cc3281ae5c262142f9
Reviewed-on: https://skia-review.googlesource.com/11581
Commit-Queue: Robert Phillips <robertphillips@google.com>
Reviewed-by: Brian Osman <brianosman@google.com>
This commit is contained in:
Robert Phillips 2017-04-18 11:21:38 -04:00 committed by Skia Commit-Bot
parent 6e83479994
commit bc8ee52d46
11 changed files with 67 additions and 117 deletions

View File

@ -378,9 +378,9 @@ private:
// This back-pointer is required so that we can add a dependency between
// the opList used to create the current contents of this surface
// and the opList of a destination surface to which this one is being drawn or copied.
// This pointer is unreffed. OpLists own a ref on their surface proxies.
GrOpList* fLastOpList;
typedef GrIORefProxy INHERITED;
};

View File

@ -26,10 +26,10 @@
void GrDrawingManager::cleanup() {
for (int i = 0; i < fOpLists.count(); ++i) {
fOpLists[i]->makeClosed(); // no opList should receive a new command after this
fOpLists[i]->clearTarget();
// We shouldn't need to do this, but it turns out some clients still hold onto opLists
// after a cleanup
// after a cleanup.
// MDB TODO: is this still true?
fOpLists[i]->reset();
}
@ -83,6 +83,7 @@ void GrDrawingManager::internalFlush(GrSurfaceProxy*, GrResourceCache::FlushType
// but need to be flushed anyway. Closing such GrOpLists here will mean new
// GrOpLists will be created to replace them if the SkGpuDevice(s) write to them again.
fOpLists[i]->makeClosed();
SkDEBUGCODE(fOpLists[i]->validateTargetsSingleRenderTarget());
}
#ifdef ENABLE_MDB
@ -115,6 +116,7 @@ void GrDrawingManager::internalFlush(GrSurfaceProxy*, GrResourceCache::FlushType
if (!opList) {
continue; // Odd - but not a big deal
}
opList->makeClosed();
SkDEBUGCODE(opList->validateTargetsSingleRenderTarget());
opList->prepareOps(&fFlushState);
if (!opList->executeOps(&fFlushState)) {
@ -151,17 +153,7 @@ void GrDrawingManager::internalFlush(GrSurfaceProxy*, GrResourceCache::FlushType
fOpLists[i]->reset();
}
#ifndef ENABLE_MDB
// When MDB is disabled we keep reusing the same GrOpList
if (fOpLists.count()) {
SkASSERT(fOpLists.count() == 1);
// Clear out this flag so the topological sort's SkTTopoSort_CheckAllUnmarked check
// won't bark
fOpLists[0]->resetFlag(GrOpList::kWasOutput_Flag);
}
#else
fOpLists.reset();
#endif
fFlushState.reset();
// We always have to notify the cache when it requested a flush so it can reset its state.
@ -198,19 +190,12 @@ void GrDrawingManager::addPreFlushCallbackObject(sk_sp<GrPreFlushCallbackObject>
sk_sp<GrRenderTargetOpList> GrDrawingManager::newRTOpList(sk_sp<GrRenderTargetProxy> rtp) {
SkASSERT(fContext);
#ifndef ENABLE_MDB
// When MDB is disabled we always just return the single GrOpList
if (fOpLists.count()) {
SkASSERT(fOpLists.count() == 1);
// In the non-MDB-world the same GrOpList gets reused for multiple render targets.
// Update this pointer so all the asserts are happy
rtp->setLastOpList(fOpLists[0].get());
// DrawingManager gets the creation ref - this ref is for the caller
// TODO: although this is true right now it isn't cool
return sk_ref_sp((GrRenderTargetOpList*) fOpLists[0].get());
// This is a temporary fix for the partial-MDB world. In that world we're not reordering
// so ops that (in the single opList world) would've just glommed onto the end of the single
// opList but referred to a far earlier RT need to appear in their own opList.
if (!fOpLists.empty()) {
fOpLists.back()->makeClosed();
}
#endif
sk_sp<GrRenderTargetOpList> opList(new GrRenderTargetOpList(rtp,
fContext->getGpu(),
@ -227,19 +212,21 @@ sk_sp<GrRenderTargetOpList> GrDrawingManager::newRTOpList(sk_sp<GrRenderTargetPr
sk_sp<GrTextureOpList> GrDrawingManager::newTextureOpList(sk_sp<GrTextureProxy> textureProxy) {
SkASSERT(fContext);
sk_sp<GrTextureOpList> opList(new GrTextureOpList(std::move(textureProxy), fContext->getGpu(),
// This is a temporary fix for the partial-MDB world. In that world we're not reordering
// so ops that (in the single opList world) would've just glommed onto the end of the single
// opList but referred to a far earlier RT need to appear in their own opList.
if (!fOpLists.empty()) {
fOpLists.back()->makeClosed();
}
sk_sp<GrTextureOpList> opList(new GrTextureOpList(textureProxy, fContext->getGpu(),
fContext->getAuditTrail()));
#ifndef ENABLE_MDB
// When MDB is disabled we still create a new GrOpList, but don't store or ref it - we rely
// on the caller to immediately execute and free it.
return opList;
#else
*fOpLists.append() = opList;
SkASSERT(textureProxy->getLastOpList() == opList.get());
fOpLists.push_back() = opList;
// Drawing manager gets the creation ref - this ref is for the caller
return opList;
#endif
}
GrAtlasTextContext* GrDrawingManager::getAtlasTextContext() {

View File

@ -21,9 +21,7 @@ uint32_t GrOpList::CreateUniqueID() {
}
GrOpList::GrOpList(sk_sp<GrSurfaceProxy> surfaceProxy, GrAuditTrail* auditTrail)
// MDB TODO: in the future opLists will own the GrSurfaceProxy they target.
// For now, preserve the status quo.
: fTarget(surfaceProxy.get())
: fTarget(surfaceProxy)
, fAuditTrail(auditTrail)
, fUniqueID(CreateUniqueID())
, fFlags(0) {

View File

@ -29,11 +29,7 @@ public:
virtual bool executeOps(GrOpFlushState* flushState) = 0;
virtual void makeClosed() {
// We only close GrOpLists when MDB is enabled. When MDB is disabled there is only
// ever one GrOpList and all calls will be funnelled into it.
#ifdef ENABLE_MDB
this->setFlag(kClosed_Flag);
#endif
}
// TODO: it seems a bit odd that GrOpList has nothing to clear on reset
@ -44,10 +40,6 @@ public:
virtual void abandonGpuResources() = 0;
virtual void freeGpuResources() = 0;
// TODO: this entry point is only needed in the non-MDB world. Remove when
// we make the switch to MDB
void clearTarget() { fTarget = nullptr; }
bool isClosed() const { return this->isSetFlag(kClosed_Flag); }
/*
@ -82,8 +74,8 @@ public:
SkDEBUGCODE(virtual void validateTargetsSingleRenderTarget() const = 0;)
protected:
GrSurfaceProxy* fTarget;
GrAuditTrail* fAuditTrail;
sk_sp<GrSurfaceProxy> fTarget;
GrAuditTrail* fAuditTrail;
private:
friend class GrDrawingManager; // for resetFlag & TopoSortTraits
@ -135,11 +127,11 @@ private:
void addDependency(GrOpList* dependedOn);
uint32_t fUniqueID;
uint32_t fFlags;
uint32_t fUniqueID;
uint32_t fFlags;
// 'this' GrOpList relies on the output of the GrOpLists in 'fDependencies'
SkTDArray<GrOpList*> fDependencies;
SkTDArray<GrOpList*> fDependencies;
typedef SkRefCnt INHERITED;
};

View File

@ -30,16 +30,6 @@ sk_sp<GrRenderTargetContext> GrPreFlushResourceProvider::makeRenderTargetContext
return nullptr;
}
// MDB TODO: This explicit resource creation is required in the pre-MDB world so that the
// pre-Flush ops are placed in their own opList.
sk_sp<GrRenderTargetOpList> opList(new GrRenderTargetOpList(
sk_ref_sp(proxy->asRenderTargetProxy()),
fDrawingMgr->fContext->getGpu(),
fDrawingMgr->fContext->resourceProvider(),
fDrawingMgr->fContext->getAuditTrail(),
fDrawingMgr->fOptionsForOpLists));
proxy->setLastOpList(opList.get());
sk_sp<GrRenderTargetContext> renderTargetContext(
fDrawingMgr->makeRenderTargetContext(std::move(proxy),
std::move(colorSpace),
@ -60,16 +50,6 @@ sk_sp<GrRenderTargetContext> GrPreFlushResourceProvider::makeRenderTargetContext
sk_sp<GrSurfaceProxy> proxy,
sk_sp<SkColorSpace> colorSpace,
const SkSurfaceProps* props) {
// MDB TODO: This explicit resource creation is required in the pre-MDB world so that the
// pre-Flush ops are placed in their own opList.
sk_sp<GrRenderTargetOpList> opList(new GrRenderTargetOpList(
sk_ref_sp(proxy->asRenderTargetProxy()),
fDrawingMgr->fContext->getGpu(),
fDrawingMgr->fContext->resourceProvider(),
fDrawingMgr->fContext->getAuditTrail(),
fDrawingMgr->fOptionsForOpLists));
proxy->setLastOpList(opList.get());
sk_sp<GrRenderTargetContext> renderTargetContext(
fDrawingMgr->makeRenderTargetContext(std::move(proxy),
std::move(colorSpace),

View File

@ -128,7 +128,7 @@ GrRenderTargetOpList* GrRenderTargetContext::getOpList() {
return fOpList.get();
}
// TODO: move this (and GrTextContext::copy) to GrSurfaceContext?
// MDB TODO: move this (and GrTextContext::copy) to GrSurfaceContext?
bool GrRenderTargetContext::onCopy(GrSurfaceProxy* srcProxy,
const SkIRect& srcRect,
const SkIPoint& dstPoint) {

View File

@ -87,7 +87,7 @@ void GrRenderTargetOpList::validateTargetsSingleRenderTarget() const {
#endif
void GrRenderTargetOpList::prepareOps(GrOpFlushState* flushState) {
// MDB TODO: add SkASSERT(this->isClosed());
SkASSERT(this->isClosed());
// Loop over the ops that haven't yet been prepared.
for (int i = 0; i < fRecordedOps.count(); ++i) {
@ -140,34 +140,34 @@ bool GrRenderTargetOpList::executeOps(GrOpFlushState* flushState) {
if (0 == fRecordedOps.count()) {
return false;
}
// Draw all the generated geometry.
const GrRenderTarget* currentRenderTarget = fRecordedOps[0].fRenderTarget.get();
SkASSERT(currentRenderTarget);
std::unique_ptr<GrGpuCommandBuffer> commandBuffer;
#ifdef SK_DEBUG
GrSurface* target = fTarget->instantiate(flushState->resourceProvider());
if (!target) {
return false;
}
const GrRenderTarget* currentRenderTarget = target->asRenderTarget();
SkASSERT(currentRenderTarget);
#endif
std::unique_ptr<GrGpuCommandBuffer> commandBuffer = create_command_buffer(fGpu);
flushState->setCommandBuffer(commandBuffer.get());
// Draw all the generated geometry.
for (int i = 0; i < fRecordedOps.count(); ++i) {
if (!fRecordedOps[i].fOp) {
continue;
}
SkASSERT(fRecordedOps[i].fRenderTarget.get());
SkASSERT(fRecordedOps[i].fRenderTarget.get() == currentRenderTarget);
if (fRecordedOps[i].fOp->needsCommandBufferIsolation()) {
// This op is a special snowflake and must occur between command buffers
// TODO: make this go through the command buffer
finish_command_buffer(commandBuffer.get());
currentRenderTarget = fRecordedOps[i].fRenderTarget.get();
commandBuffer.reset();
flushState->setCommandBuffer(commandBuffer.get());
} else if (fRecordedOps[i].fRenderTarget.get() != currentRenderTarget) {
// Changing renderTarget
// MDB TODO: this code path goes away
finish_command_buffer(commandBuffer.get());
currentRenderTarget = fRecordedOps[i].fRenderTarget.get();
commandBuffer = create_command_buffer(fGpu);
flushState->setCommandBuffer(commandBuffer.get());
} else if (!commandBuffer) {
commandBuffer = create_command_buffer(fGpu);
flushState->setCommandBuffer(commandBuffer.get());
@ -255,6 +255,7 @@ void GrRenderTargetOpList::fullClear(GrRenderTargetContext* renderTargetContext,
////////////////////////////////////////////////////////////////////////////////
// MDB TODO: fuse with GrTextureOpList::copySurface
bool GrRenderTargetOpList::copySurface(GrResourceProvider* resourceProvider,
GrRenderTargetContext* dst,
GrSurfaceProxy* src,
@ -311,6 +312,13 @@ GrOp* GrRenderTargetOpList::recordOp(std::unique_ptr<GrOp> op,
return nullptr;
}
#ifdef SK_DEBUG
if (!fRecordedOps.empty()) {
GrRenderTargetOpList::RecordedOp& back = fRecordedOps.back();
SkASSERT(back.fRenderTarget == renderTarget);
}
#endif
// A closed GrOpList should never receive new/more ops
SkASSERT(!this->isClosed());
@ -410,7 +418,7 @@ void GrRenderTargetOpList::forwardCombine() {
fRecordedOps[j].fOp = std::move(fRecordedOps[i].fOp);
break;
}
// Stop going traversing if we would cause a painter's order violation.
// Stop traversing if we would cause a painter's order violation.
if (!can_reorder(fRecordedOps[j].fOp->bounds(), op->bounds())) {
GrOP_INFO("\t\tForward: Intersects with (%s, opID: %u)\n", candidate.fOp->name(),
candidate.fOp->uniqueID());

View File

@ -32,10 +32,9 @@ GrSurfaceProxy::GrSurfaceProxy(sk_sp<GrSurface> surface, SkBackingFit fit)
}
GrSurfaceProxy::~GrSurfaceProxy() {
if (fLastOpList) {
fLastOpList->clearTarget();
}
SkSafeUnref(fLastOpList);
// For this to be deleted the opList that held a ref on it (if there was one) must have been
// deleted. Which would have cleared out this back pointer.
SkASSERT(!fLastOpList);
}
GrSurface* GrSurfaceProxy::instantiate(GrResourceProvider* resourceProvider) {
@ -97,15 +96,14 @@ int GrSurfaceProxy::worstCaseHeight(const GrCaps& caps) const {
}
void GrSurfaceProxy::setLastOpList(GrOpList* opList) {
#ifdef SK_DEBUG
if (fLastOpList) {
// The non-MDB world never closes so we can't check this condition
#ifdef ENABLE_MDB
SkASSERT(fLastOpList->isClosed());
#endif
fLastOpList->clearTarget();
}
#endif
SkRefCnt_SafeAssign(fLastOpList, opList);
// Un-reffed
fLastOpList = opList;
}
GrRenderTargetOpList* GrSurfaceProxy::getLastRenderTargetOpList() {

View File

@ -68,32 +68,16 @@ GrTextureOpList* GrTextureContext::getOpList() {
return fOpList.get();
}
// TODO: move this (and GrRenderTargetContext::copy) to GrSurfaceContext?
// MDB TODO: move this (and GrRenderTargetContext::copy) to GrSurfaceContext?
bool GrTextureContext::onCopy(GrSurfaceProxy* srcProxy,
const SkIRect& srcRect,
const SkIPoint& dstPoint) {
ASSERT_SINGLE_OWNER
RETURN_FALSE_IF_ABANDONED
SkDEBUGCODE(this->validate();)
GR_AUDIT_TRAIL_AUTO_FRAME(fAuditTrail, "GrTextureContext::copy");
GR_AUDIT_TRAIL_AUTO_FRAME(fAuditTrail, "GrTextureContext::onCopy");
#ifndef ENABLE_MDB
// We can't yet fully defer copies to textures, so GrTextureContext::copySurface will
// execute the copy immediately. Ensure the data is ready.
fContext->contextPriv().flushSurfaceWrites(srcProxy);
#endif
GrTextureOpList* opList = this->getOpList();
bool result = opList->copySurface(fContext->resourceProvider(),
fTextureProxy.get(), srcProxy, srcRect, dstPoint);
#ifndef ENABLE_MDB
GrOpFlushState flushState(fContext->getGpu(), nullptr);
opList->prepareOps(&flushState);
opList->executeOps(&flushState);
opList->reset();
#endif
return result;
return this->getOpList()->copySurface(fContext->resourceProvider(),
fTextureProxy.get(), srcProxy, srcRect, dstPoint);
}

View File

@ -50,7 +50,7 @@ void GrTextureOpList::validateTargetsSingleRenderTarget() const {
#endif
void GrTextureOpList::prepareOps(GrOpFlushState* flushState) {
// MDB TODO: add SkASSERT(this->isClosed());
SkASSERT(this->isClosed());
// Loop over the ops that haven't yet generated their geometry
for (int i = 0; i < fRecordedOps.count(); ++i) {
@ -81,6 +81,7 @@ void GrTextureOpList::reset() {
////////////////////////////////////////////////////////////////////////////////
// MDB TODO: fuse with GrRenderTargetOpList::copySurface
bool GrTextureOpList::copySurface(GrResourceProvider* resourceProvider,
GrSurfaceProxy* dst,
GrSurfaceProxy* src,

View File

@ -899,6 +899,8 @@ DEF_GPUTEST_FOR_RENDERING_CONTEXTS(SurfaceCreationWithColorSpace_Gpu, reporter,
test_surface_creation_and_snapshot_with_color_space(reporter, "wrapped", f16Support,
wrappedSurfaceMaker);
context->flush();
for (auto textureHandle : textureHandles) {
context->getGpu()->deleteTestingOnlyBackendTexture(textureHandle);
}