Revert "Add notion of active opList to GrDrawingManager"
This reverts commit 328f9c72ee
.
Reason for revert: assert is breaking Chrome roll
Original change's description:
> Add notion of active opList to GrDrawingManager
>
> With an incrementally sorted list of opLists we will no longer be able to
> assume that the last opList is the active one.
>
> This is pulled out of: https://skia-review.googlesource.com/c/skia/+/150614 (Begin centralizing opList DAG) and also contains some of the non-substantive changes from that CL.
>
> Change-Id: Ic3d264565afa61b19e2125c38a6ad053292bc7d9
> Reviewed-on: https://skia-review.googlesource.com/151021
> Reviewed-by: Brian Salomon <bsalomon@google.com>
> Commit-Queue: Robert Phillips <robertphillips@google.com>
TBR=bsalomon@google.com,robertphillips@google.com
Change-Id: I01647301a59e21da8dd1ad2aa4652cfc52b095ea
No-Presubmit: true
No-Tree-Checks: true
No-Try: true
Reviewed-on: https://skia-review.googlesource.com/151369
Reviewed-by: Robert Phillips <robertphillips@google.com>
Commit-Queue: Robert Phillips <robertphillips@google.com>

Parent: cca8c6fa47
Commit: 936e33a346
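
For context on what is being backed out: the reverted change replaced the implicit rule that fOpLists.back() is the opList still accepting ops with an explicit fActiveOpList pointer, plus a debug-only validate() that asserted the two stayed consistent. The assert cited in the revert reason is presumably one of those checks, e.g. SkASSERT(fActiveOpList == fOpLists.back().get()), removed in the diff below. A minimal, hypothetical sketch of that bookkeeping, using simplified stand-in types rather than Skia's, might look like this:

// Hypothetical sketch only -- simplified stand-ins, not Skia types.
#include <memory>
#include <vector>

struct OpList {
    bool closed = false;
    void makeClosed() { closed = true; }
};

class DrawingManager {
public:
    // Opening a new opList closes whichever one was active before it.
    OpList* newOpList() {
        if (fActiveOpList) {
            fActiveOpList->makeClosed();
            fActiveOpList = nullptr;
        }
        fOpLists.push_back(std::make_unique<OpList>());
        fActiveOpList = fOpLists.back().get();
        return fActiveOpList;
    }

    // Flushing closes every opList; nothing stays active afterwards.
    void flush() {
        for (const auto& opList : fOpLists) {
            opList->makeClosed();
        }
        fActiveOpList = nullptr;
        // fOpLists could be (re)sorted at this point; the active list is
        // tracked explicitly instead of being assumed to be fOpLists.back().
    }

private:
    std::vector<std::unique_ptr<OpList>> fOpLists;  // may be reordered by a sort
    OpList* fActiveOpList = nullptr;                // the opList still open for new ops
};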

src/gpu/GrDrawingManager.cpp

@@ -103,7 +103,7 @@ void GrDrawingManager::freeGpuResources() {
     fSoftwarePathRenderer = nullptr;
 }
 
-static void end_oplist_flush_if_not_unique(GrOpList* opList) {
+static void end_oplist_flush_if_not_unique(const sk_sp<GrOpList>& opList) {
     if (!opList->unique()) {
         // TODO: Eventually this should be guaranteed unique: http://skbug.com/7111
         opList->endFlush();
@@ -120,22 +120,19 @@ GrSemaphoresSubmitted GrDrawingManager::internalFlush(GrSurfaceProxy*,
     if (fFlushing || this->wasAbandoned()) {
         return GrSemaphoresSubmitted::kNo;
     }
-    SkDEBUGCODE(this->validate());
-
     GrGpu* gpu = fContext->contextPriv().getGpu();
     if (!gpu) {
         return GrSemaphoresSubmitted::kNo; // Can't flush while DDL recording
     }
     fFlushing = true;
 
-    // Semi-usually the GrOpLists are already closed at this point, but sometimes Ganesh
-    // needs to flush mid-draw. In that case, the SkGpuDevice's GrOpLists won't be closed
-    // but need to be flushed anyway. Closing such GrOpLists here will mean new
-    // GrOpLists will be created to replace them if the SkGpuDevice(s) write to them again.
     for (int i = 0; i < fOpLists.count(); ++i) {
+        // Semi-usually the GrOpLists are already closed at this point, but sometimes Ganesh
+        // needs to flush mid-draw. In that case, the SkGpuDevice's GrOpLists won't be closed
+        // but need to be flushed anyway. Closing such GrOpLists here will mean new
+        // GrOpLists will be created to replace them if the SkGpuDevice(s) write to them again.
        fOpLists[i]->makeClosed(*fContext->contextPriv().caps());
     }
-    fActiveOpList = nullptr;
 
     if (fSortRenderTargets) {
         SkDEBUGCODE(bool result =) SkTTopoSort<GrOpList, GrOpList::TopoSortTraits>(&fOpLists);
@@ -222,7 +219,7 @@ GrSemaphoresSubmitted GrDrawingManager::internalFlush(GrSurfaceProxy*,
     for (int i = startIndex; i < stopIndex; ++i) {
         if (fOpLists[i] && !fOpLists[i]->isFullyInstantiated()) {
             // If the backing surface wasn't allocated drop the entire opList.
-            end_oplist_flush_if_not_unique(fOpLists[i].get()); // http://skbug.com/7111
+            end_oplist_flush_if_not_unique(fOpLists[i]); // http://skbug.com/7111
             fOpLists[i] = nullptr;
         }
         if (fOpLists[i]) {
@@ -298,14 +295,14 @@ bool GrDrawingManager::executeOpLists(int startIndex, int stopIndex, GrOpFlushSt
         if (resourceProvider->explicitlyAllocateGPUResources()) {
             if (!fOpLists[i]->isFullyInstantiated()) {
                 // If the backing surface wasn't allocated drop the draw of the entire opList.
-                end_oplist_flush_if_not_unique(fOpLists[i].get()); // http://skbug.com/7111
+                end_oplist_flush_if_not_unique(fOpLists[i]); // http://skbug.com/7111
                 fOpLists[i] = nullptr;
                 continue;
             }
         } else {
             if (!fOpLists[i]->instantiate(resourceProvider)) {
                 SkDebugf("OpList failed to instantiate.\n");
-                end_oplist_flush_if_not_unique(fOpLists[i].get()); // http://skbug.com/7111
+                end_oplist_flush_if_not_unique(fOpLists[i]); // http://skbug.com/7111
                 fOpLists[i] = nullptr;
                 continue;
             }
@@ -353,7 +350,7 @@ bool GrDrawingManager::executeOpLists(int startIndex, int stopIndex, GrOpFlushSt
         if (!fOpLists[i]) {
            continue;
         }
-        end_oplist_flush_if_not_unique(fOpLists[i].get()); // http://skbug.com/7111
+        end_oplist_flush_if_not_unique(fOpLists[i]); // http://skbug.com/7111
         fOpLists[i] = nullptr;
     }
 
@@ -365,7 +362,6 @@ GrSemaphoresSubmitted GrDrawingManager::prepareSurfaceForExternalIO(
     if (this->wasAbandoned()) {
         return GrSemaphoresSubmitted::kNo;
     }
-    SkDEBUGCODE(this->validate());
     SkASSERT(proxy);
 
     GrGpu* gpu = fContext->contextPriv().getGpu();
@@ -392,8 +388,6 @@ GrSemaphoresSubmitted GrDrawingManager::prepareSurfaceForExternalIO(
             gpu->regenerateMipMapLevels(tex);
         }
     }
-
-    SkDEBUGCODE(this->validate());
     return result;
 }
 
@@ -402,13 +396,10 @@ void GrDrawingManager::addOnFlushCallbackObject(GrOnFlushCallbackObject* onFlush
 }
 
 void GrDrawingManager::moveOpListsToDDL(SkDeferredDisplayList* ddl) {
-    SkDEBUGCODE(this->validate());
-
-    // no opList should receive a new command after this
     for (int i = 0; i < fOpLists.count(); ++i) {
+        // no opList should receive a new command after this
         fOpLists[i]->makeClosed(*fContext->contextPriv().caps());
     }
-    fActiveOpList = nullptr;
 
     SkASSERT(ddl->fOpLists.empty());
     ddl->fOpLists.swap(fOpLists);
@@ -418,14 +409,10 @@ void GrDrawingManager::moveOpListsToDDL(SkDeferredDisplayList* ddl) {
             ddl->fPendingPaths = ccpr->detachPendingPaths();
         }
     }
-
-    SkDEBUGCODE(this->validate());
 }
 
 void GrDrawingManager::copyOpListsFromDDL(const SkDeferredDisplayList* ddl,
                                           GrRenderTargetProxy* newDest) {
-    SkDEBUGCODE(this->validate());
-
     // Here we jam the proxy that backs the current replay SkSurface into the LazyProxyData.
     // The lazy proxy that references it (in the copied opLists) will steal its GrTexture.
     ddl->fLazyProxyData->fReplayDest = newDest;
@@ -436,42 +423,18 @@ void GrDrawingManager::copyOpListsFromDDL(const SkDeferredDisplayList* ddl,
         ccpr->mergePendingPaths(ddl->fPendingPaths);
     }
     fOpLists.push_back_n(ddl->fOpLists.count(), ddl->fOpLists.begin());
-
-    SkDEBUGCODE(this->validate());
 }
 
-#ifdef SK_DEBUG
-void GrDrawingManager::validate() const {
-    if (fActiveOpList) {
-        SkASSERT(!fOpLists.empty());
-        SkASSERT(!fActiveOpList->isClosed());
-        SkASSERT(fActiveOpList == fOpLists.back().get());
-    }
-
-    for (int i = 0; i < fOpLists.count(); ++i) {
-        if (fActiveOpList != fOpLists[i].get()) {
-            SkASSERT(fOpLists[i].get()->isClosed());
-        }
-    }
-
-    if (!fOpLists.empty() && !fOpLists.back()->isClosed()) {
-        SkASSERT(fActiveOpList == fOpLists.back().get());
-    }
-}
-#endif
-
 sk_sp<GrRenderTargetOpList> GrDrawingManager::newRTOpList(GrRenderTargetProxy* rtp,
                                                           bool managedOpList) {
-    SkDEBUGCODE(this->validate());
     SkASSERT(fContext);
 
-    if (fActiveOpList) {
+    if (!fOpLists.empty()) {
         // This is a temporary fix for the partial-MDB world. In that world we're not
         // reordering so ops that (in the single opList world) would've just glommed onto the
         // end of the single opList but referred to a far earlier RT need to appear in their
         // own opList.
-        fActiveOpList->makeClosed(*fContext->contextPriv().caps());
-        fActiveOpList = nullptr;
+        fOpLists.back()->makeClosed(*fContext->contextPriv().caps());
     }
 
     auto resourceProvider = fContext->contextPriv().resourceProvider();
@@ -485,24 +448,20 @@ sk_sp<GrRenderTargetOpList> GrDrawingManager::newRTOpList(GrRenderTargetProxy* r
 
     if (managedOpList) {
         fOpLists.push_back() = opList;
-        fActiveOpList = opList.get();
     }
 
-    SkDEBUGCODE(this->validate());
     return opList;
 }
 
 sk_sp<GrTextureOpList> GrDrawingManager::newTextureOpList(GrTextureProxy* textureProxy) {
-    SkDEBUGCODE(this->validate());
     SkASSERT(fContext);
 
-    if (fActiveOpList) {
+    if (!fOpLists.empty()) {
         // This is a temporary fix for the partial-MDB world. In that world we're not
         // reordering so ops that (in the single opList world) would've just glommed onto the
         // end of the single opList but referred to a far earlier RT need to appear in their
         // own opList.
-        fActiveOpList->makeClosed(*fContext->contextPriv().caps());
-        fActiveOpList = nullptr;
+        fOpLists.back()->makeClosed(*fContext->contextPriv().caps());
     }
 
     sk_sp<GrTextureOpList> opList(new GrTextureOpList(fContext->contextPriv().resourceProvider(),
@@ -513,9 +472,7 @@ sk_sp<GrTextureOpList> GrDrawingManager::newTextureOpList(GrTextureProxy* textur
     SkASSERT(textureProxy->getLastOpList() == opList.get());
 
     fOpLists.push_back() = opList;
-    fActiveOpList = opList.get();
 
-    SkDEBUGCODE(this->validate());
     return opList;
 }
 

src/gpu/GrDrawingManager.h

@@ -102,8 +102,6 @@ private:
                                         int numSemaphores,
                                         GrBackendSemaphore backendSemaphores[]);
 
-    SkDEBUGCODE(void validate() const);
-
     friend class GrContext; // for access to: ctor, abandon, reset & flush
     friend class GrContextPriv; // access to: flush
     friend class GrOnFlushResourceProvider; // this is just a shallow wrapper around this class
@@ -120,7 +118,6 @@ private:
 
     bool fAbandoned;
     SkTArray<sk_sp<GrOpList>> fOpLists;
-    GrOpList* fActiveOpList = nullptr;
     // These are the IDs of the opLists currently being flushed (in internalFlush)
     SkSTArray<8, uint32_t, true> fFlushingOpListIDs;
     // These are the new opLists generated by the onFlush CBs
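
The "incrementally sorted list of opLists" in the quoted description refers to ordering fOpLists by dependency; the existing fSortRenderTargets path in internalFlush() above already runs SkTTopoSort<GrOpList, GrOpList::TopoSortTraits> over the array at flush time. The following is a toy, non-Skia sketch of that kind of dependency ordering (it assumes the graph is acyclic and does no cycle detection); once such a reorder happens, the back of the array is no longer guaranteed to be the opList still accepting ops, which is the motivation quoted above.

// Toy illustration only -- not SkTTopoSort or Skia's TopoSortTraits mechanism.
#include <vector>

struct Node {
    std::vector<Node*> dependencies;  // nodes that must be ordered before this one
    bool visited = false;
};

// Post-order DFS emits dependencies before the nodes that rely on them.
static void visit(Node* node, std::vector<Node*>* out) {
    if (node->visited) {
        return;
    }
    node->visited = true;
    for (Node* dep : node->dependencies) {
        visit(dep, out);
    }
    out->push_back(node);
}

// Returns the nodes in a valid topological order (assumes a DAG).
std::vector<Node*> topo_sort(const std::vector<Node*>& nodes) {
    std::vector<Node*> sorted;
    sorted.reserve(nodes.size());
    for (Node* node : nodes) {
        visit(node, &sorted);
    }
    return sorted;
}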