Don't pass around renderTargetContexts from onFlush callbacks

The drawing manager was only grabbing an opsTask off these contexts
anyway. Instead, the onFlushResourceProvider can snag the opsTask off
the renderTargetContext itself and populate the drawing manager's list
of onFlushRenderTasks.
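
In outline, the new flow registers each onFlush opsTask with the drawing
manager at creation time instead of returning renderTargetContexts from
preFlush. The toy sketch below only mirrors that shape; every name in it
is an illustrative stand-in rather than a real Skia type, and the real
code additionally deals in sk_sp, task closing, and debug-only proxy
validation.

    // Toy sketch of the new ownership flow. Every type here is a made-up
    // stand-in (not a Skia class); it only mirrors the shape of the change.
    #include <cstdio>
    #include <memory>
    #include <vector>

    struct ToyTask {                              // stand-in for GrRenderTask/GrOpsTask
        void prepare() { std::printf("prepare\n"); }
        void execute() { std::printf("execute\n"); }
    };

    struct ToyRenderTargetContext {               // stand-in for GrRenderTargetContext
        std::shared_ptr<ToyTask> opsTask = std::make_shared<ToyTask>();
    };

    struct ToyDrawingManager {                    // stand-in for GrDrawingManager
        // Replaces the old out-parameter of renderTargetContexts returned by preFlush.
        std::vector<std::shared_ptr<ToyTask>> onFlushRenderTasks;

        void flush() {
            for (auto& task : onFlushRenderTasks) { task->prepare(); }
            for (auto& task : onFlushRenderTasks) { task->execute(); }
            onFlushRenderTasks.clear();
        }
    };

    struct ToyOnFlushResourceProvider {           // stand-in for GrOnFlushResourceProvider
        ToyDrawingManager* drawingMgr;

        // The provider snags the context's task and hands it straight to the
        // drawing manager, so the onFlush callback never returns contexts.
        std::unique_ptr<ToyRenderTargetContext> makeRenderTargetContext() {
            auto rtc = std::make_unique<ToyRenderTargetContext>();
            drawingMgr->onFlushRenderTasks.push_back(rtc->opsTask);
            return rtc;
        }
    };

    int main() {
        ToyDrawingManager drawingMgr;
        ToyOnFlushResourceProvider provider{&drawingMgr};
        auto rtc = provider.makeRenderTargetContext();  // what a preFlush callback would do
        drawingMgr.flush();  // the manager already owns the on-flush task list
        return 0;
    }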

Bug: skia:
Change-Id: I3bdb48176364bbd6e5a34fab437c45ed77d6687f
Reviewed-on: https://skia-review.googlesource.com/c/skia/+/236760
Commit-Queue: Chris Dalton <csmartdalton@google.com>
Reviewed-by: Robert Phillips <robertphillips@google.com>
Chris Dalton 2019-08-23 10:10:36 -06:00 committed by Skia Commit-Bot
parent a0ed070aa0
commit c4b4735a09
19 changed files with 80 additions and 64 deletions

View File

@@ -36,6 +36,12 @@ private:
     }
     bool onExecute(GrOpFlushState*) override;
+#ifdef SK_DEBUG
+    void visitProxies_debugOnly(const GrOp::VisitProxyFunc& fn) const override {
+        fn(fSrcProxy.get(), GrMipMapped::kNo);
+    }
+#endif
     sk_sp<GrSurfaceProxy> fSrcProxy;
     SkIRect fSrcRect;
     SkIPoint fDstPoint;

View File

@@ -281,33 +281,36 @@ GrSemaphoresSubmitted GrDrawingManager::flush(GrSurfaceProxy* proxies[], int num
     if (!fOnFlushCBObjects.empty()) {
         fDAG.gatherIDs(&fFlushingRenderTaskIDs);
-        SkSTArray<4, std::unique_ptr<GrRenderTargetContext>> renderTargetContexts;
         for (GrOnFlushCallbackObject* onFlushCBObject : fOnFlushCBObjects) {
             onFlushCBObject->preFlush(&onFlushProvider, fFlushingRenderTaskIDs.begin(),
-                                      fFlushingRenderTaskIDs.count(), &renderTargetContexts);
-            for (const auto& rtc : renderTargetContexts) {
-                sk_sp<GrOpsTask> onFlushOpsTask = sk_ref_sp(rtc->getOpsTask());
-                if (!onFlushOpsTask) {
-                    continue; // Odd - but not a big deal
-                }
+                                      fFlushingRenderTaskIDs.count());
+        }
+        for (const auto& onFlushRenderTask : fOnFlushRenderTasks) {
+            onFlushRenderTask->makeClosed(*fContext->priv().caps());
 #ifdef SK_DEBUG
-                // OnFlush callbacks are already invoked during flush, and are therefore expected to
-                // handle resource allocation & usage on their own. (No deferred or lazy proxies!)
-                onFlushOpsTask->visitProxies_debugOnly([](GrSurfaceProxy* p, GrMipMapped) {
-                    SkASSERT(!p->asTextureProxy() || !p->asTextureProxy()->texPriv().isDeferred());
-                    SkASSERT(GrSurfaceProxy::LazyState::kNot == p->lazyInstantiationState());
-                });
+            // OnFlush callbacks are invoked during flush, and are therefore expected to handle
+            // resource allocation & usage on their own. (No deferred or lazy proxies!)
+            onFlushRenderTask->visitTargetAndSrcProxies_debugOnly(
+                    [](GrSurfaceProxy* p, GrMipMapped mipMapped) {
+                SkASSERT(!p->asTextureProxy() || !p->asTextureProxy()->texPriv().isDeferred());
+                SkASSERT(GrSurfaceProxy::LazyState::kNot == p->lazyInstantiationState());
+                if (GrMipMapped::kYes == mipMapped) {
+                    // The onFlush callback is responsible for regenerating mips if needed.
+                    SkASSERT(p->asTextureProxy() && !p->asTextureProxy()->mipMapsAreDirty());
+                }
+            });
 #endif
-                onFlushOpsTask->makeClosed(*fContext->priv().caps());
-                onFlushOpsTask->prepare(&flushState);
-                fOnFlushCBOpsTasks.push_back(std::move(onFlushOpsTask));
-            }
-            renderTargetContexts.reset();
+            onFlushRenderTask->prepare(&flushState);
         }
     }
 #if 0
     // Enable this to print out verbose GrOp information
     SkDEBUGCODE(SkDebugf("onFlush renderTasks:"));
     for (const auto& onFlushRenderTask : fOnFlushRenderTasks) {
         SkDEBUGCODE(onFlushRenderTask->dump();)
     }
     SkDEBUGCODE(SkDebugf("Normal renderTasks:"));
     for (int i = 0; i < fRenderTasks.count(); ++i) {
         SkDEBUGCODE(fRenderTasks[i]->dump();)
     }
@@ -434,13 +437,13 @@ bool GrDrawingManager::executeRenderTasks(int startIndex, int stopIndex, GrOpFlu
     // memory pressure.
     static constexpr int kMaxRenderTasksBeforeFlush = 100;
-    // Execute the onFlush op lists first, if any.
-    for (sk_sp<GrOpsTask>& onFlushOpsTask : fOnFlushCBOpsTasks) {
-        if (!onFlushOpsTask->execute(flushState)) {
-            SkDebugf("WARNING: onFlushOpsTask failed to execute.\n");
+    // Execute the onFlush renderTasks first, if any.
+    for (sk_sp<GrRenderTask>& onFlushRenderTask : fOnFlushRenderTasks) {
+        if (!onFlushRenderTask->execute(flushState)) {
+            SkDebugf("WARNING: onFlushRenderTask failed to execute.\n");
         }
-        SkASSERT(onFlushOpsTask->unique());
-        onFlushOpsTask = nullptr;
+        SkASSERT(onFlushRenderTask->unique());
+        onFlushRenderTask = nullptr;
         (*numRenderTasksExecuted)++;
         if (*numRenderTasksExecuted >= kMaxRenderTasksBeforeFlush) {
             flushState->gpu()->finishFlush(nullptr, 0, SkSurface::BackendSurfaceAccess::kNoAccess,
@@ -448,7 +451,7 @@ bool GrDrawingManager::executeRenderTasks(int startIndex, int stopIndex, GrOpFlu
             *numRenderTasksExecuted = 0;
         }
     }
-    fOnFlushCBOpsTasks.reset();
+    fOnFlushRenderTasks.reset();
     // Execute the normal op lists.
     for (int i = startIndex; i < stopIndex; ++i) {

View File

@@ -211,8 +211,8 @@ private:
     GrOpsTask* fActiveOpsTask = nullptr;
     // These are the IDs of the opsTask currently being flushed (in internalFlush)
     SkSTArray<8, uint32_t, true> fFlushingRenderTaskIDs;
-    // These are the new opsTask generated by the onFlush CBs
-    SkSTArray<8, sk_sp<GrOpsTask>> fOnFlushCBOpsTasks;
+    // These are the new renderTasks generated by the onFlush CBs
+    SkSTArray<4, sk_sp<GrRenderTask>> fOnFlushRenderTasks;
     std::unique_ptr<GrTextContext> fTextContext;

View File

@@ -16,9 +16,7 @@
 #include "src/gpu/GrSurfaceProxy.h"
 std::unique_ptr<GrRenderTargetContext> GrOnFlushResourceProvider::makeRenderTargetContext(
-        sk_sp<GrSurfaceProxy> proxy,
-        GrColorType colorType,
-        sk_sp<SkColorSpace> colorSpace,
+        sk_sp<GrSurfaceProxy> proxy, GrColorType colorType, sk_sp<SkColorSpace> colorSpace,
         const SkSurfaceProps* props) {
     // Since this is at flush time and these won't be allocated for us by the GrResourceAllocator
     // we have to manually ensure it is allocated here. The proxy had best have been created
@@ -36,6 +34,9 @@ std::unique_ptr<GrRenderTargetContext> GrOnFlushResourceProvider::makeRenderTarg
     renderTargetContext->discard();
+    // FIXME: http://skbug.com/9357: This breaks if the renderTargetContext splits its opsTask.
+    fDrawingMgr->fOnFlushRenderTasks.push_back(sk_ref_sp(renderTargetContext->getOpsTask()));
     return renderTargetContext;
 }

View File

@@ -35,8 +35,8 @@ public:
      * callback. The callback should return the render target contexts used to render the atlases
      * in 'results'.
      */
-    virtual void preFlush(GrOnFlushResourceProvider*, const uint32_t* opsTaskIDs, int numOpsTaskIDs,
-                          SkTArray<std::unique_ptr<GrRenderTargetContext>>* results) = 0;
+    virtual void preFlush(GrOnFlushResourceProvider*, const uint32_t* opsTaskIDs,
+                          int numOpsTaskIDs) = 0;
     /**
      * Called once flushing is complete and all ops indicated by preFlush have been executed and

View File

@@ -97,7 +97,7 @@ public:
     SkDEBUGCODE(void dump(bool printDependencies) const override;)
     SkDEBUGCODE(int numClips() const override { return fNumClips; })
-    SkDEBUGCODE(void visitProxies_debugOnly(const GrOp::VisitProxyFunc&) const;)
+    SkDEBUGCODE(void visitProxies_debugOnly(const GrOp::VisitProxyFunc&) const override;)
 private:
     bool isNoOp() const {

View File

@@ -519,6 +519,7 @@ private:
     friend class GrAtlasTextBlob; // for access to add[Mesh]DrawOp
     friend class GrClipStackClip; // for access to getOpsTask
+    friend class GrOnFlushResourceProvider; // for access to getOpsTask (http://skbug.com/9357)
     friend class GrDrawingManager; // for ctor
     friend class GrRenderTargetContextPriv;

View File

@@ -57,12 +57,21 @@ public:
      */
     virtual GrOpsTask* asOpsTask() { return nullptr; }
+#ifdef SK_DEBUG
     /*
      * Dump out the GrRenderTask dependency DAG
      */
-    SkDEBUGCODE(virtual void dump(bool printDependencies) const;)
+    virtual void dump(bool printDependencies) const;
-    SkDEBUGCODE(virtual int numClips() const { return 0; })
+    virtual int numClips() const { return 0; }
+    virtual void visitProxies_debugOnly(const GrOp::VisitProxyFunc&) const = 0;
+    void visitTargetAndSrcProxies_debugOnly(const GrOp::VisitProxyFunc& fn) const {
+        this->visitProxies_debugOnly(fn);
+        fn(fTarget.get(), GrMipMapped::kNo);
+    }
+#endif
 protected:
     // In addition to just the GrSurface being allocated, has the stencil buffer been allocated (if

View File

@@ -36,6 +36,11 @@ private:
     bool onExecute(GrOpFlushState*) override;
+#ifdef SK_DEBUG
+    // No non-dst proxies.
+    void visitProxies_debugOnly(const GrOp::VisitProxyFunc& fn) const override {}
+#endif
     const GrTextureResolveFlags fResolveFlags;
 };

View File

@@ -42,6 +42,12 @@ private:
     bool onExecute(GrOpFlushState*) override;
+#ifdef SK_DEBUG
+    void visitProxies_debugOnly(const GrOp::VisitProxyFunc& fn) const override {
+        fn(fSrcProxy.get(), GrMipMapped::kNo);
+    }
+#endif
     sk_sp<GrSurfaceProxy> fSrcProxy;
     SkIRect fSrcRect;
     GrColorType fSurfaceColorType;

View File

@@ -499,8 +499,7 @@
             (int16_t)atlasIBounds.right(), (int16_t)atlasIBounds.bottom()};
 }
-bool GrCCPerFlushResources::finalize(GrOnFlushResourceProvider* onFlushRP,
-                                     SkTArray<std::unique_ptr<GrRenderTargetContext>>* out) {
+bool GrCCPerFlushResources::finalize(GrOnFlushResourceProvider* onFlushRP) {
     SkASSERT(this->isMapped());
     SkASSERT(fNextPathInstanceIdx == fEndPathInstance);
     SkASSERT(fNextCopyInstanceIdx == fEndCopyInstance);
@@ -553,7 +552,6 @@ bool GrCCPerFlushResources::finalize(GrOnFlushResourceProvider* onFlushRP,
             }
             baseCopyInstance = endCopyInstance;
         }
-        out->push_back(std::move(rtc));
     }
     SkASSERT(fCopyPathRanges.count() == copyRangeIdx);
     SkASSERT(fNextCopyInstanceIdx == baseCopyInstance);
@@ -590,7 +588,6 @@ bool GrCCPerFlushResources::finalize(GrOnFlushResourceProvider* onFlushRP,
                     atlas->getStrokeBatchID(), atlas->drawBounds());
         }
         rtc->addDrawOp(GrNoClip(), std::move(op));
-        out->push_back(std::move(rtc));
     }
     SkASSERT(atlas->getEndStencilResolveInstance() >= baseStencilResolveInstance);

View File

@@ -107,8 +107,7 @@ public:
     }
     // Finishes off the GPU buffers and renders the atlas(es).
-    bool finalize(GrOnFlushResourceProvider*,
-                  SkTArray<std::unique_ptr<GrRenderTargetContext>>* out);
+    bool finalize(GrOnFlushResourceProvider*);
     // Accessors used by draw calls, once the resources have been finalized.
     const GrCCFiller& filler() const { SkASSERT(!this->isMapped()); return fFiller; }

View File

@@ -228,10 +228,7 @@ std::unique_ptr<GrFragmentProcessor> GrCoverageCountingPathRenderer::makeClipPro
 }
 void GrCoverageCountingPathRenderer::preFlush(
-        GrOnFlushResourceProvider* onFlushRP,
-        const uint32_t* opsTaskIDs,
-        int numOpsTaskIDs,
-        SkTArray<std::unique_ptr<GrRenderTargetContext>>* out) {
+        GrOnFlushResourceProvider* onFlushRP, const uint32_t* opsTaskIDs, int numOpsTaskIDs) {
     using DoCopiesToA8Coverage = GrCCDrawPathsOp::DoCopiesToA8Coverage;
     SkASSERT(!fFlushing);
     SkASSERT(fFlushingPaths.empty());
@@ -308,7 +305,7 @@ void GrCoverageCountingPathRenderer::preFlush(
     }
     // Allocate resources and then render the atlas(es).
-    if (!resources->finalize(onFlushRP, out)) {
+    if (!resources->finalize(onFlushRP)) {
         return;
     }

View File

@@ -65,8 +65,8 @@ public:
                                  const GrCaps&);
     // GrOnFlushCallbackObject overrides.
-    void preFlush(GrOnFlushResourceProvider*, const uint32_t* opsTaskIDs, int numOpsTaskIDs,
-                  SkTArray<std::unique_ptr<GrRenderTargetContext>>* out) override;
+    void preFlush(GrOnFlushResourceProvider*, const uint32_t* opsTaskIDs,
+                  int numOpsTaskIDs) override;
     void postFlush(GrDeferredUploadToken, const uint32_t* opsTaskIDs, int numOpsTaskIDs) override;
     void purgeCacheEntriesOlderThan(GrProxyProvider*, const GrStdSteadyClock::time_point&);

View File

@@ -33,10 +33,9 @@ public:
     // the list of active OnFlushBackkbackObjects in an freeGpuResources call (i.e., we accept the
     // default retainOnFreeGpuResources implementation).
-    void preFlush(GrOnFlushResourceProvider* onFlushResourceProvider, const uint32_t*, int,
-                  SkTArray<std::unique_ptr<GrRenderTargetContext>>*) override {
+    void preFlush(GrOnFlushResourceProvider* onFlushRP, const uint32_t*, int) override {
         if (fAtlas) {
-            fAtlas->instantiate(onFlushResourceProvider);
+            fAtlas->instantiate(onFlushRP);
         }
     }

View File

@@ -88,11 +88,10 @@ public:
     // GrOnFlushCallbackObject overrides
-    void preFlush(GrOnFlushResourceProvider* onFlushResourceProvider, const uint32_t*, int,
-                  SkTArray<std::unique_ptr<GrRenderTargetContext>>*) override {
+    void preFlush(GrOnFlushResourceProvider* onFlushRP, const uint32_t*, int) override {
         for (int i = 0; i < kMaskFormatCount; ++i) {
             if (fAtlases[i]) {
-                fAtlases[i]->instantiate(onFlushResourceProvider);
+                fAtlases[i]->instantiate(onFlushRP);
             }
         }
     }

View File

@@ -321,8 +321,8 @@ protected:
     int lastCopyAtlasID() const { return fLastCopyAtlasID; }
    int lastRenderedAtlasID() const { return fLastRenderedAtlasID; }
-    void preFlush(GrOnFlushResourceProvider*, const uint32_t* opsTaskIDs, int numOpsTaskIDs,
-                  SkTArray<std::unique_ptr<GrRenderTargetContext>>* out) override {
+    void preFlush(GrOnFlushResourceProvider*, const uint32_t* opsTaskIDs,
+                  int numOpsTaskIDs) override {
         fLastRenderedAtlasID = fLastCopyAtlasID = 0;
         const GrCCPerFlushResources* resources = fCCPR->testingOnly_getCurrentFlushResources();

View File

@@ -42,8 +42,7 @@ public:
         REPORTER_ASSERT(fReporter, fHasClipTexture);
     }
-    void preFlush(GrOnFlushResourceProvider*, const uint32_t*, int,
-                  SkTArray<std::unique_ptr<GrRenderTargetContext>>*) override {
+    void preFlush(GrOnFlushResourceProvider*, const uint32_t*, int) override {
         REPORTER_ASSERT(fReporter, !fHasOpTexture);
         REPORTER_ASSERT(fReporter, !fHasClipTexture);
     }

View File

@@ -335,10 +335,7 @@
      */
     void preFlush(GrOnFlushResourceProvider* resourceProvider,
                   const uint32_t* opsTaskIDs,
-                  int numOpsTaskIDs,
-                  SkTArray<std::unique_ptr<GrRenderTargetContext>>* results) override {
-        SkASSERT(!results->count());
+                  int numOpsTaskIDs) override {
         // Until MDB is landed we will most-likely only have one opsTask.
         SkTDArray<LinkedListHeader*> lists;
         for (int i = 0; i < numOpsTaskIDs; ++i) {
@@ -397,8 +394,6 @@
             // We've updated all these ops and we certainly don't want to process them again
             this->clearOpsFor(lists[i]);
         }
-        results->push_back(std::move(rtc));
     }
 private: