Remove intermediary RenderTaskDAG class

This thing is a leaky abstraction ("rawRemove" fn) and it doesn't
actually do much encapsulation. Almost all the code is just views into
the underlying array and wrappers that could be replaced by
std::algorithm routines (and in this CL, are). It's more trouble than
it's worth – maybe in the past that wasn't so.
Change-Id: I81f3aa6874525f8f2ed1315f50e084030e34865e
Reviewed-on: https://skia-review.googlesource.com/c/skia/+/332718
Commit-Queue: Adlai Holler <adlai@google.com>
Reviewed-by: Robert Phillips <robertphillips@google.com>
This commit is contained in:
Adlai Holler 2020-11-11 13:54:37 -05:00 committed by Skia Commit-Bot
parent 80bfda87c8
commit 33432272d6
2 changed files with 127 additions and 179 deletions

View File

@@ -44,100 +44,6 @@
#include "src/gpu/text/GrSDFTOptions.h"
#include "src/image/SkSurface_Gpu.h"
void GrDrawingManager::RenderTaskDAG::gatherIDs(SkSTArray<8, uint32_t, true>* idArray) const {
idArray->reset(fRenderTasks.count());
for (int i = 0; i < fRenderTasks.count(); ++i) {
if (fRenderTasks[i]) {
(*idArray)[i] = fRenderTasks[i]->uniqueID();
}
}
}
void GrDrawingManager::RenderTaskDAG::reset() {
fRenderTasks.reset();
}
void GrDrawingManager::RenderTaskDAG::rawRemoveRenderTasks(int startIndex, int stopIndex) {
for (int i = startIndex; i < stopIndex; ++i) {
fRenderTasks[i] = nullptr;
}
}
bool GrDrawingManager::RenderTaskDAG::isUsed(GrSurfaceProxy* proxy) const {
for (const auto& task : fRenderTasks) {
if (task && task->isUsed(proxy)) {
return true;
}
}
return false;
}
GrRenderTask* GrDrawingManager::RenderTaskDAG::add(sk_sp<GrRenderTask> renderTask) {
if (renderTask) {
return fRenderTasks.emplace_back(std::move(renderTask)).get();
}
return nullptr;
}
GrRenderTask* GrDrawingManager::RenderTaskDAG::addBeforeLast(sk_sp<GrRenderTask> renderTask) {
SkASSERT(!fRenderTasks.empty());
if (renderTask) {
// Release 'fRenderTasks.back()' and grab the raw pointer, in case the SkTArray grows
// and reallocates during emplace_back.
fRenderTasks.emplace_back(fRenderTasks.back().release());
return (fRenderTasks[fRenderTasks.count() - 2] = std::move(renderTask)).get();
}
return nullptr;
}
void GrDrawingManager::RenderTaskDAG::add(const SkTArray<sk_sp<GrRenderTask>>& renderTasks) {
#ifdef SK_DEBUG
for (auto& renderTask : renderTasks) {
SkASSERT(renderTask->unique());
}
#endif
fRenderTasks.push_back_n(renderTasks.count(), renderTasks.begin());
}
void GrDrawingManager::RenderTaskDAG::swap(SkTArray<sk_sp<GrRenderTask>>* renderTasks) {
SkASSERT(renderTasks->empty());
renderTasks->swap(fRenderTasks);
}
void GrDrawingManager::RenderTaskDAG::prepForFlush() {
if (!SkTTopoSort<GrRenderTask, GrRenderTask::TopoSortTraits>(&fRenderTasks)) {
SkDEBUGFAIL("Render task topo sort failed.");
return;
}
#ifdef SK_DEBUG
// This block checks for any unnecessary splits in the opsTasks. If two sequential opsTasks
// share the same backing GrSurfaceProxy it means the opsTask was artificially split.
if (fRenderTasks.count()) {
GrOpsTask* prevOpsTask = fRenderTasks[0]->asOpsTask();
for (int i = 1; i < fRenderTasks.count(); ++i) {
GrOpsTask* curOpsTask = fRenderTasks[i]->asOpsTask();
if (prevOpsTask && curOpsTask) {
SkASSERT(prevOpsTask->target(0).proxy() != curOpsTask->target(0).proxy());
}
prevOpsTask = curOpsTask;
}
}
#endif
}
void GrDrawingManager::RenderTaskDAG::closeAll(const GrCaps* caps) {
for (auto& task : fRenderTasks) {
if (task) {
task->makeClosed(*caps);
}
}
}
///////////////////////////////////////////////////////////////////////////////////////////////////
GrDrawingManager::GrDrawingManager(GrRecordingContext* context,
const GrPathRendererChain::Options& optionsForPathRendererChain,
@@ -150,8 +56,8 @@ GrDrawingManager::GrDrawingManager(GrRecordingContext* context,
, fReduceOpsTaskSplitting(reduceOpsTaskSplitting) { }
GrDrawingManager::~GrDrawingManager() {
fDAG.closeAll(fContext->priv().caps());
this->removeRenderTasks(0, fDAG.numRenderTasks());
this->closeAllTasks();
this->removeRenderTasks(0, fDAG.count());
}
bool GrDrawingManager::wasAbandoned() const {
@@ -195,7 +101,10 @@ bool GrDrawingManager::flush(
if (!proxies.empty() && !info.fNumSemaphores && !info.fFinishedProc &&
access == SkSurface::BackendSurfaceAccess::kNoAccess && !newState) {
bool allUnused = std::all_of(proxies.begin(), proxies.end(), [&](GrSurfaceProxy* proxy) {
return !fDAG.isUsed(proxy) && !this->isDDLTarget(proxy);
bool used = std::any_of(fDAG.begin(), fDAG.end(), [&](auto& task) {
return task && task->isUsed(proxy);
});
return !used && !this->isDDLTarget(proxy);
});
if (allUnused) {
if (info.fSubmittedProc) {
@@ -222,10 +131,10 @@ bool GrDrawingManager::flush(
// to flush mid-draw. In that case, the SkGpuDevice's opsTasks won't be closed but need to be
// flushed anyway. Closing such opsTasks here will mean new ones will be created to replace them
// if the SkGpuDevice(s) write to them again.
fDAG.closeAll(fContext->priv().caps());
this->closeAllTasks();
fActiveOpsTask = nullptr;
fDAG.prepForFlush();
this->sortTasks();
if (!fCpuBufferCache) {
// We cache more buffers when the backend is using client side arrays. Otherwise, we
// expect each pool will use a CPU buffer as a staging buffer before uploading to a GPU
@@ -240,7 +149,12 @@ bool GrDrawingManager::flush(
// Prepare any onFlush op lists (e.g. atlases).
if (!fOnFlushCBObjects.empty()) {
fDAG.gatherIDs(&fFlushingRenderTaskIDs);
fFlushingRenderTaskIDs.reset(fDAG.count());
for (int i = 0; i < fDAG.count(); i++) {
if (fDAG[i]) {
fFlushingRenderTaskIDs[i] = fDAG[i]->uniqueID();
}
}
for (GrOnFlushCallbackObject* onFlushCBObject : fOnFlushCBObjects) {
onFlushCBObject->preFlush(&onFlushProvider, fFlushingRenderTaskIDs);
@@ -274,9 +188,9 @@ bool GrDrawingManager::flush(
for (const auto& onFlushRenderTask : fOnFlushRenderTasks) {
SkDEBUGCODE(onFlushRenderTask->dump(/* printDependencies */ true);)
}
SkDEBUGCODE(SkDebugf("Normal renderTasks (%d):\n", fDAG.numRenderTasks()));
for (int i = 0; i < fDAG.numRenderTasks(); ++i) {
SkDEBUGCODE(fDAG.renderTask(i)->dump(/* printDependencies */ true);)
SkDEBUGCODE(SkDebugf("Normal renderTasks (%d):\n", fDAG.count()));
for (const auto& task : fDAG) {
SkDEBUGCODE(task->dump(/* printDependencies */ true);)
}
#endif
@@ -284,10 +198,10 @@ bool GrDrawingManager::flush(
bool flushed = false;
{
GrResourceAllocator alloc(resourceProvider SkDEBUGCODE(, fDAG.numRenderTasks()));
for (int i = 0; i < fDAG.numRenderTasks(); ++i) {
if (fDAG.renderTask(i)) {
fDAG.renderTask(i)->gatherProxyIntervals(&alloc);
GrResourceAllocator alloc(resourceProvider SkDEBUGCODE(, fDAG.count()));
for (int i = 0; i < fDAG.count(); ++i) {
if (fDAG[i]) {
fDAG[i]->gatherProxyIntervals(&alloc);
}
alloc.markEndOfOpsTask(i);
}
@@ -298,7 +212,7 @@ bool GrDrawingManager::flush(
while (alloc.assign(&startIndex, &stopIndex, &error)) {
if (GrResourceAllocator::AssignError::kFailedProxyInstantiation == error) {
for (int i = startIndex; i < stopIndex; ++i) {
GrRenderTask* renderTask = fDAG.renderTask(i);
GrRenderTask* renderTask = fDAG[i].get();
if (!renderTask) {
continue;
}
@@ -321,10 +235,10 @@ bool GrDrawingManager::flush(
}
#ifdef SK_DEBUG
for (int i = 0; i < fDAG.numRenderTasks(); ++i) {
for (const auto& task : fDAG) {
// All render tasks should have been cleared out by now we only reset the array below to
// reclaim storage.
SkASSERT(!fDAG.renderTask(i));
SkASSERT(!task);
}
#endif
fLastRenderTasks.reset();
@@ -375,14 +289,14 @@ bool GrDrawingManager::submitToGpu(bool syncToCpu) {
bool GrDrawingManager::executeRenderTasks(int startIndex, int stopIndex, GrOpFlushState* flushState,
int* numRenderTasksExecuted) {
SkASSERT(startIndex <= stopIndex && stopIndex <= fDAG.numRenderTasks());
SkASSERT(startIndex <= stopIndex && stopIndex <= fDAG.count());
#if GR_FLUSH_TIME_OP_SPEW
SkDebugf("Flushing opsTask: %d to %d out of [%d, %d]\n",
startIndex, stopIndex, 0, fDAG.numRenderTasks());
startIndex, stopIndex, 0, fDAG.count());
for (int i = startIndex; i < stopIndex; ++i) {
if (fDAG.renderTask(i)) {
fDAG.renderTask(i)->dump(true);
if (fDAG[i]) {
fDAG[i]->dump(true);
}
}
#endif
@@ -390,7 +304,7 @@ bool GrDrawingManager::executeRenderTasks(int startIndex, int stopIndex, GrOpFlu
bool anyRenderTasksExecuted = false;
for (int i = startIndex; i < stopIndex; ++i) {
GrRenderTask* renderTask = fDAG.renderTask(i);
GrRenderTask* renderTask = fDAG[i].get();
if (!renderTask || !renderTask->isInstantiated()) {
continue;
}
@@ -428,7 +342,7 @@ bool GrDrawingManager::executeRenderTasks(int startIndex, int stopIndex, GrOpFlu
// Execute the normal op lists.
for (int i = startIndex; i < stopIndex; ++i) {
GrRenderTask* renderTask = fDAG.renderTask(i);
GrRenderTask* renderTask = fDAG[i].get();
if (!renderTask || !renderTask->isInstantiated()) {
continue;
}
@@ -458,7 +372,7 @@ bool GrDrawingManager::executeRenderTasks(int startIndex, int stopIndex, GrOpFlu
void GrDrawingManager::removeRenderTasks(int startIndex, int stopIndex) {
for (int i = startIndex; i < stopIndex; ++i) {
GrRenderTask* task = fDAG.renderTask(i);
GrRenderTask* task = fDAG[i].get();
if (!task) {
continue;
}
@@ -467,8 +381,74 @@ void GrDrawingManager::removeRenderTasks(int startIndex, int stopIndex) {
task->endFlush(this);
}
task->disown(this);
// This doesn't cleanup any referring pointers (e.g. dependency pointers in the DAG).
// It works right now bc this is only called after the topological sort is complete
// (so the dangling pointers aren't used).
fDAG[i] = nullptr;
}
fDAG.rawRemoveRenderTasks(startIndex, stopIndex);
}
void GrDrawingManager::sortTasks() {
if (!SkTTopoSort<GrRenderTask, GrRenderTask::TopoSortTraits>(&fDAG)) {
SkDEBUGFAIL("Render task topo sort failed.");
return;
}
#ifdef SK_DEBUG
// This block checks for any unnecessary splits in the opsTasks. If two sequential opsTasks
// share the same backing GrSurfaceProxy it means the opsTask was artificially split.
if (!fDAG.empty()) {
GrOpsTask* prevOpsTask = fDAG[0]->asOpsTask();
for (int i = 1; i < fDAG.count(); ++i) {
GrOpsTask* curOpsTask = fDAG[i]->asOpsTask();
if (prevOpsTask && curOpsTask) {
SkASSERT(prevOpsTask->target(0).proxy() != curOpsTask->target(0).proxy());
}
prevOpsTask = curOpsTask;
}
}
#endif
}
void GrDrawingManager::closeAllTasks() {
const GrCaps& caps = *fContext->priv().caps();
for (auto& task : fDAG) {
if (task) {
task->makeClosed(caps);
}
}
}
GrRenderTask* GrDrawingManager::insertTaskBeforeLast(sk_sp<GrRenderTask> task) {
SkASSERT(!fDAG.empty());
if (!task) {
return nullptr;
}
// Release 'fDAG.back()' and grab the raw pointer, in case the SkTArray grows
// and reallocates during emplace_back.
// TODO: Either use std::vector that can do this for us, or use SkSTArray to get the
// perf win.
fDAG.emplace_back(fDAG.back().release());
return (fDAG[fDAG.count() - 2] = std::move(task)).get();
}
GrRenderTask* GrDrawingManager::appendTask(sk_sp<GrRenderTask> task) {
if (!task) {
return nullptr;
}
return fDAG.push_back(std::move(task)).get();
}
void GrDrawingManager::appendTasks(SkSpan<const sk_sp<GrRenderTask>> tasks) {
#ifdef SK_DEBUG
for (const auto& task : tasks) {
SkASSERT(task && task->unique());
}
#endif
fDAG.push_back_n(tasks.count(), tasks.begin());
}
static void resolve_and_mipmap(GrGpu* gpu, GrSurfaceProxy* proxy) {
@@ -582,11 +562,11 @@ void GrDrawingManager::moveRenderTasksToDDL(SkDeferredDisplayList* ddl) {
SkDEBUGCODE(this->validate());
// no renderTask should receive a new command after this
fDAG.closeAll(fContext->priv().caps());
this->closeAllTasks();
fActiveOpsTask = nullptr;
fDAG.swap(&ddl->fRenderTasks);
SkASSERT(!fDAG.numRenderTasks());
fDAG.swap(ddl->fRenderTasks);
SkASSERT(fDAG.empty());
for (auto& renderTask : ddl->fRenderTasks) {
renderTask->disown(this);
@@ -641,10 +621,10 @@ void GrDrawingManager::copyRenderTasksFromDDL(sk_sp<const SkDeferredDisplayList>
ccpr->mergePendingPaths(ddl->fPendingPaths);
}
fDAG.add(ddl->fRenderTasks);
this->appendTasks(ddl->fRenderTasks);
// Add a task to unref the DDL after flush.
GrRenderTask* unrefTask = fDAG.add(sk_make_sp<GrUnrefDDLTask>(std::move(ddl)));
GrRenderTask* unrefTask = this->appendTask(sk_make_sp<GrUnrefDDLTask>(std::move(ddl)));
unrefTask->makeClosed(*fContext->priv().caps());
SkDEBUGCODE(this->validate());
@@ -658,21 +638,21 @@ void GrDrawingManager::validate() const {
if (fActiveOpsTask) {
SkASSERT(!fDAG.empty());
SkASSERT(!fActiveOpsTask->isClosed());
SkASSERT(fActiveOpsTask == fDAG.back());
SkASSERT(fActiveOpsTask == fDAG.back().get());
}
for (int i = 0; i < fDAG.numRenderTasks(); ++i) {
if (fActiveOpsTask != fDAG.renderTask(i)) {
for (int i = 0; i < fDAG.count(); ++i) {
if (fActiveOpsTask != fDAG[i].get()) {
// The resolveTask associated with the activeTask remains open for as long as the
// activeTask does.
bool isActiveResolveTask =
fActiveOpsTask && fActiveOpsTask->fTextureResolveTask == fDAG.renderTask(i);
SkASSERT(isActiveResolveTask || fDAG.renderTask(i)->isClosed());
fActiveOpsTask && fActiveOpsTask->fTextureResolveTask == fDAG[i].get();
SkASSERT(isActiveResolveTask || fDAG[i]->isClosed());
}
}
if (!fDAG.empty() && !fDAG.back()->isClosed()) {
SkASSERT(fActiveOpsTask == fDAG.back());
SkASSERT(fActiveOpsTask == fDAG.back().get());
}
}
}
@@ -712,7 +692,7 @@ sk_sp<GrOpsTask> GrDrawingManager::newOpsTask(GrSurfaceProxyView surfaceView,
SkASSERT(this->getLastRenderTask(proxy) == opsTask.get());
if (managedOpsTask) {
fDAG.add(opsTask);
this->appendTask(opsTask);
if (!fReduceOpsTaskSplitting) {
fActiveOpsTask = opsTask.get();
@@ -733,8 +713,8 @@ GrTextureResolveRenderTask* GrDrawingManager::newTextureResolveRenderTask(const
// Add the new textureResolveTask before the fActiveOpsTask (if not in
// sorting/opsTask-splitting-reduction mode) because it will depend upon this resolve task.
// NOTE: Putting it here will also reduce the amount of work required by the topological sort.
return static_cast<GrTextureResolveRenderTask*>(fDAG.addBeforeLast(
sk_make_sp<GrTextureResolveRenderTask>()));
GrRenderTask* task = this->insertTaskBeforeLast(sk_make_sp<GrTextureResolveRenderTask>());
return static_cast<GrTextureResolveRenderTask*>(task);
}
void GrDrawingManager::newWaitRenderTask(sk_sp<GrSurfaceProxy> proxy,
@@ -775,11 +755,11 @@ void GrDrawingManager::newWaitRenderTask(sk_sp<GrSurfaceProxy> proxy,
}
this->setLastRenderTask(proxy.get(), waitTask.get());
}
fDAG.add(waitTask);
this->appendTask(waitTask);
} else {
if (fActiveOpsTask && (fActiveOpsTask->target(0).proxy() == proxy.get())) {
SkASSERT(this->getLastRenderTask(proxy.get()) == fActiveOpsTask);
fDAG.addBeforeLast(waitTask);
this->insertTaskBeforeLast(waitTask);
// In this case we keep the current renderTask open but just insert the new waitTask
// before it in the list. The waitTask will never need to trigger any resolves or mip
// map generation which is the main advantage of going through the proxy version.
@@ -805,7 +785,7 @@ void GrDrawingManager::newWaitRenderTask(sk_sp<GrSurfaceProxy> proxy,
}
this->setLastRenderTask(proxy.get(), waitTask.get());
this->closeRenderTasksForNewRenderTask(proxy.get());
fDAG.add(waitTask);
this->appendTask(waitTask);
}
}
waitTask->makeClosed(caps);
@@ -824,7 +804,7 @@ void GrDrawingManager::newTransferFromRenderTask(sk_sp<GrSurfaceProxy> srcProxy,
// This copies from srcProxy to dstBuffer so it doesn't have a real target.
this->closeRenderTasksForNewRenderTask(nullptr);
GrRenderTask* task = fDAG.add(sk_make_sp<GrTransferFromRenderTask>(
GrRenderTask* task = this->appendTask(sk_make_sp<GrTransferFromRenderTask>(
srcProxy, srcRect, surfaceColorType, dstColorType,
std::move(dstBuffer), dstOffset));
@@ -855,8 +835,8 @@ bool GrDrawingManager::newCopyRenderTask(GrSurfaceProxyView srcView,
GrSurfaceProxy* srcProxy = srcView.proxy();
GrRenderTask* task =
fDAG.add(GrCopyRenderTask::Make(this, std::move(srcView), srcRect, std::move(dstView),
dstPoint, &caps));
this->appendTask(GrCopyRenderTask::Make(this, std::move(srcView), srcRect,
std::move(dstView), dstPoint, &caps));
if (!task) {
return false;
}

View File

@@ -120,46 +120,6 @@ public:
void copyRenderTasksFromDDL(sk_sp<const SkDeferredDisplayList>, GrRenderTargetProxy* newDest);
private:
// This class encapsulates maintenance and manipulation of the drawing manager's DAG of
// renderTasks.
class RenderTaskDAG {
public:
// This call will topologically sort the GrRenderTasks.
void prepForFlush();
void closeAll(const GrCaps* caps);
void gatherIDs(SkSTArray<8, uint32_t, true>* idArray) const;
void reset();
// This call forceably removes GrRenderTasks from the DAG. It is problematic bc it
// just removes the GrRenderTasks but doesn't cleanup any referring pointers (i.e.
// dependency pointers in the DAG). It works right now bc it is only called after the
// topological sort is complete (so the dangling pointers aren't used).
void rawRemoveRenderTasks(int startIndex, int stopIndex);
bool empty() const { return fRenderTasks.empty(); }
int numRenderTasks() const { return fRenderTasks.count(); }
bool isUsed(GrSurfaceProxy*) const;
GrRenderTask* renderTask(int index) { return fRenderTasks[index].get(); }
const GrRenderTask* renderTask(int index) const { return fRenderTasks[index].get(); }
GrRenderTask* back() { return fRenderTasks.back().get(); }
const GrRenderTask* back() const { return fRenderTasks.back().get(); }
GrRenderTask* add(sk_sp<GrRenderTask>);
GrRenderTask* addBeforeLast(sk_sp<GrRenderTask>);
void add(const SkTArray<sk_sp<GrRenderTask>>&);
void swap(SkTArray<sk_sp<GrRenderTask>>* renderTasks);
private:
SkTArray<sk_sp<GrRenderTask>> fRenderTasks;
};
GrDrawingManager(GrRecordingContext*,
const GrPathRendererChain::Options&,
bool reduceOpsTaskSplitting);
@@ -177,6 +137,14 @@ private:
void removeRenderTasks(int startIndex, int stopIndex);
void sortTasks();
void closeAllTasks();
GrRenderTask* appendTask(sk_sp<GrRenderTask>);
void appendTasks(SkSpan<const sk_sp<GrRenderTask>>);
GrRenderTask* insertTaskBeforeLast(sk_sp<GrRenderTask>);
bool flush(SkSpan<GrSurfaceProxy*> proxies,
SkSurface::BackendSurfaceAccess access,
const GrFlushInfo&,
@@ -202,7 +170,7 @@ private:
// flushes.
sk_sp<GrBufferAllocPool::CpuBufferCache> fCpuBufferCache;
RenderTaskDAG fDAG;
SkTArray<sk_sp<GrRenderTask>> fDAG;
GrOpsTask* fActiveOpsTask = nullptr;
// These are the IDs of the opsTask currently being flushed (in internalFlush)
SkSTArray<8, uint32_t, true> fFlushingRenderTaskIDs;