Elevate the target list of Ganesh tasks to base class

A follow-up CL will use this information (the full list of targets of a
GrRenderTask) to enable faster storage of the lastRenderTask association
for a given surface proxy in a given drawing manager.
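
In essence, the single fTargetView each task used to own becomes a small array of
targets on the GrRenderTask base class, and addTarget() both records the view and
updates the drawing manager's lastRenderTask bookkeeping. The following is a
condensed sketch assembled from the GrRenderTask hunks in this diff, not the
complete class; it assumes Skia's GrSurfaceProxyView, GrDrawingManager, SkSTArray,
and SkRefCnt types are in scope, and inlines addTarget() for brevity (the diff
defines it in the .cpp file).

    class GrRenderTask : public SkRefCnt {
    public:
        int numTargets() const { return fTargets.count(); }
        const GrSurfaceProxyView& target(int i) const { return fTargets[i]; }

    protected:
        // Add a target surface proxy to the list of targets for this task.
        // This also informs the drawing manager to update the lastRenderTask
        // association.
        void addTarget(GrDrawingManager* drawingMgr, GrSurfaceProxyView view) {
            SkASSERT(view);
            drawingMgr->setLastRenderTask(view.proxy(), this);
            fTargets.push_back(std::move(view));
        }

        SkSTArray<1, GrSurfaceProxyView> fTargets;
    };

Derived tasks (GrOpsTask, GrCopyRenderTask, GrTextureResolveRenderTask) now call
addTarget() from their constructors or from addProxy() instead of passing a single
view to the removed GrRenderTask(GrSurfaceProxyView) constructor, and
GrRenderTask::disown() walks fTargets to clear any lastRenderTask pointers back to
the task. GrWaitRenderTask keeps its proxy in a separate fWaitedOn field because it
does not write to the surface and so should not participate in lastRenderTask
tracking.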

Bug: skia:10320
Change-Id: I3eb3276b483a7f09481774896a024172b73a4c84
Reviewed-on: https://skia-review.googlesource.com/c/skia/+/296729
Reviewed-by: Robert Phillips <robertphillips@google.com>
Commit-Queue: Adlai Holler <adlai@google.com>
Adlai Holler 2020-06-16 14:30:08 -04:00 committed by Skia Commit-Bot
parent ec3508aaef
commit 33d569ee18
11 changed files with 107 additions and 106 deletions

View File

@ -51,26 +51,26 @@ GrCopyRenderTask::GrCopyRenderTask(GrDrawingManager* drawingMgr,
const SkIRect& srcRect,
GrSurfaceProxyView dstView,
const SkIPoint& dstPoint)
: GrRenderTask(std::move(dstView))
: GrRenderTask()
, fSrcView(std::move(srcView))
, fSrcRect(srcRect)
, fDstPoint(dstPoint) {
drawingMgr->setLastRenderTask(fTargetView.proxy(), this);
this->addTarget(drawingMgr, dstView);
}
void GrCopyRenderTask::gatherProxyIntervals(GrResourceAllocator* alloc) const {
// This renderTask doesn't have "normal" ops. In this case we still need to add an interval (so
// fEndOfOpsTaskOpIndices will remain in sync), so we create a fake op# to capture the fact that
// we read fSrcView and copy to fTargetView.
// we read fSrcView and copy to target view.
alloc->addInterval(fSrcView.proxy(), alloc->curOp(), alloc->curOp(),
GrResourceAllocator::ActualUse::kYes);
alloc->addInterval(fTargetView.proxy(), alloc->curOp(), alloc->curOp(),
alloc->addInterval(this->target(0).proxy(), alloc->curOp(), alloc->curOp(),
GrResourceAllocator::ActualUse::kYes);
alloc->incOps();
}
bool GrCopyRenderTask::onExecute(GrOpFlushState* flushState) {
GrSurfaceProxy* dstProxy = fTargetView.proxy();
GrSurfaceProxy* dstProxy = this->target(0).proxy();
GrSurfaceProxy* srcProxy = fSrcView.proxy();
if (!srcProxy->isInstantiated() || !dstProxy->isInstantiated()) {
return false;

View File

@ -27,8 +27,6 @@ private:
const SkIPoint& dstPoint);
bool onIsUsed(GrSurfaceProxy* proxy) const override {
// This case should be handled by GrRenderTask.
SkASSERT(proxy != fTargetView.proxy());
return proxy == fSrcView.proxy();
}
// If instantiation failed, at flush time we simply will skip doing the copy.

View File

@ -65,8 +65,8 @@ void GrDrawingManager::RenderTaskDAG::removeRenderTasks(int startIndex, int stop
}
bool GrDrawingManager::RenderTaskDAG::isUsed(GrSurfaceProxy* proxy) const {
for (int i = 0; i < fRenderTasks.count(); ++i) {
if (fRenderTasks[i] && fRenderTasks[i]->isUsed(proxy)) {
for (const auto& task : fRenderTasks) {
if (task && task->isUsed(proxy)) {
return true;
}
}
@ -123,7 +123,7 @@ void GrDrawingManager::RenderTaskDAG::prepForFlush() {
GrOpsTask* curOpsTask = fRenderTasks[i]->asOpsTask();
if (prevOpsTask && curOpsTask) {
SkASSERT(prevOpsTask->fTargetView != curOpsTask->fTargetView);
SkASSERT(prevOpsTask->target(0).proxy() != curOpsTask->target(0).proxy());
}
prevOpsTask = curOpsTask;
@ -836,7 +836,7 @@ void GrDrawingManager::newWaitRenderTask(sk_sp<GrSurfaceProxy> proxy,
}
fDAG.add(waitTask);
} else {
if (fActiveOpsTask && (fActiveOpsTask->fTargetView.proxy() == proxy.get())) {
if (fActiveOpsTask && (fActiveOpsTask->target(0).proxy() == proxy.get())) {
SkASSERT(this->getLastRenderTask(proxy.get()) == fActiveOpsTask);
fDAG.addBeforeLast(waitTask);
// In this case we keep the current renderTask open but just insert the new waitTask

View File

@ -360,11 +360,11 @@ inline void GrOpsTask::OpChain::validate() const {
GrOpsTask::GrOpsTask(GrDrawingManager* drawingMgr, GrRecordingContext::Arenas arenas,
GrSurfaceProxyView view,
GrAuditTrail* auditTrail)
: GrRenderTask(std::move(view))
: GrRenderTask()
, fArenas(arenas)
, fAuditTrail(auditTrail)
SkDEBUGCODE(, fNumClips(0)) {
drawingMgr->setLastRenderTask(fTargetView.proxy(), this);
this->addTarget(drawingMgr, std::move(view));
}
void GrOpsTask::deleteOps() {
@ -393,12 +393,6 @@ void GrOpsTask::endFlush(GrDrawingManager* drawingMgr) {
this->deleteOps();
fClipAllocator.reset();
GrSurfaceProxy* proxy = fTargetView.proxy();
if (proxy && this == drawingMgr->getLastRenderTask(proxy)) {
drawingMgr->setLastRenderTask(proxy, nullptr);
}
fTargetView.reset();
fDeferredProxies.reset();
fSampledProxies.reset();
fAuditTrail = nullptr;
@ -422,7 +416,7 @@ void GrOpsTask::onPrePrepare(GrRecordingContext* context) {
for (const auto& chain : fOpChains) {
if (chain.shouldExecute()) {
chain.head()->prePrepare(context,
&fTargetView,
&fTargets[0],
chain.appliedClip(),
chain.dstProxyView());
}
@ -430,7 +424,7 @@ void GrOpsTask::onPrePrepare(GrRecordingContext* context) {
}
void GrOpsTask::onPrepare(GrOpFlushState* flushState) {
SkASSERT(fTargetView.proxy()->peekRenderTarget());
SkASSERT(this->target(0).proxy()->peekRenderTarget());
SkASSERT(this->isClosed());
#ifdef SK_BUILD_FOR_ANDROID_FRAMEWORK
TRACE_EVENT0("skia.gpu", TRACE_FUNC);
@ -451,7 +445,7 @@ void GrOpsTask::onPrepare(GrOpFlushState* flushState) {
TRACE_EVENT0("skia.gpu", chain.head()->name());
#endif
GrOpFlushState::OpArgs opArgs(chain.head(),
&fTargetView,
&fTargets[0],
chain.appliedClip(),
chain.dstProxyView());
@ -459,7 +453,7 @@ void GrOpsTask::onPrepare(GrOpFlushState* flushState) {
// Temporary debugging helper: for debugging prePrepare w/o going through DDLs
// Delete once most of the GrOps have an onPrePrepare.
// chain.head()->prePrepare(flushState->gpu()->getContext(), &fTargetView,
// chain.head()->prePrepare(flushState->gpu()->getContext(), &this->target(0),
// chain.appliedClip());
// GrOp::prePrepare may or may not have been called at this point
@ -507,8 +501,8 @@ bool GrOpsTask::onExecute(GrOpFlushState* flushState) {
return false;
}
SkASSERT(fTargetView.proxy());
GrRenderTargetProxy* proxy = fTargetView.proxy()->asRenderTargetProxy();
SkASSERT(this->numTargets() == 1);
GrRenderTargetProxy* proxy = this->target(0).proxy()->asRenderTargetProxy();
SkASSERT(proxy);
TRACE_EVENT0("skia.gpu", TRACE_FUNC);
@ -575,7 +569,7 @@ bool GrOpsTask::onExecute(GrOpFlushState* flushState) {
: GrStoreOp::kStore;
GrOpsRenderPass* renderPass = create_render_pass(
flushState->gpu(), proxy->peekRenderTarget(), stencil, fTargetView.origin(),
flushState->gpu(), proxy->peekRenderTarget(), stencil, this->target(0).origin(),
fClippedContentBounds, fColorLoadOp, fLoadClearColor, stencilLoadOp, stencilStoreOp,
fSampledProxies);
if (!renderPass) {
@ -594,7 +588,7 @@ bool GrOpsTask::onExecute(GrOpFlushState* flushState) {
#endif
GrOpFlushState::OpArgs opArgs(chain.head(),
&fTargetView,
&fTargets[0],
chain.appliedClip(),
chain.dstProxyView());
@ -614,7 +608,7 @@ void GrOpsTask::setColorLoadOp(GrLoadOp op, const SkPMColor4f& color) {
fColorLoadOp = op;
fLoadClearColor = color;
if (GrLoadOp::kClear == fColorLoadOp) {
GrSurfaceProxy* proxy = fTargetView.proxy();
GrSurfaceProxy* proxy = this->target(0).proxy();
SkASSERT(proxy);
fTotalBounds = proxy->backingStoreBoundsRect();
}
@ -635,7 +629,7 @@ bool GrOpsTask::resetForFullscreenClear(CanDiscardPreviousOps canDiscardPrevious
// If the opsTask is using a render target which wraps a vulkan command buffer, we can't do
// a clear load since we cannot change the render pass that we are using. Thus we fall back
// to making a clear op in this case.
return !fTargetView.asRenderTargetProxy()->wrapsVkSecondaryCB();
return !this->target(0).asRenderTargetProxy()->wrapsVkSecondaryCB();
}
// Could not empty the task, so an op must be added to handle the clear
@ -761,7 +755,7 @@ void GrOpsTask::gatherProxyIntervals(GrResourceAllocator* alloc) const {
alloc->addInterval(fDeferredProxies[i], 0, 0, GrResourceAllocator::ActualUse::kNo);
}
GrSurfaceProxy* targetProxy = fTargetView.proxy();
GrSurfaceProxy* targetProxy = this->target(0).proxy();
// Add the interval for all the writes to this GrOpsTasks's target
if (fOpChains.count()) {
@ -780,7 +774,7 @@ void GrOpsTask::gatherProxyIntervals(GrResourceAllocator* alloc) const {
auto gather = [ alloc SkDEBUGCODE(, this) ] (GrSurfaceProxy* p, GrMipMapped) {
alloc->addInterval(p, alloc->curOp(), alloc->curOp(), GrResourceAllocator::ActualUse::kYes
SkDEBUGCODE(, fTargetView.proxy() == p));
SkDEBUGCODE(, this->target(0).proxy() == p));
};
for (const OpChain& recordedOp : fOpChains) {
recordedOp.visitProxies(gather);
@ -796,7 +790,7 @@ void GrOpsTask::recordOp(
const DstProxyView* dstProxyView, const GrCaps& caps) {
SkDEBUGCODE(op->validate();)
SkASSERT(processorAnalysis.requiresDstTexture() == (dstProxyView && dstProxyView->proxy()));
GrSurfaceProxy* proxy = fTargetView.proxy();
GrSurfaceProxy* proxy = this->target(0).proxy();
SkASSERT(proxy);
// A closed GrOpsTask should never receive new/more ops
@ -896,11 +890,11 @@ GrRenderTask::ExpectedOutcome GrOpsTask::onMakeClosed(
fClosedObservers.reset();
});
if (!this->isNoOp()) {
GrSurfaceProxy* proxy = fTargetView.proxy();
GrSurfaceProxy* proxy = this->target(0).proxy();
// Use the entire backing store bounds since the GPU doesn't clip automatically to the
// logical dimensions.
SkRect clippedContentBounds = proxy->backingStoreBoundsRect();
// TODO: If we can fix up GLPrograms test to always intersect the fTargetView proxy bounds
// TODO: If we can fix up GLPrograms test to always intersect the target proxy bounds
// then we can simply assert here that the bounds intersect.
if (clippedContentBounds.intersect(fTotalBounds)) {
clippedContentBounds.roundOut(&fClippedContentBounds);

View File

@ -26,29 +26,24 @@ GrRenderTask::GrRenderTask()
, fFlags(0) {
}
GrRenderTask::GrRenderTask(GrSurfaceProxyView targetView)
: fTargetView(std::move(targetView))
, fUniqueID(CreateUniqueID())
, fFlags(0) {
}
void GrRenderTask::disown(GrDrawingManager* drawingMgr) {
if (this->isSetFlag(kDisowned_Flag)) {
return;
}
this->setFlag(kDisowned_Flag);
GrSurfaceProxy* proxy = fTargetView.proxy();
if (proxy && this == drawingMgr->getLastRenderTask(proxy)) {
// Ensure the drawing manager doesn't hold a dangling pointer.
drawingMgr->setLastRenderTask(proxy, nullptr);
for (const GrSurfaceProxyView& target : fTargets) {
if (this == drawingMgr->getLastRenderTask(target.proxy())) {
drawingMgr->setLastRenderTask(target.proxy(), nullptr);
}
}
}
#ifdef SK_DEBUG
GrRenderTask::~GrRenderTask() {
SkASSERT(this->isSetFlag(kDisowned_Flag));
}
#ifdef SK_DEBUG
bool GrRenderTask::deferredProxiesAreInstantiated() const {
for (int i = 0; i < fDeferredProxies.count(); ++i) {
if (!fDeferredProxies[i]->isInstantiated()) {
@ -67,13 +62,13 @@ void GrRenderTask::makeClosed(const GrCaps& caps) {
SkIRect targetUpdateBounds;
if (ExpectedOutcome::kTargetDirty == this->onMakeClosed(caps, &targetUpdateBounds)) {
GrSurfaceProxy* proxy = fTargetView.proxy();
GrSurfaceProxy* proxy = this->target(0).proxy();
if (proxy->requiresManualMSAAResolve()) {
SkASSERT(fTargetView.asRenderTargetProxy());
fTargetView.asRenderTargetProxy()->markMSAADirty(targetUpdateBounds,
fTargetView.origin());
SkASSERT(this->target(0).asRenderTargetProxy());
this->target(0).asRenderTargetProxy()->markMSAADirty(targetUpdateBounds,
this->target(0).origin());
}
GrTextureProxy* textureProxy = fTargetView.asTextureProxy();
GrTextureProxy* textureProxy = this->target(0).asTextureProxy();
if (textureProxy && GrMipMapped::kYes == textureProxy->mipMapped()) {
textureProxy->markMipMapsDirty();
}
@ -259,11 +254,11 @@ void GrRenderTask::closeThoseWhoDependOnMe(const GrCaps& caps) {
}
bool GrRenderTask::isInstantiated() const {
// Some renderTasks (e.g. GrTransferFromRenderTask) don't have a target.
GrSurfaceProxy* proxy = fTargetView.proxy();
if (!proxy) {
// Some renderTasks (e.g. GrTransferFromRenderTask) don't have any targets.
if (0 == this->numTargets()) {
return true;
}
GrSurfaceProxy* proxy = this->target(0).proxy();
if (!proxy->isInstantiated()) {
return false;
@ -277,16 +272,29 @@ bool GrRenderTask::isInstantiated() const {
return true;
}
void GrRenderTask::addTarget(GrDrawingManager* drawingMgr, GrSurfaceProxyView view) {
SkASSERT(view);
drawingMgr->setLastRenderTask(view.proxy(), this);
fTargets.push_back(std::move(view));
}
#ifdef SK_DEBUG
void GrRenderTask::dump(bool printDependencies) const {
SkDebugf("--------------------------------------------------------------\n");
GrSurfaceProxy* proxy = fTargetView.proxy();
SkDebugf("%s - renderTaskID: %d - proxyID: %d - surfaceID: %d\n",
this->name(), fUniqueID,
proxy ? proxy->uniqueID().asUInt() : -1,
proxy && proxy->peekSurface()
? proxy->peekSurface()->uniqueID().asUInt()
: -1);
SkDebugf("%s - renderTaskID: %d\n", this->name(), fUniqueID);
if (!fTargets.empty()) {
SkDebugf("Targets: \n");
for (int i = 0; i < fTargets.count(); ++i) {
GrSurfaceProxy* proxy = fTargets[i].proxy();
SkDebugf("[%d]: proxyID: %d - surfaceID: %d\n",
i,
proxy ? proxy->uniqueID().asUInt() : -1,
proxy && proxy->peekSurface()
? proxy->peekSurface()->uniqueID().asUInt()
: -1);
}
}
if (printDependencies) {
SkDebugf("I rely On (%d): ", fDependencies.count());

View File

@ -27,8 +27,7 @@ class GrTextureResolveRenderTask;
class GrRenderTask : public SkRefCnt {
public:
GrRenderTask();
GrRenderTask(GrSurfaceProxyView);
~GrRenderTask() override;
SkDEBUGCODE(~GrRenderTask() override);
void makeClosed(const GrCaps&);
@ -68,7 +67,8 @@ public:
bool dependsOn(const GrRenderTask* dependedOn) const;
uint32_t uniqueID() const { return fUniqueID; }
GrSurfaceProxyView targetView() const { return fTargetView; }
int numTargets() const { return fTargets.count(); }
const GrSurfaceProxyView& target(int i) const { return fTargets[i]; }
/*
* Safely cast this GrRenderTask to a GrOpsTask (if possible).
@ -88,8 +88,8 @@ public:
void visitTargetAndSrcProxies_debugOnly(const GrOp::VisitProxyFunc& fn) const {
this->visitProxies_debugOnly(fn);
if (fTargetView.proxy()) {
fn(fTargetView.proxy(), GrMipMapped::kNo);
for (int i = 0; i < this->numTargets(); ++i) {
fn(this->target(i).proxy(), GrMipMapped::kNo);
}
}
#endif
@ -101,6 +101,10 @@ protected:
SkDEBUGCODE(bool deferredProxiesAreInstantiated() const;)
// Add a target surface proxy to the list of targets for this task.
// This also informs the drawing manager to update the lastRenderTask association.
void addTarget(GrDrawingManager*, GrSurfaceProxyView);
enum class ExpectedOutcome : bool {
kTargetUnchanged,
kTargetDirty,
@ -113,7 +117,7 @@ protected:
// targetUpdateBounds must not extend beyond the proxy bounds.
virtual ExpectedOutcome onMakeClosed(const GrCaps&, SkIRect* targetUpdateBounds) = 0;
GrSurfaceProxyView fTargetView;
SkSTArray<1, GrSurfaceProxyView> fTargets;
// List of texture proxies whose contents are being prepared on a worker thread
// TODO: this list exists so we can fire off the proper upload when an renderTask begins
@ -125,15 +129,19 @@ private:
friend class GrDrawingManager;
// Drops any pending operations that reference proxies that are not instantiated.
// NOTE: Derived classes don't need to check fTargetView. That is handled when the
// NOTE: Derived classes don't need to check targets. That is handled when the
// drawingManager calls isInstantiated.
virtual void handleInternalAllocationFailure() = 0;
// Derived classes can override to indicate usage of proxies _other than target proxies_.
// GrRenderTask itself will handle checking the target proxies.
virtual bool onIsUsed(GrSurfaceProxy*) const = 0;
bool isUsed(GrSurfaceProxy* proxy) const {
if (proxy == fTargetView.proxy()) {
return true;
for (const GrSurfaceProxyView& target : fTargets) {
if (target.proxy() == proxy) {
return true;
}
}
return this->onIsUsed(proxy);

View File

@ -14,19 +14,12 @@
#include "src/gpu/GrResourceAllocator.h"
#include "src/gpu/GrTexturePriv.h"
void GrTextureResolveRenderTask::disown(GrDrawingManager* drawingMgr) {
for (const auto& resolve : fResolves) {
drawingMgr->setLastRenderTask(resolve.fProxy.get(), nullptr);
}
GrRenderTask::disown(drawingMgr);
}
void GrTextureResolveRenderTask::addProxy(GrDrawingManager* drawingMgr,
sk_sp<GrSurfaceProxy> proxyRef,
GrSurfaceProxy::ResolveFlags flags,
const GrCaps& caps) {
fResolves.emplace_back(std::move(proxyRef), flags);
GrSurfaceProxy* proxy = fResolves.back().fProxy.get();
Resolve& resolve = fResolves.emplace_back(flags);
GrSurfaceProxy* proxy = proxyRef.get();
// Ensure the last render task that operated on the proxy is closed. That's where msaa and
// mipmaps should have been marked dirty.
@ -38,7 +31,7 @@ void GrTextureResolveRenderTask::addProxy(GrDrawingManager* drawingMgr,
GrRenderTargetProxy* renderTargetProxy = proxy->asRenderTargetProxy();
SkASSERT(renderTargetProxy);
SkASSERT(renderTargetProxy->isMSAADirty());
fResolves.back().fMSAAResolveRect = renderTargetProxy->msaaDirtyRect();
resolve.fMSAAResolveRect = renderTargetProxy->msaaDirtyRect();
renderTargetProxy->markMSAAResolved();
}
@ -53,7 +46,7 @@ void GrTextureResolveRenderTask::addProxy(GrDrawingManager* drawingMgr,
// generating mipmap levels and/or resolving MSAA.
this->addDependency(drawingMgr, proxy, GrMipMapped::kNo,
GrTextureResolveManager(nullptr), caps);
drawingMgr->setLastRenderTask(proxy, this);
this->addTarget(drawingMgr, GrSurfaceProxyView(std::move(proxyRef)));
}
void GrTextureResolveRenderTask::gatherProxyIntervals(GrResourceAllocator* alloc) const {
@ -61,8 +54,9 @@ void GrTextureResolveRenderTask::gatherProxyIntervals(GrResourceAllocator* alloc
// fEndOfOpsTaskOpIndices will remain in sync. We create fake op#'s to capture the fact that we
// manipulate the resolve proxies.
auto fakeOp = alloc->curOp();
for (const auto& resolve : fResolves) {
alloc->addInterval(resolve.fProxy.get(), fakeOp, fakeOp,
SkASSERT(fResolves.count() == this->numTargets());
for (const GrSurfaceProxyView& target : fTargets) {
alloc->addInterval(target.proxy(), fakeOp, fakeOp,
GrResourceAllocator::ActualUse::kYes);
}
alloc->incOps();
@ -70,9 +64,11 @@ void GrTextureResolveRenderTask::gatherProxyIntervals(GrResourceAllocator* alloc
bool GrTextureResolveRenderTask::onExecute(GrOpFlushState* flushState) {
// Resolve all msaa back-to-back, before regenerating mipmaps.
for (const auto& resolve : fResolves) {
SkASSERT(fResolves.count() == this->numTargets());
for (int i = 0; i < fResolves.count(); ++i) {
const Resolve& resolve = fResolves[i];
if (GrSurfaceProxy::ResolveFlags::kMSAA & resolve.fFlags) {
GrSurfaceProxy* proxy = resolve.fProxy.get();
GrSurfaceProxy* proxy = this->target(i).proxy();
// peekRenderTarget might be null if there was an instantiation error.
if (GrRenderTarget* renderTarget = proxy->peekRenderTarget()) {
flushState->gpu()->resolveRenderTarget(renderTarget, resolve.fMSAAResolveRect,
@ -81,10 +77,11 @@ bool GrTextureResolveRenderTask::onExecute(GrOpFlushState* flushState) {
}
}
// Regenerate all mipmaps back-to-back.
for (const auto& resolve : fResolves) {
for (int i = 0; i < fResolves.count(); ++i) {
const Resolve& resolve = fResolves[i];
if (GrSurfaceProxy::ResolveFlags::kMipMaps & resolve.fFlags) {
// peekTexture might be null if there was an instantiation error.
GrTexture* texture = resolve.fProxy->peekTexture();
GrTexture* texture = this->target(i).proxy()->peekTexture();
if (texture && texture->texturePriv().mipMapsAreDirty()) {
flushState->gpu()->regenerateMipMapLevels(texture);
SkASSERT(!texture->texturePriv().mipMapsAreDirty());
@ -96,9 +93,5 @@ bool GrTextureResolveRenderTask::onExecute(GrOpFlushState* flushState) {
}
#ifdef SK_DEBUG
void GrTextureResolveRenderTask::visitProxies_debugOnly(const GrOp::VisitProxyFunc& fn) const {
for (const auto& resolve : fResolves) {
fn(resolve.fProxy.get(), GrMipMapped::kNo);
}
}
void GrTextureResolveRenderTask::visitProxies_debugOnly(const GrOp::VisitProxyFunc& fn) const {}
#endif

View File

@ -14,15 +14,11 @@ class GrTextureResolveRenderTask final : public GrRenderTask {
public:
GrTextureResolveRenderTask() : GrRenderTask() {}
void disown(GrDrawingManager*) override;
void addProxy(GrDrawingManager*, sk_sp<GrSurfaceProxy> proxy,
GrSurfaceProxy::ResolveFlags, const GrCaps&);
private:
bool onIsUsed(GrSurfaceProxy* proxy) const override {
// This case should be handled by GrRenderTask.
SkASSERT(proxy != fTargetView.proxy());
return false;
}
void handleInternalAllocationFailure() override {
@ -42,9 +38,7 @@ private:
#endif
struct Resolve {
Resolve(sk_sp<GrSurfaceProxy> proxy, GrSurfaceProxy::ResolveFlags flags)
: fProxy(std::move(proxy)), fFlags(flags) {}
sk_sp<GrSurfaceProxy> fProxy;
Resolve(GrSurfaceProxy::ResolveFlags flags) : fFlags(flags) {}
GrSurfaceProxy::ResolveFlags fFlags;
SkIRect fMSAAResolveRect;
};

View File

@ -28,7 +28,7 @@ public:
private:
bool onIsUsed(GrSurfaceProxy* proxy) const override {
SkASSERT(!fTargetView.proxy());
SkASSERT(0 == this->numTargets());
return proxy == fSrcProxy.get();
}
// If fSrcProxy is uninstantiated at flush time we simply will skip doing the transfer.

View File

@ -14,8 +14,10 @@
void GrWaitRenderTask::gatherProxyIntervals(GrResourceAllocator* alloc) const {
// This renderTask doesn't have "normal" ops. In this case we still need to add an interval (so
// fEndOfOpsTaskOpIndices will remain in sync), so we create a fake op# to capture the fact that
// we manipulate fTargetView's proxy.
alloc->addInterval(fTargetView.proxy(), alloc->curOp(), alloc->curOp(),
// we manipulate our target's proxy.
SkASSERT(0 == this->numTargets());
auto fakeOp = alloc->curOp();
alloc->addInterval(fWaitedOn.proxy(), fakeOp, fakeOp,
GrResourceAllocator::ActualUse::kYes);
alloc->incOps();
}

View File

@ -16,15 +16,14 @@ public:
GrWaitRenderTask(GrSurfaceProxyView surfaceView,
std::unique_ptr<std::unique_ptr<GrSemaphore>[]> semaphores,
int numSemaphores)
: GrRenderTask(std::move(surfaceView))
: GrRenderTask()
, fSemaphores(std::move(semaphores))
, fNumSemaphores(numSemaphores) {}
, fNumSemaphores(numSemaphores)
, fWaitedOn(std::move(surfaceView)) {}
private:
bool onIsUsed(GrSurfaceProxy* proxy) const override {
// This case should be handled by GrRenderTask.
SkASSERT(proxy != fTargetView.proxy());
return false;
return proxy == fWaitedOn.proxy();
}
void handleInternalAllocationFailure() override {}
void gatherProxyIntervals(GrResourceAllocator*) const override;
@ -42,6 +41,11 @@ private:
#endif
std::unique_ptr<std::unique_ptr<GrSemaphore>[]> fSemaphores;
int fNumSemaphores;
// This field is separate from the main "targets" field on GrRenderTask because this task
// does not actually write to the surface and so should not participate in the normal
// lastRenderTask tracking that written-to targets do.
GrSurfaceProxyView fWaitedOn;
};
#endif