Reland "Split apart flushing and submitting in the GrGpu classes and GrDrawingManager."

This reverts commit 2faa33772b.

Reason for revert: The Metal test failures that prompted the revert have been addressed; relanding the original change.

Original change's description:
> Revert "Split apart flushing and submitting in the GrGpu classes and GrDrawingManager."
> 
> This reverts commit 6341d92280.
> 
> Reason for revert: metal tests failing
> 
> Original change's description:
> > Split apart flushing and submitting in the GrGpu classes and GrDrawingManager.
> > 
> > Bug: skia:10118
> > Change-Id: I53e3b9f1bd28a00276a3d35b5160aa0cfec30cfd
> > Reviewed-on: https://skia-review.googlesource.com/c/skia/+/282417
> > Reviewed-by: Jim Van Verth <jvanverth@google.com>
> > Reviewed-by: Brian Salomon <bsalomon@google.com>
> > Commit-Queue: Greg Daniel <egdaniel@google.com>
> 
> TBR=egdaniel@google.com,jvanverth@google.com,bsalomon@google.com,senorblanco@chromium.org
> 
> Change-Id: I2cb98b470e3a066c09012b686e9942edb5a3979b
> No-Presubmit: true
> No-Tree-Checks: true
> No-Try: true
> Bug: skia:10118
> Reviewed-on: https://skia-review.googlesource.com/c/skia/+/282852
> Reviewed-by: Greg Daniel <egdaniel@google.com>
> Commit-Queue: Greg Daniel <egdaniel@google.com>

Bug: skia:10118
Change-Id: I9a77abe995a991275a87ee1d38bcab4deb361a9d
Reviewed-on: https://skia-review.googlesource.com/c/skia/+/282976
Reviewed-by: Greg Daniel <egdaniel@google.com>
Reviewed-by: Brian Salomon <bsalomon@google.com>
Commit-Queue: Greg Daniel <egdaniel@google.com>
This commit is contained in:
Greg Daniel 2020-04-10 17:43:51 +00:00 committed by Skia Commit-Bot
parent 95c26efc90
commit fe159621e6
22 changed files with 211 additions and 151 deletions

View File

@ -319,8 +319,17 @@ GrSemaphoresSubmitted GrContext::flush(const GrFlushInfo& info,
return GrSemaphoresSubmitted::kNo;
}
return this->drawingManager()->flush(nullptr, 0, SkSurface::BackendSurfaceAccess::kNoAccess,
info, externalRequests);
bool submitted = false;
if (this->drawingManager()->flush(nullptr, 0, SkSurface::BackendSurfaceAccess::kNoAccess,
info, externalRequests)) {
bool forceSync = SkToBool(info.fFlags & kSyncCpu_GrFlushFlag);
submitted = this->drawingManager()->submitToGpu(forceSync);
}
if (!submitted || (!this->priv().caps()->semaphoreSupport() && info.fNumSemaphores)) {
return GrSemaphoresSubmitted::kNo;
}
return GrSemaphoresSubmitted::kYes;
}
////////////////////////////////////////////////////////////////////////////////

View File

@ -221,7 +221,7 @@ void GrDrawingManager::freeGpuResources() {
}
// MDB TODO: make use of the 'proxy' parameter.
GrSemaphoresSubmitted GrDrawingManager::flush(GrSurfaceProxy* proxies[], int numProxies,
bool GrDrawingManager::flush(GrSurfaceProxy* proxies[], int numProxies,
SkSurface::BackendSurfaceAccess access, const GrFlushInfo& info,
const GrPrepareForExternalIORequests& externalRequests) {
SkASSERT(numProxies >= 0);
@ -232,7 +232,7 @@ GrSemaphoresSubmitted GrDrawingManager::flush(GrSurfaceProxy* proxies[], int num
if (info.fFinishedProc) {
info.fFinishedProc(info.fFinishedContext);
}
return GrSemaphoresSubmitted::kNo;
return false;
}
SkDEBUGCODE(this->validate());
@ -244,7 +244,7 @@ GrSemaphoresSubmitted GrDrawingManager::flush(GrSurfaceProxy* proxies[], int num
canSkip = !fDAG.isUsed(proxies[i]) && !this->isDDLTarget(proxies[i]);
}
if (canSkip) {
return GrSemaphoresSubmitted::kNo;
return false;
}
}
@ -253,7 +253,7 @@ GrSemaphoresSubmitted GrDrawingManager::flush(GrSurfaceProxy* proxies[], int num
if (info.fFinishedProc) {
info.fFinishedProc(info.fFinishedContext);
}
return GrSemaphoresSubmitted::kNo; // Can't flush while DDL recording
return false; // Can't flush while DDL recording
}
direct->priv().clientMappedBufferManager()->process();
@ -262,7 +262,7 @@ GrSemaphoresSubmitted GrDrawingManager::flush(GrSurfaceProxy* proxies[], int num
if (info.fFinishedProc) {
info.fFinishedProc(info.fFinishedContext);
}
return GrSemaphoresSubmitted::kNo; // Can't flush while DDL recording
return false; // Can't flush while DDL recording
}
fFlushing = true;
@ -394,8 +394,7 @@ GrSemaphoresSubmitted GrDrawingManager::flush(GrSurfaceProxy* proxies[], int num
opMemoryPool->isEmpty();
#endif
GrSemaphoresSubmitted result = gpu->finishFlush(proxies, numProxies, access, info,
externalRequests);
gpu->executeFlushInfo(proxies, numProxies, access, info, externalRequests);
// Give the cache a chance to purge resources that become purgeable due to flushing.
if (flushed) {
@ -413,7 +412,20 @@ GrSemaphoresSubmitted GrDrawingManager::flush(GrSurfaceProxy* proxies[], int num
fFlushingRenderTaskIDs.reset();
fFlushing = false;
return result;
return true;
}
bool GrDrawingManager::submitToGpu(bool syncToCpu) {
if (fFlushing || this->wasAbandoned()) {
return false;
}
auto direct = fContext->priv().asDirectContext();
if (!direct) {
return false; // Can't submit while DDL recording
}
GrGpu* gpu = direct->priv().getGpu();
return gpu->submitToGpu(syncToCpu);
}
bool GrDrawingManager::executeRenderTasks(int startIndex, int stopIndex, GrOpFlushState* flushState,
@ -462,8 +474,7 @@ bool GrDrawingManager::executeRenderTasks(int startIndex, int stopIndex, GrOpFlu
onFlushRenderTask = nullptr;
(*numRenderTasksExecuted)++;
if (*numRenderTasksExecuted >= kMaxRenderTasksBeforeFlush) {
flushState->gpu()->finishFlush(nullptr, 0, SkSurface::BackendSurfaceAccess::kNoAccess,
GrFlushInfo(), GrPrepareForExternalIORequests());
flushState->gpu()->submitToGpu(false);
*numRenderTasksExecuted = 0;
}
}
@ -481,8 +492,7 @@ bool GrDrawingManager::executeRenderTasks(int startIndex, int stopIndex, GrOpFlu
}
(*numRenderTasksExecuted)++;
if (*numRenderTasksExecuted >= kMaxRenderTasksBeforeFlush) {
flushState->gpu()->finishFlush(nullptr, 0, SkSurface::BackendSurfaceAccess::kNoAccess,
GrFlushInfo(), GrPrepareForExternalIORequests());
flushState->gpu()->submitToGpu(false);
*numRenderTasksExecuted = 0;
}
}
@ -523,12 +533,12 @@ GrSemaphoresSubmitted GrDrawingManager::flushSurfaces(GrSurfaceProxy* proxies[],
// TODO: It is important to upgrade the drawingmanager to just flushing the
// portion of the DAG required by 'proxies' in order to restore some of the
// semantics of this method.
GrSemaphoresSubmitted result = this->flush(proxies, numProxies, access, info,
GrPrepareForExternalIORequests());
bool didFlush = this->flush(proxies, numProxies, access, info,
GrPrepareForExternalIORequests());
for (int i = 0; i < numProxies; ++i) {
GrSurfaceProxy* proxy = proxies[i];
if (!proxy->isInstantiated()) {
return result;
continue;
}
// In the flushSurfaces case, we need to resolve MSAA immediately after flush. This is
// because the client will call through to this method when drawing into a target created by
@ -558,7 +568,16 @@ GrSemaphoresSubmitted GrDrawingManager::flushSurfaces(GrSurfaceProxy* proxies[],
}
SkDEBUGCODE(this->validate());
return result;
bool submitted = false;
if (didFlush) {
submitted = this->submitToGpu(SkToBool(info.fFlags & kSyncCpu_GrFlushFlag));
}
if (!submitted || (!direct->priv().caps()->semaphoreSupport() && info.fNumSemaphores)) {
return GrSemaphoresSubmitted::kNo;
}
return GrSemaphoresSubmitted::kYes;
}
void GrDrawingManager::addOnFlushCallbackObject(GrOnFlushCallbackObject* onFlushCBObject) {
@ -910,8 +929,10 @@ void GrDrawingManager::flushIfNecessary() {
auto resourceCache = direct->priv().getResourceCache();
if (resourceCache && resourceCache->requestsFlush()) {
this->flush(nullptr, 0, SkSurface::BackendSurfaceAccess::kNoAccess, GrFlushInfo(),
GrPrepareForExternalIORequests());
if (this->flush(nullptr, 0, SkSurface::BackendSurfaceAccess::kNoAccess, GrFlushInfo(),
GrPrepareForExternalIORequests())) {
this->submitToGpu(false);
}
resourceCache->purgeAsNeeded();
}
}

View File

@ -182,11 +182,13 @@ private:
bool executeRenderTasks(int startIndex, int stopIndex, GrOpFlushState*,
int* numRenderTasksExecuted);
GrSemaphoresSubmitted flush(GrSurfaceProxy* proxies[],
int numProxies,
SkSurface::BackendSurfaceAccess access,
const GrFlushInfo&,
const GrPrepareForExternalIORequests&);
bool flush(GrSurfaceProxy* proxies[],
int numProxies,
SkSurface::BackendSurfaceAccess access,
const GrFlushInfo&,
const GrPrepareForExternalIORequests&);
bool submitToGpu(bool syncToCpu);
SkDEBUGCODE(void validate() const);

View File

@ -646,16 +646,13 @@ void GrGpu::validateStagingBuffers() const {
}
#endif
GrSemaphoresSubmitted GrGpu::finishFlush(GrSurfaceProxy* proxies[],
int n,
SkSurface::BackendSurfaceAccess access,
const GrFlushInfo& info,
const GrPrepareForExternalIORequests& externalRequests) {
void GrGpu::executeFlushInfo(GrSurfaceProxy* proxies[],
int numProxies,
SkSurface::BackendSurfaceAccess access,
const GrFlushInfo& info,
const GrPrepareForExternalIORequests& externalRequests) {
TRACE_EVENT0("skia.gpu", TRACE_FUNC);
#ifdef SK_DEBUG
this->validateStagingBuffers();
#endif
this->stats()->incNumFinishFlushes();
GrResourceProvider* resourceProvider = fContext->priv().resourceProvider();
std::unique_ptr<std::unique_ptr<GrSemaphore>[]> semaphores(
@ -678,21 +675,30 @@ GrSemaphoresSubmitted GrGpu::finishFlush(GrSurfaceProxy* proxies[],
}
}
if (info.fFinishedProc) {
this->addFinishedProc(info.fFinishedProc, info.fFinishedContext);
}
this->prepareSurfacesForBackendAccessAndExternalIO(proxies, numProxies, access,
externalRequests);
}
bool GrGpu::submitToGpu(bool syncCpu) {
this->stats()->incNumSubmitToGpus();
#ifdef SK_DEBUG
this->validateStagingBuffers();
#endif
this->unmapStagingBuffers();
if (!this->onFinishFlush(proxies, n, access, info, externalRequests)) {
return GrSemaphoresSubmitted::kNo;
}
bool submitted = this->onSubmitToGpu(syncCpu);
// Move all active staging buffers to the busy list.
// TODO: this should probably be handled inside of the onSubmitToGpu by the backends.
while (GrStagingBuffer* buffer = fActiveStagingBuffers.head()) {
fActiveStagingBuffers.remove(buffer);
fBusyStagingBuffers.addToTail(buffer);
}
return this->caps()->semaphoreSupport() ? GrSemaphoresSubmitted::kYes
: GrSemaphoresSubmitted::kNo;
return submitted;
}
#ifdef SK_ENABLE_DUMP_GPU

View File

@ -360,9 +360,11 @@ public:
// Provides a hook for post-flush actions (e.g. Vulkan command buffer submits). This will also
// insert any numSemaphore semaphores on the gpu and set the backendSemaphores to match the
// inserted semaphores.
GrSemaphoresSubmitted finishFlush(GrSurfaceProxy*[], int n,
SkSurface::BackendSurfaceAccess access, const GrFlushInfo&,
const GrPrepareForExternalIORequests&);
void executeFlushInfo(GrSurfaceProxy*[], int numProxies,
SkSurface::BackendSurfaceAccess access, const GrFlushInfo&,
const GrPrepareForExternalIORequests&);
bool submitToGpu(bool syncCpu);
virtual void submit(GrOpsRenderPass*) = 0;
@ -433,8 +435,8 @@ public:
int numFailedDraws() const { return fNumFailedDraws; }
void incNumFailedDraws() { ++fNumFailedDraws; }
int numFinishFlushes() const { return fNumFinishFlushes; }
void incNumFinishFlushes() { ++fNumFinishFlushes; }
int numSubmitToGpus() const { return fNumSubmitToGpus; }
void incNumSubmitToGpus() { ++fNumSubmitToGpus; }
int numScratchTexturesReused() const { return fNumScratchTexturesReused; }
void incNumScratchTexturesReused() { ++fNumScratchTexturesReused; }
@ -482,7 +484,7 @@ public:
int fStencilAttachmentCreates = 0;
int fNumDraws = 0;
int fNumFailedDraws = 0;
int fNumFinishFlushes = 0;
int fNumSubmitToGpus = 0;
int fNumScratchTexturesReused = 0;
int fNumInlineCompilationFailures = 0;
@ -509,7 +511,7 @@ public:
void incStencilAttachmentCreates() {}
void incNumDraws() {}
void incNumFailedDraws() {}
void incNumFinishFlushes() {}
void incNumSubmitToGpus() {}
void incNumInlineCompilationFailures() {}
void incNumInlineProgramCacheResult(ProgramCacheResult stat) {}
void incNumPreCompilationFailures() {}
@ -805,8 +807,14 @@ private:
virtual bool onCopySurface(GrSurface* dst, GrSurface* src, const SkIRect& srcRect,
const SkIPoint& dstPoint) = 0;
virtual bool onFinishFlush(GrSurfaceProxy*[], int n, SkSurface::BackendSurfaceAccess access,
const GrFlushInfo&, const GrPrepareForExternalIORequests&) = 0;
virtual void addFinishedProc(GrGpuFinishedProc finishedProc,
GrGpuFinishedContext finishedContext) = 0;
virtual void prepareSurfacesForBackendAccessAndExternalIO(
GrSurfaceProxy* proxies[], int numProxies, SkSurface::BackendSurfaceAccess access,
const GrPrepareForExternalIORequests& externalRequests) {}
virtual bool onSubmitToGpu(bool syncCpu) = 0;
#ifdef SK_ENABLE_DUMP_GPU
virtual void onDumpJSON(SkJSONWriter*) const {}

View File

@ -160,11 +160,14 @@ private:
void onResolveRenderTarget(GrRenderTarget* target, const SkIRect&, ForExternalIO) override {}
bool onFinishFlush(GrSurfaceProxy*[], int n, SkSurface::BackendSurfaceAccess access,
const GrFlushInfo& info, const GrPrepareForExternalIORequests&) override {
if (info.fFinishedProc) {
info.fFinishedProc(info.fFinishedContext);
}
void addFinishedProc(GrGpuFinishedProc finishedProc,
GrGpuFinishedContext finishedContext) override {
// TODO: have this actually wait before calling the proc
SkASSERT(finishedProc);
finishedProc(finishedContext);
}
bool onSubmitToGpu(bool syncCpu) override {
return true;
}

View File

@ -442,8 +442,12 @@ void GrDawnGpu::flush() {
fDevice.Tick();
}
bool GrDawnGpu::onFinishFlush(GrSurfaceProxy*[], int n, SkSurface::BackendSurfaceAccess access,
const GrFlushInfo& info, const GrPrepareForExternalIORequests&) {
void GrDawnGpu::addFinishedProc(GrGpuFinishedProc finishedProc,
GrGpuFinishedContext finishedContext) {
// TODO: implement
}
bool GrDawnGpu::onSubmitToGpu(bool syncCpu) {
this->flush();
return true;
}
@ -516,7 +520,7 @@ bool GrDawnGpu::onReadPixels(GrSurface* surface, int left, int top, int width, i
wgpu::Extent3D copySize = {(uint32_t) width, (uint32_t) height, 1};
this->getCopyEncoder().CopyTextureToBuffer(&srcTexture, &dstBuffer, &copySize);
flush();
this->flush();
const void *readPixelsPtr = nullptr;
buf.MapReadAsync(callback, &readPixelsPtr);

View File

@ -172,8 +172,10 @@ private:
bool onCopySurface(GrSurface* dst, GrSurface* src,
const SkIRect& srcRect, const SkIPoint& dstPoint) override;
bool onFinishFlush(GrSurfaceProxy*[], int n, SkSurface::BackendSurfaceAccess access,
const GrFlushInfo& info, const GrPrepareForExternalIORequests&) override;
void addFinishedProc(GrGpuFinishedProc finishedProc,
GrGpuFinishedContext finishedContext) override;
bool onSubmitToGpu(bool syncCpu) override;
void mapStagingBuffers();

View File

@ -3784,37 +3784,35 @@ GrGLAttribArrayState* GrGLGpu::HWVertexArrayState::bindInternalVertexArray(GrGLG
return attribState;
}
bool GrGLGpu::onFinishFlush(GrSurfaceProxy*[], int, SkSurface::BackendSurfaceAccess access,
const GrFlushInfo& info, const GrPrepareForExternalIORequests&) {
// If we inserted semaphores during the flush, we need to call GLFlush.
bool insertedSemaphore = info.fNumSemaphores > 0 && this->caps()->semaphoreSupport();
// We call finish if the client told us to sync or if we have a finished proc but don't support
// GLsync objects.
bool finish = (info.fFlags & kSyncCpu_GrFlushFlag) ||
(info.fFinishedProc && !this->caps()->fenceSyncSupport());
if (finish) {
void GrGLGpu::addFinishedProc(GrGpuFinishedProc finishedProc,
GrGpuFinishedContext finishedContext) {
SkASSERT(finishedProc);
FinishCallback callback;
callback.fCallback = finishedProc;
callback.fContext = finishedContext;
if (this->caps()->fenceSyncSupport()) {
callback.fSync = (GrGLsync)this->insertFence();
} else {
callback.fSync = 0;
}
fFinishCallbacks.push_back(callback);
}
bool GrGLGpu::onSubmitToGpu(bool syncCpu) {
if (syncCpu || (!fFinishCallbacks.empty() && !this->caps()->fenceSyncSupport())) {
GL_CALL(Finish());
// After a finish everything previously sent to GL is done.
for (const auto& cb : fFinishCallbacks) {
cb.fCallback(cb.fContext);
this->deleteSync(cb.fSync);
if (cb.fSync) {
this->deleteSync(cb.fSync);
} else {
SkASSERT(!this->caps()->fenceSyncSupport());
}
}
fFinishCallbacks.clear();
if (info.fFinishedProc) {
info.fFinishedProc(info.fFinishedContext);
}
} else {
if (info.fFinishedProc) {
FinishCallback callback;
callback.fCallback = info.fFinishedProc;
callback.fContext = info.fFinishedContext;
callback.fSync = (GrGLsync)this->insertFence();
fFinishCallbacks.push_back(callback);
GL_CALL(Flush());
} else if (insertedSemaphore) {
// Must call flush after semaphores in case they are waited on another GL context.
GL_CALL(Flush());
}
GL_CALL(Flush());
// See if any previously inserted finish procs are good to go.
this->checkFinishProcs();
}

View File

@ -306,8 +306,10 @@ private:
void flushBlendAndColorWrite(const GrXferProcessor::BlendInfo& blendInfo, const GrSwizzle&);
bool onFinishFlush(GrSurfaceProxy*[], int n, SkSurface::BackendSurfaceAccess access,
const GrFlushInfo&, const GrPrepareForExternalIORequests&) override;
void addFinishedProc(GrGpuFinishedProc finishedProc,
GrGpuFinishedContext finishedContext) override;
bool onSubmitToGpu(bool syncCpu) override;
bool waitSync(GrGLsync, uint64_t timeout, bool flush);

View File

@ -131,11 +131,13 @@ private:
void onResolveRenderTarget(GrRenderTarget* target, const SkIRect&, ForExternalIO) override {}
bool onFinishFlush(GrSurfaceProxy*[], int n, SkSurface::BackendSurfaceAccess access,
const GrFlushInfo& info, const GrPrepareForExternalIORequests&) override {
if (info.fFinishedProc) {
info.fFinishedProc(info.fFinishedContext);
}
void addFinishedProc(GrGpuFinishedProc finishedProc,
GrGpuFinishedContext finishedContext) override {
SkASSERT(finishedProc);
finishedProc(finishedContext);
}
bool onSubmitToGpu(bool syncCpu) override {
return true;
}

View File

@ -199,8 +199,10 @@ private:
void resolveTexture(id<MTLTexture> colorTexture, id<MTLTexture> resolveTexture);
bool onFinishFlush(GrSurfaceProxy*[], int n, SkSurface::BackendSurfaceAccess access,
const GrFlushInfo& info, const GrPrepareForExternalIORequests&) override;
void addFinishedProc(GrGpuFinishedProc finishedProc,
GrGpuFinishedContext finishedContext) override;
bool onSubmitToGpu(bool syncCpu) override;
// Function that uploads data onto textures with private storage mode (GPU access only).
bool uploadToTexture(GrMtlTexture* tex, int left, int top, int width, int height,

View File

@ -187,30 +187,26 @@ void GrMtlGpu::submitCommandBuffer(SyncQueue sync) {
}
}
bool GrMtlGpu::onFinishFlush(GrSurfaceProxy*[], int, SkSurface::BackendSurfaceAccess,
const GrFlushInfo& info, const GrPrepareForExternalIORequests&) {
bool forceSync = SkToBool(info.fFlags & kSyncCpu_GrFlushFlag) ||
(info.fFinishedProc && !this->mtlCaps().fenceSyncSupport());
// TODO: do we care about info.fSemaphore?
if (forceSync) {
void GrMtlGpu::addFinishedProc(GrGpuFinishedProc finishedProc,
GrGpuFinishedContext finishedContext) {
SkASSERT(finishedProc);
SkASSERT(this->caps()->fenceSyncSupport());
FinishCallback callback;
callback.fCallback = finishedProc;
callback.fContext = finishedContext;
callback.fFence = this->insertFence();
fFinishCallbacks.push_back(callback);
}
bool GrMtlGpu::onSubmitToGpu(bool syncCpu) {
if (syncCpu) {
this->submitCommandBuffer(kForce_SyncQueue);
// After a forced sync everything previously sent to the GPU is done.
for (const auto& cb : fFinishCallbacks) {
cb.fCallback(cb.fContext);
this->deleteFence(cb.fFence);
}
fFinishCallbacks.clear();
if (info.fFinishedProc) {
info.fFinishedProc(info.fFinishedContext);
}
} else {
if (info.fFinishedProc) {
FinishCallback callback;
callback.fCallback = info.fFinishedProc;
callback.fContext = info.fFinishedContext;
callback.fFence = this->insertFence();
fFinishCallbacks.push_back(callback);
}
this->submitCommandBuffer(kSkip_SyncQueue);
}
return true;

View File

@ -642,7 +642,7 @@ void GrVkPrimaryCommandBuffer::onReleaseResources() {
for (int i = 0; i < fSecondaryCommandBuffers.count(); ++i) {
fSecondaryCommandBuffers[i]->releaseResources();
}
fFinishedProcs.reset();
this->callFinishedProcs();
}
void GrVkPrimaryCommandBuffer::recycleSecondaryCommandBuffers(GrVkCommandPool* cmdPool) {

View File

@ -299,6 +299,10 @@ public:
void addFinishedProc(sk_sp<GrRefCntedCallback> finishedProc);
void callFinishedProcs() {
fFinishedProcs.reset();
}
void recycleSecondaryCommandBuffers(GrVkCommandPool* cmdPool);
private:

View File

@ -312,19 +312,20 @@ GrOpsRenderPass* GrVkGpu::getOpsRenderPass(
return fCachedOpsRenderPass.get();
}
bool GrVkGpu::submitCommandBuffer(SyncQueue sync, GrGpuFinishedProc finishedProc,
GrGpuFinishedContext finishedContext) {
bool GrVkGpu::submitCommandBuffer(SyncQueue sync) {
TRACE_EVENT0("skia.gpu", TRACE_FUNC);
SkASSERT(fCurrentCmdBuffer);
SkASSERT(!fCachedOpsRenderPass || !fCachedOpsRenderPass->isActive());
if (!fCurrentCmdBuffer->hasWork() && kForce_SyncQueue != sync &&
!fSemaphoresToSignal.count() && !fSemaphoresToWaitOn.count()) {
// We may have added finished procs during the flush call. Since there is no actual work
// we are not submitting the command buffer and may never come back around to submit it.
// Thus we call all current finished procs manually, since the work has technically
// finished.
fCurrentCmdBuffer->callFinishedProcs();
SkASSERT(fDrawables.empty());
fResourceProvider.checkCommandBuffers();
if (finishedProc) {
fResourceProvider.addFinishedProcToActiveCommandBuffers(finishedProc, finishedContext);
}
return true;
}
@ -338,11 +339,6 @@ bool GrVkGpu::submitCommandBuffer(SyncQueue sync, GrGpuFinishedProc finishedProc
fCurrentCmdBuffer->forceSync(this);
}
if (finishedProc) {
// Make sure this is called after closing the current command pool
fResourceProvider.addFinishedProcToActiveCommandBuffers(finishedProc, finishedContext);
}
// We must delete any drawables that had to wait until submit to destroy.
fDrawables.reset();
@ -2099,16 +2095,16 @@ void GrVkGpu::addImageMemoryBarrier(const GrManagedResource* resource,
barrier);
}
bool GrVkGpu::onFinishFlush(GrSurfaceProxy* proxies[], int n,
SkSurface::BackendSurfaceAccess access, const GrFlushInfo& info,
const GrPrepareForExternalIORequests& externalRequests) {
SkASSERT(n >= 0);
SkASSERT(!n || proxies);
void GrVkGpu::prepareSurfacesForBackendAccessAndExternalIO(
GrSurfaceProxy* proxies[], int numProxies, SkSurface::BackendSurfaceAccess access,
const GrPrepareForExternalIORequests& externalRequests) {
SkASSERT(numProxies >= 0);
SkASSERT(!numProxies || proxies);
// Submit the current command buffer to the Queue. Whether we inserted semaphores or not does
// not effect what we do here.
if (n && access == SkSurface::BackendSurfaceAccess::kPresent) {
if (numProxies && access == SkSurface::BackendSurfaceAccess::kPresent) {
GrVkImage* image;
for (int i = 0; i < n; ++i) {
for (int i = 0; i < numProxies; ++i) {
SkASSERT(proxies[i]->isInstantiated());
if (GrTexture* tex = proxies[i]->peekTexture()) {
image = static_cast<GrVkTexture*>(tex);
@ -2170,13 +2166,19 @@ bool GrVkGpu::onFinishFlush(GrSurfaceProxy* proxies[], int n,
vkRT->prepareForExternal(this);
}
}
}
if (info.fFlags & kSyncCpu_GrFlushFlag) {
return this->submitCommandBuffer(kForce_SyncQueue, info.fFinishedProc,
info.fFinishedContext);
void GrVkGpu::addFinishedProc(GrGpuFinishedProc finishedProc,
GrGpuFinishedContext finishedContext) {
SkASSERT(finishedProc);
fResourceProvider.addFinishedProcToActiveCommandBuffers(finishedProc, finishedContext);
}
bool GrVkGpu::onSubmitToGpu(bool syncCpu) {
if (syncCpu) {
return this->submitCommandBuffer(kForce_SyncQueue);
} else {
return this->submitCommandBuffer(kSkip_SyncQueue, info.fFinishedProc,
info.fFinishedContext);
return this->submitCommandBuffer(kSkip_SyncQueue);
}
}

View File

@ -243,8 +243,14 @@ private:
bool onCopySurface(GrSurface* dst, GrSurface* src, const SkIRect& srcRect,
const SkIPoint& dstPoint) override;
bool onFinishFlush(GrSurfaceProxy*[], int, SkSurface::BackendSurfaceAccess access,
const GrFlushInfo&, const GrPrepareForExternalIORequests&) override;
void addFinishedProc(GrGpuFinishedProc finishedProc,
GrGpuFinishedContext finishedContext) override;
void prepareSurfacesForBackendAccessAndExternalIO(
GrSurfaceProxy* proxies[], int numProxies, SkSurface::BackendSurfaceAccess access,
const GrPrepareForExternalIORequests& externalRequests) override;
bool onSubmitToGpu(bool syncCpu) override;
// Ends and submits the current command buffer to the queue and then creates a new command
// buffer and begins it. If sync is set to kForce_SyncQueue, the function will wait for all
@ -252,8 +258,7 @@ private:
// fSemaphoreToSignal, we will add those signal semaphores to the submission of this command
// buffer. If this GrVkGpu object has any semaphores in fSemaphoresToWaitOn, we will add those
// wait semaphores to the submission of this command buffer.
bool submitCommandBuffer(SyncQueue sync, GrGpuFinishedProc finishedProc = nullptr,
GrGpuFinishedContext finishedContext = nullptr);
bool submitCommandBuffer(SyncQueue sync);
void copySurfaceAsCopyImage(GrSurface* dst, GrSurface* src, GrVkImage* dstImage,
GrVkImage* srcImage, const SkIRect& srcRect,

View File

@ -347,10 +347,8 @@ void GrVkResourceProvider::addFinishedProcToActiveCommandBuffers(
sk_sp<GrRefCntedCallback> procRef(new GrRefCntedCallback(finishedProc, finishedContext));
for (int i = 0; i < fActiveCommandPools.count(); ++i) {
GrVkCommandPool* pool = fActiveCommandPools[i];
if (!pool->isOpen()) {
GrVkPrimaryCommandBuffer* buffer = pool->getPrimaryCommandBuffer();
buffer->addFinishedProc(procRef);
}
GrVkPrimaryCommandBuffer* buffer = pool->getPrimaryCommandBuffer();
buffer->addFinishedProc(procRef);
}
}

View File

@ -70,7 +70,7 @@ DEF_GPUTEST_FOR_RENDERING_CONTEXTS(GrOpsTaskFlushCount, reporter, ctxInfo) {
// In total we make 2000 oplists. Our current limit on max oplists between flushes is 100, so we
// should do 20 flushes while executing oplists. Additionally we always do 1 flush at the end of
// executing all oplists. So in total we should see 21 flushes here.
REPORTER_ASSERT(reporter, gpu->stats()->numFinishFlushes() == 21);
REPORTER_ASSERT(reporter, gpu->stats()->numSubmitToGpus() == 21);
SkBitmap readbackBitmap;
readbackBitmap.allocN32Pixels(1000, 1);

View File

@ -1380,8 +1380,8 @@ DEF_GPUTEST_FOR_ALL_CONTEXTS(ImageFlush, reporter, ctxInfo) {
// Flush all the setup work we did above and then make little lambda that reports the flush
// count delta since the last time it was called.
c->flush();
auto numFlushes = [c, flushCnt = c->priv().getGpu()->stats()->numFinishFlushes()]() mutable {
int curr = c->priv().getGpu()->stats()->numFinishFlushes();
auto numFlushes = [c, flushCnt = c->priv().getGpu()->stats()->numSubmitToGpus()]() mutable {
int curr = c->priv().getGpu()->stats()->numSubmitToGpus();
int n = curr - flushCnt;
flushCnt = curr;
return n;

View File

@ -1707,7 +1707,7 @@ DEF_GPUTEST_FOR_MOCK_CONTEXT(OverbudgetFlush, reporter, ctxInfo) {
// Helper that checks whether a flush has occurred between calls.
int baseFlushCount = 0;
auto getFlushCountDelta = [context, &baseFlushCount]() {
int cur = context->priv().getGpu()->stats()->numFinishFlushes();
int cur = context->priv().getGpu()->stats()->numSubmitToGpus();
int delta = cur - baseFlushCount;
baseFlushCount = cur;
return delta;

View File

@ -330,11 +330,8 @@ void basic_transfer_from_test(skiatest::Reporter* reporter, const sk_gpu_test::C
}
++expectedTransferCnt;
GrFlushInfo flushInfo;
flushInfo.fFlags = kSyncCpu_GrFlushFlag;
if (context->priv().caps()->mapBufferFlags() & GrCaps::kAsyncRead_MapFlag) {
gpu->finishFlush(nullptr, 0, SkSurface::BackendSurfaceAccess::kNoAccess, flushInfo,
GrPrepareForExternalIORequests());
gpu->submitToGpu(true);
}
// Copy the transfer buffer contents to a temporary so we can manipulate it.
@ -374,8 +371,7 @@ void basic_transfer_from_test(skiatest::Reporter* reporter, const sk_gpu_test::C
++expectedTransferCnt;
if (context->priv().caps()->mapBufferFlags() & GrCaps::kAsyncRead_MapFlag) {
gpu->finishFlush(nullptr, 0, SkSurface::BackendSurfaceAccess::kNoAccess, flushInfo,
GrPrepareForExternalIORequests());
gpu->submitToGpu(true);
}
map = reinterpret_cast<const char*>(buffer->map());