diff --git a/include/gpu/GrContext.h b/include/gpu/GrContext.h
index 65a0ba7391..10231350cc 100644
--- a/include/gpu/GrContext.h
+++ b/include/gpu/GrContext.h
@@ -256,7 +256,7 @@ public:
      * Call to ensure all drawing to the context has been issued to the underlying 3D API.
      */
     void flush() {
-        this->flush(GrFlushInfo());
+        this->flush(GrFlushInfo(), GrPrepareForExternalIORequests());
     }
 
     /**
@@ -265,9 +265,31 @@ public:
      * If this call returns GrSemaphoresSubmitted::kNo, the GPU backend will not have created or
      * added any semaphores to signal on the GPU. Thus the client should not have the GPU wait on
      * any of the semaphores passed in with the GrFlushInfo. However, any pending commands to the
-     * context will still be flushed.
+     * context will still be flushed. It should be emphasized that a return value of
+     * GrSemaphoresSubmitted::kNo does not mean the flush did not happen. It simply means there were
+     * no semaphores submitted to the GPU. A caller should only take this as a failure if they
+     * passed in semaphores to be submitted.
      */
-    GrSemaphoresSubmitted flush(const GrFlushInfo&);
+    GrSemaphoresSubmitted flush(const GrFlushInfo& info) {
+        return this->flush(info, GrPrepareForExternalIORequests());
+    }
+
+    /**
+     * Call to ensure all drawing to the context has been issued to the underlying 3D API.
+     *
+     * If this call returns GrSemaphoresSubmitted::kNo, the GPU backend will not have created or
+     * added any semaphores to signal on the GPU. Thus the client should not have the GPU wait on
+     * any of the semaphores passed in with the GrFlushInfo. However, any pending commands to the
+     * context will still be flushed. It should be emphasized that a return value of
+     * GrSemaphoresSubmitted::kNo does not mean the flush did not happen. It simply means there were
+     * no semaphores submitted to the GPU. A caller should only take this as a failure if they
+     * passed in semaphores to be submitted.
+     *
+     * If the GrPrepareForExternalIORequests contains valid GPU-backed SkSurfaces or SkImages, Skia
+     * will put the underlying backend objects into a state that is ready for external uses. See the
+     * declaration of GrPrepareForExternalIORequests for more details.
+     */
+    GrSemaphoresSubmitted flush(const GrFlushInfo&, const GrPrepareForExternalIORequests&);
 
     /**
      * Deprecated.
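A hedged sketch of how a caller might act on the GrSemaphoresSubmitted value documented above; the semaphore array, its count, and the fallback path are illustrative assumptions, not part of this change:

    GrFlushInfo flushInfo;
    flushInfo.fNumSemaphores = numSemaphores;        // assumed caller-provided count
    flushInfo.fSignalSemaphores = backendSemaphores; // assumed GrBackendSemaphore array

    GrSemaphoresSubmitted submitted = context->flush(flushInfo);
    if (GrSemaphoresSubmitted::kNo == submitted && numSemaphores > 0) {
        // Per the comment above, the flush itself still happened; kNo only means
        // no semaphores were submitted, so don't make the GPU wait on them.
        handleSemaphoreFallback(); // hypothetical recovery path
    }
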
diff --git a/include/gpu/GrTypes.h b/include/gpu/GrTypes.h
index a8ed1fc94e..35522d8473 100644
--- a/include/gpu/GrTypes.h
+++ b/include/gpu/GrTypes.h
@@ -13,6 +13,8 @@
 #include "include/gpu/GrConfig.h"
 
 class GrBackendSemaphore;
+class SkImage;
+class SkSurface;
 
 ////////////////////////////////////////////////////////////////////////////////
 
@@ -297,4 +299,34 @@ enum class GrSemaphoresSubmitted : bool {
     kYes = true
 };
 
+/**
+ * Array of SkImages and SkSurfaces which Skia will prepare for external use when passed into a
+ * flush call on GrContext. All the SkImages and SkSurfaces must be GPU backed.
+ *
+ * If fPrepareSurfaceForPresent is not nullptr, then it must be an array the size of fNumSurfaces.
+ * Each entry in the array corresponds to the SkSurface at the same index in the fSurfaces array. If
+ * an entry is true, then that surface will be prepared for both external use and present.
+ *
+ * Currently this only has an effect if the backend API is Vulkan. In this case, all the underlying
+ * VkImages associated with the SkImages and SkSurfaces will be transitioned into the VkQueueFamily
+ * in which they were originally wrapped or created. This allows a client to wrap a VkImage from a
+ * queue which is different from the graphics queue and then have Skia transition it back to that
+ * queue without needing to delete the SkImage or SkSurface. If an SkSurface is also flagged to be
+ * prepared for present, then its VkImageLayout will be set to VK_IMAGE_LAYOUT_PRESENT_SRC_KHR if
+ * the VK_KHR_swapchain extension has been enabled for the GrContext and the original queue is not
+ * VK_QUEUE_FAMILY_EXTERNAL or VK_QUEUE_FAMILY_FOREIGN_EXT.
+ *
+ * If an SkSurface or SkImage is used again, it will be transitioned back to the graphics queue and
+ * whatever layout is needed for its use.
+ */
+struct GrPrepareForExternalIORequests {
+    int fNumImages = 0;
+    SkImage** fImages = nullptr;
+    int fNumSurfaces = 0;
+    SkSurface** fSurfaces = nullptr;
+    bool* fPrepareSurfaceForPresent = nullptr;
+
+    bool hasRequests() const { return fNumImages || fNumSurfaces; }
+};
+
 #endif
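As a minimal usage sketch of the new struct (mirroring the test added at the end of this change), a client that wants a wrapped, GPU-backed SkSurface returned to its original queue family and readied for present might write the following; the surface itself is assumed context:

    SkSurface* surfacePtr = surface.get(); // 'surface' is an assumed GPU-backed SkSurface
    bool preparePresent = true;

    GrPrepareForExternalIORequests externalRequests;
    externalRequests.fNumSurfaces = 1;
    externalRequests.fSurfaces = &surfacePtr;
    externalRequests.fPrepareSurfaceForPresent = &preparePresent;

    // On a Vulkan backend this hands the surface's VkImage back to the queue
    // family it was wrapped with and, because preparePresent is true, moves it
    // to VK_IMAGE_LAYOUT_PRESENT_SRC_KHR when the swapchain extension allows.
    context->flush(GrFlushInfo(), externalRequests);
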
diff --git a/src/gpu/GrContext.cpp b/src/gpu/GrContext.cpp
index 19d1f6e8df..e2374e737a 100644
--- a/src/gpu/GrContext.cpp
+++ b/src/gpu/GrContext.cpp
@@ -258,14 +258,15 @@ bool GrContext::wait(int numSemaphores, const GrBackendSemaphore waitSemaphores[
 
 ////////////////////////////////////////////////////////////////////////////////
 
-GrSemaphoresSubmitted GrContext::flush(const GrFlushInfo& info) {
+GrSemaphoresSubmitted GrContext::flush(const GrFlushInfo& info,
+                                       const GrPrepareForExternalIORequests& externalRequests) {
     ASSERT_SINGLE_OWNER
     if (this->abandoned()) {
         return GrSemaphoresSubmitted::kNo;
     }
 
     return this->drawingManager()->flush(nullptr, 0, SkSurface::BackendSurfaceAccess::kNoAccess,
-                                         info);
+                                         info, externalRequests);
 }
 
 ////////////////////////////////////////////////////////////////////////////////
diff --git a/src/gpu/GrDrawingManager.cpp b/src/gpu/GrDrawingManager.cpp
index 208940b35f..b571724c84 100644
--- a/src/gpu/GrDrawingManager.cpp
+++ b/src/gpu/GrDrawingManager.cpp
@@ -194,10 +194,9 @@ void GrDrawingManager::freeGpuResources() {
 }
 
 // MDB TODO: make use of the 'proxy' parameter.
-GrSemaphoresSubmitted GrDrawingManager::flush(GrSurfaceProxy* proxies[],
-                                              int numProxies,
-                                              SkSurface::BackendSurfaceAccess access,
-                                              const GrFlushInfo& info) {
+GrSemaphoresSubmitted GrDrawingManager::flush(GrSurfaceProxy* proxies[], int numProxies,
+                                              SkSurface::BackendSurfaceAccess access, const GrFlushInfo& info,
+                                              const GrPrepareForExternalIORequests& externalRequests) {
     SkASSERT(numProxies >= 0);
     SkASSERT(!numProxies || proxies);
     GR_CREATE_TRACE_MARKER_CONTEXT("GrDrawingManager", "flush", fContext);
@@ -211,7 +210,8 @@ GrSemaphoresSubmitted GrDrawingManager::flush(GrSurfaceProxy* proxies[],
 
     SkDEBUGCODE(this->validate());
 
-    if (kNone_GrFlushFlags == info.fFlags && !info.fNumSemaphores && !info.fFinishedProc) {
+    if (kNone_GrFlushFlags == info.fFlags && !info.fNumSemaphores && !info.fFinishedProc &&
+        !externalRequests.hasRequests()) {
         bool canSkip = numProxies > 0;
         for (int i = 0; i < numProxies && canSkip; ++i) {
             canSkip = !fDAG.isUsed(proxies[i]) && !this->isDDLTarget(proxies[i]);
@@ -357,7 +357,8 @@ GrSemaphoresSubmitted GrDrawingManager::flush(GrSurfaceProxy* proxies[],
             opMemoryPool->isEmpty();
 #endif
 
-    GrSemaphoresSubmitted result = gpu->finishFlush(proxies, numProxies, access, info);
+    GrSemaphoresSubmitted result = gpu->finishFlush(proxies, numProxies, access, info,
+                                                    externalRequests);
 
     flushState.deinstantiateProxyTracker()->deinstantiateAllProxies();
 
@@ -441,7 +442,7 @@ bool GrDrawingManager::executeOpLists(int startIndex, int stopIndex, GrOpFlushSt
             (*numOpListsExecuted)++;
             if (*numOpListsExecuted >= kMaxOpListsBeforeFlush) {
                 flushState->gpu()->finishFlush(nullptr, 0, SkSurface::BackendSurfaceAccess::kNoAccess,
-                                               GrFlushInfo());
+                                               GrFlushInfo(), GrPrepareForExternalIORequests());
                 *numOpListsExecuted = 0;
             }
         }
@@ -459,7 +460,7 @@ bool GrDrawingManager::executeOpLists(int startIndex, int stopIndex, GrOpFlushSt
             (*numOpListsExecuted)++;
             if (*numOpListsExecuted >= kMaxOpListsBeforeFlush) {
                 flushState->gpu()->finishFlush(nullptr, 0, SkSurface::BackendSurfaceAccess::kNoAccess,
-                                               GrFlushInfo());
+                                               GrFlushInfo(), GrPrepareForExternalIORequests());
                 *numOpListsExecuted = 0;
             }
         }
@@ -500,7 +501,8 @@ GrSemaphoresSubmitted GrDrawingManager::flushSurfaces(GrSurfaceProxy* proxies[],
     // TODO: It is important to upgrade the drawingmanager to just flushing the
     // portion of the DAG required by 'proxies' in order to restore some of the
    // semantics of this method.
-    GrSemaphoresSubmitted result = this->flush(proxies, numProxies, access, info);
+    GrSemaphoresSubmitted result = this->flush(proxies, numProxies, access, info,
+                                               GrPrepareForExternalIORequests());
     for (int i = 0; i < numProxies; ++i) {
         if (!proxies[i]->isInstantiated()) {
             return result;
@@ -745,7 +747,8 @@ void GrDrawingManager::flushIfNecessary() {
 
     auto resourceCache = direct->priv().getResourceCache();
     if (resourceCache && resourceCache->requestsFlush()) {
-        this->flush(nullptr, 0, SkSurface::BackendSurfaceAccess::kNoAccess, GrFlushInfo());
+        this->flush(nullptr, 0, SkSurface::BackendSurfaceAccess::kNoAccess, GrFlushInfo(),
+                    GrPrepareForExternalIORequests());
         resourceCache->purgeAsNeeded();
     }
 }
diff --git a/src/gpu/GrDrawingManager.h b/src/gpu/GrDrawingManager.h
index 521ed6606c..bc130df335 100644
--- a/src/gpu/GrDrawingManager.h
+++ b/src/gpu/GrDrawingManager.h
@@ -156,7 +156,8 @@ private:
     GrSemaphoresSubmitted flush(GrSurfaceProxy* proxies[],
                                 int numProxies,
                                 SkSurface::BackendSurfaceAccess access,
-                                const GrFlushInfo&);
+                                const GrFlushInfo&,
+                                const GrPrepareForExternalIORequests&);
 
     SkDEBUGCODE(void validate() const);
diff --git a/src/gpu/GrGpu.cpp b/src/gpu/GrGpu.cpp
index 479d733ab9..f5009443f5 100644
--- a/src/gpu/GrGpu.cpp
+++ b/src/gpu/GrGpu.cpp
@@ -415,7 +415,8 @@ int GrGpu::findOrAssignSamplePatternKey(GrRenderTarget* renderTarget) {
 
 GrSemaphoresSubmitted GrGpu::finishFlush(GrSurfaceProxy* proxies[], int n,
                                          SkSurface::BackendSurfaceAccess access,
-                                         const GrFlushInfo& info) {
+                                         const GrFlushInfo& info,
+                                         const GrPrepareForExternalIORequests& externalRequests) {
     this->stats()->incNumFinishFlushes();
     GrResourceProvider* resourceProvider = fContext->priv().resourceProvider();
 
@@ -437,7 +438,7 @@ GrSemaphoresSubmitted GrGpu::finishFlush(GrSurfaceProxy* proxies[],
             }
         }
     }
-    this->onFinishFlush(proxies, n, access, info);
+    this->onFinishFlush(proxies, n, access, info, externalRequests);
     return this->caps()->semaphoreSupport() ? GrSemaphoresSubmitted::kYes
                                             : GrSemaphoresSubmitted::kNo;
 }
diff --git a/src/gpu/GrGpu.h b/src/gpu/GrGpu.h
index be4a9bf60e..e40e80bcc6 100644
--- a/src/gpu/GrGpu.h
+++ b/src/gpu/GrGpu.h
@@ -304,7 +304,8 @@ public:
     // insert any numSemaphore semaphores on the gpu and set the backendSemaphores to match the
     // inserted semaphores.
     GrSemaphoresSubmitted finishFlush(GrSurfaceProxy*[], int n,
-                                      SkSurface::BackendSurfaceAccess access, const GrFlushInfo&);
+                                      SkSurface::BackendSurfaceAccess access, const GrFlushInfo&,
+                                      const GrPrepareForExternalIORequests&);
 
     virtual void submit(GrGpuCommandBuffer*) = 0;
 
@@ -548,7 +549,7 @@ private:
                                bool canDiscardOutsideDstRect) = 0;
 
     virtual void onFinishFlush(GrSurfaceProxy*[], int n, SkSurface::BackendSurfaceAccess access,
-                               const GrFlushInfo&) = 0;
+                               const GrFlushInfo&, const GrPrepareForExternalIORequests&) = 0;
 
 #ifdef SK_ENABLE_DUMP_GPU
     virtual void onDumpJSON(SkJSONWriter*) const {}
diff --git a/src/gpu/gl/GrGLGpu.cpp b/src/gpu/gl/GrGLGpu.cpp
index f41733c74f..130f5ae43d 100644
--- a/src/gpu/gl/GrGLGpu.cpp
+++ b/src/gpu/gl/GrGLGpu.cpp
@@ -4275,7 +4275,7 @@ GrGLAttribArrayState* GrGLGpu::HWVertexArrayState::bindInternalVertexArray(GrGLG
 }
 
 void GrGLGpu::onFinishFlush(GrSurfaceProxy*[], int, SkSurface::BackendSurfaceAccess access,
-                            const GrFlushInfo& info) {
+                            const GrFlushInfo& info, const GrPrepareForExternalIORequests&) {
     // If we inserted semaphores during the flush, we need to call GLFlush.
     bool insertedSemaphore = info.fNumSemaphores > 0 && this->caps()->semaphoreSupport();
     // We call finish if the client told us to sync or if we have a finished proc but don't support
diff --git a/src/gpu/gl/GrGLGpu.h b/src/gpu/gl/GrGLGpu.h
index fde6d228d8..a93b42b82d 100644
--- a/src/gpu/gl/GrGLGpu.h
+++ b/src/gpu/gl/GrGLGpu.h
@@ -296,7 +296,7 @@ private:
     void flushBlend(const GrXferProcessor::BlendInfo& blendInfo, const GrSwizzle&);
 
     void onFinishFlush(GrSurfaceProxy*[], int n, SkSurface::BackendSurfaceAccess access,
-                       const GrFlushInfo&) override;
+                       const GrFlushInfo&, const GrPrepareForExternalIORequests&) override;
 
     bool waitSync(GrGLsync, uint64_t timeout, bool flush);
diff --git a/src/gpu/mock/GrMockGpu.h b/src/gpu/mock/GrMockGpu.h
index 5df9c0276f..85938a45a5 100644
--- a/src/gpu/mock/GrMockGpu.h
+++ b/src/gpu/mock/GrMockGpu.h
@@ -112,7 +112,7 @@ private:
     void onResolveRenderTarget(GrRenderTarget* target) override { return; }
 
     void onFinishFlush(GrSurfaceProxy*[], int n, SkSurface::BackendSurfaceAccess access,
-                       const GrFlushInfo& info) override {
+                       const GrFlushInfo& info, const GrPrepareForExternalIORequests&) override {
         if (info.fFinishedProc) {
             info.fFinishedProc(info.fFinishedContext);
         }
diff --git a/src/gpu/mtl/GrMtlGpu.h b/src/gpu/mtl/GrMtlGpu.h
index 0eed4ed9fa..f5003778cf 100644
--- a/src/gpu/mtl/GrMtlGpu.h
+++ b/src/gpu/mtl/GrMtlGpu.h
@@ -185,7 +185,7 @@ private:
     void onResolveRenderTarget(GrRenderTarget* target) override { return; }
 
     void onFinishFlush(GrSurfaceProxy*[], int n, SkSurface::BackendSurfaceAccess access,
-                       const GrFlushInfo& info) override {
+                       const GrFlushInfo& info, const GrPrepareForExternalIORequests&) override {
         if (info.fFlags & kSyncCpu_GrFlushFlag) {
             this->submitCommandBuffer(kForce_SyncQueue);
             if (info.fFinishedProc) {
diff --git a/src/gpu/vk/GrVkGpu.cpp b/src/gpu/vk/GrVkGpu.cpp
index b1c99791cb..5aca28c17d 100644
--- a/src/gpu/vk/GrVkGpu.cpp
+++ b/src/gpu/vk/GrVkGpu.cpp
@@ -18,8 +18,10 @@
 #include "src/gpu/GrGpuResourceCacheAccess.h"
 #include "src/gpu/GrMesh.h"
 #include "src/gpu/GrPipeline.h"
+#include "src/gpu/GrRenderTargetContext.h"
 #include "src/gpu/GrRenderTargetPriv.h"
 #include "src/gpu/GrTexturePriv.h"
+#include "src/gpu/SkGpuDevice.h"
 #include "src/gpu/vk/GrVkAMDMemoryAllocator.h"
 #include "src/gpu/vk/GrVkCommandBuffer.h"
 #include "src/gpu/vk/GrVkCommandPool.h"
@@ -37,6 +39,8 @@
 #include "src/gpu/vk/GrVkTextureRenderTarget.h"
 #include "src/gpu/vk/GrVkTransferBuffer.h"
 #include "src/gpu/vk/GrVkVertexBuffer.h"
+#include "src/image/SkImage_Gpu.h"
+#include "src/image/SkSurface_Gpu.h"
 #include "src/sksl/SkSLCompiler.h"
 
 #include "include/gpu/vk/GrVkExtensions.h"
@@ -1890,7 +1894,8 @@ void GrVkGpu::addImageMemoryBarrier(const GrVkResource* resource,
 }
 
 void GrVkGpu::onFinishFlush(GrSurfaceProxy* proxies[], int n,
-                            SkSurface::BackendSurfaceAccess access, const GrFlushInfo& info) {
+                            SkSurface::BackendSurfaceAccess access, const GrFlushInfo& info,
+                            const GrPrepareForExternalIORequests& externalRequests) {
     SkASSERT(n >= 0);
     SkASSERT(!n || proxies);
     // Submit the current command buffer to the Queue. Whether we inserted semaphores or not does
@@ -1909,6 +1914,57 @@ void GrVkGpu::onFinishFlush(GrSurfaceProxy* proxies[], int n,
             image->prepareForPresent(this);
         }
     }
+
+    // Handle requests for preparing for external IO
+    for (int i = 0; i < externalRequests.fNumImages; ++i) {
+        SkImage* image = externalRequests.fImages[i];
+        if (!image->isTextureBacked()) {
+            continue;
+        }
+        SkImage_GpuBase* gpuImage = static_cast<SkImage_GpuBase*>(as_IB(image));
+        sk_sp<GrTextureProxy> proxy = gpuImage->asTextureProxyRef(this->getContext());
+        SkASSERT(proxy);
+
+        if (!proxy->isInstantiated()) {
+            auto resourceProvider = this->getContext()->priv().resourceProvider();
+            if (!proxy->instantiate(resourceProvider)) {
+                continue;
+            }
+        }
+
+        GrTexture* tex = proxy->peekTexture();
+        if (!tex) {
+            continue;
+        }
+        GrVkTexture* vkTex = static_cast<GrVkTexture*>(tex);
+        vkTex->prepareForExternal(this);
+    }
+    for (int i = 0; i < externalRequests.fNumSurfaces; ++i) {
+        SkSurface* surface = externalRequests.fSurfaces[i];
+        if (!surface->getCanvas()->getGrContext()) {
+            continue;
+        }
+        SkSurface_Gpu* gpuSurface = static_cast<SkSurface_Gpu*>(surface);
+        auto* rtc = gpuSurface->getDevice()->accessRenderTargetContext();
+        sk_sp<GrRenderTargetProxy> proxy = rtc->asRenderTargetProxyRef();
+        if (!proxy->isInstantiated()) {
+            auto resourceProvider = this->getContext()->priv().resourceProvider();
+            if (!proxy->instantiate(resourceProvider)) {
+                continue;
+            }
+        }
+
+        GrRenderTarget* rt = proxy->peekRenderTarget();
+        SkASSERT(rt);
+        GrVkRenderTarget* vkRT = static_cast<GrVkRenderTarget*>(rt);
+        if (externalRequests.fPrepareSurfaceForPresent &&
+            externalRequests.fPrepareSurfaceForPresent[i]) {
+            vkRT->prepareForPresent(this);
+        } else {
+            vkRT->prepareForExternal(this);
+        }
+    }
+
     if (info.fFlags & kSyncCpu_GrFlushFlag) {
         this->submitCommandBuffer(kForce_SyncQueue, info.fFinishedProc, info.fFinishedContext);
     } else {
diff --git a/src/gpu/vk/GrVkGpu.h b/src/gpu/vk/GrVkGpu.h
index 9446467f32..3ca94c419a 100644
--- a/src/gpu/vk/GrVkGpu.h
+++ b/src/gpu/vk/GrVkGpu.h
@@ -226,7 +226,7 @@ private:
                        const SkIPoint& dstPoint, bool canDiscardOutsideDstRect) override;
 
     void onFinishFlush(GrSurfaceProxy*[], int, SkSurface::BackendSurfaceAccess access,
-                       const GrFlushInfo&) override;
+                       const GrFlushInfo&, const GrPrepareForExternalIORequests&) override;
 
     // Ends and submits the current command buffer to the queue and then creates a new command
     // buffer and begins it. If sync is set to kForce_SyncQueue, the function will wait for all
diff --git a/src/gpu/vk/GrVkImage.cpp b/src/gpu/vk/GrVkImage.cpp
index e48a48da15..c7676bb5c1 100644
--- a/src/gpu/vk/GrVkImage.cpp
+++ b/src/gpu/vk/GrVkImage.cpp
@@ -231,6 +231,11 @@ void GrVkImage::prepareForPresent(GrVkGpu* gpu) {
     this->setImageLayout(gpu, layout, 0, VK_PIPELINE_STAGE_BOTTOM_OF_PIPE_BIT, false, true);
 }
 
+void GrVkImage::prepareForExternal(GrVkGpu* gpu) {
+    this->setImageLayout(gpu, this->currentLayout(), 0, VK_PIPELINE_STAGE_BOTTOM_OF_PIPE_BIT, false,
+                         true);
+}
+
 void GrVkImage::releaseImage(GrVkGpu* gpu) {
     if (fInfo.fCurrentQueueFamily != fInitialQueueFamily) {
         // The Vulkan spec is vague on what to put for the dstStageMask here. The spec for image
diff --git a/src/gpu/vk/GrVkImage.h b/src/gpu/vk/GrVkImage.h
index 35fe357cb9..ec59be6220 100644
--- a/src/gpu/vk/GrVkImage.h
+++ b/src/gpu/vk/GrVkImage.h
@@ -98,6 +98,9 @@ public:
     // family is not external or foreign.
     void prepareForPresent(GrVkGpu* gpu);
 
+    // Returns the image to its original queue family.
+    void prepareForExternal(GrVkGpu* gpu);
+
     // This simply updates our tracking of the image layout and does not actually do any gpu work.
     // This is only used for mip map generation where we are manually changing the layouts as we
     // blit each layer, and then at the end need to update our tracking.
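Conceptually, prepareForExternal is a queue family ownership release that leaves the image layout unchanged. A rough sketch of the kind of barrier this corresponds to is below; it is not Skia's actual code path (Skia routes through setImageLayout, as in GrVkImage.cpp above), and cmdBuffer, image, currentLayout, and the queue indices are placeholders:

    VkImageMemoryBarrier barrier = {};
    barrier.sType = VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER;
    barrier.srcAccessMask = 0;
    barrier.dstAccessMask = 0;
    barrier.oldLayout = currentLayout;                 // layout is preserved
    barrier.newLayout = currentLayout;
    barrier.srcQueueFamilyIndex = graphicsQueueIndex;  // placeholder: Skia's queue
    barrier.dstQueueFamilyIndex = initialQueueFamily;  // e.g. VK_QUEUE_FAMILY_EXTERNAL
    barrier.image = image;                             // placeholder VkImage
    barrier.subresourceRange = {VK_IMAGE_ASPECT_COLOR_BIT, 0, 1, 0, 1};

    // Conservative stage masks for the sketch; the dst stage matches the
    // VK_PIPELINE_STAGE_BOTTOM_OF_PIPE_BIT that setImageLayout is given above.
    vkCmdPipelineBarrier(cmdBuffer, VK_PIPELINE_STAGE_ALL_COMMANDS_BIT,
                         VK_PIPELINE_STAGE_BOTTOM_OF_PIPE_BIT, 0, 0, nullptr, 0, nullptr,
                         1, &barrier);
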
+#include "src/image/SkSurface_Gpu.h" DEF_GPUTEST_FOR_VULKAN_CONTEXT(VkImageLayoutTest, reporter, ctxInfo) { GrContext* context = ctxInfo.grContext(); @@ -203,6 +206,127 @@ DEF_GPUTEST_FOR_VULKAN_CONTEXT(VkReleaseExternalQueueTest, reporter, ctxInfo) { } } +// Test to make sure we transition to the original queue when requests for prepareforexternalio are +// in flush calls +DEF_GPUTEST_FOR_VULKAN_CONTEXT(VkPrepareForExternalIOQueueTransitionTest, reporter, ctxInfo) { + GrContext* context = ctxInfo.grContext(); + GrVkGpu* gpu = static_cast(context->priv().getGpu()); + if (!gpu->vkCaps().supportsExternalMemory()) { + return; + } + + for (bool useSurface : {false, true}) { + for (bool preparePresent : {false, true}) { + if (!useSurface && preparePresent) { + // We don't set textures to present + continue; + } + GrBackendTexture backendTex = gpu->createTestingOnlyBackendTexture( + nullptr, 4, 4, GrColorType::kRGBA_8888, useSurface, GrMipMapped::kNo); + + // Make a backend texture with an external queue family and general layout. + GrVkImageInfo vkInfo; + if (!backendTex.getVkImageInfo(&vkInfo)) { + return; + } + + // We can't actually make an external texture in our test. However, we lie and say it is + // and then will manually go and swap the queue to the graphics queue once we wrap it. + if (preparePresent) { + // We don't transition to present to things that are going to external for foreign + // queues. + vkInfo.fCurrentQueueFamily = gpu->queueIndex(); + } else { + vkInfo.fCurrentQueueFamily = VK_QUEUE_FAMILY_EXTERNAL; + } + + GrBackendTexture vkExtTex(1, 1, vkInfo); + + sk_sp image; + sk_sp surface; + GrTexture* texture; + if (useSurface) { + surface = SkSurface::MakeFromBackendTexture(context, vkExtTex, + kTopLeft_GrSurfaceOrigin, 0, kRGBA_8888_SkColorType, nullptr, nullptr); + REPORTER_ASSERT(reporter, surface.get()); + if (!surface) { + continue; + } + SkSurface_Gpu* gpuSurface = static_cast(surface.get()); + auto* rtc = gpuSurface->getDevice()->accessRenderTargetContext(); + texture = rtc->asTextureProxy()->peekTexture(); + } else { + image = SkImage::MakeFromTexture(context, vkExtTex, kTopLeft_GrSurfaceOrigin, + kRGBA_8888_SkColorType, kPremul_SkAlphaType, nullptr, nullptr, nullptr); + + REPORTER_ASSERT(reporter, image.get()); + if (!image) { + continue; + } + + texture = image->getTexture(); + } + + REPORTER_ASSERT(reporter, texture); + GrVkTexture* vkTex = static_cast(texture); + + // Testing helper so we claim that we don't need to transition from our fake external + // queue first. 
+ vkTex->setCurrentQueueFamilyToGraphicsQueue(gpu); + + GrBackendTexture newBackendTexture; + if (useSurface) { + newBackendTexture = surface->getBackendTexture( + SkSurface::kFlushRead_TextureHandleAccess); + } else { + newBackendTexture = image->getBackendTexture(false); + } + GrVkImageInfo newVkInfo; + REPORTER_ASSERT(reporter, newBackendTexture.getVkImageInfo(&newVkInfo)); + REPORTER_ASSERT(reporter, newVkInfo.fCurrentQueueFamily == gpu->queueIndex()); + VkImageLayout oldLayout = newVkInfo.fImageLayout; + + GrPrepareForExternalIORequests externalRequests; + SkImage* imagePtr; + SkSurface* surfacePtr; + if (useSurface) { + externalRequests.fNumSurfaces = 1; + surfacePtr = surface.get(); + externalRequests.fSurfaces = &surfacePtr; + externalRequests.fPrepareSurfaceForPresent = &preparePresent; + } else { + externalRequests.fNumImages = 1; + imagePtr = image.get(); + externalRequests.fImages = &imagePtr; + + } + context->flush(GrFlushInfo(), externalRequests); + + if (useSurface) { + newBackendTexture = surface->getBackendTexture( + SkSurface::kFlushRead_TextureHandleAccess); + } else { + newBackendTexture = image->getBackendTexture(false); + } + REPORTER_ASSERT(reporter, newBackendTexture.getVkImageInfo(&newVkInfo)); + if (preparePresent) { + REPORTER_ASSERT(reporter, newVkInfo.fCurrentQueueFamily == gpu->queueIndex()); + REPORTER_ASSERT(reporter, + newVkInfo.fImageLayout == VK_IMAGE_LAYOUT_PRESENT_SRC_KHR); + } else { + REPORTER_ASSERT(reporter, newVkInfo.fCurrentQueueFamily == VK_QUEUE_FAMILY_EXTERNAL); + REPORTER_ASSERT(reporter, newVkInfo.fImageLayout == oldLayout); + } + + GrFlushInfo flushInfo; + flushInfo.fFlags = kSyncCpu_GrFlushFlag; + context->flush(flushInfo); + gpu->deleteTestingOnlyBackendTexture(backendTex); + } + } +} + + // Test to make sure we transition from the EXTERNAL queue even when no layout transition is needed. DEF_GPUTEST_FOR_VULKAN_CONTEXT(VkTransitionExternalQueueTest, reporter, ctxInfo) { GrContext* context = ctxInfo.grContext();