Add option to GrContext::flush for GrPrepareForExternalIORequests.

This will allow clients to specify a set of SkImages and SkSurfaces that they
want transitioned to a state which they can use for their own external IO.
Specifically for Vulkan this will move the surfaces and images back to the queues
in which they were originally wrapped or created.

Bug: skia:8802
Change-Id: I6a76c4c4a333a8e752632d349899f5fd9921329d
Reviewed-on: https://skia-review.googlesource.com/c/skia/+/210460
Commit-Queue: Greg Daniel <egdaniel@google.com>
Reviewed-by: Brian Salomon <bsalomon@google.com>
This commit is contained in:
Greg Daniel 2019-05-09 14:04:20 -04:00 committed by Skia Commit-Bot
parent c04cadbb6e
commit 797efcac71
19 changed files with 284 additions and 31 deletions

View File

@ -256,7 +256,7 @@ public:
* Call to ensure all drawing to the context has been issued to the underlying 3D API.
*/
void flush() {
this->flush(GrFlushInfo());
this->flush(GrFlushInfo(), GrPrepareForExternalIORequests());
}
/**
@ -265,9 +265,31 @@ public:
* If this call returns GrSemaphoresSubmitted::kNo, the GPU backend will not have created or
* added any semaphores to signal on the GPU. Thus the client should not have the GPU wait on
* any of the semaphores passed in with the GrFlushInfo. However, any pending commands to the
* context will still be flushed.
* context will still be flushed. It should be emphasized that a return value of
* GrSemaphoresSubmitted::kNo does not mean the flush did not happen. It simply means there were
* no semaphores submitted to the GPU. A caller should only take this as a failure if they
* passed in semaphores to be submitted.
*/
GrSemaphoresSubmitted flush(const GrFlushInfo&);
GrSemaphoresSubmitted flush(const GrFlushInfo& info) {
return this->flush(info, GrPrepareForExternalIORequests());
}
/**
* Call to ensure all drawing to the context has been issued to the underlying 3D API.
*
* If this call returns GrSemaphoresSubmitted::kNo, the GPU backend will not have created or
* added any semaphores to signal on the GPU. Thus the client should not have the GPU wait on
* any of the semaphores passed in with the GrFlushInfo. However, any pending commands to the
* context will still be flushed. It should be emphasized that a return value of
* GrSemaphoresSubmitted::kNo does not mean the flush did not happen. It simply means there were
* no semaphores submitted to the GPU. A caller should only take this as a failure if they
* passed in semaphores to be submitted.
*
* If the GrPrepareForExternalIORequests contains valid gpu backed SkSurfaces or SkImages, Skia
* will put the underlying backend objects into a state that is ready for external uses. See
* declaration of GrPrepareForExternalIORequests for more details.
*/
GrSemaphoresSubmitted flush(const GrFlushInfo&, const GrPrepareForExternalIORequests&);
/**
* Deprecated.

View File

@ -13,6 +13,8 @@
#include "include/gpu/GrConfig.h"
class GrBackendSemaphore;
class SkImage;
class SkSurface;
////////////////////////////////////////////////////////////////////////////////
@ -297,4 +299,34 @@ enum class GrSemaphoresSubmitted : bool {
kYes = true
};
/**
* Array of SkImages and SkSurfaces which Skia will prepare for external use when passed into a
* flush call on GrContext. All the SkImages and SkSurfaces must be GPU backed.
*
* If fPrepareSurfaceForPresent is not nullptr, then it must be an array the size of fNumSurfaces.
* Each entry in the array corresponds to the SkSurface at the same index in the fSurfaces array. If
* an entry is true, then that surface will be prepared for both external use and present.
*
* Currently this only has an effect if the backend API is Vulkan. In this case, all the underlying
* VkImages associated with the SkImages and SkSurfaces will be transitioned into the VkQueueFamily
* in which they were originally wrapped or created. This allows a client to wrap a VkImage
* from a queue which is different from the graphics queue and then have Skia transition it back to
* that queue without needing to delete the SkImage or SkSurface. If an SkSurface is also
* flagged to be prepared for present, then its VkImageLayout will be set to
* VK_IMAGE_LAYOUT_PRESENT_SRC_KHR if the VK_KHR_swapchain extension has been enabled for the
* GrContext and the original queue is not VK_QUEUE_FAMILY_EXTERNAL or VK_QUEUE_FAMILY_FOREIGN_EXT.
*
* If an SkSurface or SkImage is used again, it will be transitioned back to the graphics queue and
* whatever layout is needed for its use.
*/
struct GrPrepareForExternalIORequests {
int fNumImages = 0;
SkImage** fImages = nullptr;
int fNumSurfaces = 0;
SkSurface** fSurfaces = nullptr;
bool* fPrepareSurfaceForPresent = nullptr;
bool hasRequests() const { return fNumImages || fNumSurfaces; }
};
#endif

View File

@ -258,14 +258,15 @@ bool GrContext::wait(int numSemaphores, const GrBackendSemaphore waitSemaphores[
////////////////////////////////////////////////////////////////////////////////
GrSemaphoresSubmitted GrContext::flush(const GrFlushInfo& info) {
GrSemaphoresSubmitted GrContext::flush(const GrFlushInfo& info,
const GrPrepareForExternalIORequests& externalRequests) {
ASSERT_SINGLE_OWNER
if (this->abandoned()) {
return GrSemaphoresSubmitted::kNo;
}
return this->drawingManager()->flush(nullptr, 0, SkSurface::BackendSurfaceAccess::kNoAccess,
info);
info, externalRequests);
}
////////////////////////////////////////////////////////////////////////////////

View File

@ -194,10 +194,9 @@ void GrDrawingManager::freeGpuResources() {
}
// MDB TODO: make use of the 'proxy' parameter.
GrSemaphoresSubmitted GrDrawingManager::flush(GrSurfaceProxy* proxies[],
int numProxies,
SkSurface::BackendSurfaceAccess access,
const GrFlushInfo& info) {
GrSemaphoresSubmitted GrDrawingManager::flush(GrSurfaceProxy* proxies[], int numProxies,
SkSurface::BackendSurfaceAccess access, const GrFlushInfo& info,
const GrPrepareForExternalIORequests& externalRequests) {
SkASSERT(numProxies >= 0);
SkASSERT(!numProxies || proxies);
GR_CREATE_TRACE_MARKER_CONTEXT("GrDrawingManager", "flush", fContext);
@ -211,7 +210,8 @@ GrSemaphoresSubmitted GrDrawingManager::flush(GrSurfaceProxy* proxies[],
SkDEBUGCODE(this->validate());
if (kNone_GrFlushFlags == info.fFlags && !info.fNumSemaphores && !info.fFinishedProc) {
if (kNone_GrFlushFlags == info.fFlags && !info.fNumSemaphores && !info.fFinishedProc &&
!externalRequests.hasRequests()) {
bool canSkip = numProxies > 0;
for (int i = 0; i < numProxies && canSkip; ++i) {
canSkip = !fDAG.isUsed(proxies[i]) && !this->isDDLTarget(proxies[i]);
@ -357,7 +357,8 @@ GrSemaphoresSubmitted GrDrawingManager::flush(GrSurfaceProxy* proxies[],
opMemoryPool->isEmpty();
#endif
GrSemaphoresSubmitted result = gpu->finishFlush(proxies, numProxies, access, info);
GrSemaphoresSubmitted result = gpu->finishFlush(proxies, numProxies, access, info,
externalRequests);
flushState.deinstantiateProxyTracker()->deinstantiateAllProxies();
@ -441,7 +442,7 @@ bool GrDrawingManager::executeOpLists(int startIndex, int stopIndex, GrOpFlushSt
(*numOpListsExecuted)++;
if (*numOpListsExecuted >= kMaxOpListsBeforeFlush) {
flushState->gpu()->finishFlush(nullptr, 0, SkSurface::BackendSurfaceAccess::kNoAccess,
GrFlushInfo());
GrFlushInfo(), GrPrepareForExternalIORequests());
*numOpListsExecuted = 0;
}
}
@ -459,7 +460,7 @@ bool GrDrawingManager::executeOpLists(int startIndex, int stopIndex, GrOpFlushSt
(*numOpListsExecuted)++;
if (*numOpListsExecuted >= kMaxOpListsBeforeFlush) {
flushState->gpu()->finishFlush(nullptr, 0, SkSurface::BackendSurfaceAccess::kNoAccess,
GrFlushInfo());
GrFlushInfo(), GrPrepareForExternalIORequests());
*numOpListsExecuted = 0;
}
}
@ -500,7 +501,8 @@ GrSemaphoresSubmitted GrDrawingManager::flushSurfaces(GrSurfaceProxy* proxies[],
// TODO: It is important to upgrade the drawingmanager to just flushing the
// portion of the DAG required by 'proxies' in order to restore some of the
// semantics of this method.
GrSemaphoresSubmitted result = this->flush(proxies, numProxies, access, info);
GrSemaphoresSubmitted result = this->flush(proxies, numProxies, access, info,
GrPrepareForExternalIORequests());
for (int i = 0; i < numProxies; ++i) {
if (!proxies[i]->isInstantiated()) {
return result;
@ -745,7 +747,8 @@ void GrDrawingManager::flushIfNecessary() {
auto resourceCache = direct->priv().getResourceCache();
if (resourceCache && resourceCache->requestsFlush()) {
this->flush(nullptr, 0, SkSurface::BackendSurfaceAccess::kNoAccess, GrFlushInfo());
this->flush(nullptr, 0, SkSurface::BackendSurfaceAccess::kNoAccess, GrFlushInfo(),
GrPrepareForExternalIORequests());
resourceCache->purgeAsNeeded();
}
}

View File

@ -156,7 +156,8 @@ private:
GrSemaphoresSubmitted flush(GrSurfaceProxy* proxies[],
int numProxies,
SkSurface::BackendSurfaceAccess access,
const GrFlushInfo&);
const GrFlushInfo&,
const GrPrepareForExternalIORequests&);
SkDEBUGCODE(void validate() const);

View File

@ -415,7 +415,8 @@ int GrGpu::findOrAssignSamplePatternKey(GrRenderTarget* renderTarget) {
GrSemaphoresSubmitted GrGpu::finishFlush(GrSurfaceProxy* proxies[],
int n,
SkSurface::BackendSurfaceAccess access,
const GrFlushInfo& info) {
const GrFlushInfo& info,
const GrPrepareForExternalIORequests& externalRequests) {
this->stats()->incNumFinishFlushes();
GrResourceProvider* resourceProvider = fContext->priv().resourceProvider();
@ -437,7 +438,7 @@ GrSemaphoresSubmitted GrGpu::finishFlush(GrSurfaceProxy* proxies[],
}
}
}
this->onFinishFlush(proxies, n, access, info);
this->onFinishFlush(proxies, n, access, info, externalRequests);
return this->caps()->semaphoreSupport() ? GrSemaphoresSubmitted::kYes
: GrSemaphoresSubmitted::kNo;
}

View File

@ -304,7 +304,8 @@ public:
// insert any numSemaphore semaphores on the gpu and set the backendSemaphores to match the
// inserted semaphores.
GrSemaphoresSubmitted finishFlush(GrSurfaceProxy*[], int n,
SkSurface::BackendSurfaceAccess access, const GrFlushInfo&);
SkSurface::BackendSurfaceAccess access, const GrFlushInfo&,
const GrPrepareForExternalIORequests&);
virtual void submit(GrGpuCommandBuffer*) = 0;
@ -548,7 +549,7 @@ private:
bool canDiscardOutsideDstRect) = 0;
virtual void onFinishFlush(GrSurfaceProxy*[], int n, SkSurface::BackendSurfaceAccess access,
const GrFlushInfo&) = 0;
const GrFlushInfo&, const GrPrepareForExternalIORequests&) = 0;
#ifdef SK_ENABLE_DUMP_GPU
virtual void onDumpJSON(SkJSONWriter*) const {}

View File

@ -4275,7 +4275,7 @@ GrGLAttribArrayState* GrGLGpu::HWVertexArrayState::bindInternalVertexArray(GrGLG
}
void GrGLGpu::onFinishFlush(GrSurfaceProxy*[], int, SkSurface::BackendSurfaceAccess access,
const GrFlushInfo& info) {
const GrFlushInfo& info, const GrPrepareForExternalIORequests&) {
// If we inserted semaphores during the flush, we need to call GLFlush.
bool insertedSemaphore = info.fNumSemaphores > 0 && this->caps()->semaphoreSupport();
// We call finish if the client told us to sync or if we have a finished proc but don't support

View File

@ -296,7 +296,7 @@ private:
void flushBlend(const GrXferProcessor::BlendInfo& blendInfo, const GrSwizzle&);
void onFinishFlush(GrSurfaceProxy*[], int n, SkSurface::BackendSurfaceAccess access,
const GrFlushInfo&) override;
const GrFlushInfo&, const GrPrepareForExternalIORequests&) override;
bool waitSync(GrGLsync, uint64_t timeout, bool flush);

View File

@ -112,7 +112,7 @@ private:
void onResolveRenderTarget(GrRenderTarget* target) override { return; }
void onFinishFlush(GrSurfaceProxy*[], int n, SkSurface::BackendSurfaceAccess access,
const GrFlushInfo& info) override {
const GrFlushInfo& info, const GrPrepareForExternalIORequests&) override {
if (info.fFinishedProc) {
info.fFinishedProc(info.fFinishedContext);
}

View File

@ -185,7 +185,7 @@ private:
void onResolveRenderTarget(GrRenderTarget* target) override { return; }
void onFinishFlush(GrSurfaceProxy*[], int n, SkSurface::BackendSurfaceAccess access,
const GrFlushInfo& info) override {
const GrFlushInfo& info, const GrPrepareForExternalIORequests&) override {
if (info.fFlags & kSyncCpu_GrFlushFlag) {
this->submitCommandBuffer(kForce_SyncQueue);
if (info.fFinishedProc) {

View File

@ -18,8 +18,10 @@
#include "src/gpu/GrGpuResourceCacheAccess.h"
#include "src/gpu/GrMesh.h"
#include "src/gpu/GrPipeline.h"
#include "src/gpu/GrRenderTargetContext.h"
#include "src/gpu/GrRenderTargetPriv.h"
#include "src/gpu/GrTexturePriv.h"
#include "src/gpu/SkGpuDevice.h"
#include "src/gpu/vk/GrVkAMDMemoryAllocator.h"
#include "src/gpu/vk/GrVkCommandBuffer.h"
#include "src/gpu/vk/GrVkCommandPool.h"
@ -37,6 +39,8 @@
#include "src/gpu/vk/GrVkTextureRenderTarget.h"
#include "src/gpu/vk/GrVkTransferBuffer.h"
#include "src/gpu/vk/GrVkVertexBuffer.h"
#include "src/image/SkImage_Gpu.h"
#include "src/image/SkSurface_Gpu.h"
#include "src/sksl/SkSLCompiler.h"
#include "include/gpu/vk/GrVkExtensions.h"
@ -1890,7 +1894,8 @@ void GrVkGpu::addImageMemoryBarrier(const GrVkResource* resource,
}
void GrVkGpu::onFinishFlush(GrSurfaceProxy* proxies[], int n,
SkSurface::BackendSurfaceAccess access, const GrFlushInfo& info) {
SkSurface::BackendSurfaceAccess access, const GrFlushInfo& info,
const GrPrepareForExternalIORequests& externalRequests) {
SkASSERT(n >= 0);
SkASSERT(!n || proxies);
// Submit the current command buffer to the Queue. Whether we inserted semaphores or not does
@ -1909,6 +1914,57 @@ void GrVkGpu::onFinishFlush(GrSurfaceProxy* proxies[], int n,
image->prepareForPresent(this);
}
}
// Handle requests for preparing for external IO
for (int i = 0; i < externalRequests.fNumImages; ++i) {
SkImage* image = externalRequests.fImages[i];
if (!image->isTextureBacked()) {
continue;
}
SkImage_GpuBase* gpuImage = static_cast<SkImage_GpuBase*>(as_IB(image));
sk_sp<GrTextureProxy> proxy = gpuImage->asTextureProxyRef(this->getContext());
SkASSERT(proxy);
if (!proxy->isInstantiated()) {
auto resourceProvider = this->getContext()->priv().resourceProvider();
if (!proxy->instantiate(resourceProvider)) {
continue;
}
}
GrTexture* tex = proxy->peekTexture();
if (!tex) {
continue;
}
GrVkTexture* vkTex = static_cast<GrVkTexture*>(tex);
vkTex->prepareForExternal(this);
}
for (int i = 0; i < externalRequests.fNumSurfaces; ++i) {
SkSurface* surface = externalRequests.fSurfaces[i];
if (!surface->getCanvas()->getGrContext()) {
continue;
}
SkSurface_Gpu* gpuSurface = static_cast<SkSurface_Gpu*>(surface);
auto* rtc = gpuSurface->getDevice()->accessRenderTargetContext();
sk_sp<GrRenderTargetProxy> proxy = rtc->asRenderTargetProxyRef();
if (!proxy->isInstantiated()) {
auto resourceProvider = this->getContext()->priv().resourceProvider();
if (!proxy->instantiate(resourceProvider)) {
continue;
}
}
GrRenderTarget* rt = proxy->peekRenderTarget();
SkASSERT(rt);
GrVkRenderTarget* vkRT = static_cast<GrVkRenderTarget*>(rt);
if (externalRequests.fPrepareSurfaceForPresent &&
externalRequests.fPrepareSurfaceForPresent[i]) {
vkRT->prepareForPresent(this);
} else {
vkRT->prepareForExternal(this);
}
}
if (info.fFlags & kSyncCpu_GrFlushFlag) {
this->submitCommandBuffer(kForce_SyncQueue, info.fFinishedProc, info.fFinishedContext);
} else {

View File

@ -226,7 +226,7 @@ private:
const SkIPoint& dstPoint, bool canDiscardOutsideDstRect) override;
void onFinishFlush(GrSurfaceProxy*[], int, SkSurface::BackendSurfaceAccess access,
const GrFlushInfo&) override;
const GrFlushInfo&, const GrPrepareForExternalIORequests&) override;
// Ends and submits the current command buffer to the queue and then creates a new command
// buffer and begins it. If sync is set to kForce_SyncQueue, the function will wait for all

View File

@ -231,6 +231,11 @@ void GrVkImage::prepareForPresent(GrVkGpu* gpu) {
this->setImageLayout(gpu, layout, 0, VK_PIPELINE_STAGE_BOTTOM_OF_PIPE_BIT, false, true);
}
void GrVkImage::prepareForExternal(GrVkGpu* gpu) {
this->setImageLayout(gpu, this->currentLayout(), 0, VK_PIPELINE_STAGE_BOTTOM_OF_PIPE_BIT, false,
true);
}
void GrVkImage::releaseImage(GrVkGpu* gpu) {
if (fInfo.fCurrentQueueFamily != fInitialQueueFamily) {
// The Vulkan spec is vague on what to put for the dstStageMask here. The spec for image

View File

@ -98,6 +98,9 @@ public:
// family is not external or foreign.
void prepareForPresent(GrVkGpu* gpu);
// Returns the image to its original queue family
void prepareForExternal(GrVkGpu* gpu);
// This simply updates our tracking of the image layout and does not actually do any gpu work.
// This is only used for mip map generation where we are manually changing the layouts as we
// blit each layer, and then at the end need to update our tracking.

View File

@ -666,7 +666,8 @@ sk_sp<SkImage> SkImage::MakeFromAHardwareBufferWithData(GrContext* context,
GrFlushInfo info;
info.fFlags = kSyncCpu_GrFlushFlag;
GrSurfaceProxy* p[1] = {proxy.get()};
drawingManager->flush(p, 1, SkSurface::BackendSurfaceAccess::kNoAccess, info);
drawingManager->flush(p, 1, SkSurface::BackendSurfaceAccess::kNoAccess, info,
GrPrepareForExternalIORequests());
return image;
}

View File

@ -313,7 +313,8 @@ bool GrDrawingManager::ProgramUnitTest(GrContext* context, int maxStages, int ma
GrDrawRandomOp(&random, renderTargetContext.get(), std::move(paint));
}
// Flush everything, test passes if flush is successful(ie, no asserts are hit, no crashes)
drawingManager->flush(nullptr, 0, SkSurface::BackendSurfaceAccess::kNoAccess, GrFlushInfo());
drawingManager->flush(nullptr, 0, SkSurface::BackendSurfaceAccess::kNoAccess, GrFlushInfo(),
GrPrepareForExternalIORequests());
const GrBackendFormat format =
context->priv().caps()->getBackendFormatFromColorType(kRGBA_8888_SkColorType);
@ -343,7 +344,7 @@ bool GrDrawingManager::ProgramUnitTest(GrContext* context, int maxStages, int ma
paint.addColorFragmentProcessor(std::move(blockFP));
GrDrawRandomOp(&random, renderTargetContext.get(), std::move(paint));
drawingManager->flush(nullptr, 0, SkSurface::BackendSurfaceAccess::kNoAccess,
GrFlushInfo());
GrFlushInfo(), GrPrepareForExternalIORequests());
}
}

View File

@ -275,7 +275,8 @@ void basic_transfer_from_test(skiatest::Reporter* reporter, const sk_gpu_test::C
GrFlushInfo flushInfo;
flushInfo.fFlags = kSyncCpu_GrFlushFlag;
if (context->priv().caps()->mapBufferFlags() & GrCaps::kAsyncRead_MapFlag) {
gpu->finishFlush(nullptr, 0, SkSurface::BackendSurfaceAccess::kNoAccess, flushInfo);
gpu->finishFlush(nullptr, 0, SkSurface::BackendSurfaceAccess::kNoAccess, flushInfo,
GrPrepareForExternalIORequests());
}
const auto* map = reinterpret_cast<const GrColor*>(buffer->map());
@ -304,7 +305,8 @@ void basic_transfer_from_test(skiatest::Reporter* reporter, const sk_gpu_test::C
++expectedTransferCnt;
if (context->priv().caps()->mapBufferFlags() & GrCaps::kAsyncRead_MapFlag) {
gpu->finishFlush(nullptr, 0, SkSurface::BackendSurfaceAccess::kNoAccess, flushInfo);
gpu->finishFlush(nullptr, 0, SkSurface::BackendSurfaceAccess::kNoAccess, flushInfo,
GrPrepareForExternalIORequests());
}
map = reinterpret_cast<const GrColor*>(buffer->map());

View File

@ -21,10 +21,13 @@
#include "include/gpu/vk/GrVkTypes.h"
#include "include/private/GrTextureProxy.h"
#include "src/gpu/GrContextPriv.h"
#include "src/gpu/GrRenderTargetContext.h"
#include "src/gpu/SkGpuDevice.h"
#include "src/gpu/vk/GrVkGpu.h"
#include "src/gpu/vk/GrVkImageLayout.h"
#include "src/gpu/vk/GrVkTexture.h"
#include "src/image/SkImage_Base.h"
#include "src/image/SkSurface_Gpu.h"
DEF_GPUTEST_FOR_VULKAN_CONTEXT(VkImageLayoutTest, reporter, ctxInfo) {
GrContext* context = ctxInfo.grContext();
@ -203,6 +206,127 @@ DEF_GPUTEST_FOR_VULKAN_CONTEXT(VkReleaseExternalQueueTest, reporter, ctxInfo) {
}
}
// Test to make sure we transition to the original queue when requests for prepareforexternalio are
// in flush calls
DEF_GPUTEST_FOR_VULKAN_CONTEXT(VkPrepareForExternalIOQueueTransitionTest, reporter, ctxInfo) {
GrContext* context = ctxInfo.grContext();
GrVkGpu* gpu = static_cast<GrVkGpu*>(context->priv().getGpu());
if (!gpu->vkCaps().supportsExternalMemory()) {
return;
}
for (bool useSurface : {false, true}) {
for (bool preparePresent : {false, true}) {
if (!useSurface && preparePresent) {
// We don't set textures to present
continue;
}
GrBackendTexture backendTex = gpu->createTestingOnlyBackendTexture(
nullptr, 4, 4, GrColorType::kRGBA_8888, useSurface, GrMipMapped::kNo);
// Make a backend texture with an external queue family and general layout.
GrVkImageInfo vkInfo;
if (!backendTex.getVkImageInfo(&vkInfo)) {
return;
}
// We can't actually make an external texture in our test. However, we lie and say it is
// and then will manually go and swap the queue to the graphics queue once we wrap it.
if (preparePresent) {
// We don't transition to present for images that are going to external or foreign
// queues.
vkInfo.fCurrentQueueFamily = gpu->queueIndex();
} else {
vkInfo.fCurrentQueueFamily = VK_QUEUE_FAMILY_EXTERNAL;
}
GrBackendTexture vkExtTex(1, 1, vkInfo);
sk_sp<SkImage> image;
sk_sp<SkSurface> surface;
GrTexture* texture;
if (useSurface) {
surface = SkSurface::MakeFromBackendTexture(context, vkExtTex,
kTopLeft_GrSurfaceOrigin, 0, kRGBA_8888_SkColorType, nullptr, nullptr);
REPORTER_ASSERT(reporter, surface.get());
if (!surface) {
continue;
}
SkSurface_Gpu* gpuSurface = static_cast<SkSurface_Gpu*>(surface.get());
auto* rtc = gpuSurface->getDevice()->accessRenderTargetContext();
texture = rtc->asTextureProxy()->peekTexture();
} else {
image = SkImage::MakeFromTexture(context, vkExtTex, kTopLeft_GrSurfaceOrigin,
kRGBA_8888_SkColorType, kPremul_SkAlphaType, nullptr, nullptr, nullptr);
REPORTER_ASSERT(reporter, image.get());
if (!image) {
continue;
}
texture = image->getTexture();
}
REPORTER_ASSERT(reporter, texture);
GrVkTexture* vkTex = static_cast<GrVkTexture*>(texture);
// Testing helper so we claim that we don't need to transition from our fake external
// queue first.
vkTex->setCurrentQueueFamilyToGraphicsQueue(gpu);
GrBackendTexture newBackendTexture;
if (useSurface) {
newBackendTexture = surface->getBackendTexture(
SkSurface::kFlushRead_TextureHandleAccess);
} else {
newBackendTexture = image->getBackendTexture(false);
}
GrVkImageInfo newVkInfo;
REPORTER_ASSERT(reporter, newBackendTexture.getVkImageInfo(&newVkInfo));
REPORTER_ASSERT(reporter, newVkInfo.fCurrentQueueFamily == gpu->queueIndex());
VkImageLayout oldLayout = newVkInfo.fImageLayout;
GrPrepareForExternalIORequests externalRequests;
SkImage* imagePtr;
SkSurface* surfacePtr;
if (useSurface) {
externalRequests.fNumSurfaces = 1;
surfacePtr = surface.get();
externalRequests.fSurfaces = &surfacePtr;
externalRequests.fPrepareSurfaceForPresent = &preparePresent;
} else {
externalRequests.fNumImages = 1;
imagePtr = image.get();
externalRequests.fImages = &imagePtr;
}
context->flush(GrFlushInfo(), externalRequests);
if (useSurface) {
newBackendTexture = surface->getBackendTexture(
SkSurface::kFlushRead_TextureHandleAccess);
} else {
newBackendTexture = image->getBackendTexture(false);
}
REPORTER_ASSERT(reporter, newBackendTexture.getVkImageInfo(&newVkInfo));
if (preparePresent) {
REPORTER_ASSERT(reporter, newVkInfo.fCurrentQueueFamily == gpu->queueIndex());
REPORTER_ASSERT(reporter,
newVkInfo.fImageLayout == VK_IMAGE_LAYOUT_PRESENT_SRC_KHR);
} else {
REPORTER_ASSERT(reporter, newVkInfo.fCurrentQueueFamily == VK_QUEUE_FAMILY_EXTERNAL);
REPORTER_ASSERT(reporter, newVkInfo.fImageLayout == oldLayout);
}
GrFlushInfo flushInfo;
flushInfo.fFlags = kSyncCpu_GrFlushFlag;
context->flush(flushInfo);
gpu->deleteTestingOnlyBackendTexture(backendTex);
}
}
}
// Test to make sure we transition from the EXTERNAL queue even when no layout transition is needed.
DEF_GPUTEST_FOR_VULKAN_CONTEXT(VkTransitionExternalQueueTest, reporter, ctxInfo) {
GrContext* context = ctxInfo.grContext();