Make use of VkSemaphores thread safe, assuming use of the same VkQueue.

This allows us to re-enable support for multiple GrContexts in
GrBackendTextureImageGenerator.

Bug: skia:
Change-Id: Ifd6ac1ad81cdfbd1fd986467d8beb359399d6588
Reviewed-on: https://skia-review.googlesource.com/98340
Reviewed-by: Brian Osman <brianosman@google.com>
Reviewed-by: Brian Salomon <bsalomon@google.com>
Commit-Queue: Greg Daniel <egdaniel@google.com>
commit 48661b868f (parent 2e6feed858)
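For context before the diff, a rough usage sketch of what this change re-enables: an image uploaded through one GrContext can later be drawn via a second context that shares the same VkDevice and VkQueue (or the same GL share group). The helper names (uploadOnA, drawOnB) and the exact MakeCrossContextFromEncoded parameter list are illustrative assumptions, not part of this commit.

// --- Illustrative sketch, not part of the diff. Assumes ctxA and ctxB were created against
// --- the same VkDevice/VkQueue (or GL share group); error handling omitted.
#include <utility>
#include "GrContext.h"
#include "SkData.h"
#include "SkImage.h"
#include "SkSurface.h"

sk_sp<SkImage> uploadOnA(GrContext* ctxA, sk_sp<SkData> encoded) {
    // Decode + upload happens on ctxA; the returned image carries a semaphore that a
    // borrowing context waits on before sampling the texture.
    return SkImage::MakeCrossContextFromEncoded(ctxA, std::move(encoded),
                                                /*buildMips=*/false,
                                                /*dstColorSpace=*/nullptr);
}

void drawOnB(SkSurface* surfaceOnB, const sk_sp<SkImage>& img) {
    // Before this change only one context could ever borrow the backing texture; now a
    // different context may borrow it once the previous borrower has released it.
    surfaceOnB->getCanvas()->drawImage(img, 0, 0);
    surfaceOnB->flush();
}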
@@ -171,7 +171,7 @@ public:
* Decodes and uploads the encoded data to a GPU backed image using the supplied GrContext.
* That image can be safely used by other GrContexts, across thread boundaries. The GrContext
* used here, and the ones used to draw this image later must be in the same GL share group,
* or otherwise be able to share resources.
* or use the same Vulkan VkDevice and VkQueue, or otherwise be able to share resources.
*
* When the image's ref count reaches zero, the original GrContext will destroy the texture,
* asynchronously.
@@ -187,7 +187,7 @@ public:
* Uploads the pixmap to a GPU backed image using the supplied GrContext.
* That image can be safely used by other GrContexts, across thread boundaries. The GrContext
* used here, and the ones used to draw this image later must be in the same GL share group,
* or otherwise be able to share resources.
* or use the same Vulkan VkDevice and VkQueue, or otherwise be able to share resources.
*
* When the image's ref count reaches zero, the original GrContext will destroy the texture,
* asynchronously.
@@ -80,6 +80,7 @@ void GrBackendTextureImageGenerator::ReleaseRefHelper_TextureReleaseProc(void* c
SkASSERT(refHelper);
refHelper->fBorrowedTexture = nullptr;
refHelper->fBorrowingContextID = SK_InvalidGenID;
refHelper->unref();
}
@@ -125,11 +126,12 @@ sk_sp<GrTextureProxy> GrBackendTextureImageGenerator::onGenerateTexture(
if (!resourceProvider) {
// If we get here then we never created a texture to pass the refHelper ref off
// to. Thus we must unref it ourselves.
refHelper->fBorrowingContextID = SK_InvalidGenID;
refHelper->unref();
return sk_sp<GrTexture>();
}
if (semaphore && !semaphore->hasSubmittedWait()) {
if (semaphore) {
resourceProvider->priv().gpu()->waitSemaphore(semaphore);
}
@@ -155,6 +157,7 @@ sk_sp<GrTextureProxy> GrBackendTextureImageGenerator::onGenerateTexture(
tex = resourceProvider->wrapBackendTexture(backendTexture,
kBorrow_GrWrapOwnership);
if (!tex) {
refHelper->fBorrowingContextID = SK_InvalidGenID;
refHelper->unref();
return sk_sp<GrTexture>();
}
@@ -517,49 +517,3 @@ void GrGpu::dumpJSON(SkJSONWriter* writer) const {
writer->endObject();
}
void GrGpu::insertSemaphore(sk_sp<GrSemaphore> semaphore, bool flush) {
if (!semaphore) {
return;
}
SkASSERT(!semaphore->fSignaled);
if (semaphore->fSignaled) {
this->onInsertSemaphore(nullptr, flush);
return;
}
this->onInsertSemaphore(semaphore, flush);
semaphore->fSignaled = true;
}
void GrGpu::waitSemaphore(sk_sp<GrSemaphore> semaphore) {
if (!semaphore) {
return;
}
SkASSERT(!semaphore->fWaitedOn);
if (!semaphore->fWaitedOn) {
this->onWaitSemaphore(semaphore);
semaphore->fWaitedOn = true;
}
}
sk_sp<GrSemaphore> GrGpu::wrapBackendSemaphore(const GrBackendSemaphore& semaphore,
GrResourceProvider::SemaphoreWrapType wrapType,
GrWrapOwnership ownership) {
sk_sp<GrSemaphore> grSema = this->onWrapBackendSemaphore(semaphore, ownership);
if (GrResourceProvider::SemaphoreWrapType::kWillSignal == wrapType) {
// This is a safety check to make sure we never try to wait on this semaphore since we
// assume the client will wait on it themselves if they've asked us to signal it.
grSema->fWaitedOn = true;
} else {
SkASSERT(GrResourceProvider::SemaphoreWrapType::kWillWait == wrapType);
// This is a safety check to make sure we never try to signal this semaphore since we assume
// the client will signal it themselves if they've asked us wait on it.
grSema->fSignaled = true;
}
SkASSERT(this->caps()->fenceSyncSupport());
return grSema;
}
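The safety marking removed from GrGpu above does not go away; each backend now applies it when wrapping a semaphore (see the GrVkSemaphore::MakeWrapped hunk further down). A minimal standalone sketch of that wrap-type mapping follows; the type and function names are illustrative, not Skia's.

// --- Illustrative sketch, not part of the diff.
enum class SemaphoreWrapType { kWillSignal, kWillWait };

struct WrappedSemaphoreState {
    bool prohibitSignal;  // kWillWait: the client signals it, so Skia must never submit it for signal
    bool prohibitWait;    // kWillSignal: the client waits on it, so Skia must never submit it for wait
};

WrappedSemaphoreState stateForWrapType(SemaphoreWrapType wrapType) {
    return WrappedSemaphoreState{
        /*prohibitSignal=*/SemaphoreWrapType::kWillWait == wrapType,
        /*prohibitWait=*/SemaphoreWrapType::kWillSignal == wrapType,
    };
}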
@@ -377,11 +377,11 @@ public:
virtual void deleteFence(GrFence) const = 0;
virtual sk_sp<GrSemaphore> SK_WARN_UNUSED_RESULT makeSemaphore(bool isOwned = true) = 0;
sk_sp<GrSemaphore> wrapBackendSemaphore(const GrBackendSemaphore& semaphore,
GrResourceProvider::SemaphoreWrapType wrapType,
GrWrapOwnership ownership);
void insertSemaphore(sk_sp<GrSemaphore> semaphore, bool flush = false);
void waitSemaphore(sk_sp<GrSemaphore> semaphore);
virtual sk_sp<GrSemaphore> wrapBackendSemaphore(const GrBackendSemaphore& semaphore,
GrResourceProvider::SemaphoreWrapType wrapType,
GrWrapOwnership ownership) = 0;
virtual void insertSemaphore(sk_sp<GrSemaphore> semaphore, bool flush = false) = 0;
virtual void waitSemaphore(sk_sp<GrSemaphore> semaphore) = 0;
/**
* Put this texture in a safe and known state for use across multiple GrContexts. Depending on
@@ -606,11 +606,6 @@ private:
virtual void onFinishFlush(bool insertedSemaphores) = 0;
virtual void onInsertSemaphore(sk_sp<GrSemaphore> semaphore, bool flush = false) = 0;
virtual void onWaitSemaphore(sk_sp<GrSemaphore> semaphore) = 0;
virtual sk_sp<GrSemaphore> onWrapBackendSemaphore(const GrBackendSemaphore& semaphore,
GrWrapOwnership ownership) = 0;
virtual void onDumpJSON(SkJSONWriter*) const {}
void resetContext() {
@@ -13,15 +13,7 @@
class GrBackendSemaphore;
class GrGpu;
/*
* Wrapper around a semaphore object which must be implemented on each backend. This semaphores can
* at most be signaled once and waited upon once.
*/
class GrSemaphore : public SkRefCnt {
public:
bool hasSubmittedSignal() const { return fSignaled; }
bool hasSubmittedWait() const { return fWaitedOn; }
private:
// This function should only be used in the case of exporting and importing a GrSemaphore object
// from one GrContext to another. When exporting, the GrSemaphore should be set to a null GrGpu,
@@ -34,9 +26,6 @@ private:
// internal semaphore.
virtual void setBackendSemaphore(GrBackendSemaphore*) const = 0;
bool fSignaled = false;
bool fWaitedOn = false;
protected:
explicit GrSemaphore(const GrGpu* gpu) : fGpu(gpu) {}
@@ -72,6 +72,18 @@ public:
SkASSERT(0);
return nullptr;
}
sk_sp<GrSemaphore> wrapBackendSemaphore(const GrBackendSemaphore& semaphore,
GrResourceProvider::SemaphoreWrapType wrapType,
GrWrapOwnership ownership) override {
SkASSERT(0);
return nullptr;
}
void insertSemaphore(sk_sp<GrSemaphore> semaphore, bool flush) override {
SkASSERT(0);
}
void waitSemaphore(sk_sp<GrSemaphore> semaphore) override {
SkASSERT(0);
}
sk_sp<GrSemaphore> prepareTextureForCrossContextUsage(GrTexture*) override {
SkASSERT(0);
return nullptr;
@@ -147,18 +159,6 @@ private:
void onFinishFlush(bool insertedSemaphores) override { SkASSERT(0); }
sk_sp<GrSemaphore> onWrapBackendSemaphore(const GrBackendSemaphore& semaphore,
GrWrapOwnership ownership) override {
SkASSERT(0);
return nullptr;
}
void onInsertSemaphore(sk_sp<GrSemaphore> semaphore, bool flush) override {
SkASSERT(0);
}
void onWaitSemaphore(sk_sp<GrSemaphore> semaphore) override {
SkASSERT(0);
}
GrStencilAttachment* createStencilAttachmentForRenderTarget(const GrRenderTarget*,
int width,
int height) override;
@@ -4579,29 +4579,27 @@ sk_sp<GrSemaphore> SK_WARN_UNUSED_RESULT GrGLGpu::makeSemaphore(bool isOwned) {
return GrGLSemaphore::Make(this, isOwned);
}
sk_sp<GrSemaphore> GrGLGpu::onWrapBackendSemaphore(const GrBackendSemaphore& semaphore,
GrWrapOwnership ownership) {
sk_sp<GrSemaphore> GrGLGpu::wrapBackendSemaphore(const GrBackendSemaphore& semaphore,
GrResourceProvider::SemaphoreWrapType wrapType,
GrWrapOwnership ownership) {
SkASSERT(this->caps()->fenceSyncSupport());
return GrGLSemaphore::MakeWrapped(this, semaphore.glSync(), ownership);
}
void GrGLGpu::onInsertSemaphore(sk_sp<GrSemaphore> semaphore, bool flush) {
void GrGLGpu::insertSemaphore(sk_sp<GrSemaphore> semaphore, bool flush) {
GrGLSemaphore* glSem = static_cast<GrGLSemaphore*>(semaphore.get());
if (glSem) {
GrGLsync sync;
GL_CALL_RET(sync, FenceSync(GR_GL_SYNC_GPU_COMMANDS_COMPLETE, 0));
glSem->setSync(sync);
}
GrGLsync sync;
GL_CALL_RET(sync, FenceSync(GR_GL_SYNC_GPU_COMMANDS_COMPLETE, 0));
glSem->setSync(sync);
if (flush) {
GL_CALL(Flush());
}
}
void GrGLGpu::onWaitSemaphore(sk_sp<GrSemaphore> semaphore) {
void GrGLGpu::waitSemaphore(sk_sp<GrSemaphore> semaphore) {
GrGLSemaphore* glSem = static_cast<GrGLSemaphore*>(semaphore.get());
SkASSERT(glSem);
GL_CALL(WaitSync(glSem->sync(), 0, GR_GL_TIMEOUT_IGNORED));
}
@@ -173,6 +173,11 @@ public:
void deleteFence(GrFence) const override;
sk_sp<GrSemaphore> SK_WARN_UNUSED_RESULT makeSemaphore(bool isOwned) override;
sk_sp<GrSemaphore> wrapBackendSemaphore(const GrBackendSemaphore& semaphore,
GrResourceProvider::SemaphoreWrapType wrapType,
GrWrapOwnership ownership) override;
void insertSemaphore(sk_sp<GrSemaphore> semaphore, bool flush) override;
void waitSemaphore(sk_sp<GrSemaphore> semaphore) override;
sk_sp<GrSemaphore> prepareTextureForCrossContextUsage(GrTexture*) override;
@@ -286,11 +291,6 @@ private:
void onFinishFlush(bool insertedSemaphores) override;
void onInsertSemaphore(sk_sp<GrSemaphore> semaphore, bool flush) override;
void onWaitSemaphore(sk_sp<GrSemaphore> semaphore) override;
sk_sp<GrSemaphore> onWrapBackendSemaphore(const GrBackendSemaphore& semaphore,
GrWrapOwnership ownership) override;
bool hasExtension(const char* ext) const { return fGLContext->hasExtension(ext); }
bool copySurfaceAsDraw(GrSurface* dst, GrSurfaceOrigin dstOrigin,
@@ -58,6 +58,11 @@ public:
sk_sp<GrSemaphore> SK_WARN_UNUSED_RESULT makeSemaphore(bool isOwned) override {
return nullptr;
}
sk_sp<GrSemaphore> wrapBackendSemaphore(const GrBackendSemaphore& semaphore,
GrResourceProvider::SemaphoreWrapType wrapType,
GrWrapOwnership ownership) override { return nullptr; }
void insertSemaphore(sk_sp<GrSemaphore> semaphore, bool flush) override {}
void waitSemaphore(sk_sp<GrSemaphore> semaphore) override {}
sk_sp<GrSemaphore> prepareTextureForCrossContextUsage(GrTexture*) override { return nullptr; }
void submitCommandBuffer(const GrMockGpuRTCommandBuffer*);
@@ -120,11 +125,6 @@ private:
void onFinishFlush(bool insertedSemaphores) override {}
sk_sp<GrSemaphore> onWrapBackendSemaphore(const GrBackendSemaphore& semaphore,
GrWrapOwnership ownership) override { return nullptr; }
void onInsertSemaphore(sk_sp<GrSemaphore> semaphore, bool flush) override {}
void onWaitSemaphore(sk_sp<GrSemaphore> semaphore) override {}
GrStencilAttachment* createStencilAttachmentForRenderTarget(const GrRenderTarget*,
int width,
int height) override;
@@ -67,6 +67,11 @@ public:
sk_sp<GrSemaphore> SK_WARN_UNUSED_RESULT makeSemaphore(bool isOwned) override {
return nullptr;
}
sk_sp<GrSemaphore> wrapBackendSemaphore(const GrBackendSemaphore& semaphore,
GrResourceProvider::SemaphoreWrapType wrapType,
GrWrapOwnership ownership) override { return nullptr; }
void insertSemaphore(sk_sp<GrSemaphore> semaphore, bool flush) override {}
void waitSemaphore(sk_sp<GrSemaphore> semaphore) override {}
sk_sp<GrSemaphore> prepareTextureForCrossContextUsage(GrTexture*) override { return nullptr; }
private:
@@ -129,11 +134,6 @@ private:
void onFinishFlush(bool insertedSemaphores) override {}
sk_sp<GrSemaphore> onWrapBackendSemaphore(const GrBackendSemaphore& semaphore,
GrWrapOwnership ownership) override { return nullptr; }
void onInsertSemaphore(sk_sp<GrSemaphore> semaphore, bool flush) override {}
void onWaitSemaphore(sk_sp<GrSemaphore> semaphore) override {}
GrStencilAttachment* createStencilAttachmentForRenderTarget(const GrRenderTarget*,
int width,
int height) override {
@@ -445,12 +445,36 @@ void GrVkPrimaryCommandBuffer::executeCommands(const GrVkGpu* gpu,
this->invalidateState();
}
static void submit_to_queue(const GrVkInterface* interface,
VkQueue queue,
VkFence fence,
uint32_t waitCount,
const VkSemaphore* waitSemaphores,
const VkPipelineStageFlags* waitStages,
uint32_t commandBufferCount,
const VkCommandBuffer* commandBuffers,
uint32_t signalCount,
const VkSemaphore* signalSemaphores) {
VkSubmitInfo submitInfo;
memset(&submitInfo, 0, sizeof(VkSubmitInfo));
submitInfo.sType = VK_STRUCTURE_TYPE_SUBMIT_INFO;
submitInfo.pNext = nullptr;
submitInfo.waitSemaphoreCount = waitCount;
submitInfo.pWaitSemaphores = waitSemaphores;
submitInfo.pWaitDstStageMask = waitStages;
submitInfo.commandBufferCount = commandBufferCount;
submitInfo.pCommandBuffers = commandBuffers;
submitInfo.signalSemaphoreCount = signalCount;
submitInfo.pSignalSemaphores = signalSemaphores;
GR_VK_CALL_ERRCHECK(interface, QueueSubmit(queue, 1, &submitInfo, fence));
}
void GrVkPrimaryCommandBuffer::submitToQueue(
const GrVkGpu* gpu,
VkQueue queue,
GrVkGpu::SyncQueue sync,
SkTArray<const GrVkSemaphore::Resource*>& signalSemaphores,
SkTArray<const GrVkSemaphore::Resource*>& waitSemaphores) {
SkTArray<GrVkSemaphore::Resource*>& signalSemaphores,
SkTArray<GrVkSemaphore::Resource*>& waitSemaphores) {
SkASSERT(!fIsActive);
VkResult err;
@@ -466,33 +490,51 @@ void GrVkPrimaryCommandBuffer::submitToQueue(
}
int signalCount = signalSemaphores.count();
SkTArray<VkSemaphore> vkSignalSem(signalCount);
for (int i = 0; i < signalCount; ++i) {
this->addResource(signalSemaphores[i]);
vkSignalSem.push_back(signalSemaphores[i]->semaphore());
}
int waitCount = waitSemaphores.count();
SkTArray<VkSemaphore> vkWaitSems(waitCount);
SkTArray<VkPipelineStageFlags> vkWaitStages(waitCount);
for (int i = 0; i < waitCount; ++i) {
this->addResource(waitSemaphores[i]);
vkWaitSems.push_back(waitSemaphores[i]->semaphore());
vkWaitStages.push_back(VK_PIPELINE_STAGE_ALL_COMMANDS_BIT);
}
VkSubmitInfo submitInfo;
memset(&submitInfo, 0, sizeof(VkSubmitInfo));
submitInfo.sType = VK_STRUCTURE_TYPE_SUBMIT_INFO;
submitInfo.pNext = nullptr;
submitInfo.waitSemaphoreCount = waitCount;
submitInfo.pWaitSemaphores = vkWaitSems.begin();
submitInfo.pWaitDstStageMask = vkWaitStages.begin();
submitInfo.commandBufferCount = 1;
submitInfo.pCommandBuffers = &fCmdBuffer;
submitInfo.signalSemaphoreCount = vkSignalSem.count();
submitInfo.pSignalSemaphores = vkSignalSem.begin();
GR_VK_CALL_ERRCHECK(gpu->vkInterface(), QueueSubmit(queue, 1, &submitInfo, fSubmitFence));
if (0 == signalCount && 0 == waitCount) {
// This command buffer has no dependent semaphores so we can simply just submit it to the
// queue with no worries.
submit_to_queue(gpu->vkInterface(), queue, fSubmitFence, 0, nullptr, nullptr,
1, &fCmdBuffer, 0, nullptr);
} else {
GrVkSemaphore::Resource::AcquireMutex();
SkTArray<VkSemaphore> vkSignalSems(signalCount);
for (int i = 0; i < signalCount; ++i) {
if (signalSemaphores[i]->shouldSignal()) {
this->addResource(signalSemaphores[i]);
vkSignalSems.push_back(signalSemaphores[i]->semaphore());
}
}
SkTArray<VkSemaphore> vkWaitSems(waitCount);
SkTArray<VkPipelineStageFlags> vkWaitStages(waitCount);
for (int i = 0; i < waitCount; ++i) {
if (waitSemaphores[i]->shouldWait()) {
this->addResource(waitSemaphores[i]);
vkWaitSems.push_back(waitSemaphores[i]->semaphore());
vkWaitStages.push_back(VK_PIPELINE_STAGE_ALL_COMMANDS_BIT);
}
}
submit_to_queue(gpu->vkInterface(), queue, fSubmitFence,
vkWaitSems.count(), vkWaitSems.begin(), vkWaitStages.begin(),
1, &fCmdBuffer,
vkSignalSems.count(), vkSignalSems.begin());
// Since shouldSignal/Wait do not require a mutex to be held, we must make sure that we mark
// the semaphores after we've submitted. Thus in the worst case another submit grabs the
// mutex and then realizes it doesn't need to submit the semaphore. We will never end up
// where a semaphore doesn't think it needs to be submitted (cause of querying
// shouldSignal/Wait), but it should need to.
for (int i = 0; i < signalCount; ++i) {
signalSemaphores[i]->markAsSignaled();
}
for (int i = 0; i < waitCount; ++i) {
waitSemaphores[i]->markAsWaited();
}
GrVkSemaphore::Resource::ReleaseMutex();
}
if (GrVkGpu::kForce_SyncQueue == sync) {
err = GR_VK_CALL(gpu->vkInterface(),
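The hunk above is the core of the thread-safety change: the shouldSignal()/shouldWait() reads, the queue submit, and the markAsSignaled()/markAsWaited() updates all happen while one static mutex is held, so two threads submitting to the same VkQueue can never both submit the same VkSemaphore for signal (or for wait). A minimal standalone sketch of that pattern, with illustrative names rather than Skia's:

// --- Illustrative sketch, not part of the diff.
#include <cstdint>
#include <mutex>
#include <vector>

struct OnceSemaphore {
    uint64_t handle = 0;              // stands in for a VkSemaphore handle
    bool submittedForSignal = false;  // mirrors fHasBeenSubmittedToQueueForSignal
    bool submittedForWait = false;    // mirrors fHasBeenSubmittedToQueueForWait
};

std::mutex gSubmitMutex;  // one mutex guards every submission to the shared queue

void submitWithSemaphores(std::vector<OnceSemaphore*>& signals,
                          std::vector<OnceSemaphore*>& waits) {
    std::lock_guard<std::mutex> lock(gSubmitMutex);

    std::vector<uint64_t> signalHandles;
    std::vector<uint64_t> waitHandles;
    for (OnceSemaphore* s : signals) {
        if (!s->submittedForSignal) {  // shouldSignal()
            signalHandles.push_back(s->handle);
        }
    }
    for (OnceSemaphore* w : waits) {
        if (!w->submittedForWait) {    // shouldWait()
            waitHandles.push_back(w->handle);
        }
    }

    // ... the vkQueueSubmit using waitHandles/signalHandles would go here ...

    // Mark only after the submit, still under the mutex: the worst case is another thread
    // taking the mutex and finding it has nothing left to submit, never the reverse.
    for (OnceSemaphore* s : signals) { s->submittedForSignal = true; }  // markAsSignaled()
    for (OnceSemaphore* w : waits)   { w->submittedForWait = true; }    // markAsWaited()
}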
@@ -269,8 +269,8 @@ public:
const VkImageResolve* regions);
void submitToQueue(const GrVkGpu* gpu, VkQueue queue, GrVkGpu::SyncQueue sync,
SkTArray<const GrVkSemaphore::Resource*>& signalSemaphores,
SkTArray<const GrVkSemaphore::Resource*>& waitSemaphores);
SkTArray<GrVkSemaphore::Resource*>& signalSemaphores,
SkTArray<GrVkSemaphore::Resource*>& waitSemaphores);
bool finished(const GrVkGpu* gpu) const;
#ifdef SK_TRACE_VK_RESOURCES
@@ -2162,16 +2162,17 @@ sk_sp<GrSemaphore> SK_WARN_UNUSED_RESULT GrVkGpu::makeSemaphore(bool isOwned) {
return GrVkSemaphore::Make(this, isOwned);
}
sk_sp<GrSemaphore> GrVkGpu::onWrapBackendSemaphore(const GrBackendSemaphore& semaphore,
GrWrapOwnership ownership) {
return GrVkSemaphore::MakeWrapped(this, semaphore.vkSemaphore(), ownership);
sk_sp<GrSemaphore> GrVkGpu::wrapBackendSemaphore(const GrBackendSemaphore& semaphore,
GrResourceProvider::SemaphoreWrapType wrapType,
GrWrapOwnership ownership) {
return GrVkSemaphore::MakeWrapped(this, semaphore.vkSemaphore(), wrapType, ownership);
}
void GrVkGpu::onInsertSemaphore(sk_sp<GrSemaphore> semaphore, bool flush) {
void GrVkGpu::insertSemaphore(sk_sp<GrSemaphore> semaphore, bool flush) {
GrVkSemaphore* vkSem = static_cast<GrVkSemaphore*>(semaphore.get());
if (vkSem) {
const GrVkSemaphore::Resource* resource = vkSem->getResource();
GrVkSemaphore::Resource* resource = vkSem->getResource();
if (resource->shouldSignal()) {
resource->ref();
fSemaphoresToSignal.push_back(resource);
}
@@ -2181,13 +2182,14 @@ void GrVkGpu::onInsertSemaphore(sk_sp<GrSemaphore> semaphore, bool flush) {
}
}
void GrVkGpu::onWaitSemaphore(sk_sp<GrSemaphore> semaphore) {
void GrVkGpu::waitSemaphore(sk_sp<GrSemaphore> semaphore) {
GrVkSemaphore* vkSem = static_cast<GrVkSemaphore*>(semaphore.get());
SkASSERT(vkSem);
const GrVkSemaphore::Resource* resource = vkSem->getResource();
resource->ref();
fSemaphoresToWaitOn.push_back(resource);
GrVkSemaphore::Resource* resource = vkSem->getResource();
if (resource->shouldWait()) {
resource->ref();
fSemaphoresToWaitOn.push_back(resource);
}
}
sk_sp<GrSemaphore> GrVkGpu::prepareTextureForCrossContextUsage(GrTexture* texture) {
@@ -135,6 +135,12 @@ public:
void deleteFence(GrFence) const override;
sk_sp<GrSemaphore> SK_WARN_UNUSED_RESULT makeSemaphore(bool isOwned) override;
sk_sp<GrSemaphore> wrapBackendSemaphore(const GrBackendSemaphore& semaphore,
GrResourceProvider::SemaphoreWrapType wrapType,
GrWrapOwnership ownership) override;
void insertSemaphore(sk_sp<GrSemaphore> semaphore, bool flush) override;
void waitSemaphore(sk_sp<GrSemaphore> semaphore) override;
sk_sp<GrSemaphore> prepareTextureForCrossContextUsage(GrTexture*) override;
void generateMipmap(GrVkTexture* tex, GrSurfaceOrigin texOrigin);
@@ -204,11 +210,6 @@ private:
void onFinishFlush(bool insertedSemaphores) override;
void onInsertSemaphore(sk_sp<GrSemaphore> semaphore, bool flush) override;
void onWaitSemaphore(sk_sp<GrSemaphore> semaphore) override;
sk_sp<GrSemaphore> onWrapBackendSemaphore(const GrBackendSemaphore& semaphore,
GrWrapOwnership ownership) override;
// Ends and submits the current command buffer to the queue and then creates a new command
// buffer and begins it. If sync is set to kForce_SyncQueue, the function will wait for all
// work in the queue to finish before returning. If this GrVkGpu object has any semaphores in
@@ -265,8 +266,8 @@ private:
GrVkPrimaryCommandBuffer* fCurrentCmdBuffer;
SkSTArray<1, const GrVkSemaphore::Resource*> fSemaphoresToWaitOn;
SkSTArray<1, const GrVkSemaphore::Resource*> fSemaphoresToSignal;
SkSTArray<1, GrVkSemaphore::Resource*> fSemaphoresToWaitOn;
SkSTArray<1, GrVkSemaphore::Resource*> fSemaphoresToSignal;
VkPhysicalDeviceMemoryProperties fPhysDevMemProps;
@@ -16,6 +16,8 @@
#undef CreateSemaphore
#endif
SkMutex GrVkSemaphore::Resource::gMutex;
sk_sp<GrVkSemaphore> GrVkSemaphore::Make(const GrVkGpu* gpu, bool isOwned) {
VkSemaphoreCreateInfo createInfo;
memset(&createInfo, 0, sizeof(VkSemaphoreCreateInfo));
@@ -26,22 +28,26 @@ sk_sp<GrVkSemaphore> GrVkSemaphore::Make(const GrVkGpu* gpu, bool isOwned) {
GR_VK_CALL_ERRCHECK(gpu->vkInterface(),
CreateSemaphore(gpu->device(), &createInfo, nullptr, &semaphore));
return sk_sp<GrVkSemaphore>(new GrVkSemaphore(gpu, semaphore, isOwned));
return sk_sp<GrVkSemaphore>(new GrVkSemaphore(gpu, semaphore, false, false, isOwned));
}
sk_sp<GrVkSemaphore> GrVkSemaphore::MakeWrapped(const GrVkGpu* gpu,
VkSemaphore semaphore,
WrapType wrapType,
GrWrapOwnership ownership) {
if (VK_NULL_HANDLE == semaphore) {
return nullptr;
}
return sk_sp<GrVkSemaphore>(new GrVkSemaphore(gpu, semaphore,
bool prohibitSignal = WrapType::kWillWait == wrapType;
bool prohibitWait = WrapType::kWillSignal == wrapType;
return sk_sp<GrVkSemaphore>(new GrVkSemaphore(gpu, semaphore, prohibitSignal, prohibitWait,
kBorrow_GrWrapOwnership != ownership));
}
GrVkSemaphore::GrVkSemaphore(const GrVkGpu* gpu, VkSemaphore semaphore, bool isOwned)
GrVkSemaphore::GrVkSemaphore(const GrVkGpu* gpu, VkSemaphore semaphore, bool prohibitSignal,
bool prohibitWait, bool isOwned)
: INHERITED(gpu) {
fResource = new Resource(semaphore, isOwned);
fResource = new Resource(semaphore, prohibitSignal, prohibitWait, isOwned);
}
GrVkSemaphore::~GrVkSemaphore() {
@@ -9,6 +9,8 @@
#define GrVkSemaphore_DEFINED
#include "GrSemaphore.h"
#include "GrResourceProvider.h"
#include "GrVkResource.h"
#include "vk/GrVkTypes.h"
@@ -20,21 +22,47 @@ class GrVkSemaphore : public GrSemaphore {
public:
static sk_sp<GrVkSemaphore> Make(const GrVkGpu* gpu, bool isOwned);
using WrapType = GrResourceProvider::SemaphoreWrapType;
static sk_sp<GrVkSemaphore> MakeWrapped(const GrVkGpu* gpu,
VkSemaphore semaphore,
WrapType wrapType,
GrWrapOwnership);
~GrVkSemaphore() override;
class Resource : public GrVkResource {
public:
Resource(VkSemaphore semaphore, bool isOwned)
: INHERITED(), fSemaphore(semaphore), fIsOwned(isOwned) {}
Resource(VkSemaphore semaphore, bool prohibitSignal, bool prohibitWait, bool isOwned)
: INHERITED()
, fSemaphore(semaphore)
, fHasBeenSubmittedToQueueForSignal(prohibitSignal)
, fHasBeenSubmittedToQueueForWait(prohibitWait)
, fIsOwned(isOwned) {}
~Resource() override {}
VkSemaphore semaphore() const { return fSemaphore; }
static void AcquireMutex() { gMutex.acquire(); }
static void ReleaseMutex() { gMutex.release(); }
bool shouldSignal() const {
return !fHasBeenSubmittedToQueueForSignal;
}
bool shouldWait() const {
return !fHasBeenSubmittedToQueueForWait;
}
void markAsSignaled() {
gMutex.assertHeld();
fHasBeenSubmittedToQueueForSignal = true;
}
void markAsWaited() {
gMutex.assertHeld();
fHasBeenSubmittedToQueueForWait = true;
}
#ifdef SK_TRACE_VK_RESOURCES
void dumpInfo() const override {
SkDebugf("GrVkSemaphore: %d (%d refs)\n", fSemaphore, this->getRefCnt());
@@ -43,20 +71,24 @@ public:
private:
void freeGPUData(const GrVkGpu* gpu) const override;
static SkMutex gMutex;
VkSemaphore fSemaphore;
bool fHasBeenSubmittedToQueueForSignal;
bool fHasBeenSubmittedToQueueForWait;
bool fIsOwned;
typedef GrVkResource INHERITED;
};
const Resource* getResource() const { return fResource; }
Resource* getResource() { return fResource; }
private:
GrVkSemaphore(const GrVkGpu* gpu, VkSemaphore semaphore, bool isOwned);
GrVkSemaphore(const GrVkGpu* gpu, VkSemaphore semaphore, bool prohibitSignal, bool prohibitWait,
bool isOwned);
void setBackendSemaphore(GrBackendSemaphore*) const override;
const Resource* fResource;
Resource* fResource;
typedef GrSemaphore INHERITED;
};
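The two fHasBeenSubmittedToQueueFor* flags added above encode the at-most-once contract stated in GrSemaphore's comment (signaled at most once, waited upon at most once), tracked independently for signal and wait. A tiny self-contained check of that behavior, using stand-in names rather than Skia's:

// --- Illustrative sketch, not part of the diff.
#include <cassert>

struct FakeResource {
    bool submittedForSignal = false;
    bool submittedForWait = false;
    bool shouldSignal() const { return !submittedForSignal; }
    bool shouldWait() const { return !submittedForWait; }
    void markAsSignaled() { submittedForSignal = true; }
    void markAsWaited() { submittedForWait = true; }
};

int main() {
    FakeResource res;
    assert(res.shouldSignal());   // the first submit may signal it
    res.markAsSignaled();
    assert(!res.shouldSignal());  // a later submit must skip the signal
    assert(res.shouldWait());     // waiting is tracked independently of signaling
    res.markAsWaited();
    assert(!res.shouldWait());
    return 0;
}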
@@ -894,12 +894,11 @@ static void test_cross_context_image(skiatest::Reporter* reporter, const GrConte
proxy.reset(nullptr);
proxySecondRef.reset(nullptr);
// Now we should still fail to be able to borrow the texture from the other context
// since only one context can ever borrow
// Now we should be able to borrow the texture from the other context
otherTestContext->makeCurrent();
otherProxy = as_IB(refImg)->asTextureProxyRef(otherCtx, GrSamplerState::ClampNearest(),
nullptr, &texColorSpace, nullptr);
REPORTER_ASSERT(reporter, !otherProxy);
REPORTER_ASSERT(reporter, otherProxy);
// Release everything
otherProxy.reset(nullptr);