Make finish flush procs work with GL.
This will be required for async readback support.

This is implemented using sync objects when available and otherwise calls
glFinish.

Relaxes the unit test requirement that providing a callback with no work to
flush always calls the proc immediately.

Bug: skia:8962
Change-Id: Ieefcab6dccc3924e50260343f01904e7303bb12b
Reviewed-on: https://skia-review.googlesource.com/c/skia/+/212198
Commit-Queue: Brian Salomon <bsalomon@google.com>
Reviewed-by: Robert Phillips <robertphillips@google.com>
commit b0d8b76c4f
parent d05c5eec42
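For orientation before the diff: on GL, when the driver supports fence sync objects, a flush that carries a finished proc no longer invokes the proc inline. Instead the proc is queued together with a freshly inserted GrGLsync and fires later, once that sync has signaled (polled from GrContext::checkAsyncWorkCompletion(), or drained on the next glFinish). Below is a minimal standalone sketch of that bookkeeping; the names are illustrative and are not the actual Skia types.

```cpp
#include <functional>
#include <list>

// Illustrative stand-ins for the fence primitives. In GrGLGpu these wrap
// glFenceSync / glClientWaitSync / glDeleteSync behind the GL interface.
struct Fence { bool signaled = true; };
static Fence* insert_fence()            { return new Fence(); }   // fence after queued work
static bool   fence_signaled(Fence* f)  { return f->signaled; }   // non-blocking poll
static void   delete_fence(Fence* f)    { delete f; }

struct PendingFinish {
    std::function<void()> callback;  // the client's "finished proc"
    Fence* fence;                    // signals once the GPU has passed the flush
};

class FinishTracker {
public:
    // At the end of a flush that carried a finished proc: defer rather than call.
    void deferFinishedProc(std::function<void()> proc) {
        fPending.push_back({std::move(proc), insert_fence()});
    }

    // Called periodically, e.g. from GrContext::checkAsyncWorkCompletion().
    // Stops at the first unsignaled fence since fences signal in FIFO order.
    void checkFinishedProcs() {
        while (!fPending.empty() && fence_signaled(fPending.front().fence)) {
            fPending.front().callback();
            delete_fence(fPending.front().fence);
            fPending.pop_front();
        }
    }

private:
    std::list<PendingFinish> fPending;
};
```

The actual CL stores a raw GrGpuFinishedProc/GrGpuFinishedContext pair plus a GrGLsync in a FinishCallback struct (see the GrGLGpu.h hunk below) rather than a std::function.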
@@ -296,6 +296,11 @@ public:
         return this->flush(info);
     }
 
+    /**
+     * Checks whether any asynchronous work is complete and if so calls related callbacks.
+     */
+    void checkAsyncWorkCompletion();
+
     // Provides access to functions that aren't part of the public API.
     GrContextPriv priv();
     const GrContextPriv priv() const;
@@ -270,6 +270,14 @@ GrSemaphoresSubmitted GrContext::flush(const GrFlushInfo& info) {
 
 ////////////////////////////////////////////////////////////////////////////////
 
+void GrContext::checkAsyncWorkCompletion() {
+    if (fGpu) {
+        fGpu->checkFinishProcs();
+    }
+}
+
+////////////////////////////////////////////////////////////////////////////////
+
 void GrContext::storeVkPipelineCacheData() {
     if (fGpu) {
         fGpu->storeVkPipelineCacheData();
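A hedged usage sketch of the new entry point follows; the surface setup and helper names are illustrative, but the calls are the ones added in this CL and exercised by the test at the bottom of the diff.

```cpp
#include "include/core/SkSurface.h"
#include "include/gpu/GrContext.h"

static void on_flush_finished(void* ctx) {
    *static_cast<bool*>(ctx) = true;  // the GPU has finished the flushed work
}

// Flush with a finished proc, then pump completion checks until it fires.
static void flush_and_poll(GrContext* context, SkSurface* surface) {
    bool done = false;
    GrFlushInfo info;
    info.fFinishedProc = on_flush_finished;
    info.fFinishedContext = &done;
    surface->flush(SkSurface::BackendSurfaceAccess::kNoAccess, info);

    while (!done) {
        // On GL with sync-object support the proc is deferred, so the client
        // must poll; a real app would do this once per frame rather than spin.
        context->checkAsyncWorkCompletion();
    }
}
```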
@@ -319,6 +319,8 @@ public:
     virtual void insertSemaphore(sk_sp<GrSemaphore> semaphore) = 0;
     virtual void waitSemaphore(sk_sp<GrSemaphore> semaphore) = 0;
 
+    virtual void checkFinishProcs() = 0;
+
     /**
      * Put this texture in a safe and known state for use across multiple GrContexts. Depending on
      * the backend, this may return a GrSemaphore. If so, other contexts should wait on that
@@ -4278,15 +4278,35 @@ void GrGLGpu::onFinishFlush(GrSurfaceProxy*, SkSurface::BackendSurfaceAccess acc
                             const GrFlushInfo& info) {
     // If we inserted semaphores during the flush, we need to call GLFlush.
     bool insertedSemaphore = info.fNumSemaphores > 0 && this->caps()->fenceSyncSupport();
-    if (insertedSemaphore) {
-        GL_CALL(Flush());
-    }
-    if (info.fFlags & kSyncCpu_GrFlushFlag) {
+    // We call finish if the client told us to sync or if we have a finished proc but don't support
+    // GLsync objects.
+    bool finish = (info.fFlags & kSyncCpu_GrFlushFlag) ||
+                  (info.fFinishedProc && !this->caps()->fenceSyncSupport());
+    if (finish) {
         GL_CALL(Finish());
-    }
-    // TODO: We should have GL actually wait until the GPU has finished work on the GPU.
-    if (info.fFinishedProc) {
-        info.fFinishedProc(info.fFinishedContext);
+        // After a finish everything previously sent to GL is done.
+        for (const auto& cb : fFinishCallbacks) {
+            cb.fCallback(cb.fContext);
+            this->deleteSync(cb.fSync);
+        }
+        fFinishCallbacks.clear();
+        if (info.fFinishedProc) {
+            info.fFinishedProc(info.fFinishedContext);
+        }
+    } else {
+        if (info.fFinishedProc) {
+            FinishCallback callback;
+            callback.fCallback = info.fFinishedProc;
+            callback.fContext = info.fFinishedContext;
+            callback.fSync = (GrGLsync)this->insertFence();
+            fFinishCallbacks.push_back(callback);
+            GL_CALL(Flush());
+        } else if (insertedSemaphore) {
+            // Must call flush after semaphores in case they are waited on another GL context.
+            GL_CALL(Flush());
+        }
+        // See if any previously inserted finish procs are good to go.
+        this->checkFinishProcs();
     }
 }
 
@@ -4308,10 +4328,15 @@ GrFence SK_WARN_UNUSED_RESULT GrGLGpu::insertFence() {
     return (GrFence)sync;
 }
 
-bool GrGLGpu::waitFence(GrFence fence, uint64_t timeout) {
+bool GrGLGpu::waitSync(GrGLsync sync, uint64_t timeout, bool flush) {
+    GrGLbitfield flags = flush ? GR_GL_SYNC_FLUSH_COMMANDS_BIT : 0;
     GrGLenum result;
-    GL_CALL_RET(result, ClientWaitSync((GrGLsync)fence, GR_GL_SYNC_FLUSH_COMMANDS_BIT, timeout));
-    return (GR_GL_CONDITION_SATISFIED == result);
+    GL_CALL_RET(result, ClientWaitSync(sync, flags, timeout));
+    return (GR_GL_CONDITION_SATISFIED == result || GR_GL_ALREADY_SIGNALED == result);
 }
 
+bool GrGLGpu::waitFence(GrFence fence, uint64_t timeout) {
+    return this->waitSync((GrGLsync)fence, timeout, /* flush = */ true);
+}
+
 void GrGLGpu::deleteFence(GrFence fence) const {
 
@@ -4344,6 +4369,16 @@ void GrGLGpu::waitSemaphore(sk_sp<GrSemaphore> semaphore) {
     GL_CALL(WaitSync(glSem->sync(), 0, GR_GL_TIMEOUT_IGNORED));
 }
 
+void GrGLGpu::checkFinishProcs() {
+    // Bail after the first unfinished sync since we expect they signal in the order inserted.
+    while (!fFinishCallbacks.empty() && this->waitSync(fFinishCallbacks.front().fSync,
+                                                       /* timeout = */ 0, /* flush = */ false)) {
+        fFinishCallbacks.front().fCallback(fFinishCallbacks.front().fContext);
+        this->deleteSync(fFinishCallbacks.front().fSync);
+        fFinishCallbacks.pop_front();
+    }
+}
+
 void GrGLGpu::deleteSync(GrGLsync sync) const {
     GL_CALL(DeleteSync(sync));
 }
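The hunks above go through Skia's GL interface macros (GL_CALL / GL_CALL_RET). As a rough standalone illustration of the underlying mechanism, assuming plain OpenGL 3.2 / ES 3.0 headers rather than Skia's wrappers, deferring and later polling a fence looks roughly like this:

```cpp
#include <GLES3/gl3.h>  // assumption: an ES 3.0 (or desktop GL 3.2+) header is available

// Record a fence after the queued work and flush so the commands reach the GPU.
static GLsync insert_fence() {
    GLsync sync = glFenceSync(GL_SYNC_GPU_COMMANDS_COMPLETE, 0);
    glFlush();
    return sync;
}

// Non-blocking poll: timeout 0 and no GL_SYNC_FLUSH_COMMANDS_BIT, matching the
// waitSync(..., /* timeout = */ 0, /* flush = */ false) call in checkFinishProcs().
static bool fence_signaled(GLsync sync) {
    GLenum result = glClientWaitSync(sync, 0, 0);
    return result == GL_ALREADY_SIGNALED || result == GL_CONDITION_SATISFIED;
}

static void release_fence(GLsync sync) {
    glDeleteSync(sync);
}
```

Blocking with a timeout (as GrGLGpu::waitFence does) is the same glClientWaitSync call with GL_SYNC_FLUSH_COMMANDS_BIT and a nonzero timeout.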
@@ -8,6 +8,7 @@
 #ifndef GrGLGpu_DEFINED
 #define GrGLGpu_DEFINED
 
+#include <list>
 #include "include/core/SkTypes.h"
 #include "include/private/SkTArray.h"
 #include "src/core/SkLRUCache.h"
 
@@ -167,6 +168,8 @@ public:
     void insertSemaphore(sk_sp<GrSemaphore> semaphore) override;
     void waitSemaphore(sk_sp<GrSemaphore> semaphore) override;
 
+    void checkFinishProcs() override;
+
     sk_sp<GrSemaphore> prepareTextureForCrossContextUsage(GrTexture*) override;
 
     void deleteSync(GrGLsync) const;
 
@@ -295,6 +298,8 @@ private:
     void onFinishFlush(GrSurfaceProxy*, SkSurface::BackendSurfaceAccess access,
                        const GrFlushInfo&) override;
 
+    bool waitSync(GrGLsync, uint64_t timeout, bool flush);
+
     bool copySurfaceAsDraw(GrSurface* dst, GrSurfaceOrigin dstOrigin,
                            GrSurface* src, GrSurfaceOrigin srcOrigin,
                            const SkIRect& srcRect, const SkIPoint& dstPoint);
 
@@ -665,6 +670,12 @@ private:
     std::unique_ptr<GrGLGpuRTCommandBuffer> fCachedRTCommandBuffer;
     std::unique_ptr<GrGLGpuTextureCommandBuffer> fCachedTexCommandBuffer;
 
+    struct FinishCallback {
+        GrGpuFinishedProc fCallback;
+        GrGpuFinishedContext fContext;
+        GrGLsync fSync;
+    };
+    std::list<FinishCallback> fFinishCallbacks;
     friend class GrGLPathRendering; // For accessing setTextureUnit.
 
     typedef GrGpu INHERITED;
@@ -47,6 +47,8 @@ public:
 
     void submit(GrGpuCommandBuffer* buffer) override;
 
+    void checkFinishProcs() override {}
+
 private:
     GrMockGpu(GrContext* context, const GrMockOptions&, const GrContextOptions&);
 
@@ -116,6 +116,8 @@ public:
                                             GrWrapOwnership ownership) override { return nullptr; }
     void insertSemaphore(sk_sp<GrSemaphore> semaphore) override {}
     void waitSemaphore(sk_sp<GrSemaphore> semaphore) override {}
+    // We currently call finish procs immediately in onFinishFlush().
+    void checkFinishProcs() override {}
     sk_sp<GrSemaphore> prepareTextureForCrossContextUsage(GrTexture*) override { return nullptr; }
 
     // When the Metal backend actually uses indirect command buffers, this function will actually do
@@ -164,6 +164,8 @@ public:
     // command buffer to the gpu.
     void addDrawable(std::unique_ptr<SkDrawable::GpuDrawHandler> drawable);
 
+    void checkFinishProcs() override { fResourceProvider.checkCommandBuffers(); }
+
     sk_sp<GrSemaphore> prepareTextureForCrossContextUsage(GrTexture*) override;
 
     void copyBuffer(GrVkBuffer* srcBuffer, GrVkBuffer* dstBuffer, VkDeviceSize srcOffset,
@@ -7,6 +7,7 @@
 
 #include "tests/Test.h"
 
+#include <chrono>
 #include "include/core/SkSurface.h"
 #include "include/gpu/GrContext.h"
 #include "src/gpu/GrContextPriv.h"
 
@@ -19,6 +20,21 @@ static void testing_finished_proc(void* ctx) {
     *count += 1;
 }
 
+static void busy_wait_for_callback(int* count, int expectedValue, GrContext* ctx,
+                                   skiatest::Reporter* reporter) {
+    // Busy waiting should detect that the work is done.
+    auto begin = std::chrono::steady_clock::now();
+    auto end = begin;
+    do {
+        ctx->checkAsyncWorkCompletion();
+        end = std::chrono::steady_clock::now();
+    } while (*count != expectedValue && (end - begin) < std::chrono::seconds(1));
+    if (*count != expectedValue) {
+        ERRORF(reporter, "Expected count failed to reach %d within 1 second of busy waiting.",
+               expectedValue);
+    }
+}
+
 DEF_GPUTEST_FOR_RENDERING_CONTEXTS(FlushFinishedProcTest, reporter, ctxInfo) {
     GrContext* ctx = ctxInfo.grContext();
 
@@ -39,17 +55,20 @@ DEF_GPUTEST_FOR_RENDERING_CONTEXTS(FlushFinishedProcTest, reporter, ctxInfo) {
     GrFlushInfo flushInfoFinishedProc;
     flushInfoFinishedProc.fFinishedProc = testing_finished_proc;
     flushInfoFinishedProc.fFinishedContext = (void*)&count;
-    // There is no work on the surface so flushing should immediately call the finished proc.
+    // There is no work on the surface so flushing may immediately call the finished proc.
     surface->flush(SkSurface::BackendSurfaceAccess::kNoAccess, flushInfoFinishedProc);
 
-    REPORTER_ASSERT(reporter, count == 1);
+    REPORTER_ASSERT(reporter, count == 0 || count == 1);
+    // Busy waiting should detect that the work is done.
+    busy_wait_for_callback(&count, 1, ctx, reporter);
 
     canvas->clear(SK_ColorRED);
 
     surface->flush(SkSurface::BackendSurfaceAccess::kNoAccess, flushInfoFinishedProc);
 
-    bool isVulkan = ctx->backend() == GrBackendApi::kVulkan;
-    if (isVulkan) {
+    bool expectAsyncCallback =
+            ctx->backend() == GrBackendApi::kVulkan ||
+            ((ctx->backend() == GrBackendApi::kOpenGL) && ctx->priv().caps()->fenceSyncSupport());
+    if (expectAsyncCallback) {
         // On Vulkan the command buffer we just submitted may or may not have finished immediately
         // so the finish proc may not have been called.
         REPORTER_ASSERT(reporter, count == 1 || count == 2);
 
@@ -62,7 +81,7 @@ DEF_GPUTEST_FOR_RENDERING_CONTEXTS(FlushFinishedProcTest, reporter, ctxInfo) {
     // Test flushing via the GrContext
     canvas->clear(SK_ColorBLUE);
     ctx->flush(flushInfoFinishedProc);
-    if (isVulkan) {
+    if (expectAsyncCallback) {
         // On Vulkan the command buffer we just submitted may or may not have finished immediately
         // so the finish proc may not have been called.
         REPORTER_ASSERT(reporter, count == 2 || count == 3);
 
@@ -72,9 +91,10 @@ DEF_GPUTEST_FOR_RENDERING_CONTEXTS(FlushFinishedProcTest, reporter, ctxInfo) {
     ctx->flush(flushInfoSyncCpu);
     REPORTER_ASSERT(reporter, count == 3);
 
-    // There is no work on the surface so flushing should immediately call the finished proc.
+    // There is no work on the surface so flushing may immediately call the finished proc.
     ctx->flush(flushInfoFinishedProc);
-    REPORTER_ASSERT(reporter, count == 4);
+    REPORTER_ASSERT(reporter, count == 3 || count == 4);
+    busy_wait_for_callback(&count, 4, ctx, reporter);
 
     count = 0;
     int count2 = 0;
 
@@ -84,8 +104,7 @@ DEF_GPUTEST_FOR_RENDERING_CONTEXTS(FlushFinishedProcTest, reporter, ctxInfo) {
     // finished call from this proc isn't called till the previous surface flush also is finished.
     flushInfoFinishedProc.fFinishedContext = (void*)&count2;
    ctx->flush(flushInfoFinishedProc);
 
-    REPORTER_ASSERT(reporter, count == count2);
+    REPORTER_ASSERT(reporter, count <= 1 && count2 <= count);
 
     ctx->flush(flushInfoSyncCpu);