Remove background reset of command pools in Vulkan.

In practice we have seen that the cost of spinning up a thread and
sending over this small bit of work is not worth the savings of moving
the command pool reset to another thread. Thus we don't currently have
any clients taking advantage of this feature. For now I am removing it;
we can add it back later if we find a better use case or have more
total work to send to a helper thread.
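
To make the tradeoff concrete, here is a minimal standalone sketch,
not Skia code: std::async stands in for Skia's SkTaskGroup, and
tinyTask for the command pool reset.

    #include <chrono>
    #include <cstdio>
    #include <future>

    // Stand-in for a command pool reset: a few microseconds of work.
    static void tinyTask() {
        volatile int sink = 0;
        for (int i = 0; i < 1000; ++i) { sink = sink + i; }
    }

    int main() {
        using Clock = std::chrono::steady_clock;
        using std::chrono::duration_cast;
        using std::chrono::microseconds;

        auto t0 = Clock::now();
        tinyTask();                                      // inline: no handoff cost
        auto t1 = Clock::now();

        auto t2 = Clock::now();
        std::async(std::launch::async, tinyTask).wait(); // thread spin-up + handoff
        auto t3 = Clock::now();

        std::printf("inline:    %lld us\n",
                    (long long)duration_cast<microseconds>(t1 - t0).count());
        std::printf("offloaded: %lld us\n",
                    (long long)duration_cast<microseconds>(t3 - t2).count());
        return 0;
    }

For work this small the handoff typically costs more than the work
itself, which is the behavior described above.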

Also in this change I reverse the order of resetting the VkCommandPool
and freeing the resources attached to the pool, to work around a driver
bug.
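
Illustrative sketch of the new ordering (TrackedResource and
resetPoolSafely are hypothetical names for this example, not Skia's
actual code):

    #include <memory>
    #include <vector>
    #include <vulkan/vulkan.h>

    // Hypothetical stand-in for the GPU objects (buffers, images, etc.)
    // referenced by the pool's recorded command buffers.
    struct TrackedResource { /* owns and destroys a Vulkan handle */ };

    void resetPoolSafely(VkDevice device, VkCommandPool pool,
                         std::vector<std::unique_ptr<TrackedResource>>* tracked) {
        // Reset first: the buggy drivers were seen touching objects
        // referenced by the pool's command buffers during the reset itself.
        vkResetCommandPool(device, pool, 0);
        // Only after the reset do we free the resources the recorded
        // commands referred to.
        tracked->clear();
    }

Releasing first is also valid, but resetting first means a driver that
incorrectly reads command buffer state during vkResetCommandPool never
sees already-destroyed objects.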

Bug: chromium:1161411
Change-Id: I8c399ecd8005ada29090902ba9b18b6837096716
Reviewed-on: https://skia-review.googlesource.com/c/skia/+/477659
Reviewed-by: Jim Van Verth <jvanverth@google.com>
Reviewed-by: Brian Salomon <bsalomon@google.com>
Commit-Queue: Greg Daniel <egdaniel@google.com>
Greg Daniel 2021-11-29 15:29:39 -05:00 committed by SkCQ
parent 6a13ff1c8d
commit 6e523fdd6e
4 changed files with 30 additions and 51 deletions

GrVkCommandPool.cpp

@@ -77,14 +77,25 @@ void GrVkCommandPool::close() {
 }

 void GrVkCommandPool::reset(GrVkGpu* gpu) {
     TRACE_EVENT0("skia.gpu", TRACE_FUNC);
     SkASSERT(!fOpen);
-    fOpen = true;
     // We can't use the normal result macro calls here because we may call reset on a different
     // thread and we can't be modifying the lost state on the GrVkGpu. We just call
     // vkResetCommandPool and assume the "next" vulkan call will catch the lost device.
     SkDEBUGCODE(VkResult result = )GR_VK_CALL(gpu->vkInterface(),
                                               ResetCommandPool(gpu->device(), fCommandPool, 0));
     SkASSERT(result == VK_SUCCESS || result == VK_ERROR_DEVICE_LOST);
+
+    // It should be safe to release the resources before actually resetting the VkCommandPool.
+    // However, on qualcomm devices running R drivers there was a few months period where the driver
+    // had a bug which it incorrectly was accessing objects on the command buffer while it was being
+    // reset. If these objects were already destroyed (which is a valid thing to do) it would crash.
+    // So to be safe we do the reset first since it doesn't really matter when single threaded. If
+    // we ever add back in threaded resets we'll want to add checks to make sure everything happens
+    // in the right order (and probably do single threaded resets on bad devices).
+    this->releaseResources();
+
+    fOpen = true;
 }

 void GrVkCommandPool::releaseResources() {
void GrVkCommandPool::releaseResources() {

GrVkCommandPool.h

@@ -28,7 +28,6 @@ public:
     void reset(GrVkGpu* gpu);

-    void releaseResources();

     GrVkPrimaryCommandBuffer* getPrimaryCommandBuffer() { return fPrimaryCommandBuffer.get(); }
@@ -55,6 +54,8 @@ private:
     GrVkCommandPool(GrVkGpu* gpu, VkCommandPool commandPool, GrVkPrimaryCommandBuffer*);

+    void releaseResources();
+
     void freeGPUData() const override;

     bool fOpen = true;

GrVkResourceProvider.cpp

@@ -398,7 +398,6 @@ void GrVkResourceProvider::recycleDescriptorSet(const GrVkDescriptorSet* descSet
 }

 GrVkCommandPool* GrVkResourceProvider::findOrCreateCommandPool() {
-    SkAutoMutexExclusive lock(fBackgroundMutex);
     GrVkCommandPool* result;
     if (fAvailableCommandPools.count()) {
         result = fAvailableCommandPools.back();
@@ -440,10 +439,17 @@ void GrVkResourceProvider::checkCommandBuffers() {
             GrVkPrimaryCommandBuffer* buffer = pool->getPrimaryCommandBuffer();
             if (buffer->finished(fGpu)) {
                 fActiveCommandPools.removeShuffle(i);
-                // This passes ownership of the pool to the backgroundReset call. The pool should
-                // not be used again from this function.
-                // TODO: We should see if we can use sk_sps here to make this more explicit.
-                this->backgroundReset(pool);
+                SkASSERT(pool->unique());
+                pool->reset(fGpu);
+                // After resetting the pool (specifically releasing the pool's resources) we may
+                // have called a client callback proc which may have disconnected the GrVkGpu. In
+                // that case we do not want to push the pool back onto the cache, but instead just
+                // drop the pool.
+                if (fGpu->disconnected()) {
+                    pool->unref();
+                    return;
+                }
+                fAvailableCommandPools.push_back(pool);
             }
         }
     }
 }
@@ -506,14 +512,11 @@ void GrVkResourceProvider::destroyResources() {
     }
     fActiveCommandPools.reset();

-    {
-        SkAutoMutexExclusive lock(fBackgroundMutex);
-        for (GrVkCommandPool* pool : fAvailableCommandPools) {
-            SkASSERT(pool->unique());
-            pool->unref();
-        }
-        fAvailableCommandPools.reset();
-    }
+    for (GrVkCommandPool* pool : fAvailableCommandPools) {
+        SkASSERT(pool->unique());
+        pool->unref();
+    }
+    fAvailableCommandPools.reset();

     // We must release/destroy all command buffers and pipeline states before releasing the
     // GrVkDescriptorSetManagers. Additionally, we must release all uniform buffers since they hold
@@ -526,7 +529,6 @@ void GrVkResourceProvider::destroyResources() {
 }

 void GrVkResourceProvider::releaseUnlockedBackendObjects() {
-    SkAutoMutexExclusive lock(fBackgroundMutex);
     for (GrVkCommandPool* pool : fAvailableCommandPools) {
         SkASSERT(pool->unique());
         pool->unref();
@@ -534,35 +536,6 @@ void GrVkResourceProvider::releaseUnlockedBackendObjects() {
     fAvailableCommandPools.reset();
 }

-void GrVkResourceProvider::backgroundReset(GrVkCommandPool* pool) {
-    TRACE_EVENT0("skia.gpu", TRACE_FUNC);
-    SkASSERT(pool->unique());
-    pool->releaseResources();
-    // After releasing resources we may have called a client callback proc which may have
-    // disconnected the GrVkGpu. In that case we do not want to push the pool back onto the cache,
-    // but instead just drop the pool.
-    if (fGpu->disconnected()) {
-        pool->unref();
-        return;
-    }
-    SkTaskGroup* taskGroup = fGpu->getContext()->priv().getTaskGroup();
-    if (taskGroup) {
-        taskGroup->add([this, pool]() {
-            this->reset(pool);
-        });
-    } else {
-        this->reset(pool);
-    }
-}
-
-void GrVkResourceProvider::reset(GrVkCommandPool* pool) {
-    TRACE_EVENT0("skia.gpu", TRACE_FUNC);
-    SkASSERT(pool->unique());
-    pool->reset(fGpu);
-    SkAutoMutexExclusive lock(fBackgroundMutex);
-    fAvailableCommandPools.push_back(pool);
-}
-
 void GrVkResourceProvider::storePipelineCacheData() {
     if (this->pipelineCache() == VK_NULL_HANDLE) {
         return;

GrVkResourceProvider.h

@@ -215,10 +215,6 @@ public:
     // objects from the cache are probably not worth the complexity of safely releasing them.
     void releaseUnlockedBackendObjects();

-    void backgroundReset(GrVkCommandPool* pool);
-
-    void reset(GrVkCommandPool* pool);
-
 #if GR_TEST_UTILS
     void resetShaderCacheForTesting() const { fPipelineStateCache->release(); }
 #endif
@@ -315,10 +311,8 @@ private:
     // Array of command pools that we are waiting on
     SkSTArray<4, GrVkCommandPool*, true> fActiveCommandPools;

-    SkMutex fBackgroundMutex;
-
     // Array of available command pools that are not in flight
-    SkSTArray<4, GrVkCommandPool*, true> fAvailableCommandPools SK_GUARDED_BY(fBackgroundMutex);
+    SkSTArray<4, GrVkCommandPool*, true> fAvailableCommandPools;

     // Stores GrVkSampler objects that we've already created so we can reuse them across multiple
     // GrVkPipelineStates