Merge pull request #109 from JustSid/master

Fixed a race condition with incremental defragmentation.
commit a39951c716
Adam Sawicki, 2020-03-31 17:01:25 +02:00, committed via GitHub
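For context, the sequence that reaches the fixed code path looks roughly like the sketch below. It is written against the VMA 2.x-era incremental defragmentation API (vmaDefragmentationBegin, vmaBeginDefragmentationPass/vmaEndDefragmentationPass, vmaDefragmentationEnd); the pass-capacity convention and the return-code handling are assumptions based on that era's docs, and RunIncrementalDefrag, allocator, and allocations are hypothetical names, not part of this commit.

#include <cstdint>
#include <vector>
#include "vk_mem_alloc.h"

// Hypothetical driver for an incremental defragmentation session. `allocator`
// and `allocations` are assumed to already exist; GPU copies, command-buffer
// submission, and error handling are elided.
void RunIncrementalDefrag(VmaAllocator allocator, std::vector<VmaAllocation>& allocations)
{
    VmaDefragmentationInfo2 info = {};
    info.flags = VMA_DEFRAGMENTATION_FLAG_INCREMENTAL;
    info.allocationCount = static_cast<uint32_t>(allocations.size());
    info.pAllocations = allocations.data();
    info.maxCpuBytesToMove = VK_WHOLE_SIZE;
    info.maxCpuAllocationsToMove = UINT32_MAX;

    VmaDefragmentationContext ctx = VK_NULL_HANDLE;
    if(vmaDefragmentationBegin(allocator, &info, nullptr, &ctx) < 0)
    {
        return;
    }

    VmaDefragmentationPassMoveInfo moves[64];
    for(;;)
    {
        VmaDefragmentationPassInfo pass = {};
        pass.moveCount = 64; // in: capacity of `moves` (assumption about the pass API)
        pass.pMoves = moves;
        // VK_SUCCESS here is assumed to mean "nothing left to move".
        if(vmaBeginDefragmentationPass(allocator, ctx, &pass) == VK_SUCCESS)
        {
            break;
        }
        // Perform the `pass.moveCount` copies described in `moves` here.
        // Between passes the block-vector mutex is NOT held, which is why
        // the final teardown used to race with concurrent allocations.
        vmaEndDefragmentationPass(allocator, ctx);
    }

    // With this fix, the INCREMENTAL path re-acquires the write lock inside
    // DefragmentationEnd before destroying buffers and freeing empty blocks.
    vmaDefragmentationEnd(allocator, ctx);
}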


@@ -6523,6 +6523,7 @@ public:
         VkCommandBuffer commandBuffer);
     void DefragmentationEnd(
         class VmaBlockVectorDefragmentationContext* pCtx,
+        uint32_t flags,
         VmaDefragmentationStats* pStats);
     uint32_t ProcessDefragmentations(
@@ -13180,7 +13181,21 @@ void VmaBlockVector::Defragment(
 void VmaBlockVector::DefragmentationEnd(
     class VmaBlockVectorDefragmentationContext* pCtx,
+    uint32_t flags,
     VmaDefragmentationStats* pStats)
 {
+    if(flags & VMA_DEFRAGMENTATION_FLAG_INCREMENTAL && m_hAllocator->m_UseMutex)
+    {
+        VMA_ASSERT(pCtx->mutexLocked == false);
+
+        // Incremental defragmentation doesn't hold the lock, so when we enter here we don't actually have any
+        // lock protecting us. Since we mutate state here, we have to take the lock out now
+        m_Mutex.LockWrite();
+        pCtx->mutexLocked = true;
+    }
+
+    // If the mutex isn't locked we didn't do any work and there is nothing to delete.
+    if(pCtx->mutexLocked || !m_hAllocator->m_UseMutex)
     {
         // Destroy buffers.
         for(size_t blockIndex = pCtx->blockContexts.size(); blockIndex--;)
@@ -13188,8 +13203,7 @@ void VmaBlockVector::DefragmentationEnd(
             VmaBlockDefragmentationContext &blockCtx = pCtx->blockContexts[blockIndex];
             if(blockCtx.hBuffer)
             {
-                (*m_hAllocator->GetVulkanFunctions().vkDestroyBuffer)(
-                    m_hAllocator->m_hDevice, blockCtx.hBuffer, m_hAllocator->GetAllocationCallbacks());
+                (*m_hAllocator->GetVulkanFunctions().vkDestroyBuffer)(m_hAllocator->m_hDevice, blockCtx.hBuffer, m_hAllocator->GetAllocationCallbacks());
             }
         }
@@ -13197,6 +13211,7 @@ void VmaBlockVector::DefragmentationEnd(
         {
             FreeEmptyBlocks(pStats);
         }
+    }
 
     if(pCtx->mutexLocked)
     {
@@ -14117,7 +14132,7 @@ VmaDefragmentationContext_T::~VmaDefragmentationContext_T()
     for(size_t i = m_CustomPoolContexts.size(); i--; )
     {
         VmaBlockVectorDefragmentationContext* pBlockVectorCtx = m_CustomPoolContexts[i];
-        pBlockVectorCtx->GetBlockVector()->DefragmentationEnd(pBlockVectorCtx, m_pStats);
+        pBlockVectorCtx->GetBlockVector()->DefragmentationEnd(pBlockVectorCtx, m_Flags, m_pStats);
         vma_delete(m_hAllocator, pBlockVectorCtx);
     }
     for(size_t i = m_hAllocator->m_MemProps.memoryTypeCount; i--; )
@@ -14125,7 +14140,7 @@ VmaDefragmentationContext_T::~VmaDefragmentationContext_T()
         VmaBlockVectorDefragmentationContext* pBlockVectorCtx = m_DefaultPoolContexts[i];
         if(pBlockVectorCtx)
         {
-            pBlockVectorCtx->GetBlockVector()->DefragmentationEnd(pBlockVectorCtx, m_pStats);
+            pBlockVectorCtx->GetBlockVector()->DefragmentationEnd(pBlockVectorCtx, m_Flags, m_pStats);
             vma_delete(m_hAllocator, pBlockVectorCtx);
         }
     }
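Design note: the non-incremental path enters DefragmentationEnd already holding the block-vector write mutex (pCtx->mutexLocked == true), while the incremental path deliberately drops it between passes. The fix makes the teardown take the lock out itself, and skips the teardown when the mutex was never taken, which means no work was done. A standalone reduction of that pattern, using hypothetical names rather than VMA's actual types:

#include <cassert>
#include <shared_mutex>

// Hypothetical reduction of the locking pattern in the fix -- not VMA's real
// types. An End() step that mutates shared state can be reached either with
// the write lock already held (non-incremental) or without it (incremental).
struct DefragSession
{
    std::shared_mutex& rwLock; // stands in for VmaBlockVector::m_Mutex
    bool useMutex;             // stands in for m_hAllocator->m_UseMutex
    bool incremental;          // stands in for VMA_DEFRAGMENTATION_FLAG_INCREMENTAL
    bool lockHeld = false;     // stands in for pCtx->mutexLocked

    void End()
    {
        if(incremental && useMutex)
        {
            assert(!lockHeld);
            // The incremental passes released the lock, so take it out now,
            // before mutating shared state. This is the race being fixed.
            rwLock.lock();
            lockHeld = true;
        }
        // If the lock was never taken (and locking is enabled), no work was
        // done and there is nothing to tear down -- mirrors
        // `if(pCtx->mutexLocked || !m_hAllocator->m_UseMutex)` in the diff.
        if(lockHeld || !useMutex)
        {
            // ... destroy staging buffers, free emptied blocks ...
        }
        if(lockHeld)
        {
            rwLock.unlock();
            lockHeld = false;
        }
    }
};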