Added internal class VmaDefragmentationAlgorithm_Fast::FreeSpaceDatabase.

Defragmentation algorithm is pretty much finished now!
This commit is contained in:
Adam Sawicki 2018-12-06 18:06:08 +01:00
parent 2af57d7f47
commit ae3970387e

View File

@ -5935,6 +5935,111 @@ private:
size_t origBlockIndex; size_t origBlockIndex;
}; };
class FreeSpaceDatabase
{
public:
FreeSpaceDatabase()
{
FreeSpace s = {};
s.blockInfoIndex = SIZE_MAX;
for(size_t i = 0; i < MAX_COUNT; ++i)
{
m_FreeSpaces[i] = s;
}
}
void Register(size_t blockInfoIndex, VkDeviceSize offset, VkDeviceSize size)
{
if(size < VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER)
{
return;
}
// Find first invalid or the smallest structure.
size_t bestIndex = SIZE_MAX;
for(size_t i = 0; i < MAX_COUNT; ++i)
{
// Empty structure.
if(m_FreeSpaces[i].blockInfoIndex == SIZE_MAX)
{
bestIndex = i;
break;
}
if(m_FreeSpaces[i].size < size &&
(bestIndex == SIZE_MAX || m_FreeSpaces[bestIndex].size > m_FreeSpaces[i].size))
{
bestIndex = i;
}
}
if(bestIndex != SIZE_MAX)
{
m_FreeSpaces[bestIndex].blockInfoIndex = blockInfoIndex;
m_FreeSpaces[bestIndex].offset = offset;
m_FreeSpaces[bestIndex].size = size;
}
}
bool Fetch(VkDeviceSize alignment, VkDeviceSize size,
size_t& outBlockInfoIndex, VkDeviceSize& outDstOffset)
{
size_t bestIndex = SIZE_MAX;
VkDeviceSize bestFreeSpaceAfter = 0;
for(size_t i = 0; i < MAX_COUNT; ++i)
{
// Structure is valid.
if(m_FreeSpaces[i].blockInfoIndex != SIZE_MAX)
{
const VkDeviceSize dstOffset = VmaAlignUp(m_FreeSpaces[i].offset, alignment);
// Allocation fits into this structure.
if(dstOffset + size <= m_FreeSpaces[i].offset + m_FreeSpaces[i].size)
{
const VkDeviceSize freeSpaceAfter = (m_FreeSpaces[i].offset + m_FreeSpaces[i].size) -
(dstOffset + size);
if(bestIndex == SIZE_MAX || freeSpaceAfter > bestFreeSpaceAfter)
{
bestIndex = i;
bestFreeSpaceAfter = freeSpaceAfter;
}
}
}
}
if(bestIndex != SIZE_MAX)
{
outBlockInfoIndex = m_FreeSpaces[bestIndex].blockInfoIndex;
outDstOffset = VmaAlignUp(m_FreeSpaces[bestIndex].offset, alignment);
if(bestFreeSpaceAfter >= VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER)
{
// Leave this structure for remaining empty space.
const VkDeviceSize alignmentPlusSize = (outDstOffset - m_FreeSpaces[bestIndex].offset) + size;
m_FreeSpaces[bestIndex].offset += alignmentPlusSize;
m_FreeSpaces[bestIndex].size -= alignmentPlusSize;
}
else
{
// This structure becomes invalid.
m_FreeSpaces[bestIndex].blockInfoIndex = SIZE_MAX;
}
return true;
}
return false;
}
private:
static const size_t MAX_COUNT = 4;
struct FreeSpace
{
size_t blockInfoIndex; // SIZE_MAX means this structure is invalid.
VkDeviceSize offset;
VkDeviceSize size;
} m_FreeSpaces[MAX_COUNT];
};
const bool m_OverlappingMoveSupported; const bool m_OverlappingMoveSupported;
uint32_t m_AllocationCount; uint32_t m_AllocationCount;
@ -5947,6 +6052,7 @@ private:
void PreprocessMetadata(); void PreprocessMetadata();
void PostprocessMetadata(); void PostprocessMetadata();
void InsertSuballoc(VmaBlockMetadata_Generic* pMetadata, const VmaSuballocation& suballoc);
}; };
struct VmaBlockDefragmentationContext struct VmaBlockDefragmentationContext
@ -12365,6 +12471,8 @@ VkResult VmaDefragmentationAlgorithm_Fast::Defragment(
// THE MAIN ALGORITHM // THE MAIN ALGORITHM
FreeSpaceDatabase freeSpaceDb;
size_t dstBlockInfoIndex = 0; size_t dstBlockInfoIndex = 0;
size_t dstOrigBlockIndex = m_BlockInfos[dstBlockInfoIndex].origBlockIndex; size_t dstOrigBlockIndex = m_BlockInfos[dstBlockInfoIndex].origBlockIndex;
VmaDeviceMemoryBlock* pDstBlock = m_pBlockVector->GetBlock(dstOrigBlockIndex); VmaDeviceMemoryBlock* pDstBlock = m_pBlockVector->GetBlock(dstOrigBlockIndex);
@ -12382,6 +12490,7 @@ VkResult VmaDefragmentationAlgorithm_Fast::Defragment(
!end && srcSuballocIt != pSrcMetadata->m_Suballocations.end(); ) !end && srcSuballocIt != pSrcMetadata->m_Suballocations.end(); )
{ {
VmaAllocation_T* const pAlloc = srcSuballocIt->hAllocation; VmaAllocation_T* const pAlloc = srcSuballocIt->hAllocation;
const VkDeviceSize srcAllocAlignment = pAlloc->GetAlignment();
const VkDeviceSize srcAllocSize = srcSuballocIt->size; const VkDeviceSize srcAllocSize = srcSuballocIt->size;
if(m_AllocationsMoved == maxAllocationsToMove || if(m_AllocationsMoved == maxAllocationsToMove ||
m_BytesMoved + srcAllocSize > maxBytesToMove) m_BytesMoved + srcAllocSize > maxBytesToMove)
@ -12390,12 +12499,82 @@ VkResult VmaDefragmentationAlgorithm_Fast::Defragment(
break; break;
} }
const VkDeviceSize srcAllocOffset = srcSuballocIt->offset; const VkDeviceSize srcAllocOffset = srcSuballocIt->offset;
VkDeviceSize dstAllocOffset = VmaAlignUp(dstOffset, pAlloc->GetAlignment());
// Try to place it in one of free spaces from the database.
size_t freeSpaceInfoIndex;
VkDeviceSize dstAllocOffset;
if(freeSpaceDb.Fetch(srcAllocAlignment, srcAllocSize,
freeSpaceInfoIndex, dstAllocOffset))
{
size_t freeSpaceOrigBlockIndex = m_BlockInfos[freeSpaceInfoIndex].origBlockIndex;
VmaDeviceMemoryBlock* pFreeSpaceBlock = m_pBlockVector->GetBlock(freeSpaceOrigBlockIndex);
VmaBlockMetadata_Generic* pFreeSpaceMetadata = (VmaBlockMetadata_Generic*)pFreeSpaceBlock->m_pMetadata;
VkDeviceSize freeSpaceBlockSize = pFreeSpaceMetadata->GetSize();
// Same block
if(freeSpaceInfoIndex == srcBlockInfoIndex)
{
VMA_ASSERT(dstAllocOffset <= srcAllocOffset);
// MOVE OPTION 1: Move the allocation inside the same block by decreasing offset.
VmaSuballocation suballoc = *srcSuballocIt;
suballoc.offset = dstAllocOffset;
suballoc.hAllocation->ChangeOffset(dstAllocOffset);
m_BytesMoved += srcAllocSize;
++m_AllocationsMoved;
VmaSuballocationList::iterator nextSuballocIt = srcSuballocIt;
++nextSuballocIt;
pSrcMetadata->m_Suballocations.erase(srcSuballocIt);
srcSuballocIt = nextSuballocIt;
InsertSuballoc(pFreeSpaceMetadata, suballoc);
VmaDefragmentationMove move = {
srcOrigBlockIndex, freeSpaceOrigBlockIndex,
srcAllocOffset, dstAllocOffset,
srcAllocSize };
moves.push_back(move);
}
// Different block
else
{
// MOVE OPTION 2: Move the allocation to a different block.
VMA_ASSERT(freeSpaceInfoIndex < srcBlockInfoIndex);
VmaSuballocation suballoc = *srcSuballocIt;
suballoc.offset = dstAllocOffset;
suballoc.hAllocation->ChangeBlockAllocation(m_hAllocator, pFreeSpaceBlock, dstAllocOffset);
m_BytesMoved += srcAllocSize;
++m_AllocationsMoved;
VmaSuballocationList::iterator nextSuballocIt = srcSuballocIt;
++nextSuballocIt;
pSrcMetadata->m_Suballocations.erase(srcSuballocIt);
srcSuballocIt = nextSuballocIt;
InsertSuballoc(pFreeSpaceMetadata, suballoc);
VmaDefragmentationMove move = {
srcOrigBlockIndex, freeSpaceOrigBlockIndex,
srcAllocOffset, dstAllocOffset,
srcAllocSize };
moves.push_back(move);
}
}
else
{
dstAllocOffset = VmaAlignUp(dstOffset, srcAllocAlignment);
// If the allocation doesn't fit before the end of dstBlock, forward to next block. // If the allocation doesn't fit before the end of dstBlock, forward to next block.
while(dstBlockInfoIndex < srcBlockInfoIndex && while(dstBlockInfoIndex < srcBlockInfoIndex &&
dstAllocOffset + srcAllocSize > dstBlockSize) dstAllocOffset + srcAllocSize > dstBlockSize)
{ {
// But before that, register remaining free space at the end of dst block.
freeSpaceDb.Register(dstBlockInfoIndex, dstOffset, dstBlockSize - dstOffset);
++dstBlockInfoIndex; ++dstBlockInfoIndex;
dstOrigBlockIndex = m_BlockInfos[dstBlockInfoIndex].origBlockIndex; dstOrigBlockIndex = m_BlockInfos[dstBlockInfoIndex].origBlockIndex;
pDstBlock = m_pBlockVector->GetBlock(dstOrigBlockIndex); pDstBlock = m_pBlockVector->GetBlock(dstOrigBlockIndex);
@ -12422,6 +12601,8 @@ VkResult VmaDefragmentationAlgorithm_Fast::Defragment(
if(skipOver) if(skipOver)
{ {
freeSpaceDb.Register(dstBlockInfoIndex, dstOffset, srcAllocOffset - dstOffset);
dstOffset = srcAllocOffset + srcAllocSize; dstOffset = srcAllocOffset + srcAllocSize;
++srcSuballocIt; ++srcSuballocIt;
} }
@ -12471,6 +12652,7 @@ VkResult VmaDefragmentationAlgorithm_Fast::Defragment(
} }
} }
} }
}
m_BlockInfos.clear(); m_BlockInfos.clear();
@ -12590,6 +12772,20 @@ void VmaDefragmentationAlgorithm_Fast::PostprocessMetadata()
} }
} }
// Inserts `suballoc` into pMetadata->m_Suballocations, keeping the list
// sorted by offset ascending.
void VmaDefragmentationAlgorithm_Fast::InsertSuballoc(VmaBlockMetadata_Generic* pMetadata, const VmaSuballocation& suballoc)
{
    // TODO: Optimize somehow. Remember iterator instead of searching for it linearly.
    VmaSuballocationList::iterator it = pMetadata->m_Suballocations.begin();
    // Bug fix: the advance must be part of the loop condition. Previously
    // `++it` ran only when the comparison held, so the loop spun forever
    // as soon as it reached an element with offset >= suballoc.offset.
    while(it != pMetadata->m_Suballocations.end() &&
        it->offset < suballoc.offset)
    {
        ++it;
    }
    pMetadata->m_Suballocations.insert(it, suballoc);
}
//////////////////////////////////////////////////////////////////////////////// ////////////////////////////////////////////////////////////////////////////////
// VmaBlockVectorDefragmentationContext // VmaBlockVectorDefragmentationContext