Internal optimization with m_pBlockVectors

This commit is contained in:
Adam Sawicki 2021-12-02 16:39:27 +01:00
parent ad9c5bf8d7
commit b4d341de13

View File

@ -13775,6 +13775,8 @@ void VmaDefragmentationContext_T::AddAllocations(
pBlockVectorDefragCtx = m_DefaultPoolContexts[memTypeIndex]; pBlockVectorDefragCtx = m_DefaultPoolContexts[memTypeIndex];
if(!pBlockVectorDefragCtx) if(!pBlockVectorDefragCtx)
{ {
VMA_ASSERT(m_hAllocator->m_pBlockVectors[memTypeIndex] && "Trying to use unsupported memory type!");
pBlockVectorDefragCtx = vma_new(m_hAllocator, VmaBlockVectorDefragmentationContext)( pBlockVectorDefragCtx = vma_new(m_hAllocator, VmaBlockVectorDefragmentationContext)(
m_hAllocator, m_hAllocator,
VMA_NULL, // hCustomPool VMA_NULL, // hCustomPool
@ -14770,24 +14772,27 @@ VmaAllocator_T::VmaAllocator_T(const VmaAllocatorCreateInfo* pCreateInfo) :
for(uint32_t memTypeIndex = 0; memTypeIndex < GetMemoryTypeCount(); ++memTypeIndex) for(uint32_t memTypeIndex = 0; memTypeIndex < GetMemoryTypeCount(); ++memTypeIndex)
{ {
const VkDeviceSize preferredBlockSize = CalcPreferredBlockSize(memTypeIndex); // Create only supported types
if((m_GlobalMemoryTypeBits & (1u << memTypeIndex)) != 0)
m_pBlockVectors[memTypeIndex] = vma_new(this, VmaBlockVector)( {
this, const VkDeviceSize preferredBlockSize = CalcPreferredBlockSize(memTypeIndex);
VK_NULL_HANDLE, // hParentPool m_pBlockVectors[memTypeIndex] = vma_new(this, VmaBlockVector)(
memTypeIndex, this,
preferredBlockSize, VK_NULL_HANDLE, // hParentPool
0, memTypeIndex,
SIZE_MAX, preferredBlockSize,
GetBufferImageGranularity(), 0,
pCreateInfo->frameInUseCount, SIZE_MAX,
false, // explicitBlockSize GetBufferImageGranularity(),
false, // linearAlgorithm pCreateInfo->frameInUseCount,
0.5f, // priority (0.5 is the default per Vulkan spec) false, // explicitBlockSize
GetMemoryTypeMinAlignment(memTypeIndex), // minAllocationAlignment false, // linearAlgorithm
VMA_NULL); // // pMemoryAllocateNext 0.5f, // priority (0.5 is the default per Vulkan spec)
// No need to call m_pBlockVectors[memTypeIndex][blockVectorTypeIndex]->CreateMinBlocks here, GetMemoryTypeMinAlignment(memTypeIndex), // minAllocationAlignment
// because minBlockCount is 0. VMA_NULL); // pMemoryAllocateNext
// No need to call m_pBlockVectors[memTypeIndex][blockVectorTypeIndex]->CreateMinBlocks here,
// because minBlockCount is 0.
}
} }
} }
@ -15114,7 +15119,7 @@ VkResult VmaAllocator_T::AllocateMemoryOfType(
} }
VmaBlockVector* const blockVector = m_pBlockVectors[memTypeIndex]; VmaBlockVector* const blockVector = m_pBlockVectors[memTypeIndex];
VMA_ASSERT(blockVector); VMA_ASSERT(blockVector && "Trying to use unsupported memory type!");
const VkDeviceSize preferredBlockSize = blockVector->GetPreferredBlockSize(); const VkDeviceSize preferredBlockSize = blockVector->GetPreferredBlockSize();
bool preferDedicatedMemory = bool preferDedicatedMemory =
@ -15652,6 +15657,7 @@ void VmaAllocator_T::FreeMemory(
{ {
const uint32_t memTypeIndex = allocation->GetMemoryTypeIndex(); const uint32_t memTypeIndex = allocation->GetMemoryTypeIndex();
pBlockVector = m_pBlockVectors[memTypeIndex]; pBlockVector = m_pBlockVectors[memTypeIndex];
VMA_ASSERT(pBlockVector && "Trying to free memory of unsupported type!");
} }
pBlockVector->Free(allocation); pBlockVector->Free(allocation);
} }
@ -15685,8 +15691,8 @@ void VmaAllocator_T::CalculateStats(VmaStats* pStats)
for(uint32_t memTypeIndex = 0; memTypeIndex < GetMemoryTypeCount(); ++memTypeIndex) for(uint32_t memTypeIndex = 0; memTypeIndex < GetMemoryTypeCount(); ++memTypeIndex)
{ {
VmaBlockVector* const pBlockVector = m_pBlockVectors[memTypeIndex]; VmaBlockVector* const pBlockVector = m_pBlockVectors[memTypeIndex];
VMA_ASSERT(pBlockVector); if (pBlockVector != VMA_NULL)
pBlockVector->AddStats(pStats); pBlockVector->AddStats(pStats);
} }
// Process custom pools. // Process custom pools.
@ -16054,10 +16060,9 @@ VkResult VmaAllocator_T::CheckCorruption(uint32_t memoryTypeBits)
// Process default pools. // Process default pools.
for(uint32_t memTypeIndex = 0; memTypeIndex < GetMemoryTypeCount(); ++memTypeIndex) for(uint32_t memTypeIndex = 0; memTypeIndex < GetMemoryTypeCount(); ++memTypeIndex)
{ {
if(((1u << memTypeIndex) & memoryTypeBits) != 0) VmaBlockVector* const pBlockVector = m_pBlockVectors[memTypeIndex];
if(pBlockVector != VMA_NULL)
{ {
VmaBlockVector* const pBlockVector = m_pBlockVectors[memTypeIndex];
VMA_ASSERT(pBlockVector);
VkResult localRes = pBlockVector->CheckCorruption(); VkResult localRes = pBlockVector->CheckCorruption();
switch(localRes) switch(localRes)
{ {
@ -16688,20 +16693,24 @@ void VmaAllocator_T::PrintDetailedMap(VmaJsonWriter& json)
bool allocationsStarted = false; bool allocationsStarted = false;
for(uint32_t memTypeIndex = 0; memTypeIndex < GetMemoryTypeCount(); ++memTypeIndex) for(uint32_t memTypeIndex = 0; memTypeIndex < GetMemoryTypeCount(); ++memTypeIndex)
{ {
if(m_pBlockVectors[memTypeIndex]->IsEmpty() == false) VmaBlockVector* pBlockVector = m_pBlockVectors[memTypeIndex];
if(pBlockVector != VMA_NULL)
{ {
if(allocationsStarted == false) if (pBlockVector->IsEmpty() == false)
{ {
allocationsStarted = true; if (allocationsStarted == false)
json.WriteString("DefaultPools"); {
json.BeginObject(); allocationsStarted = true;
json.WriteString("DefaultPools");
json.BeginObject();
}
json.BeginString("Type ");
json.ContinueString(memTypeIndex);
json.EndString();
pBlockVector->PrintDetailedMap(json);
} }
json.BeginString("Type ");
json.ContinueString(memTypeIndex);
json.EndString();
m_pBlockVectors[memTypeIndex]->PrintDetailedMap(json);
} }
} }
if(allocationsStarted) if(allocationsStarted)